diff --git a/.circleci/config.yml b/.circleci/config.yml
index 0364e99fa26..4a69a4a4964 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,15 +1,17 @@
version: 2.1
orbs:
go: gotest/tools@0.0.13
+ aws-cli: circleci/aws-cli@1.3.2
+ packer: salaxander/packer@0.0.3
executors:
golang:
docker:
- - image: circleci/golang:1.14.6
+ - image: circleci/golang:1.16.4
resource_class: 2xlarge
ubuntu:
docker:
- - image: ubuntu:19.10
+ - image: ubuntu:20.04
commands:
install-deps:
@@ -110,7 +112,7 @@ jobs:
- run:
command: make debug
- test: &test
+ test:
description: |
Run tests with gotestsum.
parameters: &test-params
@@ -121,20 +123,20 @@ jobs:
type: string
default: "-timeout 30m"
description: Flags passed to go test.
- packages:
+ target:
type: string
default: "./..."
description: Import paths of packages to be tested.
- winpost-test:
+ proofs-log-test:
type: string
default: "0"
- test-suite-name:
+ suite:
type: string
default: unit
description: Test suite name to report to CircleCI.
gotestsum-format:
type: string
- default: pkgname-and-test-fails
+ default: standard-verbose
description: gotestsum format. https://github.com/gotestyourself/gotestsum#format
coverage:
type: string
@@ -142,7 +144,7 @@ jobs:
description: Coverage flag. Set to the empty string to disable.
codecov-upload:
type: boolean
- default: false
+ default: true
description: |
Upload coverage report to https://codecov.io/. Requires the codecov API token to be
set as an environment variable for private projects.
@@ -160,24 +162,24 @@ jobs:
- run:
name: go test
environment:
- LOTUS_TEST_WINDOW_POST: << parameters.winpost-test >>
+ TEST_RUSTPROOFS_LOGS: << parameters.proofs-log-test >>
SKIP_CONFORMANCE: "1"
command: |
- mkdir -p /tmp/test-reports/<< parameters.test-suite-name >>
+ mkdir -p /tmp/test-reports/<< parameters.suite >>
mkdir -p /tmp/test-artifacts
gotestsum \
--format << parameters.gotestsum-format >> \
- --junitfile /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml \
- --jsonfile /tmp/test-artifacts/<< parameters.test-suite-name >>.json \
+ --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
+ --jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \
-- \
<< parameters.coverage >> \
<< parameters.go-test-flags >> \
- << parameters.packages >>
+ << parameters.target >>
no_output_timeout: 30m
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
- path: /tmp/test-artifacts/<< parameters.test-suite-name >>.json
+ path: /tmp/test-artifacts/<< parameters.suite >>.json
- when:
condition: << parameters.codecov-upload >>
steps:
@@ -188,18 +190,6 @@ jobs:
command: |
bash <(curl -s https://codecov.io/bash)
- test-chain:
- <<: *test
- test-node:
- <<: *test
- test-storage:
- <<: *test
- test-cli:
- <<: *test
- test-short:
- <<: *test
- test-window-post:
- <<: *test
test-conformance:
description: |
Run tests using a corpus of interoperable test vectors for Filecoin
@@ -262,24 +252,97 @@ jobs:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-artifacts/conformance-coverage.html
+ build-ntwk-calibration:
+ description: |
+ Compile lotus binaries for the calibration network
+ parameters:
+ <<: *test-params
+ executor: << parameters.executor >>
+ steps:
+ - install-deps
+ - prepare
+ - run: make calibnet
+ - run: mkdir linux-calibrationnet && mv lotus lotus-miner lotus-worker linux-calibrationnet
+ - persist_to_workspace:
+ root: "."
+ paths:
+ - linux-calibrationnet
+ build-ntwk-butterfly:
+ description: |
+ Compile lotus binaries for the butterfly network
+ parameters:
+ <<: *test-params
+ executor: << parameters.executor >>
+ steps:
+ - install-deps
+ - prepare
+ - run: make butterflynet
+ - run: mkdir linux-butterflynet && mv lotus lotus-miner lotus-worker linux-butterflynet
+ - persist_to_workspace:
+ root: "."
+ paths:
+ - linux-butterflynet
+ build-ntwk-nerpa:
+ description: |
+ Compile lotus binaries for the nerpa network
+ parameters:
+ <<: *test-params
+ executor: << parameters.executor >>
+ steps:
+ - install-deps
+ - prepare
+ - run: make nerpanet
+ - run: mkdir linux-nerpanet && mv lotus lotus-miner lotus-worker linux-nerpanet
+ - persist_to_workspace:
+ root: "."
+ paths:
+ - linux-nerpanet
build-lotus-soup:
description: |
- Compile `lotus-soup` Testground test plan using the current version of Lotus.
+ Compile `lotus-soup` Testground test plan
parameters:
<<: *test-params
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- - run: cd extern/oni && git submodule sync
- - run: cd extern/oni && git submodule update --init
- run: cd extern/filecoin-ffi && make
- run:
- name: "replace lotus, filecoin-ffi, blst and fil-blst deps"
- command: cd extern/oni/lotus-soup && go mod edit -replace github.com/filecoin-project/lotus=../../../ && go mod edit -replace github.com/filecoin-project/filecoin-ffi=../../filecoin-ffi && go mod edit -replace github.com/supranational/blst=../../fil-blst/blst && go mod edit -replace github.com/filecoin-project/fil-blst=../../fil-blst
+ name: "go get lotus@master"
+ command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../.. && go mod tidy
- run:
name: "build lotus-soup testplan"
- command: pushd extern/oni/lotus-soup && go build -tags=testground .
+ command: pushd testplans/lotus-soup && go build -tags=testground .
+ trigger-testplans:
+ description: |
+ Trigger `lotus-soup` test cases on TaaS
+ parameters:
+ <<: *test-params
+ executor: << parameters.executor >>
+ steps:
+ - install-deps
+ - prepare
+ - run:
+ name: "download testground"
+ command: wget https://gist.github.com/nonsense/5fbf3167cac79945f658771aed32fc44/raw/2e17eb0debf7ec6bdf027c1bdafc2c92dd97273b/testground-d3e9603 -O ~/testground-cli && chmod +x ~/testground-cli
+ - run:
+ name: "prepare .env.toml"
+ command: pushd testplans/lotus-soup && mkdir -p $HOME/testground && cp env-ci.toml $HOME/testground/.env.toml && echo 'endpoint="https://ci.testground.ipfs.team"' >> $HOME/testground/.env.toml && echo 'user="circleci"' >> $HOME/testground/.env.toml
+ - run:
+ name: "prepare testground home dir and link test plans"
+ command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup && ln -s $(pwd)/testplans/graphsync $HOME/testground/plans/graphsync
+ - run:
+ name: "go get lotus@master"
+ command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master
+ - run:
+ name: "trigger deals baseline testplan on taas"
+ command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/baseline-k8s-3-1.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
+ - run:
+ name: "trigger payment channel stress testplan on taas"
+ command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
+ - run:
+ name: "trigger graphsync testplan on taas"
+ command: ~/testground-cli run composition -f $HOME/testground/plans/graphsync/_compositions/stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
build-macos:
@@ -294,8 +357,8 @@ jobs:
- run:
name: Install go
command: |
- curl -O https://dl.google.com/go/go1.14.2.darwin-amd64.pkg && \
- sudo installer -pkg go1.14.2.darwin-amd64.pkg -target /
+ curl -O https://dl.google.com/go/go1.16.4.darwin-amd64.pkg && \
+ sudo installer -pkg go1.16.4.darwin-amd64.pkg -target /
- run:
name: Install pkg-config
command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config
@@ -309,6 +372,15 @@ jobs:
command: |
curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq
chmod +x /usr/local/bin/jq
+ - run:
+ name: Install hwloc
+ command: |
+ mkdir ~/hwloc
+ curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz
+ cd ~/hwloc
+ tar -xvzpf hwloc-2.4.1.tar.gz
+ cd hwloc-2.4.1
+ ./configure && make && sudo make install
- restore_cache:
name: restore cargo cache
key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
@@ -334,6 +406,41 @@ jobs:
- "~/.rustup"
- "~/.cargo"
+ build-appimage:
+ machine:
+ image: ubuntu-2004:202104-01
+ steps:
+ - checkout
+ - attach_workspace:
+ at: "."
+ - run:
+ name: install appimage-builder
+ command: |
+ # docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html
+ sudo apt update
+ sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace
+ sudo curl -Lo /usr/local/bin/appimagetool https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage
+ sudo chmod +x /usr/local/bin/appimagetool
+ sudo pip3 install appimage-builder
+ - run:
+ name: install lotus dependencies
+ command: sudo apt install ocl-icd-opencl-dev libhwloc-dev
+ - run:
+ name: build appimage
+ command: |
+ sed -i "s/version: latest/version: ${CIRCLE_TAG:-latest}/" AppImageBuilder.yml
+ make appimage
+ - run:
+ name: prepare workspace
+ command: |
+ mkdir appimage
+ mv Lotus-*.AppImage appimage
+ - persist_to_workspace:
+ root: "."
+ paths:
+ - appimage
+
+
gofmt:
executor: golang
steps:
@@ -342,7 +449,7 @@ jobs:
- run:
command: "! go fmt ./... 2>&1 | read"
- cbor-gen-check:
+ gen-check:
executor: golang
steps:
- install-deps
@@ -350,7 +457,10 @@ jobs:
- run: make deps
- run: go install golang.org/x/tools/cmd/goimports
- run: go install github.com/hannahhoward/cbor-gen-for
- - run: go generate ./...
+ - run: make gen
+ - run: git --no-pager diff
+ - run: git --no-pager diff --quiet
+ - run: make docsgen-cli
- run: git --no-pager diff
- run: git --no-pager diff --quiet
@@ -359,8 +469,19 @@ jobs:
steps:
- install-deps
- prepare
+ - run: go install golang.org/x/tools/cmd/goimports
+ - run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
+ - run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner
+ - run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker
+ - run: make deps
- run: make docsgen
+ - run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full
+ - run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner
+ - run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker
- run: git --no-pager diff
+ - run: diff ../pre-openrpc-full ../post-openrpc-full
+ - run: diff ../pre-openrpc-miner ../post-openrpc-miner
+ - run: diff ../pre-openrpc-worker ../post-openrpc-worker
- run: git --no-pager diff --quiet
lint: &lint
@@ -422,6 +543,198 @@ jobs:
name: Publish release
command: ./scripts/publish-release.sh
+ publish-snapcraft:
+ description: build and push snapcraft
+ machine:
+ image: ubuntu-2004:202104-01
+ resource_class: 2xlarge
+ parameters:
+ channel:
+ type: string
+ default: "edge"
+ description: snapcraft channel
+ steps:
+ - checkout
+ - run:
+ name: install snapcraft
+ command: sudo snap install snapcraft --classic
+ - run:
+ name: create snapcraft config file
+ command: |
+ mkdir -p ~/.config/snapcraft
+ echo "$SNAPCRAFT_LOGIN_FILE" | base64 -d > ~/.config/snapcraft/snapcraft.cfg
+ - run:
+ name: build snap
+ command: snapcraft --use-lxd
+ - run:
+ name: publish snap
+ command: snapcraft push *.snap --release << parameters.channel >>
+
+ build-and-push-image:
+ description: build and push docker images to public AWS ECR registry
+ executor: aws-cli/default
+ parameters:
+ profile-name:
+ type: string
+ default: "default"
+ description: AWS profile name to be configured.
+
+ aws-access-key-id:
+ type: env_var_name
+ default: AWS_ACCESS_KEY_ID
+ description: >
+ AWS access key id for IAM role. Set this to the name of
+ the environment variable you will set to hold this
+ value, i.e. AWS_ACCESS_KEY.
+
+ aws-secret-access-key:
+ type: env_var_name
+ default: AWS_SECRET_ACCESS_KEY
+ description: >
+ AWS secret key for IAM role. Set this to the name of
+ the environment variable you will set to hold this
+ value, i.e. AWS_SECRET_ACCESS_KEY.
+
+ region:
+ type: env_var_name
+ default: AWS_REGION
+ description: >
+ Name of env var storing your AWS region information,
+ defaults to AWS_REGION
+
+ account-url:
+ type: env_var_name
+ default: AWS_ECR_ACCOUNT_URL
+ description: >
+ Env var storing Amazon ECR account URL that maps to an AWS account,
+ e.g. {awsAccountNum}.dkr.ecr.us-west-2.amazonaws.com
+ defaults to AWS_ECR_ACCOUNT_URL
+
+ dockerfile:
+ type: string
+ default: Dockerfile
+ description: Name of dockerfile to use. Defaults to Dockerfile.
+
+ path:
+ type: string
+ default: .
+ description: Path to the directory containing your Dockerfile and build context. Defaults to . (working directory).
+
+ extra-build-args:
+ type: string
+ default: ""
+ description: >
+ Extra flags to pass to docker build. For examples, see
+ https://docs.docker.com/engine/reference/commandline/build
+
+ repo:
+ type: string
+ description: Name of an Amazon ECR repository
+
+ tag:
+ type: string
+ default: "latest"
+ description: A comma-separated string containing docker image tags to build and push (default = latest)
+
+ steps:
+ - run:
+ name: Confirm that environment variables are set
+ command: |
+ if [ -z "$AWS_ACCESS_KEY_ID" ]; then
+ echo "No AWS_ACCESS_KEY_ID is set. Skipping build-and-push job ..."
+ circleci-agent step halt
+ fi
+
+ - aws-cli/setup:
+ profile-name: <<parameters.profile-name>>
+ aws-access-key-id: <<parameters.aws-access-key-id>>
+ aws-secret-access-key: <<parameters.aws-secret-access-key>>
+ aws-region: <<parameters.region>>
+
+ - run:
+ name: Log into Amazon ECR
+ command: |
+ aws ecr-public get-login-password --region $<<parameters.region>> --profile <<parameters.profile-name>> | docker login --username AWS --password-stdin $<<parameters.account-url>>
+
+ - checkout
+
+ - setup_remote_docker:
+ version: 19.03.13
+ docker_layer_caching: false
+
+ - run:
+ name: Build docker image
+ command: |
+ registry_id=$(echo $<<parameters.account-url>> | sed "s;\..*;;g")
+
+ docker_tag_args=""
+ IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
+ for tag in "${DOCKER_TAGS[@]}"; do
+ docker_tag_args="$docker_tag_args -t $<<parameters.account-url>>/<<parameters.repo>>:$tag"
+ done
+
+ docker build \
+ <<#parameters.extra-build-args>><<parameters.extra-build-args>><</parameters.extra-build-args>> \
+ -f <<parameters.path>>/<<parameters.dockerfile>> \
+ $docker_tag_args \
+ <<parameters.path>>
+
+ - run:
+ name: Push image to Amazon ECR
+ command: |
+ IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
+ for tag in "${DOCKER_TAGS[@]}"; do
+ docker push $<<parameters.account-url>>/<<parameters.repo>>:${tag}
+ done
+
+ publish-packer-mainnet:
+ description: build and push AWS IAM and DigitalOcean droplet.
+ executor:
+ name: packer/default
+ packer-version: 1.6.6
+ steps:
+ - checkout
+ - attach_workspace:
+ at: "."
+ - packer/build:
+ template: tools/packer/lotus.pkr.hcl
+ args: "-var ci_workspace_bins=./linux -var lotus_network=mainnet -var git_tag=$CIRCLE_TAG"
+ publish-packer-calibrationnet:
+ description: build and push AWS IAM and DigitalOcean droplet.
+ executor:
+ name: packer/default
+ packer-version: 1.6.6
+ steps:
+ - checkout
+ - attach_workspace:
+ at: "."
+ - packer/build:
+ template: tools/packer/lotus.pkr.hcl
+ args: "-var ci_workspace_bins=./linux-calibrationnet -var lotus_network=calibrationnet -var git_tag=$CIRCLE_TAG"
+ publish-packer-butterflynet:
+ description: build and push AWS IAM and DigitalOcean droplet.
+ executor:
+ name: packer/default
+ packer-version: 1.6.6
+ steps:
+ - checkout
+ - attach_workspace:
+ at: "."
+ - packer/build:
+ template: tools/packer/lotus.pkr.hcl
+ args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG"
+ publish-packer-nerpanet:
+ description: build and push AWS IAM and DigitalOcean droplet.
+ executor:
+ name: packer/default
+ packer-version: 1.6.6
+ steps:
+ - checkout
+ - attach_workspace:
+ at: "."
+ - packer/build:
+ template: tools/packer/lotus.pkr.hcl
+ args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG"
workflows:
version: 2.1
@@ -431,73 +744,289 @@ workflows:
concurrency: "16" # expend all docker 2xlarge CPUs.
- mod-tidy-check
- gofmt
- - cbor-gen-check
+ - gen-check
- docs-check
- test:
- codecov-upload: true
- test-suite-name: full
- - test-chain:
- codecov-upload: true
- test-suite-name: chain
- packages: "./chain/..."
- - test-node:
- codecov-upload: true
- test-suite-name: node
- packages: "./node/..."
- - test-storage:
- codecov-upload: true
- test-suite-name: storage
- packages: "./storage/... ./extern/..."
- - test-cli:
- codecov-upload: true
- test-suite-name: cli
- packages: "./cli/... ./cmd/... ./api/..."
- - test-window-post:
- go-test-flags: "-run=TestWindowedPost"
- winpost-test: "1"
- test-suite-name: window-post
- - test-short:
- go-test-flags: "--timeout 10m --short"
- test-suite-name: short
- filters:
- tags:
- only:
- - /^v\d+\.\d+\.\d+$/
+ name: test-itest-api
+ suite: itest-api
+ target: "./itests/api_test.go"
+
+ - test:
+ name: test-itest-batch_deal
+ suite: itest-batch_deal
+ target: "./itests/batch_deal_test.go"
+
+ - test:
+ name: test-itest-ccupgrade
+ suite: itest-ccupgrade
+ target: "./itests/ccupgrade_test.go"
+
+ - test:
+ name: test-itest-cli
+ suite: itest-cli
+ target: "./itests/cli_test.go"
+
+ - test:
+ name: test-itest-deadlines
+ suite: itest-deadlines
+ target: "./itests/deadlines_test.go"
+
+ - test:
+ name: test-itest-deals_concurrent
+ suite: itest-deals_concurrent
+ target: "./itests/deals_concurrent_test.go"
+
+ - test:
+ name: test-itest-deals_offline
+ suite: itest-deals_offline
+ target: "./itests/deals_offline_test.go"
+
+ - test:
+ name: test-itest-deals_power
+ suite: itest-deals_power
+ target: "./itests/deals_power_test.go"
+
+ - test:
+ name: test-itest-deals_pricing
+ suite: itest-deals_pricing
+ target: "./itests/deals_pricing_test.go"
+
+ - test:
+ name: test-itest-deals_publish
+ suite: itest-deals_publish
+ target: "./itests/deals_publish_test.go"
+
+ - test:
+ name: test-itest-deals
+ suite: itest-deals
+ target: "./itests/deals_test.go"
+
+ - test:
+ name: test-itest-gateway
+ suite: itest-gateway
+ target: "./itests/gateway_test.go"
+
+ - test:
+ name: test-itest-get_messages_in_ts
+ suite: itest-get_messages_in_ts
+ target: "./itests/get_messages_in_ts_test.go"
+
+ - test:
+ name: test-itest-multisig
+ suite: itest-multisig
+ target: "./itests/multisig_test.go"
+
+ - test:
+ name: test-itest-nonce
+ suite: itest-nonce
+ target: "./itests/nonce_test.go"
+
+ - test:
+ name: test-itest-paych_api
+ suite: itest-paych_api
+ target: "./itests/paych_api_test.go"
+
+ - test:
+ name: test-itest-paych_cli
+ suite: itest-paych_cli
+ target: "./itests/paych_cli_test.go"
+
+ - test:
+ name: test-itest-sdr_upgrade
+ suite: itest-sdr_upgrade
+ target: "./itests/sdr_upgrade_test.go"
+
+ - test:
+ name: test-itest-sector_finalize_early
+ suite: itest-sector_finalize_early
+ target: "./itests/sector_finalize_early_test.go"
+
+ - test:
+ name: test-itest-sector_miner_collateral
+ suite: itest-sector_miner_collateral
+ target: "./itests/sector_miner_collateral_test.go"
+
+ - test:
+ name: test-itest-sector_pledge
+ suite: itest-sector_pledge
+ target: "./itests/sector_pledge_test.go"
+
+ - test:
+ name: test-itest-sector_terminate
+ suite: itest-sector_terminate
+ target: "./itests/sector_terminate_test.go"
+
+ - test:
+ name: test-itest-tape
+ suite: itest-tape
+ target: "./itests/tape_test.go"
+
+ - test:
+ name: test-itest-verifreg
+ suite: itest-verifreg
+ target: "./itests/verifreg_test.go"
+
+ - test:
+ name: test-itest-wdpost_dispute
+ suite: itest-wdpost_dispute
+ target: "./itests/wdpost_dispute_test.go"
+
+ - test:
+ name: test-itest-wdpost
+ suite: itest-wdpost
+ target: "./itests/wdpost_test.go"
+
+ - test:
+ name: test-unit-cli
+ suite: utest-unit-cli
+ target: "./cli/... ./cmd/... ./api/..."
+ - test:
+ name: test-unit-node
+ suite: utest-unit-node
+ target: "./node/..."
+ - test:
+ name: test-unit-rest
+ suite: utest-unit-rest
+ target: "./api/... ./blockstore/... ./build/... ./chain/... ./cli/... ./cmd/... ./conformance/... ./extern/... ./gateway/... ./journal/... ./lib/... ./markets/... ./node/... ./paychmgr/... ./storage/... ./tools/..."
+ - test:
+ name: test-unit-storage
+ suite: utest-unit-storage
+ target: "./storage/... ./extern/..."
+ - test:
+ go-test-flags: "-run=TestMulticoreSDR"
+ suite: multicore-sdr-check
+ target: "./extern/sector-storage/ffiwrapper"
+ proofs-log-test: "1"
- test-conformance:
- test-suite-name: conformance
- packages: "./conformance"
+ suite: conformance
+ codecov-upload: false
+ target: "./conformance"
- test-conformance:
name: test-conformance-bleeding-edge
- test-suite-name: conformance-bleeding-edge
- packages: "./conformance"
+ codecov-upload: false
+ suite: conformance-bleeding-edge
+ target: "./conformance"
vectors-branch: master
- - build-lotus-soup
+ - trigger-testplans:
+ filters:
+ branches:
+ only:
+ - master
- build-debug
- build-all:
- requires:
- - test-short
filters:
tags:
only:
- - /^v\d+\.\d+\.\d+$/
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - build-ntwk-calibration:
+ filters:
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - build-ntwk-butterfly:
+ filters:
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - build-ntwk-nerpa:
+ filters:
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - build-lotus-soup
- build-macos:
- requires:
- - test-short
filters:
branches:
ignore:
- /.*/
tags:
only:
- - /^v\d+\.\d+\.\d+$/
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - build-appimage:
+ filters:
+ branches:
+ ignore:
+ - /.*/
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
- publish:
requires:
- build-all
- build-macos
+ - build-appimage
+ filters:
+ branches:
+ ignore:
+ - /.*/
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - build-and-push-image:
+ dockerfile: Dockerfile.lotus
+ path: .
+ repo: lotus-dev
+ tag: '${CIRCLE_SHA1:0:8}'
+ - publish-packer-mainnet:
+ requires:
+ - build-all
+ filters:
+ branches:
+ ignore:
+ - /.*/
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - publish-packer-calibrationnet:
+ requires:
+ - build-ntwk-calibration
+ filters:
+ branches:
+ ignore:
+ - /.*/
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - publish-packer-butterflynet:
+ requires:
+ - build-ntwk-butterfly
+ filters:
+ branches:
+ ignore:
+ - /.*/
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - publish-packer-nerpanet:
+ requires:
+ - build-ntwk-nerpa
+ filters:
+ branches:
+ ignore:
+ - /.*/
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - publish-snapcraft:
+ name: publish-snapcraft-stable
+ channel: stable
filters:
branches:
ignore:
- /.*/
tags:
only:
- - /^v\d+\.\d+\.\d+$/
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+
+ nightly:
+ triggers:
+ - schedule:
+ cron: "0 0 * * *"
+ filters:
+ branches:
+ only:
+ - master
+ jobs:
+ - publish-snapcraft:
+ name: publish-snapcraft-nightly
+ channel: edge
diff --git a/.circleci/gen.go b/.circleci/gen.go
new file mode 100644
index 00000000000..844348e29ae
--- /dev/null
+++ b/.circleci/gen.go
@@ -0,0 +1,136 @@
+package main
+
+import (
+ "embed"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "text/template"
+)
+
+//go:generate go run ./gen.go ..
+
+//go:embed template.yml
+var templateFile embed.FS
+
+type (
+ dirs = []string
+ suite = string
+)
+
+// groupedUnitTests maps suite names to top-level directories that should be
+// included in that suite. The program adds an implicit group "rest" that
+// includes all other top-level directories.
+var groupedUnitTests = map[suite]dirs{
+ "unit-node": {"node"},
+ "unit-storage": {"storage", "extern"},
+ "unit-cli": {"cli", "cmd", "api"},
+}
+
+func main() {
+ if len(os.Args) != 2 {
+ panic("expected path to repo as argument")
+ }
+
+ repo := os.Args[1]
+
+ tmpl := template.New("template.yml")
+ tmpl.Delims("[[", "]]")
+ tmpl.Funcs(template.FuncMap{
+ "stripSuffix": func(in string) string {
+ return strings.TrimSuffix(in, "_test.go")
+ },
+ })
+ tmpl = template.Must(tmpl.ParseFS(templateFile, "*"))
+
+ // list all itests.
+ itests, err := filepath.Glob(filepath.Join(repo, "./itests/*_test.go"))
+ if err != nil {
+ panic(err)
+ }
+
+ // strip the dir from all entries.
+ for i, f := range itests {
+ itests[i] = filepath.Base(f)
+ }
+
+ // calculate the exclusion set of unit test directories to exclude because
+ // they are already included in a grouped suite.
+ var excluded = map[string]struct{}{}
+ for _, ss := range groupedUnitTests {
+ for _, s := range ss {
+ e, err := filepath.Abs(filepath.Join(repo, s))
+ if err != nil {
+ panic(err)
+ }
+ excluded[e] = struct{}{}
+ }
+ }
+
+ // all unit tests top-level dirs that are not itests, nor included in other suites.
+ var rest = map[string]struct{}{}
+ err = filepath.Walk(repo, func(path string, f os.FileInfo, err error) error {
+ // include all tests that aren't in the itests directory.
+ if strings.Contains(path, "itests") {
+ return filepath.SkipDir
+ }
+ // exclude all tests included in other suites
+ if f.IsDir() {
+ if _, ok := excluded[path]; ok {
+ return filepath.SkipDir
+ }
+ }
+ if strings.HasSuffix(path, "_test.go") {
+ rel, err := filepath.Rel(repo, path)
+ if err != nil {
+ panic(err)
+ }
+ // take the first directory
+ rest[strings.Split(rel, string(os.PathSeparator))[0]] = struct{}{}
+ }
+ return err
+ })
+ if err != nil {
+ panic(err)
+ }
+
+ // add other directories to a 'rest' suite.
+ for k := range rest {
+ groupedUnitTests["unit-rest"] = append(groupedUnitTests["unit-rest"], k)
+ }
+
+ // map iteration guarantees no order, so sort the array in-place.
+ sort.Strings(groupedUnitTests["unit-rest"])
+
+ // form the input data.
+ type data struct {
+ ItestFiles []string
+ UnitSuites map[string]string
+ }
+ in := data{
+ ItestFiles: itests,
+ UnitSuites: func() map[string]string {
+ ret := make(map[string]string)
+ for name, dirs := range groupedUnitTests {
+ for i, d := range dirs {
+ dirs[i] = fmt.Sprintf("./%s/...", d) // turn into package
+ }
+ ret[name] = strings.Join(dirs, " ")
+ }
+ return ret
+ }(),
+ }
+
+ out, err := os.Create("./config.yml")
+ if err != nil {
+ panic(err)
+ }
+ defer out.Close()
+
+ // execute the template.
+ if err := tmpl.Execute(out, in); err != nil {
+ panic(err)
+ }
+}
diff --git a/.circleci/template.yml b/.circleci/template.yml
new file mode 100644
index 00000000000..fb59f23eafe
--- /dev/null
+++ b/.circleci/template.yml
@@ -0,0 +1,902 @@
+version: 2.1
+orbs:
+ go: gotest/tools@0.0.13
+ aws-cli: circleci/aws-cli@1.3.2
+ packer: salaxander/packer@0.0.3
+
+executors:
+ golang:
+ docker:
+ - image: circleci/golang:1.16.4
+ resource_class: 2xlarge
+ ubuntu:
+ docker:
+ - image: ubuntu:20.04
+
+commands:
+ install-deps:
+ steps:
+ - go/install-ssh
+ - go/install: {package: git}
+ prepare:
+ parameters:
+ linux:
+ default: true
+ description: is a linux build environment?
+ type: boolean
+ darwin:
+ default: false
+ description: is a darwin build environment?
+ type: boolean
+ steps:
+ - checkout
+ - git_fetch_all_tags
+ - checkout
+ - when:
+ condition: << parameters.linux >>
+ steps:
+ - run: sudo apt-get update
+ - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
+ - run: git submodule sync
+ - run: git submodule update --init
+ download-params:
+ steps:
+ - restore_cache:
+ name: Restore parameters cache
+ keys:
+ - 'v25-2k-lotus-params'
+ paths:
+ - /var/tmp/filecoin-proof-parameters/
+ - run: ./lotus fetch-params 2048
+ - save_cache:
+ name: Save parameters cache
+ key: 'v25-2k-lotus-params'
+ paths:
+ - /var/tmp/filecoin-proof-parameters/
+ install_ipfs:
+ steps:
+ - run: |
+ apt update
+ apt install -y wget
+ wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz
+ wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512
+ if [ "$(sha512sum go-ipfs_v0.4.22_linux-amd64.tar.gz)" != "$(cat go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512)" ]
+ then
+ echo "ipfs failed checksum check"
+ exit 1
+ fi
+ tar -xf go-ipfs_v0.4.22_linux-amd64.tar.gz
+ mv go-ipfs/ipfs /usr/local/bin/ipfs
+ chmod +x /usr/local/bin/ipfs
+ git_fetch_all_tags:
+ steps:
+ - run:
+ name: fetch all tags
+ command: |
+ git fetch --all
+
+jobs:
+ mod-tidy-check:
+ executor: golang
+ steps:
+ - install-deps
+ - prepare
+ - go/mod-tidy-check
+
+ build-all:
+ executor: golang
+ steps:
+ - install-deps
+ - prepare
+ - run: sudo apt-get update
+ - run: sudo apt-get install npm
+ - run:
+ command: make buildall
+ - store_artifacts:
+ path: lotus
+ - store_artifacts:
+ path: lotus-miner
+ - store_artifacts:
+ path: lotus-worker
+ - run: mkdir linux && mv lotus lotus-miner lotus-worker linux/
+ - persist_to_workspace:
+ root: "."
+ paths:
+ - linux
+
+ build-debug:
+ executor: golang
+ steps:
+ - install-deps
+ - prepare
+ - run:
+ command: make debug
+
+ test:
+ description: |
+ Run tests with gotestsum.
+ parameters: &test-params
+ executor:
+ type: executor
+ default: golang
+ go-test-flags:
+ type: string
+ default: "-timeout 30m"
+ description: Flags passed to go test.
+ target:
+ type: string
+ default: "./..."
+ description: Import paths of packages to be tested.
+ proofs-log-test:
+ type: string
+ default: "0"
+ suite:
+ type: string
+ default: unit
+ description: Test suite name to report to CircleCI.
+ gotestsum-format:
+ type: string
+ default: standard-verbose
+ description: gotestsum format. https://github.com/gotestyourself/gotestsum#format
+ coverage:
+ type: string
+ default: -coverprofile=coverage.txt -coverpkg=github.com/filecoin-project/lotus/...
+ description: Coverage flag. Set to the empty string to disable.
+ codecov-upload:
+ type: boolean
+ default: true
+ description: |
+ Upload coverage report to https://codecov.io/. Requires the codecov API token to be
+ set as an environment variable for private projects.
+ executor: << parameters.executor >>
+ steps:
+ - install-deps
+ - prepare
+ - run:
+ command: make deps lotus
+ no_output_timeout: 30m
+ - download-params
+ - go/install-gotestsum:
+ gobin: $HOME/.local/bin
+ version: 0.5.2
+ - run:
+ name: go test
+ environment:
+ TEST_RUSTPROOFS_LOGS: << parameters.proofs-log-test >>
+ SKIP_CONFORMANCE: "1"
+ command: |
+ mkdir -p /tmp/test-reports/<< parameters.suite >>
+ mkdir -p /tmp/test-artifacts
+ gotestsum \
+ --format << parameters.gotestsum-format >> \
+ --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
+ --jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \
+ -- \
+ << parameters.coverage >> \
+ << parameters.go-test-flags >> \
+ << parameters.target >>
+ no_output_timeout: 30m
+ - store_test_results:
+ path: /tmp/test-reports
+ - store_artifacts:
+ path: /tmp/test-artifacts/<< parameters.suite >>.json
+ - when:
+ condition: << parameters.codecov-upload >>
+ steps:
+ - go/install: {package: bash}
+ - go/install: {package: curl}
+ - run:
+ shell: /bin/bash -eo pipefail
+ command: |
+ bash <(curl -s https://codecov.io/bash)
+
+ test-conformance:
+ description: |
+ Run tests using a corpus of interoperable test vectors for Filecoin
+ implementations to test their correctness and compliance with the Filecoin
+ specifications.
+ parameters:
+ <<: *test-params
+ vectors-branch:
+ type: string
+ default: ""
+ description: |
+ Branch on github.com/filecoin-project/test-vectors to checkout and
+ test with. If empty (the default) the commit defined by the git
+ submodule is used.
+ executor: << parameters.executor >>
+ steps:
+ - install-deps
+ - prepare
+ - run:
+ command: make deps lotus
+ no_output_timeout: 30m
+ - download-params
+ - when:
+ condition:
+ not:
+ equal: [ "", << parameters.vectors-branch >> ]
+ steps:
+ - run:
+ name: checkout vectors branch
+ command: |
+ cd extern/test-vectors
+ git fetch
+ git checkout origin/<< parameters.vectors-branch >>
+ - go/install-gotestsum:
+ gobin: $HOME/.local/bin
+ version: 0.5.2
+ - run:
+ name: install statediff globally
+ command: |
+ ## statediff is optional; we succeed even if compilation fails.
+ mkdir -p /tmp/statediff
+ git clone https://github.com/filecoin-project/statediff.git /tmp/statediff
+ cd /tmp/statediff
+ go install ./cmd/statediff || exit 0
+ - run:
+ name: go test
+ environment:
+ SKIP_CONFORMANCE: "0"
+ command: |
+ mkdir -p /tmp/test-reports
+ mkdir -p /tmp/test-artifacts
+ gotestsum \
+ --format pkgname-and-test-fails \
+ --junitfile /tmp/test-reports/junit.xml \
+ -- \
+ -v -coverpkg ./chain/vm/,github.com/filecoin-project/specs-actors/... -coverprofile=/tmp/conformance.out ./conformance/
+ go tool cover -html=/tmp/conformance.out -o /tmp/test-artifacts/conformance-coverage.html
+ no_output_timeout: 30m
+ - store_test_results:
+ path: /tmp/test-reports
+ - store_artifacts:
+ path: /tmp/test-artifacts/conformance-coverage.html
+ build-ntwk-calibration:
+ description: |
+ Compile lotus binaries for the calibration network
+ parameters:
+ <<: *test-params
+ executor: << parameters.executor >>
+ steps:
+ - install-deps
+ - prepare
+ - run: make calibnet
+ - run: mkdir linux-calibrationnet && mv lotus lotus-miner lotus-worker linux-calibrationnet
+ - persist_to_workspace:
+ root: "."
+ paths:
+ - linux-calibrationnet
+ build-ntwk-butterfly:
+ description: |
+ Compile lotus binaries for the butterfly network
+ parameters:
+ <<: *test-params
+ executor: << parameters.executor >>
+ steps:
+ - install-deps
+ - prepare
+ - run: make butterflynet
+ - run: mkdir linux-butterflynet && mv lotus lotus-miner lotus-worker linux-butterflynet
+ - persist_to_workspace:
+ root: "."
+ paths:
+ - linux-butterflynet
+ build-ntwk-nerpa:
+ description: |
+ Compile lotus binaries for the nerpa network
+ parameters:
+ <<: *test-params
+ executor: << parameters.executor >>
+ steps:
+ - install-deps
+ - prepare
+ - run: make nerpanet
+ - run: mkdir linux-nerpanet && mv lotus lotus-miner lotus-worker linux-nerpanet
+ - persist_to_workspace:
+ root: "."
+ paths:
+ - linux-nerpanet
+ build-lotus-soup:
+ description: |
+ Compile `lotus-soup` Testground test plan
+ parameters:
+ <<: *test-params
+ executor: << parameters.executor >>
+ steps:
+ - install-deps
+ - prepare
+ - run: cd extern/filecoin-ffi && make
+ - run:
+ name: "go get lotus@master"
+ command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../.. && go mod tidy
+ - run:
+ name: "build lotus-soup testplan"
+ command: pushd testplans/lotus-soup && go build -tags=testground .
+ trigger-testplans:
+ description: |
+ Trigger `lotus-soup` test cases on TaaS
+ parameters:
+ <<: *test-params
+ executor: << parameters.executor >>
+ steps:
+ - install-deps
+ - prepare
+ - run:
+ name: "download testground"
+ command: wget https://gist.github.com/nonsense/5fbf3167cac79945f658771aed32fc44/raw/2e17eb0debf7ec6bdf027c1bdafc2c92dd97273b/testground-d3e9603 -O ~/testground-cli && chmod +x ~/testground-cli
+ - run:
+ name: "prepare .env.toml"
+ command: pushd testplans/lotus-soup && mkdir -p $HOME/testground && cp env-ci.toml $HOME/testground/.env.toml && echo 'endpoint="https://ci.testground.ipfs.team"' >> $HOME/testground/.env.toml && echo 'user="circleci"' >> $HOME/testground/.env.toml
+ - run:
+ name: "prepare testground home dir and link test plans"
+ command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup && ln -s $(pwd)/testplans/graphsync $HOME/testground/plans/graphsync
+ - run:
+ name: "go get lotus@master"
+ command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master
+ - run:
+ name: "trigger deals baseline testplan on taas"
+ command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/baseline-k8s-3-1.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
+ - run:
+ name: "trigger payment channel stress testplan on taas"
+ command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
+ - run:
+ name: "trigger graphsync testplan on taas"
+ command: ~/testground-cli run composition -f $HOME/testground/plans/graphsync/_compositions/stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
+
+
+ build-macos:
+ description: build darwin lotus binary
+ macos:
+ xcode: "10.0.0"
+ working_directory: ~/go/src/github.com/filecoin-project/lotus
+ steps:
+ - prepare:
+ linux: false
+ darwin: true
+ - run:
+ name: Install go
+ command: |
+ curl -O https://dl.google.com/go/go1.16.4.darwin-amd64.pkg && \
+ sudo installer -pkg go1.16.4.darwin-amd64.pkg -target /
+ - run:
+ name: Install pkg-config
+ command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config
+ - run: go version
+ - run:
+ name: Install Rust
+ command: |
+ curl https://sh.rustup.rs -sSf | sh -s -- -y
+ - run:
+ name: Install jq
+ command: |
+ curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq
+ chmod +x /usr/local/bin/jq
+ - run:
+ name: Install hwloc
+ command: |
+ mkdir ~/hwloc
+ curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz
+ cd ~/hwloc
+ tar -xvzpf hwloc-2.4.1.tar.gz
+ cd hwloc-2.4.1
+ ./configure && make && sudo make install
+ - restore_cache:
+ name: restore cargo cache
+ key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
+ - install-deps
+ - run:
+ command: make build
+ no_output_timeout: 30m
+ - store_artifacts:
+ path: lotus
+ - store_artifacts:
+ path: lotus-miner
+ - store_artifacts:
+ path: lotus-worker
+ - run: mkdir darwin && mv lotus lotus-miner lotus-worker darwin/
+ - persist_to_workspace:
+ root: "."
+ paths:
+ - darwin
+ - save_cache:
+ name: save cargo cache
+ key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
+ paths:
+ - "~/.rustup"
+ - "~/.cargo"
+
+ build-appimage:
+ machine:
+ image: ubuntu-2004:202104-01
+ steps:
+ - checkout
+ - attach_workspace:
+ at: "."
+ - run:
+ name: install appimage-builder
+ command: |
+ # docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html
+ sudo apt update
+ sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace
+ sudo curl -Lo /usr/local/bin/appimagetool https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage
+ sudo chmod +x /usr/local/bin/appimagetool
+ sudo pip3 install appimage-builder
+ - run:
+ name: install lotus dependencies
+ command: sudo apt install ocl-icd-opencl-dev libhwloc-dev
+ - run:
+ name: build appimage
+ command: |
+ sed -i "s/version: latest/version: ${CIRCLE_TAG:-latest}/" AppImageBuilder.yml
+ make appimage
+ - run:
+ name: prepare workspace
+ command: |
+ mkdir appimage
+ mv Lotus-*.AppImage appimage
+ - persist_to_workspace:
+ root: "."
+ paths:
+ - appimage
+
+
+ gofmt:
+ executor: golang
+ steps:
+ - install-deps
+ - prepare
+ - run:
+ command: "! go fmt ./... 2>&1 | read"
+
+ gen-check:
+ executor: golang
+ steps:
+ - install-deps
+ - prepare
+ - run: make deps
+ - run: go install golang.org/x/tools/cmd/goimports
+ - run: go install github.com/hannahhoward/cbor-gen-for
+ - run: make gen
+ - run: git --no-pager diff
+ - run: git --no-pager diff --quiet
+ - run: make docsgen-cli
+ - run: git --no-pager diff
+ - run: git --no-pager diff --quiet
+
+ docs-check:
+ executor: golang
+ steps:
+ - install-deps
+ - prepare
+ - run: go install golang.org/x/tools/cmd/goimports
+ - run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
+ - run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner
+ - run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker
+ - run: make deps
+ - run: make docsgen
+ - run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full
+ - run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner
+ - run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker
+ - run: git --no-pager diff
+ - run: diff ../pre-openrpc-full ../post-openrpc-full
+ - run: diff ../pre-openrpc-miner ../post-openrpc-miner
+ - run: diff ../pre-openrpc-worker ../post-openrpc-worker
+ - run: git --no-pager diff --quiet
+
+ lint: &lint
+ description: |
+ Run golangci-lint.
+ parameters:
+ executor:
+ type: executor
+ default: golang
+ golangci-lint-version:
+ type: string
+ default: 1.27.0
+ concurrency:
+ type: string
+ default: '2'
+ description: |
+ Concurrency used to run linters. Defaults to 2 because NumCPU is not
+ aware of container CPU limits.
+ args:
+ type: string
+ default: ''
+ description: |
+ Arguments to pass to golangci-lint
+ executor: << parameters.executor >>
+ steps:
+ - install-deps
+ - prepare
+ - run:
+ command: make deps
+ no_output_timeout: 30m
+ - go/install-golangci-lint:
+ gobin: $HOME/.local/bin
+ version: << parameters.golangci-lint-version >>
+ - run:
+ name: Lint
+ command: |
+ $HOME/.local/bin/golangci-lint run -v --timeout 2m \
+ --concurrency << parameters.concurrency >> << parameters.args >>
+ lint-all:
+ <<: *lint
+
+ publish:
+ description: publish binary artifacts
+ executor: ubuntu
+ steps:
+ - run:
+ name: Install git jq curl
+ command: apt update && apt install -y git jq curl
+ - checkout
+ - git_fetch_all_tags
+ - checkout
+ - install_ipfs
+ - attach_workspace:
+ at: "."
+ - run:
+ name: Create bundles
+ command: ./scripts/build-bundle.sh
+ - run:
+ name: Publish release
+ command: ./scripts/publish-release.sh
+
+ publish-snapcraft:
+ description: build and push snapcraft
+ machine:
+ image: ubuntu-2004:202104-01
+ resource_class: 2xlarge
+ parameters:
+ channel:
+ type: string
+ default: "edge"
+ description: snapcraft channel
+ steps:
+ - checkout
+ - run:
+ name: install snapcraft
+ command: sudo snap install snapcraft --classic
+ - run:
+ name: create snapcraft config file
+ command: |
+ mkdir -p ~/.config/snapcraft
+ echo "$SNAPCRAFT_LOGIN_FILE" | base64 -d > ~/.config/snapcraft/snapcraft.cfg
+ - run:
+ name: build snap
+ command: snapcraft --use-lxd
+ - run:
+ name: publish snap
+ command: snapcraft push *.snap --release << parameters.channel >>
+
+  build-and-push-image:
+    description: build and push docker images to public AWS ECR registry
+    executor: aws-cli/default
+    parameters:
+      profile-name:
+        type: string
+        default: "default"
+        description: AWS profile name to be configured.
+
+      aws-access-key-id:
+        type: env_var_name
+        default: AWS_ACCESS_KEY_ID
+        description: >
+          AWS access key id for IAM role. Set this to the name of
+          the environment variable you will set to hold this
+          value, i.e. AWS_ACCESS_KEY.
+
+      aws-secret-access-key:
+        type: env_var_name
+        default: AWS_SECRET_ACCESS_KEY
+        description: >
+          AWS secret key for IAM role. Set this to the name of
+          the environment variable you will set to hold this
+          value, i.e. AWS_SECRET_ACCESS_KEY.
+
+      region:
+        type: env_var_name
+        default: AWS_REGION
+        description: >
+          Name of env var storing your AWS region information,
+          defaults to AWS_REGION
+
+      account-url:
+        type: env_var_name
+        default: AWS_ECR_ACCOUNT_URL
+        description: >
+          Env var storing Amazon ECR account URL that maps to an AWS account,
+          e.g. {awsAccountNum}.dkr.ecr.us-west-2.amazonaws.com
+          defaults to AWS_ECR_ACCOUNT_URL
+
+      dockerfile:
+        type: string
+        default: Dockerfile
+        description: Name of dockerfile to use. Defaults to Dockerfile.
+
+      path:
+        type: string
+        default: .
+        description: Path to the directory containing your Dockerfile and build context. Defaults to . (working directory).
+
+      extra-build-args:
+        type: string
+        default: ""
+        description: >
+          Extra flags to pass to docker build. For examples, see
+          https://docs.docker.com/engine/reference/commandline/build
+
+      repo:
+        type: string
+        description: Name of an Amazon ECR repository
+
+      tag:
+        type: string
+        default: "latest"
+        description: A comma-separated string containing docker image tags to build and push (default = latest)
+
+    steps:
+      - run:
+          name: Confirm that environment variables are set
+          command: |
+            if [ -z "$AWS_ACCESS_KEY_ID" ]; then
+              echo "No AWS_ACCESS_KEY_ID is set. Skipping build-and-push job ..."
+              circleci-agent step halt
+            fi
+
+      - aws-cli/setup:
+          profile-name: << parameters.profile-name >>
+          aws-access-key-id: << parameters.aws-access-key-id >>
+          aws-secret-access-key: << parameters.aws-secret-access-key >>
+          aws-region: << parameters.region >>
+
+      - run:
+          name: Log into Amazon ECR
+          command: |
+            aws ecr-public get-login-password --region $<< parameters.region >> --profile << parameters.profile-name >> | docker login --username AWS --password-stdin $<< parameters.account-url >>
+
+      - checkout
+
+      - setup_remote_docker:
+          version: 19.03.13
+          docker_layer_caching: false
+
+      - run:
+          name: Build docker image
+          command: |
+            registry_id=$(echo $<< parameters.account-url >> | sed "s;\..*;;g")
+
+            docker_tag_args=""
+            IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
+            for tag in "${DOCKER_TAGS[@]}"; do
+              docker_tag_args="$docker_tag_args -t $<< parameters.account-url >>/<< parameters.repo >>:$tag"
+            done
+
+            docker build \
+              <<#parameters.extra-build-args>><< parameters.extra-build-args >><</parameters.extra-build-args>> \
+              -f << parameters.path >>/<< parameters.dockerfile >> \
+              $docker_tag_args \
+              << parameters.path >>
+
+      - run:
+          name: Push image to Amazon ECR
+          command: |
+            IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
+            for tag in "${DOCKER_TAGS[@]}"; do
+              docker push $<< parameters.account-url >>/<< parameters.repo >>:${tag}
+            done
+
+ publish-packer-mainnet:
+ description: build and push AWS IAM and DigitalOcean droplet.
+ executor:
+ name: packer/default
+ packer-version: 1.6.6
+ steps:
+ - checkout
+ - attach_workspace:
+ at: "."
+ - packer/build:
+ template: tools/packer/lotus.pkr.hcl
+ args: "-var ci_workspace_bins=./linux -var lotus_network=mainnet -var git_tag=$CIRCLE_TAG"
+ publish-packer-calibrationnet:
+ description: build and push AWS IAM and DigitalOcean droplet.
+ executor:
+ name: packer/default
+ packer-version: 1.6.6
+ steps:
+ - checkout
+ - attach_workspace:
+ at: "."
+ - packer/build:
+ template: tools/packer/lotus.pkr.hcl
+ args: "-var ci_workspace_bins=./linux-calibrationnet -var lotus_network=calibrationnet -var git_tag=$CIRCLE_TAG"
+ publish-packer-butterflynet:
+ description: build and push AWS IAM and DigitalOcean droplet.
+ executor:
+ name: packer/default
+ packer-version: 1.6.6
+ steps:
+ - checkout
+ - attach_workspace:
+ at: "."
+ - packer/build:
+ template: tools/packer/lotus.pkr.hcl
+ args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG"
+ publish-packer-nerpanet:
+ description: build and push AWS IAM and DigitalOcean droplet.
+ executor:
+ name: packer/default
+ packer-version: 1.6.6
+ steps:
+ - checkout
+ - attach_workspace:
+ at: "."
+ - packer/build:
+ template: tools/packer/lotus.pkr.hcl
+ args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG"
+
+workflows:
+ version: 2.1
+ ci:
+ jobs:
+ - lint-all:
+ concurrency: "16" # expend all docker 2xlarge CPUs.
+ - mod-tidy-check
+ - gofmt
+ - gen-check
+ - docs-check
+
+ [[- range $file := .ItestFiles -]]
+ [[ with $name := $file | stripSuffix ]]
+ - test:
+ name: test-itest-[[ $name ]]
+ suite: itest-[[ $name ]]
+ target: "./itests/[[ $file ]]"
+ [[ end ]]
+ [[- end -]]
+
+ [[range $suite, $pkgs := .UnitSuites]]
+ - test:
+ name: test-[[ $suite ]]
+ suite: utest-[[ $suite ]]
+ target: "[[ $pkgs ]]"
+ [[- end]]
+ - test:
+ go-test-flags: "-run=TestMulticoreSDR"
+ suite: multicore-sdr-check
+ target: "./extern/sector-storage/ffiwrapper"
+ proofs-log-test: "1"
+ - test-conformance:
+ suite: conformance
+ codecov-upload: false
+ target: "./conformance"
+ - test-conformance:
+ name: test-conformance-bleeding-edge
+ codecov-upload: false
+ suite: conformance-bleeding-edge
+ target: "./conformance"
+ vectors-branch: master
+ - trigger-testplans:
+ filters:
+ branches:
+ only:
+ - master
+ - build-debug
+ - build-all:
+ filters:
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - build-ntwk-calibration:
+ filters:
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - build-ntwk-butterfly:
+ filters:
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - build-ntwk-nerpa:
+ filters:
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - build-lotus-soup
+ - build-macos:
+ filters:
+ branches:
+ ignore:
+ - /.*/
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - build-appimage:
+ filters:
+ branches:
+ ignore:
+ - /.*/
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - publish:
+ requires:
+ - build-all
+ - build-macos
+ - build-appimage
+ filters:
+ branches:
+ ignore:
+ - /.*/
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - build-and-push-image:
+ dockerfile: Dockerfile.lotus
+ path: .
+ repo: lotus-dev
+ tag: '${CIRCLE_SHA1:0:8}'
+ - publish-packer-mainnet:
+ requires:
+ - build-all
+ filters:
+ branches:
+ ignore:
+ - /.*/
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - publish-packer-calibrationnet:
+ requires:
+ - build-ntwk-calibration
+ filters:
+ branches:
+ ignore:
+ - /.*/
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - publish-packer-butterflynet:
+ requires:
+ - build-ntwk-butterfly
+ filters:
+ branches:
+ ignore:
+ - /.*/
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - publish-packer-nerpanet:
+ requires:
+ - build-ntwk-nerpa
+ filters:
+ branches:
+ ignore:
+ - /.*/
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+ - publish-snapcraft:
+ name: publish-snapcraft-stable
+ channel: stable
+ filters:
+ branches:
+ ignore:
+ - /.*/
+ tags:
+ only:
+ - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+
+ nightly:
+ triggers:
+ - schedule:
+ cron: "0 0 * * *"
+ filters:
+ branches:
+ only:
+ - master
+ jobs:
+ - publish-snapcraft:
+ name: publish-snapcraft-nightly
+ channel: edge
diff --git a/.codecov.yml b/.codecov.yml
index a53081be7fd..1967f6ecac4 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -5,5 +5,15 @@ ignore:
- "api/test/*"
- "gen/**/*"
- "gen/*"
+ - "cmd/lotus-shed/*"
+ - "cmd/tvx/*"
+ - "cmd/lotus-pcr/*"
+ - "cmd/tvx/*"
+ - "cmd/lotus-chainwatch/*"
+ - "cmd/lotus-health/*"
+ - "cmd/lotus-fountain/*"
+ - "cmd/lotus-townhall/*"
+ - "cmd/lotus-stats/*"
+ - "cmd/lotus-pcr/*"
github_checks:
annotations: false
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 6d717b44d69..b8ec66f00ea 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,16 +1,6 @@
-## filecoin-project/lotus CODEOWNERS
-## Refer to https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners.
-##
-## These users or groups will be automatically assigned as reviewers every time
-## a PR is submitted that modifies code in the specified locations.
-##
-## The Lotus repo configuration requires that at least ONE codeowner approves
-## the PR before merging.
+# Reference
+# https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-code-owners
-### Global owners.
-* @magik6k @whyrusleeping @Kubuxu
-
-### Conformance testing.
-conformance/ @raulk
-extern/test-vectors @raulk
-cmd/tvx @raulk
\ No newline at end of file
+# Global owners
+# Ensure maintainers team is a requested reviewer for non-draft PRs
+* @filecoin-project/lotus-maintainers
diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
new file mode 100644
index 00000000000..23c7640b782
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-report.md
@@ -0,0 +1,33 @@
+---
+name: Bug Report
+about: Create a report to help us improve
+title: "[BUG] "
+labels: hint/needs-triaging, kind/bug
+assignees: ''
+
+---
+
+> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+(If you are not sure what the bug is, try to figure it out via a [discussion](https://github.com/filecoin-project/lotus/discussions/new) first!)
+
+**Version (run `lotus version`):**
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Run '...'
+2. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Logs**
+Provide daemon/miner/worker logs, and goroutines(if available) for troubleshooting.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 1ded8c36b70..00000000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-1. Run '...'
-2. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Version (run `lotus version`):**
-
-**Additional context**
-Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/deal-making-issues.md b/.github/ISSUE_TEMPLATE/deal-making-issues.md
new file mode 100644
index 00000000000..bec800cb7ce
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/deal-making-issues.md
@@ -0,0 +1,49 @@
+---
+name: Deal Making Issues
+about: Create a report for help with deal making failures.
+title: "[Deal Making Issue]"
+labels: hint/needs-triaging, area/markets
+assignees: ''
+
+---
+
+> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
+
+Please provide all the information requested here to help us troubleshoot "deal making failed" issues.
+If the information requested is missing, we will probably have to just ask you to provide it anyway,
+before we can help debug.
+
+**Basic Information**
+Include information such as: Are you the client or the miner? Is this a storage deal or a retrieval deal? Is it an offline deal?
+
+**Describe the problem**
+
+A brief description of the problem you encountered while trying to make a deal.
+
+**Version**
+
+The output of `lotus --version`.
+
+**Setup**
+
+Your miner (if applicable) and daemon setup, e.g. what hardware you use, how much RAM, etc.
+
+**To Reproduce**
+ Steps to reproduce the behavior:
+ 1. Run '...'
+ 2. See error
+
+**Deal status**
+
+The output of `lotus client list-deals -v` and/or `lotus-miner storage-deals|retrieval-deals|data-transfers list [-v]` commands for the deal(s) in question.
+
+**Lotus daemon and miner logs**
+
+Please go through the logs of your daemon and miner (if applicable), and include screenshots of any error/warning-like messages you find.
+
+Alternatively please upload full log files and share a link here
+
+**Code modifications**
+
+If you have modified parts of lotus, please describe which areas were modified,
+and the scope of those modifications
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000000..0803a6db827
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: "[Feature Request]"
+labels: hint/needs-triaging
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.github/ISSUE_TEMPLATE/mining-issues.md b/.github/ISSUE_TEMPLATE/mining-issues.md
new file mode 100644
index 00000000000..434e160d411
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/mining-issues.md
@@ -0,0 +1,35 @@
+---
+name: Mining Issues
+about: Create a report for help with mining failures.
+title: "[Mining Issue]"
+labels: hint/needs-triaging, area/mining
+assignees: ''
+
+---
+
+> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
+
+Please provide all the information requested here to help us troubleshoot "mining/WinningPoSt failed" issues.
+If the information requested is missing, you may be asked to provide it.
+
+**Describe the problem**
+A brief description of the problem you encountered while mining new blocks.
+
+**Version**
+
+The output of `lotus --version`.
+
+**Setup**
+
+Your miner and daemon setup, including what hardware you use, your environment variable settings, how you run your miner and worker, whether you use a GPU, etc.
+
+**Lotus daemon and miner logs**
+
+Please go through the logs of your daemon and miner, and include screenshots of any error/warning-like messages you find, highlighting the ones that have "winning post" in them.
+
+Alternatively please upload full log files and share a link here
+
+**Code modifications**
+
+If you have modified parts of lotus, please describe which areas were modified,
+and the scope of those modifications
diff --git a/.github/ISSUE_TEMPLATE/proving-issues.md b/.github/ISSUE_TEMPLATE/proving-issues.md
new file mode 100644
index 00000000000..6187d546ee0
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/proving-issues.md
@@ -0,0 +1,46 @@
+---
+name: Proving Issues
+about: Create a report for help with proving failures.
+title: "[Proving Issue]"
+labels: area/proving, hint/needs-triaging
+assignees: ''
+
+---
+
+> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
+
+Please provide all the information requested here to help us troubleshoot "proving/window PoSt failed" issues.
+If the information requested is missing, we will probably have to just ask you to provide it anyway,
+before we can help debug.
+
+**Describe the problem**
+A brief description of the problem you encountered while proving the storage.
+
+**Version**
+
+The output of `lotus --version`.
+
+**Setup**
+
+Your miner and daemon setup, including what hardware you use, your environment variable settings, how you run your miner and worker, whether you use a GPU, etc.
+
+**Proving status**
+
+The output of `lotus-miner proving` info.
+
+**Lotus miner logs**
+
+Please go through the logs of your miner, and include screenshots of any error-like messages you find, highlighting the ones that have "window post" in them.
+
+Alternatively please upload full log files and share a link here
+
+**Lotus miner diagnostic info**
+
+Please collect the following diagnostic information, and share a link here
+
+* lotus-miner diagnostic info `lotus-miner info all > allinfo.txt`
+
+**Code modifications**
+
+If you have modified parts of lotus, please describe which areas were modified,
+and the scope of those modifications
diff --git a/.github/ISSUE_TEMPLATE/sealingfailed.md b/.github/ISSUE_TEMPLATE/sealing-issues.md
similarity index 67%
rename from .github/ISSUE_TEMPLATE/sealingfailed.md
rename to .github/ISSUE_TEMPLATE/sealing-issues.md
index ae14c32622c..7511849d3db 100644
--- a/.github/ISSUE_TEMPLATE/sealingfailed.md
+++ b/.github/ISSUE_TEMPLATE/sealing-issues.md
@@ -1,21 +1,32 @@
---
name: Sealing Issues
about: Create a report for help with sealing (commit) failures.
-title: ''
-labels: 'sealing'
+title: "[Sealing Issue]"
+labels: hint/needs-triaging, area/sealing
assignees: ''
---
+> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
+
Please provide all the information requested here to help us troubleshoot "commit failed" issues.
If the information requested is missing, we will probably have to just ask you to provide it anyway,
before we can help debug.
**Describe the problem**
+A brief description of the problem you encountered while sealing a sector.
+
+**Version**
+
+The output of `lotus --version`.
+
+**Setup**
-A brief description of the problem you encountered while proving (sealing) a sector.
+Your miner and daemon setup, including what hardware you use, your environment variable settings, how you run your miner and worker, whether you use a GPU, etc.
-Including what commands you ran, and a description of your setup, is very helpful.
+**Commands**
+
+Commands you ran.
**Sectors status**
@@ -37,7 +48,3 @@ Please collect the following diagnostic information, and share a link here
If you have modified parts of lotus, please describe which areas were modified,
and the scope of those modifications
-
-**Version**
-
-The output of `lotus --version`.
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 00000000000..33725d70d32
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,69 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [ master ]
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ubuntu-latest
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'go' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
+ # Learn more:
+ # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v2
+
+ - uses: actions/setup-go@v1
+ with:
+ go-version: '1.16.4'
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v1
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+ # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v1
+
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 https://git.io/JvXDl
+
+ # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+ # and modify them (or add more) to build your code if your project
+ # uses a compiled language
+
+ #- run: |
+ # make bootstrap
+ # make release
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v1
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 00000000000..20b2feb8a95
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,27 @@
+name: Close and mark stale issue
+
+on:
+ schedule:
+ - cron: '0 0 * * *'
+
+jobs:
+ stale:
+
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ pull-requests: write
+
+ steps:
+ - uses: actions/stale@v3
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-issue-message: 'Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 24 hours.'
+ close-issue-message: 'This issue was closed because it is missing author input.'
+ stale-issue-label: 'kind/stale'
+ any-of-labels: 'hint/needs-author-input'
+ days-before-issue-stale: 5
+ days-before-issue-close: 1
+ enable-statistics: true
+
+
diff --git a/.github/workflows/testground-on-push.yml b/.github/workflows/testground-on-push.yml
new file mode 100644
index 00000000000..2a3c8af1d51
--- /dev/null
+++ b/.github/workflows/testground-on-push.yml
@@ -0,0 +1,29 @@
+---
+name: Testground PR Checker
+
+on: [push]
+
+jobs:
+ testground:
+ runs-on: ubuntu-latest
+ name: ${{ matrix.composition_file }}
+ strategy:
+ matrix:
+ include:
+ - backend_addr: ci.testground.ipfs.team
+ backend_proto: https
+ plan_directory: testplans/lotus-soup
+ composition_file: testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml
+ - backend_addr: ci.testground.ipfs.team
+ backend_proto: https
+ plan_directory: testplans/lotus-soup
+ composition_file: testplans/lotus-soup/_compositions/paych-stress-k8s.toml
+ steps:
+ - uses: actions/checkout@v2
+ - name: testground run
+ uses: coryschwartz/testground-github-action@v1.1
+ with:
+ backend_addr: ${{ matrix.backend_addr }}
+ backend_proto: ${{ matrix.backend_proto }}
+ plan_directory: ${{ matrix.plan_directory }}
+ composition_file: ${{ matrix.composition_file }}
diff --git a/.gitignore b/.gitignore
index fd51881b788..467f315b8ef 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,6 @@
+/AppDir
+/appimage-builder-cache
+*.AppImage
/lotus
/lotus-miner
/lotus-worker
@@ -5,6 +8,7 @@
/lotus-health
/lotus-chainwatch
/lotus-shed
+/lotus-sim
/lotus-pond
/lotus-townhall
/lotus-fountain
@@ -13,6 +17,9 @@
/lotus-gateway
/lotus-pcr
/lotus-wallet
+/lotus-keygen
+/docgen-md
+/docgen-openrpc
/bench.json
/lotuspond/front/node_modules
/lotuspond/front/build
diff --git a/.gitmodules b/.gitmodules
index 35f5a3d3f9b..cdee35ce393 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,16 +1,9 @@
[submodule "extern/filecoin-ffi"]
path = extern/filecoin-ffi
url = https://github.com/filecoin-project/filecoin-ffi.git
- branch = master
[submodule "extern/serialization-vectors"]
path = extern/serialization-vectors
- url = https://github.com/filecoin-project/serialization-vectors
+ url = https://github.com/filecoin-project/serialization-vectors.git
[submodule "extern/test-vectors"]
path = extern/test-vectors
url = https://github.com/filecoin-project/test-vectors.git
-[submodule "extern/fil-blst"]
- path = extern/fil-blst
- url = https://github.com/filecoin-project/fil-blst.git
-[submodule "extern/oni"]
- path = extern/oni
- url = https://github.com/filecoin-project/oni
diff --git a/.golangci.yml b/.golangci.yml
index 8bdba64f0b6..87db745e427 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -16,6 +16,12 @@ linters:
- deadcode
- scopelint
+# We don't want to skip builtin/
+skip-dirs-use-default: false
+skip-dirs:
+ - vendor$
+ - testdata$
+ - examples$
issues:
exclude:
diff --git a/AppDir/usr/share/icons/icon.svg b/AppDir/usr/share/icons/icon.svg
new file mode 100644
index 00000000000..da992296a1a
--- /dev/null
+++ b/AppDir/usr/share/icons/icon.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/AppImageBuilder.yml b/AppImageBuilder.yml
new file mode 100644
index 00000000000..19c74e4a26a
--- /dev/null
+++ b/AppImageBuilder.yml
@@ -0,0 +1,73 @@
+version: 1
+AppDir:
+ path: ./AppDir
+ app_info:
+ id: io.filecoin.lotus
+ name: Lotus
+ icon: icon
+ version: latest
+ exec: usr/bin/lotus
+ exec_args: $@
+ apt:
+ arch: amd64
+ allow_unauthenticated: true
+ sources:
+ - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal main restricted
+ - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-updates main restricted
+ - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal universe
+ - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-updates universe
+ - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal multiverse
+ - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-updates multiverse
+ - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-backports main restricted
+ universe multiverse
+ - sourceline: deb http://security.ubuntu.com/ubuntu focal-security main restricted
+ - sourceline: deb http://security.ubuntu.com/ubuntu focal-security universe
+ - sourceline: deb http://security.ubuntu.com/ubuntu focal-security multiverse
+ - sourceline: deb https://cli-assets.heroku.com/apt ./
+ - sourceline: deb http://ppa.launchpad.net/openjdk-r/ppa/ubuntu focal main
+ - sourceline: deb http://ppa.launchpad.net/git-core/ppa/ubuntu focal main
+ - sourceline: deb http://archive.canonical.com/ubuntu focal partner
+ include:
+ - ocl-icd-libopencl1
+ - libhwloc15
+ exclude: []
+ files:
+ include:
+ - /usr/lib/x86_64-linux-gnu/libgcc_s.so.1
+ - /usr/lib/x86_64-linux-gnu/libpthread-2.31.so
+ - /usr/lib/x86_64-linux-gnu/libm-2.31.so
+ - /usr/lib/x86_64-linux-gnu/libdl-2.31.so
+ - /usr/lib/x86_64-linux-gnu/libc-2.31.so
+ - /usr/lib/x86_64-linux-gnu/libudev.so.1.6.17
+ exclude:
+ - usr/share/man
+ - usr/share/doc/*/README.*
+ - usr/share/doc/*/changelog.*
+ - usr/share/doc/*/NEWS.*
+ - usr/share/doc/*/TODO.*
+ test:
+ fedora:
+ image: appimagecrafters/tests-env:fedora-30
+ command: ./AppRun
+ use_host_x: true
+ debian:
+ image: appimagecrafters/tests-env:debian-stable
+ command: ./AppRun
+ use_host_x: true
+ arch:
+ image: appimagecrafters/tests-env:archlinux-latest
+ command: ./AppRun
+ use_host_x: true
+ centos:
+ image: appimagecrafters/tests-env:centos-7
+ command: ./AppRun
+ use_host_x: true
+ ubuntu:
+ image: appimagecrafters/tests-env:ubuntu-xenial
+ command: ./AppRun
+ use_host_x: true
+AppImage:
+ arch: x86_64
+ update-information: guess
+ sign-key: None
+
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 88a30c91dd9..b45c6236d53 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,10 +1,867 @@
# Lotus changelog
+# 1.10.1 / 2021-07-05
+
+This is an optional but **highly recommended** release of Lotus for lotus miners that has many bug fixes and improvements based on the feedback we got from the community since HyperDrive.
+
+## New Features
+- commit batch: AggregateAboveBaseFee config #6650
+ - `AggregateAboveBaseFee` is added to miner sealing configuration for setting the network base fee to start aggregating proofs. When the network base fee is lower than this value, the prove commits will be submitted individually via `ProveCommitSector`. According to the [Batch Incentive Alignment](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0013.md#batch-incentive-alignment) introduced in FIP-0013, we recommend miners to set this value to 0.15 nanoFIL(which is the default value) to avoid unexpected aggregation fee in burn and enjoy the most benefits of aggregation!
+
+## Bug Fixes
+- storage: Fix FinalizeSector with sectors in storage paths #6652
+- Fix tiny error in check-client-datacap #6664
+- Fix: precommit_batch method used the wrong cfg.PreCommitBatchWait #6658
+- to optimize the batchwait #6636
+- fix getTicket: sector precommitted but expired case #6635
+- handleSubmitCommitAggregate() exception handling #6595
+- remove precommit check in handleCommitFailed #6634
+- ensure agg fee is adequate
+- fix: miner balance is not enough, so that ProveCommitAggregate msg exec failed #6623
+- commit batch: Initialize the FailedSectors map #6647
+
+Contributors
+
+| Contributor | Commits | Lines ± | Files Changed |
+|-------------|---------|---------|---------------|
+| @magik6k| 7 | +151/-56 | 21 |
+| @llifezou | 4 | +59/-20 | 4 |
+| @johnli-helloworld | 2 | +45/-14 | 4 |
+| @wangchao | 1 | +1/-27 | 1 |
+| Jerry | 2 | +9/-4 | 2 |
+| @zhoutian527 | 1 | +2/-2 | 1 |
+| @ribasushi| 1 | +1/-1 | 1 |
+
+
+# 1.10.0 / 2021-06-23
+
+This is a mandatory release of Lotus that introduces Filecoin network v13, codenamed the HyperDrive upgrade. The
+Filecoin mainnet will upgrade at epoch 892800, on 2021-06-30T22:00:00Z. The network upgrade introduces the
+following FIPs:
+
+- [FIP-0008](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0008.md): Add miner batched sector pre-commit method
+- [FIP-0011](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0011.md): Remove reward auction from reporting consensus faults
+- [FIP-0012](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0012.md): DataCap Top up for FIL+ Client Addresses
+- [FIP-0013](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0013.md): Add ProveCommitSectorAggregated method to reduce on-chain congestion
+- [FIP-0015](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0015.md): Revert FIP-0009(Exempt Window PoSts from BaseFee burn)
+
+Note that this release is built on top of Lotus v1.9.0. Enterprising users can use the `master` branch of Lotus to get the latest functionality, including all changes in this release candidate.
+
+## Proof batching and aggregation
+
+FIPs [0008](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0008.md) and [0013](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0013.md) combine to allow for a significant increase in the rate of onboarding storage on the Filecoin network. This aims to lead to more useful data being stored on the network, reduced network congestion, and lower network base fee.
+
+**Check out the documentation [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#precommitsectorsbatch) for details on the new Lotus miner sealing config options, [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#fees-section) for fee config options, and explanations of the new features.**
+
+Note:
+ - We recommend to keep `PreCommitSectorsBatch` as 1.
+ - We recommend miners to set `PreCommitBatchWait` lower than 30 hours.
+ - We recommend miners to set a longer `CommitBatchSlack` and `PreCommitBatchSlack` to prevent message failures
+ due to expirations.
+
+### Projected state tree growth
+
+In order to validate the Hyperdrive changes, we wrote a simulation to seal as many sectors as quickly as possible, assuming the same number and mix of 32GiB and 64GiB miners as the current network.
+
+Given these assumptions:
+
+- We'd expect a network storage growth rate of around 530PiB per day. 😳 🎉 🥳 😅
+- We'd expect network bandwidth dedicated to `SubmitWindowedPoSt` to grow by about 0.02% per day.
+- We'd expect the [state-tree](https://spec.filecoin.io/#section-systems.filecoin_vm.state_tree) (and therefore [snapshot](https://docs.filecoin.io/get-started/lotus/chain/#lightweight-snapshot)) size to grow by 1.16GiB per day.
+ - Nearly all of the state-tree growth is expected to come from new sector metadata.
+- We'd expect the daily lotus datastore growth rate to increase by about 10-15% (from current ~21GiB/day).
+ - Most "growth" of the lotus datastore is due to "churn", historical data that's no longer referenced by the latest state-tree.
+
+### Future improvements
+
+Various Lotus improvements are planned moving forward to mitigate the effects of the growing state tree size. The primary improvement is the [Lotus splitstore](https://github.com/filecoin-project/lotus/discussions/5788), which will soon be enabled by default. The feature allows for [online garbage collection](https://github.com/filecoin-project/lotus/issues/6577) for nodes that do not seek to maintain full chain and state history, thus eliminating the need for users to delete their datastores and sync from snapshots.
+
+Other improvements including better compressed snapshots, faster pre-migrations, and improved chain exports are in the roadmap.
+
+## WindowPost base fee burn
+
+Included in the HyperDrive upgrade is [FIP-0015](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0015.md) which eliminates the special-case gas treatment of `SubmitWindowedPoSt` messages that was introduced in [FIP-0009](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0009.md). Although `SubmitWindowedPoSt` messages will be relatively cheap, thanks to the introduction of optimistic acceptance of these proofs in [FIP-0010](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0010.md), storage providers should pay attention to their `MaxWindowPoStGasFee` config option: too low and PoSts may not land on chain; too high and they may cost an exorbitant amount!
+
+## Changelog
+
+### New Features
+
+- Implement FIP-0015 ([filecoin-project/lotus#6361](https://github.com/filecoin-project/lotus/pull/6361))
+- Integrate FIP0013 and FIP0008 ([filecoin-project/lotus#6235](https://github.com/filecoin-project/lotus/pull/6235))
+ - [Configuration docs and cli examples](https://docs.filecoin.io/mine/lotus/miner-configuration/#precommitsectorsbatch)
+ - [cli docs](https://github.com/filecoin-project/lotus/blob/master/documentation/en/cli-lotus-miner.md#lotus-miner-sectors-batching)
+ - Introduce gas prices for aggregate verifications ([filecoin-project/lotus#6347](https://github.com/filecoin-project/lotus/pull/6347))
+- Introduce v5 actors ([filecoin-project/lotus#6195](https://github.com/filecoin-project/lotus/pull/6195))
+- Robustify commit batcher ([filecoin-project/lotus#6367](https://github.com/filecoin-project/lotus/pull/6367))
+- Always flush when timer goes off ([filecoin-project/lotus#6563](https://github.com/filecoin-project/lotus/pull/6563))
+- Update default fees for aggregates ([filecoin-project/lotus#6548](https://github.com/filecoin-project/lotus/pull/6548))
+- sealing: Early finalization option ([filecoin-project/lotus#6452](https://github.com/filecoin-project/lotus/pull/6452))
+ - `./lotus-miner/config.toml/[Sealing.FinalizeEarly]`: default to false. Enable if you want to FinalizeSector before committing
+- Add filplus utils to CLI ([filecoin-project/lotus#6351](https://github.com/filecoin-project/lotus/pull/6351))
+ - cli doc can be found [here](https://github.com/filecoin-project/lotus/blob/master/documentation/en/cli-lotus.md#lotus-filplus)
+- Add miner-side MaxDealStartDelay config ([filecoin-project/lotus#6576](https://github.com/filecoin-project/lotus/pull/6576))
+
+
+### Bug Fixes
+- chainstore: Don't take heaviestLk with backlogged reorgCh ([filecoin-project/lotus#6526](https://github.com/filecoin-project/lotus/pull/6526))
+- Backport #6041 - storagefsm: Fix batch deal packing behavior ([filecoin-project/lotus#6519](https://github.com/filecoin-project/lotus/pull/6519))
+- backport: pick the correct partitions-per-post limit ([filecoin-project/lotus#6503](https://github.com/filecoin-project/lotus/pull/6503))
+- failed sectors should be added into res correctly ([filecoin-project/lotus#6472](https://github.com/filecoin-project/lotus/pull/6472))
+- sealing: Fix restartSectors race ([filecoin-project/lotus#6491](https://github.com/filecoin-project/lotus/pull/6491))
+- Fund miners with the aggregate fee when ProveCommitting ([filecoin-project/lotus#6428](https://github.com/filecoin-project/lotus/pull/6428))
+- Commit and Precommit batcher cannot share a getSectorDeadline method ([filecoin-project/lotus#6416](https://github.com/filecoin-project/lotus/pull/6416))
+- Fix supported proof type manipulations for v5 actors ([filecoin-project/lotus#6366](https://github.com/filecoin-project/lotus/pull/6366))
+- events: Fix handling of multiple matched events per epoch ([filecoin-project/lotus#6362](https://github.com/filecoin-project/lotus/pull/6362))
+- Fix randomness fetching around null blocks ([filecoin-project/lotus#6240](https://github.com/filecoin-project/lotus/pull/6240))
+
+### Improvements
+- Appimage v1.10.0 rc3 ([filecoin-project/lotus#6492](https://github.com/filecoin-project/lotus/pull/6492))
+- Expand on Drand change testing ([filecoin-project/lotus#6500](https://github.com/filecoin-project/lotus/pull/6500))
+- Backport Fix logging around mineOne ([filecoin-project/lotus#6499](https://github.com/filecoin-project/lotus/pull/6499))
+- mpool: Add more metrics ([filecoin-project/lotus#6453](https://github.com/filecoin-project/lotus/pull/6453))
+- Merge backported PRs into v1.10 release branch ([filecoin-project/lotus#6436](https://github.com/filecoin-project/lotus/pull/6436))
+- Fix tests ([filecoin-project/lotus#6371](https://github.com/filecoin-project/lotus/pull/6371))
+- Extend the default deal start epoch delay ([filecoin-project/lotus#6350](https://github.com/filecoin-project/lotus/pull/6350))
+- sealing: Wire up context to batchers ([filecoin-project/lotus#6497](https://github.com/filecoin-project/lotus/pull/6497))
+- Improve address resolution for messages ([filecoin-project/lotus#6364](https://github.com/filecoin-project/lotus/pull/6364))
+
+### Dependency Updates
+- Proofs v8.0.2 ([filecoin-project/lotus#6524](https://github.com/filecoin-project/lotus/pull/6524))
+- Update to fixed Bellperson ([filecoin-project/lotus#6480](https://github.com/filecoin-project/lotus/pull/6480))
+- Update to go-paramfetch with fslocks ([filecoin-project/lotus#6473](https://github.com/filecoin-project/lotus/pull/6473))
+- Update ffi with fixed multicore sdr support ([filecoin-project/lotus#6471](https://github.com/filecoin-project/lotus/pull/6471))
+- github.com/filecoin-project/go-paramfetch (v0.0.2-0.20200701152213-3e0f0afdc261 -> v0.0.2-0.20210614165157-25a6c7769498)
+- github.com/filecoin-project/specs-actors/v5 (v5.0.0-20210512015452-4fe3889fff57 -> v5.0.0)
+- github.com/filecoin-project/go-hamt-ipld/v3 (v3.0.1 -> v3.1.0)
+- github.com/ipfs/go-log/v2 (v2.1.2-0.20200626104915-0016c0b4b3e4 -> v2.1.3)
+- github.com/filecoin-project/go-amt-ipld/v3 (v3.0.0 -> v3.1.0)
+
+### Network Version v13 HyperDrive Upgrade
+- Set HyperDrive upgrade epoch ([filecoin-project/lotus#6565](https://github.com/filecoin-project/lotus/pull/6565))
+- version bump to lotus v1.10.0-rc6 ([filecoin-project/lotus#6529](https://github.com/filecoin-project/lotus/pull/6529))
+- Upgrade epochs for calibration reset ([filecoin-project/lotus#6528](https://github.com/filecoin-project/lotus/pull/6528))
+- Lotus version 1.10.0-rc5 ([filecoin-project/lotus#6504](https://github.com/filecoin-project/lotus/pull/6504))
+- Merge releases into v1.10 release ([filecoin-project/lotus#6494](https://github.com/filecoin-project/lotus/pull/6494))
+- update lotus to v1.10.0-rc3 ([filecoin-project/lotus#6481](https://github.com/filecoin-project/lotus/pull/6481))
+- updated configuration comments for docs
+- Lotus version 1.10.0-rc2 ([filecoin-project/lotus#6443](https://github.com/filecoin-project/lotus/pull/6443))
+- Set ntwk v13 HyperDrive Calibration upgrade epoch ([filecoin-project/lotus#6442](https://github.com/filecoin-project/lotus/pull/6442))
+
+
+## Contributors
+
+💙Thank you to all the contributors!
+
+| Contributor | Commits | Lines ± | Files Changed |
+|--------------------|---------|-------------|---------------|
+| @magik6k | 81 | +9606/-1536 | 361 |
+| @arajasek | 41 | +6543/-679 | 189 |
+| @ZenGround0 | 11 | +4074/-727 | 110 |
+| @anorth | 10 | +2035/-1177 | 55 |
+| @iand | 1 | +779/-12 | 5 |
+| @frrist | 2 | +722/-6 | 6 |
+| @Stebalien | 6 | +368/-24 | 15 |
+| @jennijuju | 11 | +204/-111 | 19 |
+| @vyzo | 6 | +155/-66 | 13 |
+| @coryschwartz | 10 | +171/-27 | 14 |
+| @Kubuxu | 4 | +177/-13 | 7 |
+| @ribasushi | 4 | +65/-42 | 5 |
+| @travisperson | 2 | +11/-11 | 4 |
+| @kirk-baird | 1 | +1/-5 | 1 |
+| @wangchao | 2 | +3/-2 | 2 |
+
+
+# 1.9.0 / 2021-05-17
+
+This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes.
+
+## Highlights
+
+- OpenRPC Support (https://github.com/filecoin-project/lotus/pull/5843)
+- Take latency into account when making interactive deals (https://github.com/filecoin-project/lotus/pull/5876)
+- Update go-commp-utils for >10x faster client commp calculation (https://github.com/filecoin-project/lotus/pull/5892)
+- add `lotus client cancel-retrieval` cmd to lotus CLI (https://github.com/filecoin-project/lotus/pull/5871)
+- add `inspect-deal` command to `lotus client` (https://github.com/filecoin-project/lotus/pull/5833)
+- Local retrieval support (https://github.com/filecoin-project/lotus/pull/5917)
+- go-fil-markets v1.1.9 -> v1.2.5
+ - For a detailed changelog see https://github.com/filecoin-project/go-fil-markets/blob/master/CHANGELOG.md
+- rust-fil-proofs v5.4.1 -> v7.0.1
+ - For a detailed changelog see https://github.com/filecoin-project/rust-fil-proofs/blob/master/CHANGELOG.md
+
+## Changes
+- storagefsm: Apply global events even in broken states (https://github.com/filecoin-project/lotus/pull/5962)
+- Default the AlwaysKeepUnsealedCopy flag to true (https://github.com/filecoin-project/lotus/pull/5743)
+- splitstore: compact hotstore prior to garbage collection (https://github.com/filecoin-project/lotus/pull/5778)
+- ipfs-force bootstrapper update (https://github.com/filecoin-project/lotus/pull/5799)
+- better logging when unsealing fails (https://github.com/filecoin-project/lotus/pull/5851)
+- perf: add cache for gas premium estimation (https://github.com/filecoin-project/lotus/pull/5709)
+- backupds: Compact log on restart (https://github.com/filecoin-project/lotus/pull/5875)
+- backupds: Improve truncated log handling (https://github.com/filecoin-project/lotus/pull/5891)
+- State CLI improvements (State CLI improvements)
+- API proxy struct codegen (https://github.com/filecoin-project/lotus/pull/5854)
+- move DI stuff for paychmgr into modules (https://github.com/filecoin-project/lotus/pull/5791)
+- Implement Event observer and Settings for 3rd party dep injection (https://github.com/filecoin-project/lotus/pull/5693)
+- Export developer and network commands for consumption by derivatives of Lotus (https://github.com/filecoin-project/lotus/pull/5864)
+- mock sealer: Simulate randomness sideeffects (https://github.com/filecoin-project/lotus/pull/5805)
+- localstorage: Demote reservation stat error to debug (https://github.com/filecoin-project/lotus/pull/5976)
+- shed command to unpack miner info dumps (https://github.com/filecoin-project/lotus/pull/5800)
+- Add two utils to Lotus-shed (https://github.com/filecoin-project/lotus/pull/5867)
+- add shed election estimate command (https://github.com/filecoin-project/lotus/pull/5092)
+- Add --actor flag in lotus-shed sectors terminate (https://github.com/filecoin-project/lotus/pull/5819)
+- Move lotus mpool clear to lotus-shed (https://github.com/filecoin-project/lotus/pull/5900)
+- Centralize everything on ipfs/go-log/v2 (https://github.com/filecoin-project/lotus/pull/5974)
+- expose NextID from nice market actor interface (https://github.com/filecoin-project/lotus/pull/5850)
+- add available options for perm on error (https://github.com/filecoin-project/lotus/pull/5814)
+- API docs clarification: Document StateSearchMsg replaced message behavior (https://github.com/filecoin-project/lotus/pull/5838)
+- api: Document StateReplay replaced message behavior (https://github.com/filecoin-project/lotus/pull/5840)
+- add godocs to miner objects (https://github.com/filecoin-project/lotus/pull/2184)
+- Add description to the client deal CLI command (https://github.com/filecoin-project/lotus/pull/5999)
+- lint: don't skip builtin (https://github.com/filecoin-project/lotus/pull/5881)
+- use deal duration from actors (https://github.com/filecoin-project/lotus/pull/5270)
+- remote calc winningpost proof (https://github.com/filecoin-project/lotus/pull/5884)
+- packer: other network images (https://github.com/filecoin-project/lotus/pull/5930)
+- Convert the chainstore lock to RW (https://github.com/filecoin-project/lotus/pull/5971)
+- Remove CachedBlockstore (https://github.com/filecoin-project/lotus/pull/5972)
+- remove messagepool CapGasFee duplicate code (https://github.com/filecoin-project/lotus/pull/5992)
+- Add a mining-heartbeat INFO line at every epoch (https://github.com/filecoin-project/lotus/pull/6183)
+- chore(ci): Enable build on RC tags (https://github.com/filecoin-project/lotus/pull/6245)
+- Upgrade nerpa to actor v4 and bump the version to rc4 (https://github.com/filecoin-project/lotus/pull/6249)
+## Fixes
+- return buffers after canceling badger operation (https://github.com/filecoin-project/lotus/pull/5796)
+- avoid holding a lock while calling the View callback (https://github.com/filecoin-project/lotus/pull/5792)
+- storagefsm: Trigger input processing when below limits (https://github.com/filecoin-project/lotus/pull/5801)
+- After importing a previously deleted key, be able to delete it again (https://github.com/filecoin-project/lotus/pull/4653)
+- fix StateManager.Replay on reward actor (https://github.com/filecoin-project/lotus/pull/5804)
+- make sure atomic 64bit fields are 64bit aligned (https://github.com/filecoin-project/lotus/pull/5794)
+- Import secp sigs in paych tests (https://github.com/filecoin-project/lotus/pull/5879)
+- fix ci build-macos (https://github.com/filecoin-project/lotus/pull/5934)
+- Fix creation of remainder account when it's not a multisig (https://github.com/filecoin-project/lotus/pull/5807)
+- Fix fallback chainstore (https://github.com/filecoin-project/lotus/pull/6003)
+- fix 4857: show help for set-addrs (https://github.com/filecoin-project/lotus/pull/5943)
+- fix health report (https://github.com/filecoin-project/lotus/pull/6011)
+- fix(ci): Use recent ubuntu LTS release; Update release params ((https://github.com/filecoin-project/lotus/pull/6011))
+
+# 1.8.0 / 2021-04-05
+
+This is a mandatory release of Lotus that upgrades the network to version 12, which introduces various performance improvements to the cron processing of the power actor. The network will upgrade at height 712320, which is 2021-04-29T06:00:00Z.
+
+## Changes
+
+- v4 specs-actors integration, nv12 migration (https://github.com/filecoin-project/lotus/pull/6116)
+
+# 1.6.0 / 2021-04-05
+
+This is a mandatory release of Lotus that upgrades the network to version 11, which implements [FIP-0014](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0014.md). The network will upgrade at height 665280, which is 2021-04-12T22:00:00Z.
+
+## v1 sector extension CLI
+
+This release also expands the `lotus-miner sectors extend` CLI, with a new option that automatically extends all extensible v1 sectors. The option can be run using `lotus-miner sectors extend --v1-sectors`.
+
+- The `tolerance` flag can be passed to indicate what durations aren't "worth" extending. It defaults to one week, which means that sectors whose current lifetime's are within one week of the maximum possible lifetime will not be extended.
+
+- The `expiration-cutoff` flag can be passed to skip sectors whose expiration is past a certain point from the current head. It defaults to infinity (no cutoff), but if, say, 28800 was specified, then only sectors expiring in the next 10 days would be extended (2880 epochs in 1 day).
+
+## Changes
+
+- Util for miners to extend all v1 sectors (https://github.com/filecoin-project/lotus/pull/5924)
+- Upgrade the butterfly network (https://github.com/filecoin-project/lotus/pull/5929)
+- Introduce the v11 network upgrade (https://github.com/filecoin-project/lotus/pull/5904)
+- Debug mode: Make upgrade heights controllable by an envvar (https://github.com/filecoin-project/lotus/pull/5919)
+
+# 1.5.3 / 2021-03-24
+
+This is a patch release of Lotus that introduces small fixes to the Storage FSM.
+
+## Changes
+
+- storagefsm: Fix double unlock with ready WaitDeals sectors (https://github.com/filecoin-project/lotus/pull/5783)
+- backupds: Allow larger values in write log (https://github.com/filecoin-project/lotus/pull/5776)
+- storagefsm: Don't log the SectorRestart event (https://github.com/filecoin-project/lotus/pull/5779)
+
+# 1.5.2 / 2021-03-11
+
+This is a hotfix release of Lotus that fixes a critical bug introduced in v1.5.1 in the miner windowPoSt logic. This upgrade only affects miner nodes.
+
+## Changes
+- fix window post rand check (https://github.com/filecoin-project/lotus/pull/5773)
+- wdpost: Always use head tipset to get randomness (https://github.com/filecoin-project/lotus/pull/5774)
+
+# 1.5.1 / 2021-03-10
+
+This is an optional release of Lotus that introduces an important fix to the WindowPoSt computation process. The change is to wait for some confidence before drawing beacon randomness for the proof. Without this, invalid proofs might be generated as the result of a null tipset.
+
+## Splitstore
+
+This release also introduces the splitstore, a new optional blockstore that segregates the monolithic blockstore into cold and hot regions. The hot region contains objects from the last 4-5 finalities plus all reachable objects from two finalities away. All other objects are moved to the cold region using a compaction process that executes every finality, once 5 finalities have elapsed.
+
+The splitstore allows us to separate the two regions quite effectively, using two separate badger blockstores. The separation
+means that the live working set is much smaller, which results in potentially significant performance improvements. In addition, it means that the coldstore can be moved to a separate (bigger, slower, cheaper) disk without loss of performance.
+
+The design also allows us to use different implementations for the two blockstores; for example, an append-only blockstore could be used for coldstore and a faster memory mapped blockstore could be used for the hotstore (eg LMDB). We plan to experiment with these options in the future.
+
+Once the splitstore has been enabled, the existing monolithic blockstore becomes the coldstore. On the first head change notification, the splitstore will warm up the hotstore by copying all reachable objects from the current tipset into the hotstore. All new writes go into the hotstore, with the splitstore tracking the write epoch. Once 5 finalities have elapsed, and every finality thereafter, the splitstore compacts by moving cold objects into the coldstore. There is also experimental support for garbage collection, whereby unreachable objects are simply discarded.
+
+To enable the splitstore, add the following to config.toml:
+
+```
+[Chainstore]
+ EnableSplitstore = true
+```
+
+## Highlights
+
+Other highlights include:
+
+- Improved deal data handling - now multiple deals can be added to sectors in parallel
+- Rewritten sector pledging - it now actually cares about max sealing sector limits
+- Better handling for sectors stuck in the RecoverDealIDs state
+- lotus-miner sectors extend command
+- Optional configurable storage path size limit
+- Config to disable owner/worker fallback from control addresses (useful when owner is a key on a hardware wallet)
+- A write log for node metadata, which can be restored as a backup when the metadata leveldb becomes corrupted (e.g. when you run out of disk space / system crashes in some bad way)
+
+## Changes
+
+- avoid use mp.cfg directly to avoid race (https://github.com/filecoin-project/lotus/pull/5350)
+- Show replacing message CID is state search-msg cli (https://github.com/filecoin-project/lotus/pull/5656)
+- Fix riceing by importing the main package (https://github.com/filecoin-project/lotus/pull/5675)
+- Remove sectors with all deals expired in RecoverDealIDs (https://github.com/filecoin-project/lotus/pull/5658)
+- storagefsm: Rewrite input handling (https://github.com/filecoin-project/lotus/pull/5375)
+- reintroduce Refactor send command for better testability (https://github.com/filecoin-project/lotus/pull/5668)
+- Improve error message with importing a chain (https://github.com/filecoin-project/lotus/pull/5669)
+- storagefsm: Cleanup CC sector creation (https://github.com/filecoin-project/lotus/pull/5612)
+- chain list --gas-stats display capacity (https://github.com/filecoin-project/lotus/pull/5676)
+- Correct some logs (https://github.com/filecoin-project/lotus/pull/5694)
+- refactor blockstores (https://github.com/filecoin-project/lotus/pull/5484)
+- Add idle to sync stage's String() (https://github.com/filecoin-project/lotus/pull/5702)
+- packer provisioner (https://github.com/filecoin-project/lotus/pull/5604)
+- add DeleteMany to Blockstore interface (https://github.com/filecoin-project/lotus/pull/5703)
+- segregate chain and state blockstores (https://github.com/filecoin-project/lotus/pull/5695)
+- fix(multisig): The format of the amount is not correct in msigLockApp (https://github.com/filecoin-project/lotus/pull/5718)
+- Update butterfly network (https://github.com/filecoin-project/lotus/pull/5627)
+- Collect worker task metrics (https://github.com/filecoin-project/lotus/pull/5648)
+- Correctly format disputer log (https://github.com/filecoin-project/lotus/pull/5716)
+- Log block CID in the large delay warning (https://github.com/filecoin-project/lotus/pull/5704)
+- Move api client builders to a cliutil package (https://github.com/filecoin-project/lotus/pull/5728)
+- Implement net peers --extended (https://github.com/filecoin-project/lotus/pull/5734)
+- Command to extend sector expiration (https://github.com/filecoin-project/lotus/pull/5666)
+- garbage collect hotstore after compaction (https://github.com/filecoin-project/lotus/pull/5744)
+- tune badger gc to repeatedly gc the value log until there is no rewrite (https://github.com/filecoin-project/lotus/pull/5745)
+- Add configuration option for pubsub IPColocationWhitelist subnets (https://github.com/filecoin-project/lotus/pull/5735)
+- hot/cold blockstore segregation (aka. splitstore) (https://github.com/filecoin-project/lotus/pull/4992)
+- Customize verifreg root key and remainder account when making genesis (https://github.com/filecoin-project/lotus/pull/5730)
+- chore: update go-graphsync to 0.6.0 (https://github.com/filecoin-project/lotus/pull/5746)
+- Add connmgr metadata to NetPeerInfo (https://github.com/filecoin-project/lotus/pull/5749)
+- test: attempt to make the splitstore test deterministic (https://github.com/filecoin-project/lotus/pull/5750)
+- Feat/api no dep build (https://github.com/filecoin-project/lotus/pull/5729)
+- Fix bootstrapper profile setting (https://github.com/filecoin-project/lotus/pull/5756)
+- Check liveness of sectors when processing termination batches (https://github.com/filecoin-project/lotus/pull/5759)
+- Configurable storage path storage limit (https://github.com/filecoin-project/lotus/pull/5624)
+- miner: Config to disable owner/worker address fallback (https://github.com/filecoin-project/lotus/pull/5620)
+- Fix TestUnpadReader on Go 1.16 (https://github.com/filecoin-project/lotus/pull/5761)
+- Metadata datastore log (https://github.com/filecoin-project/lotus/pull/5755)
+- Remove the SR2 stats, leave just the network totals (https://github.com/filecoin-project/lotus/pull/5757)
+- fix: wait a bit before starting to compute window post proofs (https://github.com/filecoin-project/lotus/pull/5764)
+- fix: retry proof when randomness changes (https://github.com/filecoin-project/lotus/pull/5768)
+
+
+# 1.5.0 / 2021-02-23
+
+This is a mandatory release of Lotus that introduces the fifth upgrade to the Filecoin network. The network upgrade occurs at height 550321, before which time all nodes must have updated to this release (or later). At this height, [v3 specs-actors](https://github.com/filecoin-project/specs-actors/releases/tag/v3.0.0) will take effect, which in turn implements the following two FIPs:
+
+- [FIP-0007 h/amt-v3](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0007.md) which improves the performance of the Filecoin HAMT and AMT.
+- [FIP-0010 off-chain Window PoSt Verification](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0010.md) which reduces the gas consumption of `SubmitWindowedPoSt` messages significantly by optimistically accepting Window PoSt proofs without verification, and allowing them to be disputed later by off-chain verifiers.
+
+Note that the integration of v3 actors was already completed in 1.4.2, this upgrade simply sets the epoch for the upgrade to occur.
+
+## Disputer
+
+FIP-0010 introduces the ability to dispute bad Window PoSts. Node operators are encouraged to run the new Lotus disputer alongside their Lotus daemons. For more information, see the announcement [here](https://github.com/filecoin-project/lotus/discussions/5617#discussioncomment-387333).
+
+## Changes
+
+- [#5341](https://github.com/filecoin-project/lotus/pull/5341) Add a `LOTUS_DISABLE_V3_ACTOR_MIGRATION` envvar
+ - Setting this envvar to 1 disables the v3 actor migration, should only be used in the event of a failed migration
+
+# 1.4.2 / 2021-02-17
+
+This is a large, and highly recommended, optional release with new features and improvements for lotus miner and deal-making UX. The release also integrates [v3 specs-actors](https://github.com/filecoin-project/specs-actors/releases/tag/v3.0.0), which implements two FIPs:
+
+- [FIP-0007 h/amt-v3](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0007.md) which improves the performance of the Filecoin HAMT and AMT.
+- [FIP-0010 off-chain Window PoSt Verification](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0010.md) which reduces the gas consumption of `SubmitWindowedPoSt` messages significantly by optimistically accepting Window PoSt proofs without verification, and allowing them to be disputed later by off-chain verifiers.
+
+Note that this release does NOT set an upgrade epoch for v3 actors to take effect. That will be done in the upcoming 1.5.0 release.
+
+## New Features
+
+- [#5341](https://github.com/filecoin-project/lotus/pull/5341) Added sector termination API and CLI
+ - Run `lotus-miner sectors terminate`
+- [#5342](https://github.com/filecoin-project/lotus/pull/5342) Added CLI for using a multisig wallet as miner's owner address
+ - See how to set it up [here](https://github.com/filecoin-project/lotus/pull/5342#issue-554009129)
+- [#5363](https://github.com/filecoin-project/lotus/pull/5363), [#5418](https://github.com/filecoin-project/lotus/pull/5418), [#5476](https://github.com/filecoin-project/lotus/pull/5476), [#5459](https://github.com/filecoin-project/lotus/pull/5459) Integrated [spec-actor v3](https://github.com/filecoin-project/specs-actors/releases/tag/v3.0.0)
+ - [#5472](https://github.com/filecoin-project/lotus/pull/5472) Generate actor v3 methods for pond
+- [#5379](https://github.com/filecoin-project/lotus/pull/5379) Added WindowPoSt disputer
+  - This is to support [FIP-0010 off-chain Window PoSt verification](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0010.md)
+ - See how to run a disputer [here](https://github.com/filecoin-project/lotus/pull/5379#issuecomment-776482445)
+- [#5309](https://github.com/filecoin-project/lotus/pull/5309) Batch multiple deals in one `PublishStorageMessages`
+ - [#5411](https://github.com/filecoin-project/lotus/pull/5411) Handle batch `PublishStorageDeals` message in sealing recovery
+ - [#5505](https://github.com/filecoin-project/lotus/pull/5505) Exclude expired deals from batching in `PublishStorageDeals` messages
+ - Added `PublishMsgPeriod` and `MaxDealsPerPublishMsg` to miner `Dealmaking` [configuration](https://docs.filecoin.io/mine/lotus/miner-configuration/#dealmaking-section). See how they work [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#publishing-several-deals-in-one-message).
+ - [#5538](https://github.com/filecoin-project/lotus/pull/5538), [#5549](https://github.com/filecoin-project/lotus/pull/5549) Added a command to list pending deals and force publish messages.
+ - Run `lotus-miner market pending-publish`
+ - [#5428](https://github.com/filecoin-project/lotus/pull/5428) Moved waiting for `PublishStorageDeals` messages' receipt from markets to lotus
+- [#5510](https://github.com/filecoin-project/lotus/pull/5510) Added `nerpanet` build option
+ - To build `nerpanet`, run `make nerpanet`
+- [#5433](https://github.com/filecoin-project/lotus/pull/5433) Added `AlwaysKeepUnsealedCopy` option to the miner configuration
+- [#5520](https://github.com/filecoin-project/lotus/pull/5520) Added `MsigGetPending` to get pending transactions for multisig wallets
+- [#5219](https://github.com/filecoin-project/lotus/pull/5219) Added interactive mode for lotus-wallet
+- [#5529](https://github.com/filecoin-project/lotus/pull/5529) Added support for miner nodes in `lotus-shed rpc` util
+
+## Bug Fixes
+
+- [#5210](https://github.com/filecoin-project/lotus/pull/5210) Miner should not dial client on restart
+- [#5403](https://github.com/filecoin-project/lotus/pull/5403) When estimating GasLimit only apply prior messages up to the nonce
+- [#5410](https://github.com/filecoin-project/lotus/pull/5410) Fix the calibnet build option
+- [#5492](https://github.com/filecoin-project/lotus/pull/5492) Fixed `has` for ipfsbstore for non-existing blocks
+- [#5361](https://github.com/filecoin-project/lotus/pull/5361) Fixed retrieval hangs when using `IpfsOnlineMode=true`
+- [#5493](https://github.com/filecoin-project/lotus/pull/5493) Fixed retrieval failure when price-per-byte is zero
+- [#5506](https://github.com/filecoin-project/lotus/pull/5506) Fixed contexts in the storage adapter
+- [#5515](https://github.com/filecoin-project/lotus/pull/5515) Properly wire up `StateReadState` on gateway API
+- [#5582](https://github.com/filecoin-project/lotus/pull/5582) Fixed error logging format strings
+- [#5614](https://github.com/filecoin-project/lotus/pull/5614) Fixed websocket reconnecting handling
+
+
+## Improvements
+
+- [#5389](https://github.com/filecoin-project/lotus/pull/5389) Show verified indicator for `./lotus-miner storage-deals list`
+- [#5229](https://github.com/filecoin-project/lotus/pull/5229) Show power for verified deals in `./lotus-miner sectors list`
+- [#5407](https://github.com/filecoin-project/lotus/pull/5407) Added explicit check of the miner address protocol
+- [#5399](https://github.com/filecoin-project/lotus/pull/5399) watchdog: increase heapprof capture threshold to 90%
+- [#5398](https://github.com/filecoin-project/lotus/pull/5398) storageadapter: Look at precommits on-chain since deal publish msg
+- [#5470](https://github.com/filecoin-project/lotus/pull/5470) Added `--no-timing` option for `./lotus state compute-state --html`
+- [#5417](https://github.com/filecoin-project/lotus/pull/5417) Storage Manager: Always unseal full sectors
+- [#5393](https://github.com/filecoin-project/lotus/pull/5393) Switched to [filecoin-ffi bls api ](https://github.com/filecoin-project/filecoin-ffi/pull/159)for bls signatures
+- [#5380](https://github.com/filecoin-project/lotus/pull/5380) Refactor deals API tests
+- [#5397](https://github.com/filecoin-project/lotus/pull/5397) Fixed a flake in the sync manager edge case test
+- [#5406](https://github.com/filecoin-project/lotus/pull/5406) Added a test to ensure a correct window post cannot be disputed
+- [#5394](https://github.com/filecoin-project/lotus/pull/5394) Added jobs to build Lotus docker image and push it to AWS ECR
+- [#5387](https://github.com/filecoin-project/lotus/pull/5387) Added network info(mainnet|calibnet) in version
+- [#5497](https://github.com/filecoin-project/lotus/pull/5497) Export metric for lotus-gateway
+- [#4950](https://github.com/filecoin-project/lotus/pull/4950) Removed bench policy
+- [#5047](https://github.com/filecoin-project/lotus/pull/5047) Improved the UX for `./lotus-shed bitfield enc`
+- [#5282](https://github.com/filecoin-project/lotus/pull/5282) Snake a context through the chain blockstore creation
+- [#5350](https://github.com/filecoin-project/lotus/pull/5350) Avoid using `mp.cfg` directly to prevent race condition
+- [#5449](https://github.com/filecoin-project/lotus/pull/5449) Documented the block-header better
+- [#5404](https://github.com/filecoin-project/lotus/pull/5404) Added retrying proofs if an incorrect one is generated
+- [#4545](https://github.com/filecoin-project/lotus/pull/4545) Made state tipset usage consistent in the API
+- [#5540](https://github.com/filecoin-project/lotus/pull/5540) Removed unnecessary database reads in validation check
+- [#5554](https://github.com/filecoin-project/lotus/pull/5554) Fixed `build lotus-soup` CI job
+- [#5552](https://github.com/filecoin-project/lotus/pull/5552) Updated CircleCI to halt gracefully
+- [#5555](https://github.com/filecoin-project/lotus/pull/5555) Cleanup and add docstrings of node builder
+- [#5564](https://github.com/filecoin-project/lotus/pull/5564) Stopped depending on gocheck with gomod
+- [#5574](https://github.com/filecoin-project/lotus/pull/5574) Updated CLI UI
+- [#5570](https://github.com/filecoin-project/lotus/pull/5570) Added code CID to `StateReadState` return object
+- [#5565](https://github.com/filecoin-project/lotus/pull/5565) Added storageadapter.PublishMsgConfig to miner in testkit for lotus-soup testplan
+- [#5571](https://github.com/filecoin-project/lotus/pull/5571) Added `lotus-seed genesis car` to generate lotus block for devnets
+- [#5613](https://github.com/filecoin-project/lotus/pull/5613) Check format in client commP util
+- [#5507](https://github.com/filecoin-project/lotus/pull/5507) Refactored coalescing logic into its own function and take both cancellation sets into account
+- [#5592](https://github.com/filecoin-project/lotus/pull/5592) Verify FFI version before building
+
+## Dependency Updates
+- [#5396](https://github.com/filecoin-project/lotus/pull/5396) Upgraded to [raulk/go-watchdog@v1.0.1](https://github.com/raulk/go-watchdog/releases/tag/v1.0.1)
+- [#5450](https://github.com/filecoin-project/lotus/pull/5450) Dependency updates
+- [#5425](https://github.com/filecoin-project/lotus/pull/5425) Fixed stale imports in testplans/lotus-soup
+- [#5535](https://github.com/filecoin-project/lotus/pull/5535) Updated to [go-fil-markets@v1.1.7](https://github.com/filecoin-project/go-fil-markets/releases/tag/v1.1.7)
+- [#5616](https://github.com/filecoin-project/lotus/pull/5600) Updated to [filecoin-ffi@b6e0b35fb49ed0fe](https://github.com/filecoin-project/filecoin-ffi/releases/tag/b6e0b35fb49ed0fe)
+- [#5599](https://github.com/filecoin-project/lotus/pull/5599) Updated to [go-bitfield@v0.2.4](https://github.com/filecoin-project/go-bitfield/releases/tag/v0.2.4)
+- [#5614](https://github.com/filecoin-project/lotus/pull/5614), [#5621](https://github.com/filecoin-project/lotus/pull/5621) Updated to [go-jsonrpc@v0.1.3](https://github.com/filecoin-project/go-jsonrpc/releases/tag/v0.1.3)
+- [#5459](https://github.com/filecoin-project/lotus/pull/5459) Updated to [spec-actors@v3.0.1](https://github.com/filecoin-project/specs-actors/releases/tag/v3.0.1)
+
+
+## Network Version v10 Upgrade
+- [#5473](https://github.com/filecoin-project/lotus/pull/5473) Merged staging branch for v1.5.0
+- [#5603](https://github.com/filecoin-project/lotus/pull/5603) Set nerpanet's upgrade epochs up to v3 actors
+- [#5471](https://github.com/filecoin-project/lotus/pull/5471), [#5456](https://github.com/filecoin-project/lotus/pull/5456) Set calibration net actor v3 migration epochs for testing
+- [#5434](https://github.com/filecoin-project/lotus/pull/5434) Implemented pre-migration framework
+- [#5477](https://github.com/filecoin-project/lotus/pull/5477) Tune migration
+
+# 1.4.1 / 2021-01-20
+
+This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes. In particular, [#5341](https://github.com/filecoin-project/lotus/pull/5341) introduces the ability for Lotus miners to terminate sectors.
+
+## Changes
+
+#### Core Lotus
+
+- fix(sync): enforce ForkLengthThreshold for synced chain (https://github.com/filecoin-project/lotus/pull/5182)
+- introduce memory watchdog; LOTUS_MAX_HEAP (https://github.com/filecoin-project/lotus/pull/5101)
+- Skip bootstrapping if no peers specified (https://github.com/filecoin-project/lotus/pull/5301)
+- Chainxchg write response timeout (https://github.com/filecoin-project/lotus/pull/5254)
+- update NewestNetworkVersion (https://github.com/filecoin-project/lotus/pull/5277)
+- fix(sync): remove checks bypass when we submit the block (https://github.com/filecoin-project/lotus/pull/4192)
+- chore: export vm.ShouldBurn (https://github.com/filecoin-project/lotus/pull/5355)
+- fix(sync): enforce fork len when changing head (https://github.com/filecoin-project/lotus/pull/5244)
+- Use 55th percentile instead of median for gas-price (https://github.com/filecoin-project/lotus/pull/5369)
+- update go-libp2p-pubsub to v0.4.1 (https://github.com/filecoin-project/lotus/pull/5329)
+
+#### Sealing
+
+- Sector termination support (https://github.com/filecoin-project/lotus/pull/5341)
+- update weight canSeal and canStore when attach (https://github.com/filecoin-project/lotus/pull/5242/files)
+- sector-storage/mock: improve mocked readpiece (https://github.com/filecoin-project/lotus/pull/5208)
+- Fix deadlock in runWorker in sched_worker.go (https://github.com/filecoin-project/lotus/pull/5251)
+- Skip checking terminated sectors provable (https://github.com/filecoin-project/lotus/pull/5217)
+- storagefsm: Fix unsealedInfoMap.lk init race (https://github.com/filecoin-project/lotus/pull/5319)
+- Multicore AddPiece CommP (https://github.com/filecoin-project/lotus/pull/5320)
+- storagefsm: Send correct event on ErrExpiredTicket in CommitFailed (https://github.com/filecoin-project/lotus/pull/5366)
+- expose StateSearchMessage on gateway (https://github.com/filecoin-project/lotus/pull/5382)
+- fix FileSize to return correct disk usage recursively (https://github.com/filecoin-project/lotus/pull/5384)
+
+#### Dealmaking
+
+- Better error message when withdrawing funds (https://github.com/filecoin-project/lotus/pull/5293)
+- add verbose for list transfers (https://github.com/filecoin-project/lotus/pull/5259)
+- cli - rename `client info` to `client balances` (https://github.com/filecoin-project/lotus/pull/5304)
+- Better CLI for wallet market withdraw and client info (https://github.com/filecoin-project/lotus/pull/5303)
+
+#### UX
+
+- correct flag usages for replace cmd (https://github.com/filecoin-project/lotus/pull/5255)
+- lotus state call will panic (https://github.com/filecoin-project/lotus/pull/5275)
+- fix get sector bug (https://github.com/filecoin-project/lotus/pull/4976)
+- feat: lotus wallet market add (adds funds to storage market actor) (https://github.com/filecoin-project/lotus/pull/5300)
+- Fix client flag parsing in client balances cli (https://github.com/filecoin-project/lotus/pull/5312)
+- delete slash-consensus miner (https://github.com/filecoin-project/lotus/pull/4577)
+- add fund sufficient check in send (https://github.com/filecoin-project/lotus/pull/5252)
+- enable parse and shorten negative FIL values (https://github.com/filecoin-project/lotus/pull/5315)
+- add limit and rate for chain noise (https://github.com/filecoin-project/lotus/pull/5223)
+- add bench env print (https://github.com/filecoin-project/lotus/pull/5222)
+- Implement full-node restore option (https://github.com/filecoin-project/lotus/pull/5362)
+- add color for token amount (https://github.com/filecoin-project/lotus/pull/5352)
+- correct log in maybeUseAddress (https://github.com/filecoin-project/lotus/pull/5359)
+- add slash-consensus from flag (https://github.com/filecoin-project/lotus/pull/5378)
+
+#### Testing
+
+- tvx extract: more tipset extraction goodness (https://github.com/filecoin-project/lotus/pull/5258)
+- Fix race in blockstore test suite (https://github.com/filecoin-project/lotus/pull/5297)
+
+
+#### Build & Networks
+
+- Remove LOTUS_DISABLE_V2_ACTOR_MIGRATION envvar (https://github.com/filecoin-project/lotus/pull/5289)
+- Create a calibnet build option (https://github.com/filecoin-project/lotus/pull/5288)
+- Calibnet: Set Orange epoch (https://github.com/filecoin-project/lotus/pull/5325)
+
+#### Management
+
+- Update SECURITY.md (https://github.com/filecoin-project/lotus/pull/5246)
+- README: Contribute section (https://github.com/filecoin-project/lotus/pull/5330)
+- README: refine Contribute section (https://github.com/filecoin-project/lotus/pull/5331)
+- Add misc tooling to codecov ignore list (https://github.com/filecoin-project/lotus/pull/5347)
+
+# 1.4.0 / 2020-12-19
+
+This is a MANDATORY hotfix release of Lotus that resolves a chain halt at height 336,459 caused by nondeterminism in specs-actors. The fix is to update actors to 2.3.3 in order to incorporate this fix https://github.com/filecoin-project/specs-actors/pull/1334.
+
+# 1.3.0 / 2020-12-16
+
+This is a mandatory release of Lotus that introduces the third post-liftoff upgrade to the Filecoin network. The network upgrade occurs at height 343200, before which time all nodes must have updated to this release (or later). The change that breaks consensus is an implementation of [FIP-0009](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0009.md).
+
+## Changes
+
+- Disable gas burning for window post messages (https://github.com/filecoin-project/lotus/pull/5200)
+- fix lock propose (https://github.com/filecoin-project/lotus/pull/5197)
+
+# 1.2.3 / 2020-12-15
+
+This is an optional Lotus release that introduces many performance improvements, bugfixes, and UX improvements.
+
+## Changes
+
+- When waiting for deal commit messages, ignore unsuccessful messages (https://github.com/filecoin-project/lotus/pull/5189)
+- Bigger copy buffer size for stores (https://github.com/filecoin-project/lotus/pull/5177)
+- Print MinPieceSize when querying ask (https://github.com/filecoin-project/lotus/pull/5178)
+- Optimize miner info & sectors list loading (https://github.com/filecoin-project/lotus/pull/5176)
+- Allow miners to filter (un)verified deals (https://github.com/filecoin-project/lotus/pull/5094)
+- Fix curSealing out of MaxSealingSectors limit (https://github.com/filecoin-project/lotus/pull/5166)
+- Add mpool pending from / to filter (https://github.com/filecoin-project/lotus/pull/5169)
+- Add metrics for delayed blocks (https://github.com/filecoin-project/lotus/pull/5171)
+- Fix PushUntrusted publishing -- the message is local (https://github.com/filecoin-project/lotus/pull/5173)
+- Avoid potential hang in events API when starting event listener (https://github.com/filecoin-project/lotus/pull/5159)
+- Show data transfer ID in list-deals (https://github.com/filecoin-project/lotus/pull/5150)
+- Fix events API mutex locking (https://github.com/filecoin-project/lotus/pull/5160)
+- Message pool refactors (https://github.com/filecoin-project/lotus/pull/5162)
+- Fix lotus-shed cid output (https://github.com/filecoin-project/lotus/pull/5072)
+- Use FundManager to withdraw funds, add MarketWithdraw API (https://github.com/filecoin-project/lotus/pull/5112)
+- Add keygen outfile (https://github.com/filecoin-project/lotus/pull/5118)
+- Update sr2 stat aggregation (https://github.com/filecoin-project/lotus/pull/5114)
+- Fix miner control address lookup (https://github.com/filecoin-project/lotus/pull/5119)
+- Fix send with declared nonce 0 (https://github.com/filecoin-project/lotus/pull/5111)
+- Introduce memory watchdog; LOTUS_MAX_HEAP (https://github.com/filecoin-project/lotus/pull/5101)
+- Miner control address config for (pre)commits (https://github.com/filecoin-project/lotus/pull/5103)
+- Delete repeated call func (https://github.com/filecoin-project/lotus/pull/5099)
+- lotus-shed ledger show command (https://github.com/filecoin-project/lotus/pull/5098)
+- Log a message when there aren't enough peers for sync (https://github.com/filecoin-project/lotus/pull/5105)
+- Miner code cleanup (https://github.com/filecoin-project/lotus/pull/5107)
+
+# 1.2.2 / 2020-12-03
+
+This is an optional Lotus release that introduces various improvements to the mining logic and deal-making workflow, as well as several new UX features.
+
+## Changes
+
+- Set lower feecap on PoSt messages with low balance (https://github.com/filecoin-project/lotus/pull/4217)
+- Add options to set BlockProfileRate and MutexProfileFraction (https://github.com/filecoin-project/lotus/pull/4140)
+- Shed/post find (https://github.com/filecoin-project/lotus/pull/4355)
+- tvx extract: make it work with secp messages.(https://github.com/filecoin-project/lotus/pull/4583)
+- update go from 1.14 to 1.15 (https://github.com/filecoin-project/lotus/pull/4909)
+- print multiple blocks from miner cid (https://github.com/filecoin-project/lotus/pull/4767)
+- Connection Gater support (https://github.com/filecoin-project/lotus/pull/4849)
+- just return storedask.NewStoredAsk to reduce unuseful code (https://github.com/filecoin-project/lotus/pull/4902)
+- add go main version (https://github.com/filecoin-project/lotus/pull/4910)
+- Use version0 when pre-sealing (https://github.com/filecoin-project/lotus/pull/4911)
+- optimize code UpgradeTapeHeight and go fmt (https://github.com/filecoin-project/lotus/pull/4913)
+- CLI to get network version (https://github.com/filecoin-project/lotus/pull/4914)
+- Improve error for ActorsVersionPredicate (https://github.com/filecoin-project/lotus/pull/4915)
+- upgrade to go-fil-markets 1.0.5 (https://github.com/filecoin-project/lotus/pull/4916)
+- bug:replace with func recordFailure (https://github.com/filecoin-project/lotus/pull/4919)
+- Remove unused key (https://github.com/filecoin-project/lotus/pull/4924)
+- change typeV7 make len (https://github.com/filecoin-project/lotus/pull/4943)
+- emit events for peer disconnections and act upon them in the blocksync tracker (https://github.com/filecoin-project/lotus/pull/4754)
+- Fix lotus bench error (https://github.com/filecoin-project/lotus/pull/4305)
+- Reduce badger ValueTreshold to 128 (https://github.com/filecoin-project/lotus/pull/4629)
+- Downgrade duplicate nonce logs to debug (https://github.com/filecoin-project/lotus/pull/4933)
+- readme update golang version from 1.14.7 to 1.15.5 (https://github.com/filecoin-project/lotus/pull/4974)
+- add data transfer logging (https://github.com/filecoin-project/lotus/pull/4975)
+- Remove all temp file generation for deals (https://github.com/filecoin-project/lotus/pull/4929)
+- fix get sector bug (https://github.com/filecoin-project/lotus/pull/4976)
+- fix nil pointer in StateSectorPreCommitInfo (https://github.com/filecoin-project/lotus/pull/4082)
+- Add logging on data-transfer to miner (https://github.com/filecoin-project/lotus/pull/4980)
+- bugfix: fixup devnet script (https://github.com/filecoin-project/lotus/pull/4956)
+- modify for unsafe (https://github.com/filecoin-project/lotus/pull/4024)
+- move testground/lotus-soup testplan from oni to lotus (https://github.com/filecoin-project/lotus/pull/4727)
+- Setup remainder msig signers when parsing genesis template (https://github.com/filecoin-project/lotus/pull/4904)
+- Update JSON RPC server to enforce a maximum request size (https://github.com/filecoin-project/lotus/pull/4923)
+- New SR-specific lotus-shed cmd (https://github.com/filecoin-project/lotus/pull/4971)
+- update index to sectorNumber (https://github.com/filecoin-project/lotus/pull/4987)
+- storagefsm: Fix expired ticket retry loop (https://github.com/filecoin-project/lotus/pull/4876)
+- add .sec scale to measurements; humanize for metric tags (https://github.com/filecoin-project/lotus/pull/4989)
+- Support seal proof type switching (https://github.com/filecoin-project/lotus/pull/4873)
+- fix log format (https://github.com/filecoin-project/lotus/pull/4984)
+- Format workerID as string (https://github.com/filecoin-project/lotus/pull/4973)
+- miner: Winning PoSt Warmup (https://github.com/filecoin-project/lotus/pull/4824)
+- Default StartDealParams's fast retrieval field to true over JSON (https://github.com/filecoin-project/lotus/pull/4998)
+- Fix actor not found in chain inspect-usage (https://github.com/filecoin-project/lotus/pull/5010)
+- storagefsm: Improve new deal sector logic (https://github.com/filecoin-project/lotus/pull/5007)
+- Configure simultaneous requests (https://github.com/filecoin-project/lotus/pull/4996)
+- miner: log winningPoSt duration separately (https://github.com/filecoin-project/lotus/pull/5005)
+- fix wallet dead lock (https://github.com/filecoin-project/lotus/pull/5002)
+- Update go-jsonrpc to v0.1.2 (https://github.com/filecoin-project/lotus/pull/5015)
+- markets - separate watching for pre-commit from prove-commit (https://github.com/filecoin-project/lotus/pull/4945)
+- storagefsm: Add missing planners (https://github.com/filecoin-project/lotus/pull/5016)
+- fix wallet delete address where address is default (https://github.com/filecoin-project/lotus/pull/5019)
+- worker: More robust remote checks (https://github.com/filecoin-project/lotus/pull/5008)
+- Add new bootstrappers (https://github.com/filecoin-project/lotus/pull/4007)
+- add a tooling to make filecoin accounting a little easier (https://github.com/filecoin-project/lotus/pull/5025)
+- fix: start a new line in print miner-info to avoid ambiguous display (https://github.com/filecoin-project/lotus/pull/5029)
+- Print gas limit sum in mpool stat (https://github.com/filecoin-project/lotus/pull/5035)
+- Fix chainstore tipset leak (https://github.com/filecoin-project/lotus/pull/5037)
+- shed rpc: Allow calling with args (https://github.com/filecoin-project/lotus/pull/5036)
+- Make --gas-limit optional in mpool replace cli (https://github.com/filecoin-project/lotus/pull/5059)
+- client list-asks --by-ping (https://github.com/filecoin-project/lotus/pull/5060)
+- Ledger signature verification (https://github.com/filecoin-project/lotus/pull/5068)
+- Fix helptext for verified-deal default in client deal (https://github.com/filecoin-project/lotus/pull/5074)
+- worker: Support setting task types at runtime (https://github.com/filecoin-project/lotus/pull/5023)
+- Enable Callers tracing when GasTracing is enabled (https://github.com/filecoin-project/lotus/pull/5080)
+- Cancel transfer cancels storage deal (https://github.com/filecoin-project/lotus/pull/5032)
+- Sector check command (https://github.com/filecoin-project/lotus/pull/5041)
+- add commp-to-cid base64 decode (https://github.com/filecoin-project/lotus/pull/5079)
+- miner info cli improvements (https://github.com/filecoin-project/lotus/pull/5083)
+- miner: Add slow mode to proving check (https://github.com/filecoin-project/lotus/pull/5086)
+- Error out deals that are not activated by proposed deal start epoch (https://github.com/filecoin-project/lotus/pull/5061)
+
+# 1.2.1 / 2020-11-20
+
+This is a very small release of Lotus that fixes an issue users are experiencing when importing snapshots. There is no need to upgrade unless you experience an issue with creating a new datastore directory in the Lotus repo.
+
+## Changes
+
+- fix blockstore directory not created automatically (https://github.com/filecoin-project/lotus/pull/4922)
+- WindowPoStScheduler.checkSectors() delete useless judgment (https://github.com/filecoin-project/lotus/pull/4918)
+
+
+# 1.2.0 / 2020-11-18
+
+This is a mandatory release of Lotus that introduces the second post-liftoff upgrade to the Filecoin network. The network upgrade occurs at height 265200, before which time all nodes must have updated to this release (or later). This release also bumps the required version of Go to 1.15.
+
+The changes that break consensus are:
+
+- Upgrading to specs-actors 2.3.2 (https://github.com/filecoin-project/specs-actors/releases/tag/v2.3.2)
+- Introducing proofs v5.4.0 (https://github.com/filecoin-project/rust-fil-proofs/releases/tag/storage-proofs-v5.4.0), and switching between the proof types (https://github.com/filecoin-project/lotus/pull/4873)
+- Don't use terminated sectors for winning PoSt (https://github.com/filecoin-project/lotus/pull/4770)
+- Various small VM-level edge-case handling (https://github.com/filecoin-project/lotus/pull/4783)
+- Correction of the VM circulating supply calculation (https://github.com/filecoin-project/lotus/pull/4862)
+- Retuning gas costs (https://github.com/filecoin-project/lotus/pull/4830)
+- Avoid sending messages to the zero BLS address (https://github.com/filecoin-project/lotus/pull/4888)
+
+## Other Changes
+
+- delayed pubsub subscribe for messages topic (https://github.com/filecoin-project/lotus/pull/3646)
+- add chain base64 decode params (https://github.com/filecoin-project/lotus/pull/4748)
+- chore(dep): update bitswap to fix an initialization race that could panic (https://github.com/filecoin-project/lotus/pull/4855)
+- Chore/blockstore nits (https://github.com/filecoin-project/lotus/pull/4813)
+- Print Consensus Faults in miner info (https://github.com/filecoin-project/lotus/pull/4853)
+- Truncate genesis file before generating (https://github.com/filecoin-project/lotus/pull/4851)
+- miner: Winning PoSt Warmup (https://github.com/filecoin-project/lotus/pull/4824)
+- Fix init actor address map diffing (https://github.com/filecoin-project/lotus/pull/4875)
+- Bump API versions to 1.0.0 (https://github.com/filecoin-project/lotus/pull/4884)
+- Fix cid recording issue (https://github.com/filecoin-project/lotus/pull/4874)
+- Speed up worker key retrieval (https://github.com/filecoin-project/lotus/pull/4885)
+- Add error codes to worker return (https://github.com/filecoin-project/lotus/pull/4890)
+- Update go to 1.15.5 (https://github.com/filecoin-project/lotus/pull/4896)
+- Fix MaxSealingSectorsForDeals getting reset to 0 (https://github.com/filecoin-project/lotus/pull/4879)
+- add sanity check for maximum block size (https://github.com/filecoin-project/lotus/pull/3171)
+- Check (pre)commit receipt before other checks in failed states (https://github.com/filecoin-project/lotus/pull/4712)
+- fix badger double open on daemon --import-snapshot; chainstore lifecycle (https://github.com/filecoin-project/lotus/pull/4872)
+- Update to ipfs-blockstore 1.0.3 (https://github.com/filecoin-project/lotus/pull/4897)
+- break loop when found warm up sector (https://github.com/filecoin-project/lotus/pull/4869)
+- Tweak handling of bad beneficiaries in DeleteActor (https://github.com/filecoin-project/lotus/pull/4903)
+- cap maximum number of messages per block in selection (https://github.com/filecoin-project/lotus/pull/4905)
+- Set Calico epoch (https://github.com/filecoin-project/lotus/pull/4889)
+
+# 1.1.3 / 2020-11-13
+
+This is an optional release of Lotus that upgrades Lotus dependencies, and includes many performance enhancements, bugfixes, and UX improvements.
+
+## Highlights
+
+- Refactored much of the miner code (https://github.com/filecoin-project/lotus/pull/3618), improving its recovery from restarts and overall sector success rate
+- Updated [proofs](https://github.com/filecoin-project/rust-fil-proofs) to v5.3.0, which brings significant performance improvements
+- Updated [markets](https://github.com/filecoin-project/go-fil-markets/releases/tag/v1.0.4) to v1.0.4, which reduces failures due to reorgs (https://github.com/filecoin-project/lotus/pull/4730) and uses the newly refactored fund manager (https://github.com/filecoin-project/lotus/pull/4736)
+
+## Changes
+
+#### Core Lotus
+
+- polish: add Equals method to MinerInfo shim (https://github.com/filecoin-project/lotus/pull/4604)
+- Fix messagepool accounting (https://github.com/filecoin-project/lotus/pull/4668)
+- Prep for gas balancing (https://github.com/filecoin-project/lotus/pull/4651)
+- Reduce badger ValueThreshold to 128 (https://github.com/filecoin-project/lotus/pull/4629)
+- Config for default max gas fee (https://github.com/filecoin-project/lotus/pull/4652)
+- bootstrap: don't return early when one drand resolution fails (https://github.com/filecoin-project/lotus/pull/4626)
+- polish: add ClaimsChanged and DiffClaims method to power shim (https://github.com/filecoin-project/lotus/pull/4628)
+- Simplify chain event Called API (https://github.com/filecoin-project/lotus/pull/4664)
+- Cache deal states for most recent old/new tipset (https://github.com/filecoin-project/lotus/pull/4623)
+- Add miner available balance and power info to state miner info (https://github.com/filecoin-project/lotus/pull/4618)
+- Call GetHeaviestTipSet() only once when syncing (https://github.com/filecoin-project/lotus/pull/4696)
+- modify runtime gasUsed printf (https://github.com/filecoin-project/lotus/pull/4704)
+- Rename builtin actor generators (https://github.com/filecoin-project/lotus/pull/4697)
+- Move gas multiplier as property of pricelist (https://github.com/filecoin-project/lotus/pull/4728)
+- polish: add msig pendingtxn diffing and comp (https://github.com/filecoin-project/lotus/pull/4719)
+- Optional chain Bitswap (https://github.com/filecoin-project/lotus/pull/4717)
+- rewrite sync manager (https://github.com/filecoin-project/lotus/pull/4599)
+- async connect to bootstrappers (https://github.com/filecoin-project/lotus/pull/4785)
+- head change coalescer (https://github.com/filecoin-project/lotus/pull/4688)
+- move to native badger blockstore; leverage zero-copy View() to deserialize in-place (https://github.com/filecoin-project/lotus/pull/4681)
+- badger blockstore: minor improvements (https://github.com/filecoin-project/lotus/pull/4811)
+- Do not fail wallet delete because of pre-existing trashed key (https://github.com/filecoin-project/lotus/pull/4589)
+- Correctly delete the default wallet address (https://github.com/filecoin-project/lotus/pull/4705)
+- Reduce badger ValueThreshold to 128 (https://github.com/filecoin-project/lotus/pull/4629)
+- predicates: Fast StateGetActor wrapper (https://github.com/filecoin-project/lotus/pull/4835)
+
+#### Mining
+
+- worker key should change when set sender found key not equal with the value on chain (https://github.com/filecoin-project/lotus/pull/4595)
+- extern/sector-storage: fix GPU usage overwrite bug (https://github.com/filecoin-project/lotus/pull/4627)
+- sectorstorage: Fix manager restart edge-case (https://github.com/filecoin-project/lotus/pull/4645)
+- storagefsm: Fix GetTicket loop when the sector is already precommitted (https://github.com/filecoin-project/lotus/pull/4643)
+- Debug flag to force running sealing scheduler (https://github.com/filecoin-project/lotus/pull/4662)
+- Fix worker reenabling, handle multiple restarts in worker (https://github.com/filecoin-project/lotus/pull/4666)
+- keep retrying the proof until we run out of sectors to skip (https://github.com/filecoin-project/lotus/pull/4633)
+- worker: Commands to pause/resume task processing (https://github.com/filecoin-project/lotus/pull/4615)
+- struct name incorrect (https://github.com/filecoin-project/lotus/pull/4699)
+- optimize code replace strings with constants (https://github.com/filecoin-project/lotus/pull/4769)
+- optimize pledge sector (https://github.com/filecoin-project/lotus/pull/4765)
+- Track sealing processes across lotus-miner restarts (https://github.com/filecoin-project/lotus/pull/3618)
+- Fix scheduler lockups after storage is freed (https://github.com/filecoin-project/lotus/pull/4778)
+- storage: Track worker hostnames with work (https://github.com/filecoin-project/lotus/pull/4779)
+- Expand sched-diag; Command to abort sealing calls (https://github.com/filecoin-project/lotus/pull/4804)
+- miner: Winning PoSt Warmup (https://github.com/filecoin-project/lotus/pull/4824)
+- docsgen: Support miner/worker (https://github.com/filecoin-project/lotus/pull/4817)
+- miner: Basic storage cleanup command (https://github.com/filecoin-project/lotus/pull/4834)
+
+#### Markets and Data Transfer
+
+- Flesh out data transfer features (https://github.com/filecoin-project/lotus/pull/4572)
+- Fix memory leaks in data transfer (https://github.com/filecoin-project/lotus/pull/4619)
+- Handle deal id changes in OnDealSectorCommitted (https://github.com/filecoin-project/lotus/pull/4730)
+- Refactor FundManager (https://github.com/filecoin-project/lotus/pull/4736)
+- refactor: integrate new FundManager (https://github.com/filecoin-project/lotus/pull/4787)
+- Fix race in paych manager when req context is cancelled (https://github.com/filecoin-project/lotus/pull/4803)
+- fix race in paych manager add funds (https://github.com/filecoin-project/lotus/pull/4597)
+- Fix panic in FundManager (https://github.com/filecoin-project/lotus/pull/4808)
+- Fix: dont crash on startup if funds migration fails (https://github.com/filecoin-project/lotus/pull/4827)
+
+#### UX
+
+- Make EarlyExpiration in sectors list less scary (https://github.com/filecoin-project/lotus/pull/4600)
+- Add commands to change the worker key (https://github.com/filecoin-project/lotus/pull/4513)
+- Expose ClientDealSize via CLI (https://github.com/filecoin-project/lotus/pull/4569)
+- client deal: Cache CommD when creating multiple deals (https://github.com/filecoin-project/lotus/pull/4535)
+- miner sectors list: flags for events/seal time (https://github.com/filecoin-project/lotus/pull/4649)
+- make IPFS online mode configurable (https://github.com/filecoin-project/lotus/pull/4650)
+- Add sync status to miner info command (https://github.com/filecoin-project/lotus/pull/4669)
+- Add a StateDecodeParams method (https://github.com/filecoin-project/lotus/pull/4105)
+- sched: Interactive RPC Shell (https://github.com/filecoin-project/lotus/pull/4692)
+- Add api for getting status given a code (https://github.com/filecoin-project/lotus/pull/4210)
+- Update lotus-stats with a richer cli (https://github.com/filecoin-project/lotus/pull/4718)
+- Use TSK passed to GasEstimateGasLimit (https://github.com/filecoin-project/lotus/pull/4739)
+- match data type for reward state api (https://github.com/filecoin-project/lotus/pull/4745)
+- Add `termination-estimate` to get an estimation for how much a termination penalty will be (https://github.com/filecoin-project/lotus/pull/4617)
+- Restrict `ParseFIL` input length (https://github.com/filecoin-project/lotus/pull/4780)
+- cmd sectors commitIDs len debug (https://github.com/filecoin-project/lotus/pull/4786)
+- Add client deal-stats CLI (https://github.com/filecoin-project/lotus/pull/4788)
+- Modify printf format (https://github.com/filecoin-project/lotus/pull/4795)
+- Updated msig inspect (https://github.com/filecoin-project/lotus/pull/4533)
+- Delete the duplicate output (https://github.com/filecoin-project/lotus/pull/4819)
+- miner: Storage list sectors command (https://github.com/filecoin-project/lotus/pull/4831)
+- drop a few logs down to debug (https://github.com/filecoin-project/lotus/pull/4832)
+
+#### Testing and Tooling
+
+- refactor: share code between CLI tests (https://github.com/filecoin-project/lotus/pull/4598)
+- Fix flaky TestCLIDealFlow (https://github.com/filecoin-project/lotus/pull/4608)
+- Fix flaky testMiningReal (https://github.com/filecoin-project/lotus/pull/4609)
+- Add election run-dummy command (https://github.com/filecoin-project/lotus/pull/4498)
+- Fix .gitmodules (https://github.com/filecoin-project/lotus/pull/4713)
+- fix metrics wiring.(https://github.com/filecoin-project/lotus/pull/4691)
+- shed: Util for creating ID CIDs (https://github.com/filecoin-project/lotus/pull/4726)
+- Run kumquat upgrade on devnets (https://github.com/filecoin-project/lotus/pull/4734)
+- Make pond work again (https://github.com/filecoin-project/lotus/pull/4775)
+- lotus-stats: fix influx flags (https://github.com/filecoin-project/lotus/pull/4810)
+- 2k sync BootstrapPeerThreshold (https://github.com/filecoin-project/lotus/pull/4797)
+- test for FundManager panic to ensure it is fixed (https://github.com/filecoin-project/lotus/pull/4825)
+- Stop mining at the end of tests (https://github.com/filecoin-project/lotus/pull/4826)
+- Make some logs quieter (https://github.com/filecoin-project/lotus/pull/4709)
+
+#### Dependencies
+
+- update filecoin-ffi in go mod (https://github.com/filecoin-project/lotus/pull/4584)
+- Update FFI (https://github.com/filecoin-project/lotus/pull/4613)
+- feat: integrate new optional blst backend and verification optimizations from proofs (https://github.com/filecoin-project/lotus/pull/4630)
+- Use https for blst submodule (https://github.com/filecoin-project/lotus/pull/4710)
+- Update go-bitfield (https://github.com/filecoin-project/lotus/pull/4756)
+- Update Yamux (https://github.com/filecoin-project/lotus/pull/4758)
+- Update to latest go-bitfield (https://github.com/filecoin-project/lotus/pull/4793)
+- Update to latest go-address (https://github.com/filecoin-project/lotus/pull/4798)
+- update libp2p for stream interface changes (https://github.com/filecoin-project/lotus/pull/4814)
+
# 1.1.2 / 2020-10-24
This is a patch release of Lotus that builds on the fixes involving worker keys that was introduced in v1.1.1. Miners and node operators should update to this release as soon as possible in order to ensure their blocks are propagated and validated.
-## Changes
+## Changes
- Handle worker key changes correctly in runtime (https://github.com/filecoin-project/lotus/pull/4579)
@@ -247,7 +1104,7 @@ This consensus-breaking release of Lotus upgrades the actors version to v2.0.0.
- Fix pond (https://github.com/filecoin-project/lotus/pull/4203)
- allow manual setting of noncefix fee cap (https://github.com/filecoin-project/lotus/pull/4205)
- implement command to get execution traces of any message (https://github.com/filecoin-project/lotus/pull/4200)
-- conformance: minor driver refactors (https://github.com/filecoin-project/lotus/pull/4211)
+- conformance: minor driver refactors (https://github.com/filecoin-project/lotus/pull/4211)
- lotus-pcr: ignore all other messages (https://github.com/filecoin-project/lotus/pull/4218)
- lotus-pcr: zero refund (https://github.com/filecoin-project/lotus/pull/4229)
@@ -274,7 +1131,7 @@ We are grateful for every contribution!
This optional release of Lotus introduces a new version of markets which switches to CBOR-map encodings, and allows datastore migrations. The release also introduces several improvements to the mining process, a few performance optimizations, and a battery of UX additions and enhancements.
-## Changes
+## Changes
#### Dependencies
@@ -345,7 +1202,7 @@ This consensus-breaking release of Lotus introduces an upgrade to the network. T
This release also updates go-fil-markets to fix an incompatibility issue between v0.7.2 and earlier versions.
-## Changes
+## Changes
#### Dependencies
@@ -434,7 +1291,7 @@ This optional release of Lotus introduces some critical fixes to the window PoSt
## Changes
-#### Some notable improvements:
+#### Some notable improvements:
- Correctly construct params for `SubmitWindowedPoSt` messages (https://github.com/filecoin-project/lotus/pull/3909)
- Skip sectors correctly for Window PoSt (https://github.com/filecoin-project/lotus/pull/3839)
@@ -470,7 +1327,7 @@ This consensus-breaking release of Lotus is designed to test a network upgrade o
- Drand upgrade (https://github.com/filecoin-project/lotus/pull/3670)
- Multisig API additions (https://github.com/filecoin-project/lotus/pull/3590)
-#### Storage Miner
+#### Storage Miner
- Increase the number of times precommit2 is attempted before moving back to precommit1 (https://github.com/filecoin-project/lotus/pull/3720)
@@ -513,7 +1370,7 @@ This release introduces some critical fixes to message selection and gas estimat
## Changes
-#### Messagepool
+#### Messagepool
- Warn when optimal selection fails to pack a block and we fall back to random selection (https://github.com/filecoin-project/lotus/pull/3708)
- Add basic command for printing gas performance of messages in the mpool (https://github.com/filecoin-project/lotus/pull/3701)
@@ -583,7 +1440,7 @@ This release also introduces many improvements to Lotus! Among them are a new ve
- Add additional info about gas premium (https://github.com/filecoin-project/lotus/pull/3578)
- Fix GasPremium capping logic (https://github.com/filecoin-project/lotus/pull/3552)
-#### Payment channels
+#### Payment channels
- Get available funds by address or by from/to (https://github.com/filecoin-project/lotus/pull/3547)
- Create `lotus paych status` command (https://github.com/filecoin-project/lotus/pull/3523)
@@ -633,7 +1490,7 @@ This patch includes a crucial fix to the message pool selection logic, strongly
This patch includes a hotfix to the `GasEstimateFeeCap` method, capping the estimated fee to a reasonable level by default.
-## Changes
+## Changes
- Added target height to sync wait (https://github.com/filecoin-project/lotus/pull/3502)
- Disable codecov annotations (https://github.com/filecoin-project/lotus/pull/3514)
@@ -663,7 +1520,7 @@ This patch includes some bugfixes to the sector sealing process, and updates go-
# 0.5.7 / 2020-08-31
-This patch release includes some bugfixes and enhancements to the sector lifecycle and message pool logic.
+This patch release includes some bugfixes and enhancements to the sector lifecycle and message pool logic.
## Changes
@@ -683,7 +1540,7 @@ Hotfix release that fixes a panic in the sealing scheduler (https://github.com/f
# 0.5.5
This patch release introduces a large number of improvements to the sealing process.
-It also updates go-fil-markets to
+It also updates go-fil-markets to
[version 0.5.8](https://github.com/filecoin-project/go-fil-markets/releases/tag/v0.5.8),
and go-libp2p-pubsub to [v0.3.5](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.3.5).
@@ -696,16 +1553,16 @@ and go-libp2p-pubsub to [v0.3.5](https://github.com/libp2p/go-libp2p-pubsub/rele
- The following improvements were introduced in https://github.com/filecoin-project/lotus/pull/3350.
- - Allow `lotus-miner sectors remove` to remove a sector in any state.
- - Create a separate state in the storage FSM dedicated to submitting the Commit message.
- - Recovery for when the Deal IDs of deals in a sector get changed in a reorg.
- - Auto-retry sending Precommit and Commit messages if they run out of gas
- - Auto-retry sector remove tasks when they fail
- - Compact worker windows, and allow their tasks to be executed in any order
+ - Allow `lotus-miner sectors remove` to remove a sector in any state.
+ - Create a separate state in the storage FSM dedicated to submitting the Commit message.
+ - Recovery for when the Deal IDs of deals in a sector get changed in a reorg.
+ - Auto-retry sending Precommit and Commit messages if they run out of gas
+ - Auto-retry sector remove tasks when they fail
+ - Compact worker windows, and allow their tasks to be executed in any order
- Don't simply skip PoSt for bad sectors (https://github.com/filecoin-project/lotus/pull/3323)
-#### Message Pool
+#### Message Pool
- Spam Protection: Track required funds for pending messages (https://github.com/filecoin-project/lotus/pull/3313)
@@ -730,7 +1587,7 @@ A patch release, containing a few nice bugfixes and improvements:
# 0.5.3
-Yet another hotfix release.
+Yet another hotfix release.
A lesson for readers, having people who have been awake for 12+ hours review
your hotfix PR is not a good idea. Find someone who has enough slept recently
enough to give you good code review, otherwise you'll end up quickly bumping
@@ -749,9 +1606,9 @@ This is a hotfix release.
# 0.5.1 / 2020-08-24
-The Space Race release!
+The Space Race release!
This release contains the genesis car file and bootstrap peers for the space
-race network.
+race network.
Additionally, we included two small fixes to genesis creation:
- Randomize ticket value in genesis generation
@@ -769,9 +1626,9 @@ Among the highlights included in this release are:
- Gas changes: We implemented EIP-1559 and introduced real gas values.
- Deal-making: We now support "Committed Capacity" sectors, "fast-retrieval" deals,
-and the packing of multiple deals into a single sector.
+ and the packing of multiple deals into a single sector.
- Renamed features: We renamed some of the binaries, environment variables, and default
-paths associated with a Lotus node.
+ paths associated with a Lotus node.
### Gas changes
@@ -779,19 +1636,19 @@ We made some significant changes to the mechanics of gas in this release.
#### Network fee
-We implemented something similar to
+We implemented something similar to
[Ethereum's EIP-1559](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md).
The `Message` structure had three changes:
- The `GasPrice` field has been removed
- A new `GasFeeCap` field has been added, which controls the maximum cost
-the sender incurs for the message
+ the sender incurs for the message
- A new `GasPremium` field has been added, which controls the reward a miner
-earns for including the message
+ earns for including the message
-A sender will never be charged more than `GasFeeCap * GasLimit`.
+A sender will never be charged more than `GasFeeCap * GasLimit`.
A miner will typically earn `GasPremium * GasLimit` as a reward.
-The `Blockheader` structure has one new field, called `ParentBaseFee`.
+The `Blockheader` structure has one new field, called `ParentBaseFee`.
Informally speaking, the `ParentBaseFee`
is increased when blocks are densely packed with messages, and decreased otherwise.
diff --git a/Dockerfile.lotus b/Dockerfile.lotus
new file mode 100644
index 00000000000..0b43ef8063e
--- /dev/null
+++ b/Dockerfile.lotus
@@ -0,0 +1,74 @@
+FROM golang:1.16.4 AS builder-deps
+MAINTAINER Lotus Development Team
+
+RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
+
+ARG RUST_VERSION=nightly
+ENV XDG_CACHE_HOME="/tmp"
+
+ENV RUSTUP_HOME=/usr/local/rustup \
+ CARGO_HOME=/usr/local/cargo \
+ PATH=/usr/local/cargo/bin:$PATH
+
+RUN wget "https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init"; \
+ chmod +x rustup-init; \
+ ./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION; \
+ rm rustup-init; \
+ chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
+ rustup --version; \
+ cargo --version; \
+ rustc --version;
+
+
+FROM builder-deps AS builder-local
+MAINTAINER Lotus Development Team
+
+COPY ./ /opt/filecoin
+WORKDIR /opt/filecoin
+RUN make clean deps
+
+
+FROM builder-local AS builder
+MAINTAINER Lotus Development Team
+
+WORKDIR /opt/filecoin
+
+ARG RUSTFLAGS=""
+ARG GOFLAGS=""
+
+RUN make deps lotus lotus-miner lotus-worker lotus-shed lotus-chainwatch lotus-stats
+
+
+FROM ubuntu:20.04 AS base
+MAINTAINER Lotus Development Team
+
+# Base resources
+COPY --from=builder /etc/ssl/certs /etc/ssl/certs
+COPY --from=builder /lib/x86_64-linux-gnu/libdl.so.2 /lib/
+COPY --from=builder /lib/x86_64-linux-gnu/librt.so.1 /lib/
+COPY --from=builder /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/
+COPY --from=builder /lib/x86_64-linux-gnu/libutil.so.1 /lib/
+COPY --from=builder /usr/lib/x86_64-linux-gnu/libltdl.so.7 /lib/
+COPY --from=builder /usr/lib/x86_64-linux-gnu/libnuma.so.1 /lib/
+COPY --from=builder /usr/lib/x86_64-linux-gnu/libhwloc.so.5 /lib/
+COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/
+
+RUN useradd -r -u 532 -U fc
+
+
+FROM base AS lotus
+MAINTAINER Lotus Development Team
+
+COPY --from=builder /opt/filecoin/lotus /usr/local/bin/
+COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/
+
+ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
+ENV LOTUS_PATH /var/lib/lotus
+
+RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters && chown fc /var/lib/lotus /var/tmp/filecoin-proof-parameters
+
+USER fc
+
+ENTRYPOINT ["/usr/local/bin/lotus"]
+
+CMD ["-help"]
diff --git a/Makefile b/Makefile
index 093f62ef697..a5ce8a99fbf 100644
--- a/Makefile
+++ b/Makefile
@@ -5,10 +5,10 @@ all: build
unexport GOFLAGS
-GOVERSION:=$(shell go version | cut -d' ' -f 3 | cut -d. -f 2)
-ifeq ($(shell expr $(GOVERSION) \< 14), 1)
-$(warning Your Golang version is go 1.$(GOVERSION))
-$(error Update Golang to version $(shell grep '^go' go.mod))
+GOVERSION:=$(shell go version | cut -d' ' -f 3 | sed 's/^go//' | awk -F. '{printf "%d%03d%03d", $$1, $$2, $$3}')
+ifeq ($(shell expr $(GOVERSION) \< 1016000), 1)
+$(warning Your Golang version is go$(shell expr $(GOVERSION) / 1000000).$(shell expr $(GOVERSION) % 1000000 / 1000).$(shell expr $(GOVERSION) % 1000))
+$(error Update Golang to at least version 1.16.0)
endif
# git modules that need to be loaded
@@ -41,8 +41,13 @@ MODULES+=$(FFI_PATH)
BUILD_DEPS+=build/.filecoin-install
CLEAN+=build/.filecoin-install
-$(MODULES): build/.update-modules ;
+ffi-version-check:
+ @[[ "$$(awk '/const Version/{print $$5}' extern/filecoin-ffi/version.go)" -eq 3 ]] || (echo "FFI version mismatch, update submodules"; exit 1)
+BUILD_DEPS+=ffi-version-check
+
+.PHONY: ffi-version-check
+$(MODULES): build/.update-modules ;
# dummy file that marks the last time modules were updated
build/.update-modules:
git submodule update --init --recursive
@@ -57,16 +62,30 @@ CLEAN+=build/.update-modules
deps: $(BUILD_DEPS)
.PHONY: deps
+build-devnets: build lotus-seed lotus-shed lotus-wallet lotus-gateway
+.PHONY: build-devnets
+
debug: GOFLAGS+=-tags=debug
-debug: lotus lotus-miner lotus-worker lotus-seed
+debug: build-devnets
2k: GOFLAGS+=-tags=2k
-2k: lotus lotus-miner lotus-worker lotus-seed
+2k: build-devnets
+
+calibnet: GOFLAGS+=-tags=calibnet
+calibnet: build-devnets
+
+nerpanet: GOFLAGS+=-tags=nerpanet
+nerpanet: build-devnets
+
+butterflynet: GOFLAGS+=-tags=butterflynet
+butterflynet: build-devnets
+
+interopnet: GOFLAGS+=-tags=interopnet
+interopnet: build-devnets
lotus: $(BUILD_DEPS)
rm -f lotus
go build $(GOFLAGS) -o lotus ./cmd/lotus
- go run github.com/GeertJohan/go.rice/rice append --exec lotus -i ./build
.PHONY: lotus
BINS+=lotus
@@ -74,21 +93,18 @@ BINS+=lotus
lotus-miner: $(BUILD_DEPS)
rm -f lotus-miner
go build $(GOFLAGS) -o lotus-miner ./cmd/lotus-storage-miner
- go run github.com/GeertJohan/go.rice/rice append --exec lotus-miner -i ./build
.PHONY: lotus-miner
BINS+=lotus-miner
lotus-worker: $(BUILD_DEPS)
rm -f lotus-worker
go build $(GOFLAGS) -o lotus-worker ./cmd/lotus-seal-worker
- go run github.com/GeertJohan/go.rice/rice append --exec lotus-worker -i ./build
.PHONY: lotus-worker
BINS+=lotus-worker
lotus-shed: $(BUILD_DEPS)
rm -f lotus-shed
go build $(GOFLAGS) -o lotus-shed ./cmd/lotus-shed
- go run github.com/GeertJohan/go.rice/rice append --exec lotus-shed -i ./build
.PHONY: lotus-shed
BINS+=lotus-shed
@@ -115,12 +131,14 @@ install-miner:
install-worker:
install -C ./lotus-worker /usr/local/bin/lotus-worker
+install-app:
+ install -C ./$(APP) /usr/local/bin/$(APP)
+
# TOOLS
lotus-seed: $(BUILD_DEPS)
rm -f lotus-seed
go build $(GOFLAGS) -o lotus-seed ./cmd/lotus-seed
- go run github.com/GeertJohan/go.rice/rice append --exec lotus-seed -i ./build
.PHONY: lotus-seed
BINS+=lotus-seed
@@ -154,13 +172,11 @@ lotus-townhall-front:
.PHONY: lotus-townhall-front
lotus-townhall-app: lotus-touch lotus-townhall-front
- go run github.com/GeertJohan/go.rice/rice append --exec lotus-townhall -i ./cmd/lotus-townhall -i ./build
.PHONY: lotus-townhall-app
lotus-fountain:
rm -f lotus-fountain
go build -o lotus-fountain ./cmd/lotus-fountain
- go run github.com/GeertJohan/go.rice/rice append --exec lotus-fountain -i ./cmd/lotus-fountain -i ./build
.PHONY: lotus-fountain
BINS+=lotus-fountain
@@ -173,28 +189,24 @@ BINS+=lotus-chainwatch
lotus-bench:
rm -f lotus-bench
go build -o lotus-bench ./cmd/lotus-bench
- go run github.com/GeertJohan/go.rice/rice append --exec lotus-bench -i ./build
.PHONY: lotus-bench
BINS+=lotus-bench
lotus-stats:
rm -f lotus-stats
- go build -o lotus-stats ./cmd/lotus-stats
- go run github.com/GeertJohan/go.rice/rice append --exec lotus-stats -i ./build
+ go build $(GOFLAGS) -o lotus-stats ./cmd/lotus-stats
.PHONY: lotus-stats
BINS+=lotus-stats
lotus-pcr:
rm -f lotus-pcr
go build $(GOFLAGS) -o lotus-pcr ./cmd/lotus-pcr
- go run github.com/GeertJohan/go.rice/rice append --exec lotus-pcr -i ./build
.PHONY: lotus-pcr
BINS+=lotus-pcr
lotus-health:
rm -f lotus-health
go build -o lotus-health ./cmd/lotus-health
- go run github.com/GeertJohan/go.rice/rice append --exec lotus-health -i ./build
.PHONY: lotus-health
BINS+=lotus-health
@@ -204,14 +216,33 @@ lotus-wallet:
.PHONY: lotus-wallet
BINS+=lotus-wallet
+lotus-keygen:
+ rm -f lotus-keygen
+ go build -o lotus-keygen ./cmd/lotus-keygen
+.PHONY: lotus-keygen
+BINS+=lotus-keygen
+
testground:
go build -tags testground -o /dev/null ./cmd/lotus
.PHONY: testground
BINS+=testground
+
+tvx:
+ rm -f tvx
+ go build -o tvx ./cmd/tvx
+.PHONY: tvx
+BINS+=tvx
+
install-chainwatch: lotus-chainwatch
install -C ./lotus-chainwatch /usr/local/bin/lotus-chainwatch
+lotus-sim: $(BUILD_DEPS)
+ rm -f lotus-sim
+ go build $(GOFLAGS) -o lotus-sim ./cmd/lotus-sim
+.PHONY: lotus-sim
+BINS+=lotus-sim
+
# SYSTEMD
install-daemon-service: install-daemon
@@ -272,17 +303,10 @@ clean-services: clean-all-services
buildall: $(BINS)
-completions:
- ./scripts/make-completions.sh lotus
- ./scripts/make-completions.sh lotus-miner
-.PHONY: completions
-
install-completions:
mkdir -p /usr/share/bash-completion/completions /usr/local/share/zsh/site-functions/
install -C ./scripts/bash-completion/lotus /usr/share/bash-completion/completions/lotus
- install -C ./scripts/bash-completion/lotus-miner /usr/share/bash-completion/completions/lotus-miner
install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus
- install -C ./scripts/zsh-completion/lotus-miner /usr/local/share/zsh/site-functions/_lotus-miner
clean:
rm -rf $(CLEAN) $(BINS)
@@ -294,17 +318,76 @@ dist-clean:
git submodule deinit --all -f
.PHONY: dist-clean
-type-gen:
+type-gen: api-gen
go run ./gen/main.go
- go generate ./...
+ go generate -x ./...
+ goimports -w api/
-method-gen:
+method-gen: api-gen
(cd ./lotuspond/front/src/chain && go run ./methodgen.go)
-gen: type-gen method-gen
-
-docsgen:
- go run ./api/docgen > documentation/en/api-methods.md
+actors-gen:
+ go run ./chain/actors/agen
+ go fmt ./...
+
+api-gen:
+ go run ./gen/api
+ goimports -w api
+ goimports -w api
+.PHONY: api-gen
+
+appimage: lotus
+ rm -rf appimage-builder-cache || true
+ rm AppDir/io.filecoin.lotus.desktop || true
+ rm AppDir/icon.svg || true
+	rm AppDir/AppRun || true
+ mkdir -p AppDir/usr/bin
+ cp ./lotus AppDir/usr/bin/
+ appimage-builder
+
+docsgen: docsgen-md docsgen-openrpc
+
+docsgen-md-bin: api-gen actors-gen
+ go build $(GOFLAGS) -o docgen-md ./api/docgen/cmd
+docsgen-openrpc-bin: api-gen actors-gen
+ go build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd
+
+docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker
+
+docsgen-md-full: docsgen-md-bin
+ ./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md
+ ./docgen-md "api/v0api/full.go" "FullNode" "v0api" "./api/v0api" > documentation/en/api-v0-methods.md
+docsgen-md-storage: docsgen-md-bin
+ ./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md
+docsgen-md-worker: docsgen-md-bin
+ ./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md
+
+docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker
+
+docsgen-openrpc-full: docsgen-openrpc-bin
+ ./docgen-openrpc "api/api_full.go" "FullNode" "api" "./api" -gzip > build/openrpc/full.json.gz
+docsgen-openrpc-storage: docsgen-openrpc-bin
+ ./docgen-openrpc "api/api_storage.go" "StorageMiner" "api" "./api" -gzip > build/openrpc/miner.json.gz
+docsgen-openrpc-worker: docsgen-openrpc-bin
+ ./docgen-openrpc "api/api_worker.go" "Worker" "api" "./api" -gzip > build/openrpc/worker.json.gz
+
+.PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin
+
+gen: actors-gen type-gen method-gen docsgen api-gen circleci
+ @echo ">>> IF YOU'VE MODIFIED THE CLI, REMEMBER TO ALSO MAKE docsgen-cli"
+.PHONY: gen
+
+snap: lotus lotus-miner lotus-worker
+ snapcraft
+ # snapcraft upload ./lotus_*.snap
+
+# separate from gen because it needs binaries
+docsgen-cli: lotus lotus-miner lotus-worker
+ python ./scripts/generate-lotus-cli.py
+.PHONY: docsgen-cli
print-%:
@echo $*=$($*)
+
+circleci:
+ go generate -x ./.circleci
\ No newline at end of file
diff --git a/README.md b/README.md
index fa432bf7dc8..a44c690066c 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@
-
+
@@ -18,30 +18,121 @@ Lotus is an implementation of the Filecoin Distributed Storage Network. For more
## Building & Documentation
-For instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/).
+> Note: The default `master` branch is the dev branch, please use with caution. For the latest stable version, checkout the most recent [`Latest release`](https://github.com/filecoin-project/lotus/releases).
+
+For complete instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/). Basic build instructions can be found further down in this readme.
## Reporting a Vulnerability
Please send an email to security@filecoin.org. See our [security policy](SECURITY.md) for more details.
-## Development
+## Related packages
-The main branches under development at the moment are:
-* [`master`](https://github.com/filecoin-project/lotus): current testnet.
-* [`next`](https://github.com/filecoin-project/lotus/tree/next): working branch with chain-breaking changes.
-* [`ntwk-calibration`](https://github.com/filecoin-project/lotus/tree/ntwk-calibration): devnet running one of `next` commits.
+These repos are independent and reusable modules, but are tightly integrated into Lotus to make up a fully featured Filecoin implementation:
-### Tracker
+- [go-fil-markets](https://github.com/filecoin-project/go-fil-markets) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/markets-shared-components-5daa144a7046a60001c6e253/board)
+- [specs-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board)
-All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work towards Mainnet launch can be seen at the [lotus github project board](https://github.com/orgs/filecoin-project/projects/8). The issues labeled with `incentives` are there to identify the issues needed for Space Race launch.
+## Contribute
-### Packages
+Lotus is a universally open project and welcomes contributions of all kinds: code, docs, and more. However, before making a contribution, we ask you to heed these recommendations:
-The lotus Filecoin implementation unfolds into the following packages:
+1. If the proposal entails a protocol change, please first submit a [Filecoin Improvement Proposal](https://github.com/filecoin-project/FIPs).
+2. If the change is complex and requires prior discussion, [open an issue](https://github.com/filecoin-project/lotus/issues) or a [discussion](https://github.com/filecoin-project/lotus/discussions) to request feedback before you start working on a pull request. This is to avoid disappointment and sunk costs, in case the change is not actually needed or accepted.
+3. Please refrain from submitting PRs to adapt existing code to subjective preferences. The changeset should contain functional or technical improvements/enhancements, bug fixes, new features, or some other clear material contribution. Simple stylistic changes are likely to be rejected in order to reduce code churn.
-- [This repo](https://github.com/filecoin-project/lotus)
-- [go-fil-markets](https://github.com/filecoin-project/go-fil-markets) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/markets-shared-components-5daa144a7046a60001c6e253/board)
-- [spec-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board)
+When implementing a change:
+
+1. Adhere to the standard Go formatting guidelines, e.g. [Effective Go](https://golang.org/doc/effective_go.html). Run `go fmt`.
+2. Stick to the idioms and patterns used in the codebase. Familiar-looking code has a higher chance of being accepted than eerie code. Pay attention to commonly used variable and parameter names, avoidance of naked returns, error handling patterns, etc.
+3. Comments: follow the advice on the [Commentary](https://golang.org/doc/effective_go.html#commentary) section of Effective Go.
+4. Minimize code churn. Modify only what is strictly necessary. Well-encapsulated changesets will get a quicker response from maintainers.
+5. Lint your code with [`golangci-lint`](https://golangci-lint.run) (CI will reject your PR if unlinted).
+6. Add tests.
+7. Title the PR in a meaningful way and describe the rationale and the thought process in the PR description.
+8. Write clean, thoughtful, and detailed [commit messages](https://chris.beams.io/posts/git-commit/). This is even more important than the PR description, because commit messages are stored _inside_ the Git history. One good rule is: if you are happy posting the commit message as the PR description, then it's a good commit message.
+
+## Basic Build Instructions
+**System-specific Software Dependencies**:
+
+Building Lotus requires some system dependencies, usually provided by your distribution.
+
+Ubuntu/Debian:
+```
+sudo apt install mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl clang build-essential hwloc libhwloc-dev wget -y && sudo apt upgrade -y
+```
+
+Fedora:
+```
+sudo dnf -y install gcc make git bzr jq pkgconfig mesa-libOpenCL mesa-libOpenCL-devel opencl-headers ocl-icd ocl-icd-devel clang llvm wget hwloc hwloc-devel
+```
+
+For other distributions you can find the required dependencies [here.](https://docs.filecoin.io/get-started/lotus/installation/#system-specific) For instructions specific to macOS, you can find them [here.](https://docs.filecoin.io/get-started/lotus/installation/#macos)
+
+#### Go
+
+To build Lotus, you need a working installation of [Go 1.16.4 or higher](https://golang.org/dl/):
+
+```bash
+wget -c https://golang.org/dl/go1.16.4.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
+```
+
+**TIP:**
+You'll need to add `/usr/local/go/bin` to your path. For most Linux distributions you can run something like:
+
+```shell
+echo "export PATH=$PATH:/usr/local/go/bin" >> ~/.bashrc && source ~/.bashrc
+```
+
+See the [official Golang installation instructions](https://golang.org/doc/install) if you get stuck.
+
+### Build and install Lotus
+
+Once all the dependencies are installed, you can build and install the Lotus suite (`lotus`, `lotus-miner`, and `lotus-worker`).
+
+1. Clone the repository:
+
+ ```sh
+ git clone https://github.com/filecoin-project/lotus.git
+ cd lotus/
+ ```
+
+Note: The default branch `master` is the dev branch where the latest new features, bug fixes and improvements are in. However, if you want to run lotus on Filecoin mainnet and want to run a production-ready lotus, get the latest release [here](https://github.com/filecoin-project/lotus/releases).
+
+2. To join mainnet, checkout the [latest release](https://github.com/filecoin-project/lotus/releases).
+
+ If you are changing networks from a previous Lotus installation or there has been a network reset, read the [Switch networks guide](https://docs.filecoin.io/get-started/lotus/switch-networks/) before proceeding.
+
+ For networks other than mainnet, look up the current branch or tag/commit for the network you want to join in the [Filecoin networks dashboard](https://network.filecoin.io), then build Lotus for your specific network below.
+
+ ```sh
+   git checkout <tag_or_branch>
+   # For example:
+   git checkout <vX.X.X> # tag for a release
+ ```
+
+ Currently, the latest code on the _master_ branch corresponds to mainnet.
+
+3. If you are in China, see "[Lotus: tips when running in China](https://docs.filecoin.io/get-started/lotus/tips-running-in-china/)".
+4. This build instruction uses the prebuilt proofs binaries. If you want to build the proof binaries from source check the [complete instructions](https://docs.filecoin.io/get-started/lotus/installation/#build-and-install-lotus). Note, if you are building the proof binaries from source, [installing rustup](https://docs.filecoin.io/get-started/lotus/installation/#rustup) is also needed.
+
+5. Build and install Lotus:
+
+ ```sh
+ make clean all #mainnet
+
+ # Or to join a testnet or devnet:
+ make clean calibnet # Calibration with min 32GiB sectors
+ make clean nerpanet # Nerpa with min 512MiB sectors
+
+ sudo make install
+ ```
+
+ This will put `lotus`, `lotus-miner` and `lotus-worker` in `/usr/local/bin`.
+
+ `lotus` will use the `$HOME/.lotus` folder by default for storage (configuration, chain data, wallets, etc). See [advanced options](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/) for information on how to customize the Lotus folder.
+
+6. You should now have Lotus installed. You can now [start the Lotus daemon and sync the chain](https://docs.filecoin.io/get-started/lotus/installation/#start-the-lotus-daemon-and-sync-the-chain).
## License
diff --git a/SECURITY.md b/SECURITY.md
index 592206bc5a9..d53c2b920b5 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -2,11 +2,11 @@
## Reporting a Vulnerability
-For *critical* bugs, please consult our Security Policy and Responsible Disclosure Program information at https://github.com/filecoin-project/community/blob/master/SECURITY.md
+For reporting security vulnerabilities/bugs, please consult our Security Policy and Responsible Disclosure Program information at https://github.com/filecoin-project/community/blob/master/SECURITY.md. Security vulnerabilities should be reported via our [Vulnerability Reporting channels](https://github.com/filecoin-project/community/blob/master/SECURITY.md#vulnerability-reporting) and will be eligible for a [Bug Bounty](https://security.filecoin.io/bug-bounty/).
Please try to provide a clear description of any bugs reported, along with how to reproduce the bug if possible. More detailed bug reports (especially those with a PoC included) will help us move forward much faster. Additionally, please avoid reporting bugs that already have open issues. Take a moment to search the issue list of the related GitHub repositories before writing up a new report.
-Here are some examples of bugs we would consider 'critical':
+Here are some examples of bugs we would consider to be security vulnerabilities:
* If you can spend from a `multisig` wallet you do not control the keys for.
* If you can cause a miner to be slashed without them actually misbehaving.
@@ -16,8 +16,8 @@ Here are some examples of bugs we would consider 'critical':
* If you can craft a message that causes a persistent fork in the network.
* If you can cause the total amount of Filecoin in the network to no longer be 2 billion.
-This is not an exhaustive list, but should provide some idea of what we consider 'critical'.
+This is not an exhaustive list, but should provide some idea of what we consider as a security vulnerability.
## Reporting a non security bug
-For non-critical bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md).
+For non-security bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md).
diff --git a/api/README.md b/api/README.md
new file mode 100644
index 00000000000..07089d7ae9f
--- /dev/null
+++ b/api/README.md
@@ -0,0 +1,14 @@
+## Lotus API
+
+This package contains all lotus API definitions. Interfaces defined here are
+exposed as JsonRPC 2.0 endpoints by lotus programs.
+
+### Versions
+
+| File | Alias File | Interface | Exposed by | Version | HTTP Endpoint | Status | Docs
+|------------------|-------------------|----------------|--------------------|---------|---------------|------------------------------|------
+| `api_common.go` | `v0api/latest.go` | `Common` | lotus; lotus-miner | v0 | `/rpc/v0` | Latest, Stable | [Methods](../documentation/en/api-v0-methods.md)
+| `api_full.go` | `v1api/latest.go` | `FullNode` | lotus | v1 | `/rpc/v1` | Latest, **Work in progress** | [Methods](../documentation/en/api-v1-unstable-methods.md)
+| `api_storage.go` | `v0api/latest.go` | `StorageMiner` | lotus-miner | v0 | `/rpc/v0` | Latest, Stable | [Methods](../documentation/en/api-v0-methods-miner.md)
+| `api_worker.go` | `v0api/latest.go` | `Worker` | lotus-worker | v0 | `/rpc/v0` | Latest, Stable | [Methods](../documentation/en/api-v0-methods-worker.md)
+| `v0api/full.go` | | `FullNode` | lotus | v0 | `/rpc/v0` | Stable | [Methods](../documentation/en/api-v0-methods.md)
diff --git a/api/api_common.go b/api/api_common.go
index 5b036d1f6d9..629299db3b6 100644
--- a/api/api_common.go
+++ b/api/api_common.go
@@ -4,77 +4,61 @@ import (
"context"
"fmt"
+ apitypes "github.com/filecoin-project/lotus/api/types"
+
"github.com/google/uuid"
"github.com/filecoin-project/go-jsonrpc/auth"
- metrics "github.com/libp2p/go-libp2p-core/metrics"
- "github.com/libp2p/go-libp2p-core/network"
- "github.com/libp2p/go-libp2p-core/peer"
- protocol "github.com/libp2p/go-libp2p-core/protocol"
-
- "github.com/filecoin-project/lotus/build"
)
-type Common interface {
+// MODIFYING THE API INTERFACE
+//
+// When adding / changing methods in this file:
+// * Do the change here
+// * Adjust implementation in `node/impl/`
+// * Run `make gen` - this will:
+// * Generate proxy structs
+// * Generate mocks
+// * Generate markdown docs
+// * Generate openrpc blobs
+type Common interface {
// MethodGroup: Auth
- AuthVerify(ctx context.Context, token string) ([]auth.Permission, error)
- AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error)
-
- // MethodGroup: Net
+ AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) //perm:read
+ AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) //perm:admin
- NetConnectedness(context.Context, peer.ID) (network.Connectedness, error)
- NetPeers(context.Context) ([]peer.AddrInfo, error)
- NetConnect(context.Context, peer.AddrInfo) error
- NetAddrsListen(context.Context) (peer.AddrInfo, error)
- NetDisconnect(context.Context, peer.ID) error
- NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error)
- NetPubsubScores(context.Context) ([]PubsubScore, error)
- NetAutoNatStatus(context.Context) (NatInfo, error)
- NetAgentVersion(ctx context.Context, p peer.ID) (string, error)
+ // MethodGroup: Log
- // NetBandwidthStats returns statistics about the nodes total bandwidth
- // usage and current rate across all peers and protocols.
- NetBandwidthStats(ctx context.Context) (metrics.Stats, error)
-
- // NetBandwidthStatsByPeer returns statistics about the nodes bandwidth
- // usage and current rate per peer
- NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error)
-
- // NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth
- // usage and current rate per protocol
- NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error)
+ LogList(context.Context) ([]string, error) //perm:write
+ LogSetLevel(context.Context, string, string) error //perm:write
// MethodGroup: Common
- // ID returns peerID of libp2p node backing this API
- ID(context.Context) (peer.ID, error)
-
// Version provides information about API provider
- Version(context.Context) (Version, error)
+ Version(context.Context) (APIVersion, error) //perm:read
- LogList(context.Context) ([]string, error)
- LogSetLevel(context.Context, string, string) error
+ // Discover returns an OpenRPC document describing an RPC API.
+ Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) //perm:read
// trigger graceful shutdown
- Shutdown(context.Context) error
+ Shutdown(context.Context) error //perm:admin
// Session returns a random UUID of api provider session
- Session(context.Context) (uuid.UUID, error)
+ Session(context.Context) (uuid.UUID, error) //perm:read
- Closing(context.Context) (<-chan struct{}, error)
+ Closing(context.Context) (<-chan struct{}, error) //perm:read
}
-// Version provides various build-time information
-type Version struct {
+// APIVersion provides various build-time information
+type APIVersion struct {
Version string
// APIVersion is a binary encoded semver version of the remote implementing
// this api
//
// See APIVersion in build/version.go
- APIVersion build.Version
+ APIVersion Version
// TODO: git commit / os / genesis cid?
@@ -82,11 +66,6 @@ type Version struct {
BlockDelay uint64
}
-func (v Version) String() string {
+func (v APIVersion) String() string {
return fmt.Sprintf("%s+api%s", v.Version, v.APIVersion.String())
}
-
-type NatInfo struct {
- Reachability network.Reachability
- PublicAddr string
-}
diff --git a/api/api_full.go b/api/api_full.go
index bb1eb159540..5c72c3613a8 100644
--- a/api/api_full.go
+++ b/api/api_full.go
@@ -2,17 +2,16 @@ package api
import (
"context"
+ "encoding/json"
"fmt"
"time"
- datatransfer "github.com/filecoin-project/go-data-transfer"
- "github.com/filecoin-project/go-state-types/network"
-
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
+ datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-multistore"
@@ -20,20 +19,46 @@ import (
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
+
+ apitypes "github.com/filecoin-project/lotus/api/types"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
-
- "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/types"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
+//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_full.go -package=mocks . FullNode
+
+// ChainIO abstracts operations for accessing raw IPLD objects.
+type ChainIO interface {
+ ChainReadObj(context.Context, cid.Cid) ([]byte, error)
+ ChainHasObj(context.Context, cid.Cid) (bool, error)
+}
+
+const LookbackNoLimit = abi.ChainEpoch(-1)
+
+// MODIFYING THE API INTERFACE
+//
+// NOTE: This is the V1 (Unstable) API - to add methods to the V0 (Stable) API
+// you'll have to add those methods to interfaces in `api/v0api`
+//
+// When adding / changing methods in this file:
+// * Do the change here
+// * Adjust implementation in `node/impl/`
+// * Run `make gen` - this will:
+// * Generate proxy structs
+// * Generate mocks
+// * Generate markdown docs
+// * Generate openrpc blobs
+
// FullNode API is a low-level interface to the Filecoin network full node
type FullNode interface {
Common
+ Net
// MethodGroup: Chain
// The Chain method group contains methods for interacting with the
@@ -41,66 +66,81 @@ type FullNode interface {
// ChainNotify returns channel with chain head updates.
// First message is guaranteed to be of len == 1, and type == 'current'.
- ChainNotify(context.Context) (<-chan []*HeadChange, error)
+ ChainNotify(context.Context) (<-chan []*HeadChange, error) //perm:read
// ChainHead returns the current head of the chain.
- ChainHead(context.Context) (*types.TipSet, error)
+ ChainHead(context.Context) (*types.TipSet, error) //perm:read
// ChainGetRandomnessFromTickets is used to sample the chain for randomness.
- ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
+ ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read
// ChainGetRandomnessFromBeacon is used to sample the beacon for randomness.
- ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
+ ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read
// ChainGetBlock returns the block specified by the given CID.
- ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error)
+ ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) //perm:read
// ChainGetTipSet returns the tipset specified by the given TipSetKey.
- ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
+ ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) //perm:read
// ChainGetBlockMessages returns messages stored in the specified block.
- ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error)
+ //
+ // Note: If there are multiple blocks in a tipset, it's likely that some
+ // messages will be duplicated. It's also possible for blocks in a tipset to have
+ // different messages from the same sender at the same nonce. When that happens,
+ // only the first message (in a block with lowest ticket) will be considered
+ // for execution
+ //
+ // NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK
+ //
+ // DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET
+ // Use ChainGetParentMessages, which will perform correct message deduplication
+ ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error) //perm:read
// ChainGetParentReceipts returns receipts for messages in parent tipset of
- // the specified block.
- ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error)
+ // the specified block. The receipts in the list returned is one-to-one with the
+ // messages returned by a call to ChainGetParentMessages with the same blockCid.
+ ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) //perm:read
// ChainGetParentMessages returns messages stored in parent tipset of the
// specified block.
- ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error)
+ ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error) //perm:read
+
+ // ChainGetMessagesInTipset returns message stores in current tipset
+ ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]Message, error) //perm:read
// ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
// If there are no blocks at the specified epoch, a tipset at an earlier epoch
// will be returned.
- ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
+ ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) //perm:read
// ChainReadObj reads ipld nodes referenced by the specified CID from chain
// blockstore and returns raw bytes.
- ChainReadObj(context.Context, cid.Cid) ([]byte, error)
+ ChainReadObj(context.Context, cid.Cid) ([]byte, error) //perm:read
// ChainDeleteObj deletes node referenced by the given CID
- ChainDeleteObj(context.Context, cid.Cid) error
+ ChainDeleteObj(context.Context, cid.Cid) error //perm:admin
// ChainHasObj checks if a given CID exists in the chain blockstore.
- ChainHasObj(context.Context, cid.Cid) (bool, error)
+ ChainHasObj(context.Context, cid.Cid) (bool, error) //perm:read
// ChainStatObj returns statistics about the graph referenced by 'obj'.
// If 'base' is also specified, then the returned stat will be a diff
// between the two objects.
- ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (ObjStat, error)
+ ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (ObjStat, error) //perm:read
// ChainSetHead forcefully sets current chain head. Use with caution.
- ChainSetHead(context.Context, types.TipSetKey) error
+ ChainSetHead(context.Context, types.TipSetKey) error //perm:admin
// ChainGetGenesis returns the genesis tipset.
- ChainGetGenesis(context.Context) (*types.TipSet, error)
+ ChainGetGenesis(context.Context) (*types.TipSet, error) //perm:read
// ChainTipSetWeight computes weight for the specified tipset.
- ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error)
- ChainGetNode(ctx context.Context, p string) (*IpldObject, error)
+ ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) //perm:read
+ ChainGetNode(ctx context.Context, p string) (*IpldObject, error) //perm:read
// ChainGetMessage reads a message referenced by the specified CID from the
// chain blockstore.
- ChainGetMessage(context.Context, cid.Cid) (*types.Message, error)
+ ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) //perm:read
// ChainGetPath returns a set of revert/apply operations needed to get from
// one tipset to another, for example:
@@ -115,14 +155,14 @@ type FullNode interface {
// tRR
//```
// Would return `[revert(tBA), apply(tAB), apply(tAA)]`
- ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error)
+ ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error) //perm:read
// ChainExport returns a stream of bytes with CAR dump of chain data.
// The exported chain data includes the header chain from the given tipset
// back to genesis, the entire genesis state, and the most recent 'nroots'
// state trees.
// If oldmsgskip is set, messages from before the requested roots are also not included.
- ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error)
+ ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read
// MethodGroup: Beacon
// The Beacon method group contains methods for interacting with the random beacon (DRAND)
@@ -130,74 +170,74 @@ type FullNode interface {
// BeaconGetEntry returns the beacon entry for the given filecoin epoch. If
// the entry has not yet been produced, the call will block until the entry
// becomes available
- BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error)
+ BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) //perm:read
// GasEstimateFeeCap estimates gas fee cap
- GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error)
+ GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) //perm:read
// GasEstimateGasLimit estimates gas used by the message and returns it.
// It fails if message fails to execute.
- GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error)
+ GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error) //perm:read
// GasEstimateGasPremium estimates what gas price should be used for a
// message to have high likelihood of inclusion in `nblocksincl` epochs.
GasEstimateGasPremium(_ context.Context, nblocksincl uint64,
- sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error)
+ sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) //perm:read
// GasEstimateMessageGas estimates gas values for unset message gas fields
- GasEstimateMessageGas(context.Context, *types.Message, *MessageSendSpec, types.TipSetKey) (*types.Message, error)
+ GasEstimateMessageGas(context.Context, *types.Message, *MessageSendSpec, types.TipSetKey) (*types.Message, error) //perm:read
// MethodGroup: Sync
// The Sync method group contains methods for interacting with and
// observing the lotus sync service.
// SyncState returns the current status of the lotus sync system.
- SyncState(context.Context) (*SyncState, error)
+ SyncState(context.Context) (*SyncState, error) //perm:read
// SyncSubmitBlock can be used to submit a newly created block to the.
// network through this node
- SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error
+ SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error //perm:write
// SyncIncomingBlocks returns a channel streaming incoming, potentially not
// yet synced block headers.
- SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error)
+ SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) //perm:read
// SyncCheckpoint marks a blocks as checkpointed, meaning that it won't ever fork away from it.
- SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error
+ SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error //perm:admin
// SyncMarkBad marks a blocks as bad, meaning that it won't ever by synced.
// Use with extreme caution.
- SyncMarkBad(ctx context.Context, bcid cid.Cid) error
+ SyncMarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin
// SyncUnmarkBad unmarks a blocks as bad, making it possible to be validated and synced again.
- SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error
+ SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin
// SyncUnmarkAllBad purges bad block cache, making it possible to sync to chains previously marked as bad
- SyncUnmarkAllBad(ctx context.Context) error
+ SyncUnmarkAllBad(ctx context.Context) error //perm:admin
// SyncCheckBad checks if a block was marked as bad, and if it was, returns
// the reason.
- SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error)
+ SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) //perm:read
// SyncValidateTipset indicates whether the provided tipset is valid or not
- SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error)
+ SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) //perm:read
// MethodGroup: Mpool
// The Mpool methods are for interacting with the message pool. The message pool
// manages all incoming and outgoing 'messages' going over the network.
// MpoolPending returns pending mempool messages.
- MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error)
+ MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) //perm:read
// MpoolSelect returns a list of pending messages for inclusion in the next block
- MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error)
+ MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) //perm:read
// MpoolPush pushes a signed message to mempool.
- MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error)
+ MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write
// MpoolPushUntrusted pushes a signed message to mempool from untrusted sources.
- MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error)
+ MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write
// MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
// to mempool.
@@ -205,34 +245,41 @@ type FullNode interface {
//
// When maxFee is set to 0, MpoolPushMessage will guess appropriate fee
// based on current chain conditions
- MpoolPushMessage(ctx context.Context, msg *types.Message, spec *MessageSendSpec) (*types.SignedMessage, error)
+ MpoolPushMessage(ctx context.Context, msg *types.Message, spec *MessageSendSpec) (*types.SignedMessage, error) //perm:sign
// MpoolBatchPush batch pushes a signed message to mempool.
- MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error)
+ MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write
// MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources.
- MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error)
+ MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write
// MpoolBatchPushMessage batch pushes a unsigned message to mempool.
- MpoolBatchPushMessage(context.Context, []*types.Message, *MessageSendSpec) ([]*types.SignedMessage, error)
+ MpoolBatchPushMessage(context.Context, []*types.Message, *MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign
+
+ // MpoolCheckMessages performs logical checks on a batch of messages
+ MpoolCheckMessages(context.Context, []*MessagePrototype) ([][]MessageCheckStatus, error) //perm:read
+ // MpoolCheckPendingMessages performs logical checks for all pending messages from a given address
+ MpoolCheckPendingMessages(context.Context, address.Address) ([][]MessageCheckStatus, error) //perm:read
+ // MpoolCheckReplaceMessages performs logical checks on pending messages with replacement
+ MpoolCheckReplaceMessages(context.Context, []*types.Message) ([][]MessageCheckStatus, error) //perm:read
// MpoolGetNonce gets next nonce for the specified sender.
// Note that this method may not be atomic. Use MpoolPushMessage instead.
- MpoolGetNonce(context.Context, address.Address) (uint64, error)
- MpoolSub(context.Context) (<-chan MpoolUpdate, error)
+ MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read
+ MpoolSub(context.Context) (<-chan MpoolUpdate, error) //perm:read
// MpoolClear clears pending messages from the mpool
- MpoolClear(context.Context, bool) error
+ MpoolClear(context.Context, bool) error //perm:write
// MpoolGetConfig returns (a copy of) the current mpool config
- MpoolGetConfig(context.Context) (*types.MpoolConfig, error)
+ MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read
// MpoolSetConfig sets the mpool config to (a copy of) the supplied config
- MpoolSetConfig(context.Context, *types.MpoolConfig) error
+ MpoolSetConfig(context.Context, *types.MpoolConfig) error //perm:admin
// MethodGroup: Miner
- MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*MiningBaseInfo, error)
- MinerCreateBlock(context.Context, *BlockTemplate) (*types.BlockMsg, error)
+ MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*MiningBaseInfo, error) //perm:read
+ MinerCreateBlock(context.Context, *BlockTemplate) (*types.BlockMsg, error) //perm:write
// // UX ?
@@ -241,32 +288,32 @@ type FullNode interface {
// WalletNew creates a new address in the wallet with the given sigType.
// Available key types: bls, secp256k1, secp256k1-ledger
// Support for numerical types: 1 - secp256k1, 2 - BLS is deprecated
- WalletNew(context.Context, types.KeyType) (address.Address, error)
+ WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:write
// WalletHas indicates whether the given address is in the wallet.
- WalletHas(context.Context, address.Address) (bool, error)
+ WalletHas(context.Context, address.Address) (bool, error) //perm:write
// WalletList lists all the addresses in the wallet.
- WalletList(context.Context) ([]address.Address, error)
+ WalletList(context.Context) ([]address.Address, error) //perm:write
// WalletBalance returns the balance of the given address at the current head of the chain.
- WalletBalance(context.Context, address.Address) (types.BigInt, error)
+ WalletBalance(context.Context, address.Address) (types.BigInt, error) //perm:read
// WalletSign signs the given bytes using the given address.
- WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error)
+ WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) //perm:sign
// WalletSignMessage signs the given message using the given address.
- WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error)
+ WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) //perm:sign
// WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid.
// The address does not have to be in the wallet.
- WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error)
+ WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
// WalletDefaultAddress returns the address marked as default in the wallet.
- WalletDefaultAddress(context.Context) (address.Address, error)
+ WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
// WalletSetDefault marks the given address as as the default one.
- WalletSetDefault(context.Context, address.Address) error
+ WalletSetDefault(context.Context, address.Address) error //perm:write
// WalletExport returns the private key of an address in the wallet.
- WalletExport(context.Context, address.Address) (*types.KeyInfo, error)
+ WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
// WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet.
- WalletImport(context.Context, *types.KeyInfo) (address.Address, error)
+ WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin
// WalletDelete deletes an address from the wallet.
- WalletDelete(context.Context, address.Address) error
+ WalletDelete(context.Context, address.Address) error //perm:admin
// WalletValidateAddress validates whether a given string can be decoded as a well-formed address
- WalletValidateAddress(context.Context, string) (address.Address, error)
+ WalletValidateAddress(context.Context, string) (address.Address, error) //perm:read
// Other
@@ -275,198 +322,290 @@ type FullNode interface {
// retrieval markets as a client
// ClientImport imports file under the specified path into filestore.
- ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error)
+ ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error) //perm:admin
// ClientRemoveImport removes file import
- ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error
+ ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin
// ClientStartDeal proposes a deal with a miner.
- ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error)
+ ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:admin
+ // ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
+ ClientStatelessDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:write
// ClientGetDealInfo returns the latest information about a given deal.
- ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error)
+ ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) //perm:read
// ClientListDeals returns information about the deals made by the local client.
- ClientListDeals(ctx context.Context) ([]DealInfo, error)
+ ClientListDeals(ctx context.Context) ([]DealInfo, error) //perm:write
// ClientGetDealUpdates returns the status of updated deals
- ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error)
+ ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error) //perm:write
// ClientGetDealStatus returns status given a code
- ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error)
+ ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read
// ClientHasLocal indicates whether a certain CID is locally stored.
- ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error)
+ ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write
// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
- ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]QueryOffer, error)
+ ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]QueryOffer, error) //perm:read
// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
- ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error)
+ ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) //perm:read
// ClientRetrieve initiates the retrieval of a file, as specified in the order.
- ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error
+ ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error //perm:admin
// ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
// of status updates.
- ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error)
+ ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
+ // ClientListRetrievals returns information about retrievals made by the local client
+ ClientListRetrievals(ctx context.Context) ([]RetrievalInfo, error) //perm:write
+ // ClientGetRetrievalUpdates returns status of updated retrieval deals
+ ClientGetRetrievalUpdates(ctx context.Context) (<-chan RetrievalInfo, error) //perm:write
// ClientQueryAsk returns a signed StorageAsk from the specified miner.
- ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error)
+ ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read
// ClientCalcCommP calculates the CommP and data size of the specified CID
- ClientDealPieceCID(ctx context.Context, root cid.Cid) (DataCIDSize, error)
+ ClientDealPieceCID(ctx context.Context, root cid.Cid) (DataCIDSize, error) //perm:read
// ClientCalcCommP calculates the CommP for a specified file
- ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error)
+ ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error) //perm:write
// ClientGenCar generates a CAR file for the specified file.
- ClientGenCar(ctx context.Context, ref FileRef, outpath string) error
+ ClientGenCar(ctx context.Context, ref FileRef, outpath string) error //perm:write
// ClientDealSize calculates real deal data size
- ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error)
+ ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error) //perm:read
// ClientListTransfers returns the status of all ongoing transfers of data
- ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)
- ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
+ ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write
+ ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write
// ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
- ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
+ ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
- ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
+ ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
// which are stuck due to insufficient funds
- ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error
+ ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write
+
+ // ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID
+ ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write
// ClientUnimport removes references to the specified file from filestore
//ClientUnimport(path string)
// ClientListImports lists imported files and their root CIDs
- ClientListImports(ctx context.Context) ([]Import, error)
+ ClientListImports(ctx context.Context) ([]Import, error) //perm:write
//ClientListAsks() []Ask
// MethodGroup: State
// The State methods are used to query, inspect, and interact with chain state.
- // Most methods take a TipSetKey as a parameter. The state looked up is the state at that tipset.
+ // Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
// A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used.
// StateCall runs the given message and returns its result without any persisted changes.
- StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error)
+ //
+ // StateCall applies the message to the tipset's parent state. The
+ // message is not applied on-top-of the messages in the passed-in
+ // tipset.
+ StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error) //perm:read
// StateReplay replays a given message, assuming it was included in a block in the specified tipset.
- // If no tipset key is provided, the appropriate tipset is looked up.
- StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error)
+ //
+ // If a tipset key is provided, and a replacing message is found on chain,
+ // the method will return an error saying that the message wasn't found
+ //
+ // If no tipset key is provided, the appropriate tipset is looked up, and if
+ // the message was gas-repriced, the on-chain message will be replayed - in
+ // that case the returned InvocResult.MsgCid will not match the Cid param
+ //
+ // If the caller wants to ensure that exactly the requested message was executed,
+ // they MUST check that InvocResult.MsgCid is equal to the provided Cid.
+ // Without this check both the requested and original message may appear as
+ // successfully executed on-chain, which may look like a double-spend.
+ //
+ // A replacing message is a message with a different CID and different gas
+ // values and signature, but with all other parameters matching (source/destination,
+ // nonce, params, etc.)
+ StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error) //perm:read
// StateGetActor returns the indicated actor's nonce and balance.
- StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error)
+ StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) //perm:read
// StateReadState returns the indicated actor's state.
- StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error)
+ StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) //perm:read
// StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height.
- StateListMessages(ctx context.Context, match *MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error)
+ StateListMessages(ctx context.Context, match *MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) //perm:read
+ // StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number.
+ StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) //perm:read
// StateNetworkName returns the name of the network the node is synced to
- StateNetworkName(context.Context) (dtypes.NetworkName, error)
+ StateNetworkName(context.Context) (dtypes.NetworkName, error) //perm:read
// StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included.
- StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error)
+ StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
// StateMinerActiveSectors returns info about sectors that a given miner is actively proving.
- StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error)
+ StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period
// and returns the deadline-related calculations.
- StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error)
+ StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) //perm:read
// StateMinerPower returns the power of the indicated miner
- StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
+ StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error) //perm:read
// StateMinerInfo returns info about the indicated miner
- StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error)
+ StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) //perm:read
// StateMinerDeadlines returns all the proving deadlines for the given miner
- StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error)
+ StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error) //perm:read
// StateMinerPartitions returns all partitions in the specified deadline
- StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]Partition, error)
+ StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]Partition, error) //perm:read
// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
- StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
+ StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
// StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
- StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error)
+ StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error) //perm:read
// StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner
- StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
+ StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
// StateMinerInitialPledgeCollateral returns the precommit deposit for the specified miner's sector
- StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
+ StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector
- StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
+ StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent
- StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
+ StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerSectorAllocated checks if a sector is allocated
- StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error)
+ StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) //perm:read
// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector
- StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error)
+ StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) //perm:read
// StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found
// NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate
// expiration epoch
- StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error)
+ StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) //perm:read
// StateSectorExpiration returns epoch at which given sector will expire
- StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error)
+ StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) //perm:read
// StateSectorPartition finds deadline/partition with the specified sector
- StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error)
- // StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
- StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error)
- // StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
- // message arrives on chain, and gets to the indicated confidence depth.
- StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error)
- // StateWaitMsgLimited looks back up to limit epochs in the chain for a message.
+ StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) //perm:read
+ // StateSearchMsg looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed
+ //
+ // NOTE: If a replacing message is found on chain, this method will return
+ // a MsgLookup for the replacing message - the MsgLookup.Message will be a different
+ // CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
+ // result of the execution of the replacing message.
+ //
+ // If the caller wants to ensure that exactly the requested message was executed,
+ // they must check that MsgLookup.Message is equal to the provided 'cid', or set the
+ // `allowReplaced` parameter to false. Without this check, and with `allowReplaced`
+ // set to true, both the requested and original message may appear as
+ // successfully executed on-chain, which may look like a double-spend.
+ //
+ // A replacing message is a message with a different CID and different gas
+ // values and signature, but with all other parameters matching (source/destination,
+ // nonce, params, etc.)
+ StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) //perm:read
+ // StateWaitMsg looks back up to limit epochs in the chain for a message.
// If not found, it blocks until the message arrives on chain, and gets to the
// indicated confidence depth.
- StateWaitMsgLimited(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch) (*MsgLookup, error)
+ //
+ // NOTE: If a replacing message is found on chain, this method will return
+ // a MsgLookup for the replacing message - the MsgLookup.Message will be a different
+ // CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
+ // result of the execution of the replacing message.
+ //
+ // If the caller wants to ensure that exactly the requested message was executed,
+ // they must check that MsgLookup.Message is equal to the provided 'cid', or set the
+ // `allowReplaced` parameter to false. Without this check, and with `allowReplaced`
+ // set to true, both the requested and original message may appear as
+ // successfully executed on-chain, which may look like a double-spend.
+ //
+ // A replacing message is a message with a different CID and different gas
+ // values and signature, but with all other parameters matching (source/destination,
+ // nonce, params, etc.)
+ StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) //perm:read
// StateListMiners returns the addresses of every miner that has claimed power in the Power Actor
- StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error)
+ StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
// StateListActors returns the addresses of every actor in the state
- StateListActors(context.Context, types.TipSetKey) ([]address.Address, error)
+ StateListActors(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
// StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market
- StateMarketBalance(context.Context, address.Address, types.TipSetKey) (MarketBalance, error)
+ StateMarketBalance(context.Context, address.Address, types.TipSetKey) (MarketBalance, error) //perm:read
// StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market
- StateMarketParticipants(context.Context, types.TipSetKey) (map[string]MarketBalance, error)
+ StateMarketParticipants(context.Context, types.TipSetKey) (map[string]MarketBalance, error) //perm:read
// StateMarketDeals returns information about every deal in the Storage Market
- StateMarketDeals(context.Context, types.TipSetKey) (map[string]MarketDeal, error)
+ StateMarketDeals(context.Context, types.TipSetKey) (map[string]MarketDeal, error) //perm:read
// StateMarketStorageDeal returns information about the indicated deal
- StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error)
+ StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error) //perm:read
// StateLookupID retrieves the ID address of the given address
- StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error)
+ StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateAccountKey returns the public key address of the given ID address
- StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error)
+ StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateChangedActors returns all the actors whose states change between the two given state CIDs
// TODO: Should this take tipset keys instead?
- StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error)
- // StateGetReceipt returns the message receipt for the given message
- StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
+ StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) //perm:read
// StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set
- StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error)
+ StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error) //perm:read
// StateCompute is a flexible command that applies the given messages on the given tipset.
// The messages are run as though the VM were at the provided height.
- StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error)
+ //
+ // When called, StateCompute will:
+ // - Load the provided tipset, or use the current chain head if not provided
+ // - Compute the tipset state of the provided tipset on top of the parent state
+ // - (note that this step runs before vmheight is applied to the execution)
+ // - Execute state upgrade if any were scheduled at the epoch, or in null
+ // blocks preceding the tipset
+ // - Call the cron actor on null blocks preceding the tipset
+ // - For each block in the tipset
+ // - Apply messages in blocks in the specified tipset
+ // - Award block reward by calling the reward actor
+ // - Call the cron actor for the current epoch
+ // - If the specified vmheight is higher than the current epoch, apply any
+ // needed state upgrades to the state
+ // - Apply the specified messages to the state
+ //
+ // The vmheight parameter sets VM execution epoch, and can be used to simulate
+ // message execution in different network versions. If the specified vmheight
+ // epoch is higher than the epoch of the specified tipset, any state upgrades
+ // until the vmheight will be executed on the state before applying messages
+ // specified by the user.
+ //
+ // Note that the initial tipset state computation is not affected by the
+ // vmheight parameter - only the messages in the `apply` set are affected
+ //
+ // If the caller wants to simply compute the state, vmheight should be set to
+ // the epoch of the specified tipset.
+ //
+ // Messages in the `apply` parameter must have the correct nonces, and gas
+ // values set.
+ StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error) //perm:read
// StateVerifierStatus returns the data cap for the given address.
// Returns nil if there is no entry in the data cap table for the
// address.
- StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
+ StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
// StateVerifiedClientStatus returns the data cap for the given address.
// Returns nil if there is no entry in the data cap table for the
// address.
- StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
+ StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
// StateVerifiedClientStatus returns the address of the Verified Registry's root key
- StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error)
+ StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) //perm:read
// StateDealProviderCollateralBounds returns the min and max collateral a storage provider
// can issue. It takes the deal size and verified status as parameters.
- StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error)
+ StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error) //perm:read
// StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset.
// This is not used anywhere in the protocol itself, and is only for external consumption.
- StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error)
+ StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) //perm:read
// StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset.
// This is the value reported by the runtime interface to actors code.
- StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (CirculatingSupply, error)
+ StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (CirculatingSupply, error) //perm:read
// StateNetworkVersion returns the network version at the given tipset
- StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
+ StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) //perm:read
// MethodGroup: Msig
// The Msig methods are used to interact with multisig wallets on the
// filecoin network
// MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent
- MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
+ MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
// MsigGetVestingSchedule returns the vesting details of a given multisig.
- MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (MsigVesting, error)
+ MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (MsigVesting, error) //perm:read
// MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
// It takes the following params: , ,
- MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error)
+ MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) //perm:read
+
+ // MsigGetPending returns pending transactions for the given multisig
+ // wallet. Once pending transactions are fully approved, they will no longer
+ // appear here.
+ MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error) //perm:read
+
// MsigCreate creates a multisig wallet
// It takes the following params: , ,
//, ,
- MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error)
+ MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (*MessagePrototype, error) //perm:sign
+
// MsigPropose proposes a multisig message
// It takes the following params: , , ,
// , ,
- MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
+ MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
// MsigApprove approves a previously-proposed multisig message by transaction ID
// It takes the following params: ,
- MsigApprove(context.Context, address.Address, uint64, address.Address) (cid.Cid, error)
+ MsigApprove(context.Context, address.Address, uint64, address.Address) (*MessagePrototype, error) //perm:sign
// MsigApproveTxnHash approves a previously-proposed multisig message, specified
// using both transaction ID and a hash of the parameters used in the
@@ -474,72 +613,91 @@ type FullNode interface {
// exactly the transaction you think you are.
// It takes the following params: , , , , ,
// , ,
- MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
+ MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
// MsigCancel cancels a previously-proposed multisig message
// It takes the following params: , , , ,
// , ,
- MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
+ MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
+
// MsigAddPropose proposes adding a signer in the multisig
// It takes the following params: , ,
// ,
- MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error)
+ MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (*MessagePrototype, error) //perm:sign
+
// MsigAddApprove approves a previously proposed AddSigner message
// It takes the following params: , , ,
// , ,
- MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error)
+ MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (*MessagePrototype, error) //perm:sign
+
// MsigAddCancel cancels a previously proposed AddSigner message
// It takes the following params: , , ,
// ,
- MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error)
+ MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (*MessagePrototype, error) //perm:sign
+
// MsigSwapPropose proposes swapping 2 signers in the multisig
// It takes the following params: , ,
// ,
- MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error)
+ MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (*MessagePrototype, error) //perm:sign
+
// MsigSwapApprove approves a previously proposed SwapSigner
// It takes the following params: , , ,
// , ,
- MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error)
+ MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (*MessagePrototype, error) //perm:sign
+
// MsigSwapCancel cancels a previously proposed SwapSigner message
// It takes the following params: , , ,
// ,
- MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error)
+ MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (*MessagePrototype, error) //perm:sign
// MsigRemoveSigner proposes the removal of a signer from the multisig.
// It accepts the multisig to make the change on, the proposer address to
// send the message from, the address to be removed, and a boolean
// indicating whether or not the signing threshold should be lowered by one
// along with the address removal.
- MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error)
-
- MarketEnsureAvailable(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error)
- // MarketFreeBalance
+ MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (*MessagePrototype, error) //perm:sign
+
+ // MarketAddBalance adds funds to the market actor
+ MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
+ // MarketGetReserved gets the amount of funds that are currently reserved for the address
+ MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) //perm:sign
+ // MarketReserveFunds reserves funds for a deal
+ MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
+ // MarketReleaseFunds releases funds reserved by MarketReserveFunds
+ MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error //perm:sign
+ // MarketWithdraw withdraws unlocked funds from the market actor
+ MarketWithdraw(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
// MethodGroup: Paych
// The Paych methods are for interacting with and managing payment channels
- PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error)
- PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error)
- PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error)
- PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error)
- PaychList(context.Context) ([]address.Address, error)
- PaychStatus(context.Context, address.Address) (*PaychStatus, error)
- PaychSettle(context.Context, address.Address) (cid.Cid, error)
- PaychCollect(context.Context, address.Address) (cid.Cid, error)
- PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error)
- PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []VoucherSpec) (*PaymentInfo, error)
- PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error
- PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error)
- PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*VoucherCreateResult, error)
- PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error)
- PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error)
- PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error)
+ PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error) //perm:sign
+ PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error) //perm:sign
+ PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error) //perm:sign
+ PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error) //perm:sign
+ PaychList(context.Context) ([]address.Address, error) //perm:read
+ PaychStatus(context.Context, address.Address) (*PaychStatus, error) //perm:read
+ PaychSettle(context.Context, address.Address) (cid.Cid, error) //perm:sign
+ PaychCollect(context.Context, address.Address) (cid.Cid, error) //perm:sign
+ PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) //perm:sign
+ PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []VoucherSpec) (*PaymentInfo, error) //perm:sign
+ PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error //perm:read
+ PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) //perm:read
+ PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*VoucherCreateResult, error) //perm:sign
+ PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) //perm:write
+ PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) //perm:write
+ PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) //perm:sign
+
+ // MethodGroup: Node
+ // These methods are general node management and status commands
+
+ NodeStatus(ctx context.Context, inclChainStatus bool) (NodeStatus, error) //perm:read
// CreateBackup creates node backup onder the specified file name. The
// method requires that the lotus daemon is running with the
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
// the path specified when calling CreateBackup is within the base path
- CreateBackup(ctx context.Context, fpath string) error
+ CreateBackup(ctx context.Context, fpath string) error //perm:admin
}
type FileRef struct {
@@ -574,6 +732,7 @@ type DealInfo struct {
ProposalCid cid.Cid
State storagemarket.StorageDealStatus
Message string // more information about deal state, particularly errors
+ DealStages *storagemarket.DealStages
Provider address.Address
DataRef *storagemarket.DataRef
@@ -587,6 +746,9 @@ type DealInfo struct {
CreationTime time.Time
Verified bool
+
+ TransferChannelID *datatransfer.ChannelID
+ DataTransfer *DataTransferChannel
}
type MsgLookup struct {
@@ -624,6 +786,7 @@ type Message struct {
type ActorState struct {
Balance types.BigInt
+ Code cid.Cid
State interface{}
}
@@ -725,7 +888,7 @@ func (o *QueryOffer) Order(client address.Address) RetrievalOrder {
Client: client,
Miner: o.Miner,
- MinerPeer: o.MinerPeer,
+ MinerPeer: &o.MinerPeer,
}
}
@@ -744,6 +907,8 @@ type RetrievalOrder struct {
Root cid.Cid
Piece *cid.Cid
Size uint64
+
+ LocalStore *multistore.StoreID // if specified, get data from local store
// TODO: support offset
Total types.BigInt
UnsealPrice types.BigInt
@@ -751,7 +916,7 @@ type RetrievalOrder struct {
PaymentIntervalIncrease uint64
Client address.Address
Miner address.Address
- MinerPeer retrievalmarket.RetrievalPeer
+ MinerPeer *retrievalmarket.RetrievalPeer
}
type InvocResult struct {
@@ -781,14 +946,31 @@ type StartDealParams struct {
VerifiedDeal bool
}
+func (s *StartDealParams) UnmarshalJSON(raw []byte) (err error) {
+ type sdpAlias StartDealParams
+
+ sdp := sdpAlias{
+ FastRetrieval: true,
+ }
+
+ if err := json.Unmarshal(raw, &sdp); err != nil {
+ return err
+ }
+
+ *s = StartDealParams(sdp)
+
+ return nil
+}
+
type IpldObject struct {
Cid cid.Cid
Obj interface{}
}
type ActiveSync struct {
- Base *types.TipSet
- Target *types.TipSet
+ WorkerID uint64
+ Base *types.TipSet
+ Target *types.TipSet
Stage SyncStateStage
Height abi.ChainEpoch
@@ -818,6 +1000,8 @@ const (
func (v SyncStateStage) String() string {
switch v {
+ case StageIdle:
+ return "idle"
case StageHeaders:
return "header sync"
case StagePersistHeaders:
@@ -858,11 +1042,12 @@ type DealCollateralBounds struct {
}
type CirculatingSupply struct {
- FilVested abi.TokenAmount
- FilMined abi.TokenAmount
- FilBurnt abi.TokenAmount
- FilLocked abi.TokenAmount
- FilCirculating abi.TokenAmount
+ FilVested abi.TokenAmount
+ FilMined abi.TokenAmount
+ FilBurnt abi.TokenAmount
+ FilLocked abi.TokenAmount
+ FilCirculating abi.TokenAmount
+ FilReserveDisbursed abi.TokenAmount
}
type MiningBaseInfo struct {
@@ -916,7 +1101,8 @@ const (
)
type Deadline struct {
- PostSubmissions bitfield.BitField
+ PostSubmissions bitfield.BitField
+ DisputableProofCount uint64
}
type Partition struct {
@@ -948,3 +1134,13 @@ type MessageMatch struct {
To address.Address
From address.Address
}
+
+type MsigTransaction struct {
+ ID int64
+ To address.Address
+ Value abi.TokenAmount
+ Method abi.MethodNum
+ Params []byte
+
+ Approved []address.Address
+}
diff --git a/api/api_gateway.go b/api/api_gateway.go
index 07fb5deb34e..6db1c8e45a1 100644
--- a/api/api_gateway.go
+++ b/api/api_gateway.go
@@ -8,13 +8,27 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
- "github.com/filecoin-project/go-state-types/network"
+ apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
)
-type GatewayAPI interface {
+// MODIFYING THE API INTERFACE
+//
+// NOTE: This is the V1 (Unstable) API - to add methods to the V0 (Stable) API
+// you'll have to add those methods to interfaces in `api/v0api`
+//
+// When adding / changing methods in this file:
+// * Do the change here
+// * Adjust implementation in `node/impl/`
+// * Run `make gen` - this will:
+// * Generate proxy structs
+// * Generate mocks
+// * Generate markdown docs
+// * Generate openrpc blobs
+
+type Gateway interface {
ChainHasObj(context.Context, cid.Cid) (bool, error)
ChainHead(ctx context.Context) (*types.TipSet, error)
ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error)
@@ -27,10 +41,11 @@ type GatewayAPI interface {
MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
+ MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error)
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
- StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
+ StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) //perm:read
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error)
@@ -38,7 +53,11 @@ type GatewayAPI interface {
StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error)
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
- StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
+ StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error)
+ StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
- StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*MsgLookup, error)
+ StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
+ StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
+ WalletBalance(context.Context, address.Address) (types.BigInt, error)
+ Version(context.Context) (APIVersion, error)
}
diff --git a/api/api_net.go b/api/api_net.go
new file mode 100644
index 00000000000..4cf9ca336a3
--- /dev/null
+++ b/api/api_net.go
@@ -0,0 +1,66 @@
+package api
+
+import (
+ "context"
+
+ metrics "github.com/libp2p/go-libp2p-core/metrics"
+ "github.com/libp2p/go-libp2p-core/network"
+ "github.com/libp2p/go-libp2p-core/peer"
+ "github.com/libp2p/go-libp2p-core/protocol"
+)
+
+// MODIFYING THE API INTERFACE
+//
+// When adding / changing methods in this file:
+// * Do the change here
+// * Adjust implementation in `node/impl/`
+// * Run `make gen` - this will:
+// * Generate proxy structs
+// * Generate mocks
+// * Generate markdown docs
+// * Generate openrpc blobs
+
+type Net interface {
+ // MethodGroup: Net
+
+ NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) //perm:read
+ NetPeers(context.Context) ([]peer.AddrInfo, error) //perm:read
+ NetConnect(context.Context, peer.AddrInfo) error //perm:write
+ NetAddrsListen(context.Context) (peer.AddrInfo, error) //perm:read
+ NetDisconnect(context.Context, peer.ID) error //perm:write
+ NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) //perm:read
+ NetPubsubScores(context.Context) ([]PubsubScore, error) //perm:read
+ NetAutoNatStatus(context.Context) (NatInfo, error) //perm:read
+ NetAgentVersion(ctx context.Context, p peer.ID) (string, error) //perm:read
+ NetPeerInfo(context.Context, peer.ID) (*ExtendedPeerInfo, error) //perm:read
+
+ // NetBandwidthStats returns statistics about the node's total bandwidth
+ // usage and current rate across all peers and protocols.
+ NetBandwidthStats(ctx context.Context) (metrics.Stats, error) //perm:read
+
+ // NetBandwidthStatsByPeer returns statistics about the node's bandwidth
+ // usage and current rate per peer.
+ NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) //perm:read
+
+ // NetBandwidthStatsByProtocol returns statistics about the node's bandwidth
+ // usage and current rate per protocol.
+ NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) //perm:read
+
+ // ConnectionGater API
+ NetBlockAdd(ctx context.Context, acl NetBlockList) error //perm:admin
+ NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin
+ NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read
+
+ // ID returns peerID of libp2p node backing this API
+ ID(context.Context) (peer.ID, error) //perm:read
+}
+
+type CommonNet interface {
+ Common
+ Net
+}
+
+type NatInfo struct {
+ Reachability network.Reachability
+ PublicAddr string
+}
diff --git a/api/api_storage.go b/api/api_storage.go
index 2176456b7df..154abcea713 100644
--- a/api/api_storage.go
+++ b/api/api_storage.go
@@ -5,118 +5,205 @@ import (
"context"
"time"
- datatransfer "github.com/filecoin-project/go-data-transfer"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
"github.com/google/uuid"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/filecoin-project/go-address"
+ datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/piecestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
+ "github.com/filecoin-project/specs-storage/storage"
+
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
+ "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
)
+// MODIFYING THE API INTERFACE
+//
+// When adding / changing methods in this file:
+// * Do the change here
+// * Adjust implementation in `node/impl/`
+// * Run `make gen` - this will:
+// * Generate proxy structs
+// * Generate mocks
+// * Generate markdown docs
+// * Generate openrpc blobs
+
// StorageMiner is a low-level interface to the Filecoin network storage miner node
type StorageMiner interface {
Common
+ Net
- ActorAddress(context.Context) (address.Address, error)
+ ActorAddress(context.Context) (address.Address, error) //perm:read
- ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error)
+ ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error) //perm:read
+ ActorAddressConfig(ctx context.Context) (AddressConfig, error) //perm:read
- MiningBase(context.Context) (*types.TipSet, error)
+ MiningBase(context.Context) (*types.TipSet, error) //perm:read
// Temp api for testing
- PledgeSector(context.Context) error
+ PledgeSector(context.Context) (abi.SectorID, error) //perm:write
// Get the status of a given sector by ID
- SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error)
+ SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error) //perm:read
+
+ // Add piece to an open sector. If no sectors with enough space are open,
+ // either a new sector will be created, or this call will block until more
+ // sectors can be created.
+ SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d PieceDealInfo) (SectorOffset, error) //perm:admin
+
+ SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error //perm:admin
// List all staged sectors
- SectorsList(context.Context) ([]abi.SectorNumber, error)
+ SectorsList(context.Context) ([]abi.SectorNumber, error) //perm:read
- SectorsRefs(context.Context) (map[string][]SealedRef, error)
+ // Get summary info of sectors
+ SectorsSummary(ctx context.Context) (map[SectorState]int, error) //perm:read
+
+ // List sectors in particular states
+ SectorsListInStates(context.Context, []SectorState) ([]abi.SectorNumber, error) //perm:read
+
+ SectorsRefs(context.Context) (map[string][]SealedRef, error) //perm:read
// SectorStartSealing can be called on sectors in Empty or WaitDeals states
// to trigger sealing early
- SectorStartSealing(context.Context, abi.SectorNumber) error
+ SectorStartSealing(context.Context, abi.SectorNumber) error //perm:write
// SectorSetSealDelay sets the time that a newly-created sector
// waits for more deals before it starts sealing
- SectorSetSealDelay(context.Context, time.Duration) error
+ SectorSetSealDelay(context.Context, time.Duration) error //perm:write
// SectorGetSealDelay gets the time that a newly-created sector
// waits for more deals before it starts sealing
- SectorGetSealDelay(context.Context) (time.Duration, error)
+ SectorGetSealDelay(context.Context) (time.Duration, error) //perm:read
// SectorSetExpectedSealDuration sets the expected time for a sector to seal
- SectorSetExpectedSealDuration(context.Context, time.Duration) error
+ SectorSetExpectedSealDuration(context.Context, time.Duration) error //perm:write
// SectorGetExpectedSealDuration gets the expected time for a sector to seal
- SectorGetExpectedSealDuration(context.Context) (time.Duration, error)
- SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error
- SectorRemove(context.Context, abi.SectorNumber) error
- SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error
-
- StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error)
- StorageLocal(ctx context.Context) (map[stores.ID]string, error)
- StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error)
+ SectorGetExpectedSealDuration(context.Context) (time.Duration, error) //perm:read
+ SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error //perm:admin
+ // SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can
+ // be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties.
+ SectorRemove(context.Context, abi.SectorNumber) error //perm:admin
+ // SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then
+ // automatically removes it from storage
+ SectorTerminate(context.Context, abi.SectorNumber) error //perm:admin
+ // SectorTerminateFlush immediately sends a terminate message with sectors batched for termination.
+ // Returns null if message wasn't sent
+ SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) //perm:admin
+ // SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
+ SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
+ SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error //perm:admin
+ // SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
+ // Returns null if message wasn't sent
+ SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin
+ // SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message
+ SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
+ // SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit.
+ // Returns null if message wasn't sent
+ SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) //perm:admin
+ // SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
+ SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
// WorkerConnect tells the node to connect to workers RPC
- WorkerConnect(context.Context, string) error
- WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error)
- WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error)
- storiface.WorkerReturn
+ WorkerConnect(context.Context, string) error //perm:admin retry:true
+ WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) //perm:admin
+ WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) //perm:admin
+
+ //storiface.WorkerReturn
+ ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true
+ ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true
+ ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error //perm:admin retry:true
+ ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error //perm:admin retry:true
+ ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error //perm:admin retry:true
+ ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
+ ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
+ ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
+ ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
+ ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true
+ ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
// SealingSchedDiag dumps internal sealing scheduler state
- SealingSchedDiag(context.Context) (interface{}, error)
-
- stores.SectorIndex
-
- MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error
- MarketListDeals(ctx context.Context) ([]MarketDeal, error)
- MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error)
- MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error)
- MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error)
- MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error
- MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error)
- MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error
- MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error)
- MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)
- MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
- // MinerRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
- MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
- // ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
- MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
-
- DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error
- DealsList(ctx context.Context) ([]MarketDeal, error)
- DealsConsiderOnlineStorageDeals(context.Context) (bool, error)
- DealsSetConsiderOnlineStorageDeals(context.Context, bool) error
- DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error)
- DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error
- DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error)
- DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error
- DealsConsiderOfflineStorageDeals(context.Context) (bool, error)
- DealsSetConsiderOfflineStorageDeals(context.Context, bool) error
- DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error)
- DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error
-
- StorageAddLocal(ctx context.Context, path string) error
-
- PiecesListPieces(ctx context.Context) ([]cid.Cid, error)
- PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error)
- PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error)
- PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error)
+ SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin
+ SealingAbort(ctx context.Context, call storiface.CallID) error //perm:admin
+
+ //stores.SectorIndex
+ StorageAttach(context.Context, stores.StorageInfo, fsutil.FsStat) error //perm:admin
+ StorageInfo(context.Context, stores.ID) (stores.StorageInfo, error) //perm:admin
+ StorageReportHealth(context.Context, stores.ID, stores.HealthReport) error //perm:admin
+ StorageDeclareSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error //perm:admin
+ StorageDropSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType) error //perm:admin
+ StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) //perm:admin
+ StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) //perm:admin
+ StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin
+ StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin
+ StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin
+
+ StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin
+ StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) //perm:admin
+
+ MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
+ MarketListDeals(ctx context.Context) ([]MarketDeal, error) //perm:read
+ MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) //perm:read
+ MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) //perm:read
+ MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) //perm:read
+ MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin
+ MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) //perm:read
+ MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error //perm:admin
+ MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) //perm:read
+ MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write
+ MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write
+ // MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
+ MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
+ // MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
+ MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
+ MarketPendingDeals(ctx context.Context) (PendingDealInfo, error) //perm:write
+ MarketPublishPendingDeals(ctx context.Context) error //perm:admin
+
+ DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error //perm:admin
+ DealsList(ctx context.Context) ([]MarketDeal, error) //perm:admin
+ DealsConsiderOnlineStorageDeals(context.Context) (bool, error) //perm:admin
+ DealsSetConsiderOnlineStorageDeals(context.Context, bool) error //perm:admin
+ DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error) //perm:admin
+ DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error //perm:admin
+ DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error) //perm:admin
+ DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error //perm:admin
+ DealsConsiderOfflineStorageDeals(context.Context) (bool, error) //perm:admin
+ DealsSetConsiderOfflineStorageDeals(context.Context, bool) error //perm:admin
+ DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error) //perm:admin
+ DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error //perm:admin
+ DealsConsiderVerifiedStorageDeals(context.Context) (bool, error) //perm:admin
+ DealsSetConsiderVerifiedStorageDeals(context.Context, bool) error //perm:admin
+ DealsConsiderUnverifiedStorageDeals(context.Context) (bool, error) //perm:admin
+ DealsSetConsiderUnverifiedStorageDeals(context.Context, bool) error //perm:admin
+
+ StorageAddLocal(ctx context.Context, path string) error //perm:admin
+
+ PiecesListPieces(ctx context.Context) ([]cid.Cid, error) //perm:read
+ PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) //perm:read
+ PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) //perm:read
+ PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) //perm:read
// CreateBackup creates node backup onder the specified file name. The
// method requires that the lotus-miner is running with the
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
// the path specified when calling CreateBackup is within the base path
- CreateBackup(ctx context.Context, fpath string) error
+ CreateBackup(ctx context.Context, fpath string) error //perm:admin
+
+ CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
+
+ ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) //perm:read
}
+var _ storiface.WorkerReturn = *new(StorageMiner)
+var _ stores.SectorIndex = *new(StorageMiner)
+
type SealRes struct {
Err string
GoErr error `json:"-"`
@@ -194,3 +281,54 @@ func (st *SealSeed) Equals(ost *SealSeed) bool {
}
type SectorState string
+
+type AddrUse int
+
+const (
+ PreCommitAddr AddrUse = iota
+ CommitAddr
+ DealPublishAddr
+ PoStAddr
+
+ TerminateSectorsAddr
+)
+
+type AddressConfig struct {
+ PreCommitControl []address.Address
+ CommitControl []address.Address
+ TerminateControl []address.Address
+ DealPublishControl []address.Address
+
+ DisableOwnerFallback bool
+ DisableWorkerFallback bool
+}
+
+// PendingDealInfo has info about pending deals and when they are due to be
+// published
+type PendingDealInfo struct {
+ Deals []market.ClientDealProposal
+ PublishPeriodStart time.Time
+ PublishPeriod time.Duration
+}
+
+type SectorOffset struct {
+ Sector abi.SectorNumber
+ Offset abi.PaddedPieceSize
+}
+
+// DealInfo is a tuple of deal identity and its schedule
+type PieceDealInfo struct {
+ PublishCid *cid.Cid
+ DealID abi.DealID
+ DealProposal *market.DealProposal
+ DealSchedule DealSchedule
+ KeepUnsealed bool
+}
+
+// DealSchedule communicates the time interval of a storage deal. The deal must
+// appear in a sealed (proven) sector no later than StartEpoch, otherwise it
+// is invalid.
+type DealSchedule struct {
+ StartEpoch abi.ChainEpoch
+ EndEpoch abi.ChainEpoch
+}
diff --git a/api/api_test.go b/api/api_test.go
index 34c47f432c5..738e1b06799 100644
--- a/api/api_test.go
+++ b/api/api_test.go
@@ -37,6 +37,18 @@ func TestDoesntDependOnFFI(t *testing.T) {
}
}
+func TestDoesntDependOnBuild(t *testing.T) {
+ deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, pkg := range strings.Fields(string(deps)) {
+ if pkg == "github.com/filecoin-project/lotus/build" {
+ t.Fatal("api depends on build")
+ }
+ }
+}
+
func TestReturnTypes(t *testing.T) {
errType := reflect.TypeOf(new(error)).Elem()
bareIface := reflect.TypeOf(new(interface{})).Elem()
@@ -99,5 +111,11 @@ func TestReturnTypes(t *testing.T) {
t.Run("common", tst(new(Common)))
t.Run("full", tst(new(FullNode)))
t.Run("miner", tst(new(StorageMiner)))
- t.Run("worker", tst(new(WorkerAPI)))
+ t.Run("worker", tst(new(Worker)))
+}
+
+func TestPermTags(t *testing.T) {
+ _ = PermissionedFullAPI(&FullNodeStruct{})
+ _ = PermissionedStorMinerAPI(&StorageMinerStruct{})
+ _ = PermissionedWorkerAPI(&WorkerStruct{})
}
diff --git a/api/api_wallet.go b/api/api_wallet.go
index 88ad8f43a7a..973aaaf6d85 100644
--- a/api/api_wallet.go
+++ b/api/api_wallet.go
@@ -34,14 +34,14 @@ type MsgMeta struct {
Extra []byte
}
-type WalletAPI interface {
- WalletNew(context.Context, types.KeyType) (address.Address, error)
- WalletHas(context.Context, address.Address) (bool, error)
- WalletList(context.Context) ([]address.Address, error)
+type Wallet interface {
+ WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:admin
+ WalletHas(context.Context, address.Address) (bool, error) //perm:admin
+ WalletList(context.Context) ([]address.Address, error) //perm:admin
- WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta MsgMeta) (*crypto.Signature, error)
+ WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta MsgMeta) (*crypto.Signature, error) //perm:admin
- WalletExport(context.Context, address.Address) (*types.KeyInfo, error)
- WalletImport(context.Context, *types.KeyInfo) (address.Address, error)
- WalletDelete(context.Context, address.Address) error
+ WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
+ WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin
+ WalletDelete(context.Context, address.Address) error //perm:admin
}
diff --git a/api/api_worker.go b/api/api_worker.go
index 036748ec6f9..4553c30e095 100644
--- a/api/api_worker.go
+++ b/api/api_worker.go
@@ -4,29 +4,69 @@ import (
"context"
"github.com/google/uuid"
+ "github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
-
- "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/specs-storage/storage"
)
-type WorkerAPI interface {
- Version(context.Context) (build.Version, error)
- // TODO: Info() (name, ...) ?
+// MODIFYING THE API INTERFACE
+//
+// When adding / changing methods in this file:
+// * Do the change here
+// * Adjust implementation in `node/impl/`
+// * Run `make gen` - this will:
+// * Generate proxy structs
+// * Generate mocks
+// * Generate markdown docs
+// * Generate openrpc blobs
+
+type Worker interface {
+ Version(context.Context) (Version, error) //perm:admin
- TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) // TaskType -> Weight
- Paths(context.Context) ([]stores.StoragePath, error)
- Info(context.Context) (storiface.WorkerInfo, error)
+ // TaskType -> Weight
+ TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) //perm:admin
+ Paths(context.Context) ([]stores.StoragePath, error) //perm:admin
+ Info(context.Context) (storiface.WorkerInfo, error) //perm:admin
- storiface.WorkerCalls
+ // storiface.WorkerCalls
+ AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) //perm:admin
+ SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin
+ SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) //perm:admin
+ SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) //perm:admin
+ SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) //perm:admin
+ FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin
+ ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) //perm:admin
+ MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) //perm:admin
+ UnsealPiece(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin
+ Fetch(context.Context, storage.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) //perm:admin
+
+ TaskDisable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin
+ TaskEnable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin
// Storage / Other
- Remove(ctx context.Context, sector abi.SectorID) error
+ Remove(ctx context.Context, sector abi.SectorID) error //perm:admin
+
+ StorageAddLocal(ctx context.Context, path string) error //perm:admin
- StorageAddLocal(ctx context.Context, path string) error
+ // SetEnabled marks the worker as enabled/disabled. Note that this setting
+ // may take a few seconds to propagate to task scheduler
+ SetEnabled(ctx context.Context, enabled bool) error //perm:admin
- Session(context.Context) (uuid.UUID, error)
+ Enabled(ctx context.Context) (bool, error) //perm:admin
+
+ // WaitQuiet blocks until there are no tasks running
+ WaitQuiet(ctx context.Context) error //perm:admin
+
+ // returns a random UUID of worker session, generated randomly when worker
+ // process starts
+ ProcessSession(context.Context) (uuid.UUID, error) //perm:admin
+
+ // Like ProcessSession, but returns an error when worker is disabled
+ Session(context.Context) (uuid.UUID, error) //perm:admin
}
+
+var _ storiface.WorkerCalls = *new(Worker)
diff --git a/api/apibstore/apibstore.go b/api/apibstore/apibstore.go
deleted file mode 100644
index cf9f4f24c66..00000000000
--- a/api/apibstore/apibstore.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package apibstore
-
-import (
- "context"
-
- blocks "github.com/ipfs/go-block-format"
- "github.com/ipfs/go-cid"
- "golang.org/x/xerrors"
-
- "github.com/filecoin-project/lotus/lib/blockstore"
-)
-
-type ChainIO interface {
- ChainReadObj(context.Context, cid.Cid) ([]byte, error)
- ChainHasObj(context.Context, cid.Cid) (bool, error)
-}
-
-type apiBStore struct {
- api ChainIO
-}
-
-func NewAPIBlockstore(cio ChainIO) blockstore.Blockstore {
- return &apiBStore{
- api: cio,
- }
-}
-
-func (a *apiBStore) DeleteBlock(cid.Cid) error {
- return xerrors.New("not supported")
-}
-
-func (a *apiBStore) Has(c cid.Cid) (bool, error) {
- return a.api.ChainHasObj(context.TODO(), c)
-}
-
-func (a *apiBStore) Get(c cid.Cid) (blocks.Block, error) {
- bb, err := a.api.ChainReadObj(context.TODO(), c)
- if err != nil {
- return nil, err
- }
- return blocks.NewBlockWithCid(bb, c)
-}
-
-func (a *apiBStore) GetSize(c cid.Cid) (int, error) {
- bb, err := a.api.ChainReadObj(context.TODO(), c)
- if err != nil {
- return 0, err
- }
- return len(bb), nil
-}
-
-func (a *apiBStore) Put(blocks.Block) error {
- return xerrors.New("not supported")
-}
-
-func (a *apiBStore) PutMany([]blocks.Block) error {
- return xerrors.New("not supported")
-}
-
-func (a *apiBStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
- return nil, xerrors.New("not supported")
-}
-
-func (a *apiBStore) HashOnRead(enabled bool) {
- return
-}
-
-var _ blockstore.Blockstore = &apiBStore{}
diff --git a/api/apistruct/permissioned.go b/api/apistruct/permissioned.go
deleted file mode 100644
index 86902d31b29..00000000000
--- a/api/apistruct/permissioned.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package apistruct
-
-import (
- "github.com/filecoin-project/go-jsonrpc/auth"
- "github.com/filecoin-project/lotus/api"
-)
-
-const (
- // When changing these, update docs/API.md too
-
- PermRead auth.Permission = "read" // default
- PermWrite auth.Permission = "write"
- PermSign auth.Permission = "sign" // Use wallet keys for signing
- PermAdmin auth.Permission = "admin" // Manage permissions
-)
-
-var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin}
-var DefaultPerms = []auth.Permission{PermRead}
-
-func PermissionedStorMinerAPI(a api.StorageMiner) api.StorageMiner {
- var out StorageMinerStruct
- auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
- auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal)
- return &out
-}
-
-func PermissionedFullAPI(a api.FullNode) api.FullNode {
- var out FullNodeStruct
- auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
- auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal)
- return &out
-}
-
-func PermissionedWorkerAPI(a api.WorkerAPI) api.WorkerAPI {
- var out WorkerStruct
- auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
- return &out
-}
-
-func PermissionedWalletAPI(a api.WalletAPI) api.WalletAPI {
- var out WalletStruct
- auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
- return &out
-}
diff --git a/api/apistruct/struct.go b/api/apistruct/struct.go
deleted file mode 100644
index 3a4ae75a8f3..00000000000
--- a/api/apistruct/struct.go
+++ /dev/null
@@ -1,1688 +0,0 @@
-package apistruct
-
-import (
- "context"
- "io"
- "time"
-
- "github.com/google/uuid"
- "github.com/ipfs/go-cid"
- metrics "github.com/libp2p/go-libp2p-core/metrics"
- "github.com/libp2p/go-libp2p-core/network"
- "github.com/libp2p/go-libp2p-core/peer"
- protocol "github.com/libp2p/go-libp2p-core/protocol"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-bitfield"
- datatransfer "github.com/filecoin-project/go-data-transfer"
- "github.com/filecoin-project/go-fil-markets/piecestore"
- "github.com/filecoin-project/go-fil-markets/retrievalmarket"
- "github.com/filecoin-project/go-fil-markets/storagemarket"
- "github.com/filecoin-project/go-jsonrpc/auth"
- "github.com/filecoin-project/go-multistore"
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/big"
- "github.com/filecoin-project/go-state-types/crypto"
- "github.com/filecoin-project/go-state-types/dline"
- stnetwork "github.com/filecoin-project/go-state-types/network"
- "github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
- "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
- "github.com/filecoin-project/lotus/extern/sector-storage/stores"
- "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
- marketevents "github.com/filecoin-project/lotus/markets/loggers"
- "github.com/filecoin-project/specs-storage/storage"
-
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
- "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/node/modules/dtypes"
-)
-
-// All permissions are listed in permissioned.go
-var _ = AllPermissions
-
-type CommonStruct struct {
- Internal struct {
- AuthVerify func(ctx context.Context, token string) ([]auth.Permission, error) `perm:"read"`
- AuthNew func(ctx context.Context, perms []auth.Permission) ([]byte, error) `perm:"admin"`
-
- NetConnectedness func(context.Context, peer.ID) (network.Connectedness, error) `perm:"read"`
- NetPeers func(context.Context) ([]peer.AddrInfo, error) `perm:"read"`
- NetConnect func(context.Context, peer.AddrInfo) error `perm:"write"`
- NetAddrsListen func(context.Context) (peer.AddrInfo, error) `perm:"read"`
- NetDisconnect func(context.Context, peer.ID) error `perm:"write"`
- NetFindPeer func(context.Context, peer.ID) (peer.AddrInfo, error) `perm:"read"`
- NetPubsubScores func(context.Context) ([]api.PubsubScore, error) `perm:"read"`
- NetAutoNatStatus func(context.Context) (api.NatInfo, error) `perm:"read"`
- NetBandwidthStats func(ctx context.Context) (metrics.Stats, error) `perm:"read"`
- NetBandwidthStatsByPeer func(ctx context.Context) (map[string]metrics.Stats, error) `perm:"read"`
- NetBandwidthStatsByProtocol func(ctx context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"`
- NetAgentVersion func(ctx context.Context, p peer.ID) (string, error) `perm:"read"`
-
- ID func(context.Context) (peer.ID, error) `perm:"read"`
- Version func(context.Context) (api.Version, error) `perm:"read"`
-
- LogList func(context.Context) ([]string, error) `perm:"write"`
- LogSetLevel func(context.Context, string, string) error `perm:"write"`
-
- Shutdown func(context.Context) error `perm:"admin"`
- Session func(context.Context) (uuid.UUID, error) `perm:"read"`
- Closing func(context.Context) (<-chan struct{}, error) `perm:"read"`
- }
-}
-
-// FullNodeStruct implements API passing calls to user-provided function values.
-type FullNodeStruct struct {
- CommonStruct
-
- Internal struct {
- ChainNotify func(context.Context) (<-chan []*api.HeadChange, error) `perm:"read"`
- ChainHead func(context.Context) (*types.TipSet, error) `perm:"read"`
- ChainGetRandomnessFromTickets func(context.Context, types.TipSetKey, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) (abi.Randomness, error) `perm:"read"`
- ChainGetRandomnessFromBeacon func(context.Context, types.TipSetKey, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) (abi.Randomness, error) `perm:"read"`
- ChainGetBlock func(context.Context, cid.Cid) (*types.BlockHeader, error) `perm:"read"`
- ChainGetTipSet func(context.Context, types.TipSetKey) (*types.TipSet, error) `perm:"read"`
- ChainGetBlockMessages func(context.Context, cid.Cid) (*api.BlockMessages, error) `perm:"read"`
- ChainGetParentReceipts func(context.Context, cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"`
- ChainGetParentMessages func(context.Context, cid.Cid) ([]api.Message, error) `perm:"read"`
- ChainGetTipSetByHeight func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) `perm:"read"`
- ChainReadObj func(context.Context, cid.Cid) ([]byte, error) `perm:"read"`
- ChainDeleteObj func(context.Context, cid.Cid) error `perm:"admin"`
- ChainHasObj func(context.Context, cid.Cid) (bool, error) `perm:"read"`
- ChainStatObj func(context.Context, cid.Cid, cid.Cid) (api.ObjStat, error) `perm:"read"`
- ChainSetHead func(context.Context, types.TipSetKey) error `perm:"admin"`
- ChainGetGenesis func(context.Context) (*types.TipSet, error) `perm:"read"`
- ChainTipSetWeight func(context.Context, types.TipSetKey) (types.BigInt, error) `perm:"read"`
- ChainGetNode func(ctx context.Context, p string) (*api.IpldObject, error) `perm:"read"`
- ChainGetMessage func(context.Context, cid.Cid) (*types.Message, error) `perm:"read"`
- ChainGetPath func(context.Context, types.TipSetKey, types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"`
- ChainExport func(context.Context, abi.ChainEpoch, bool, types.TipSetKey) (<-chan []byte, error) `perm:"read"`
-
- BeaconGetEntry func(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
-
- GasEstimateGasPremium func(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"`
- GasEstimateGasLimit func(context.Context, *types.Message, types.TipSetKey) (int64, error) `perm:"read"`
- GasEstimateFeeCap func(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"`
- GasEstimateMessageGas func(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) `perm:"read"`
-
- SyncState func(context.Context) (*api.SyncState, error) `perm:"read"`
- SyncSubmitBlock func(ctx context.Context, blk *types.BlockMsg) error `perm:"write"`
- SyncIncomingBlocks func(ctx context.Context) (<-chan *types.BlockHeader, error) `perm:"read"`
- SyncCheckpoint func(ctx context.Context, key types.TipSetKey) error `perm:"admin"`
- SyncMarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"`
- SyncUnmarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"`
- SyncUnmarkAllBad func(ctx context.Context) error `perm:"admin"`
- SyncCheckBad func(ctx context.Context, bcid cid.Cid) (string, error) `perm:"read"`
- SyncValidateTipset func(ctx context.Context, tsk types.TipSetKey) (bool, error) `perm:"read"`
-
- MpoolGetConfig func(context.Context) (*types.MpoolConfig, error) `perm:"read"`
- MpoolSetConfig func(context.Context, *types.MpoolConfig) error `perm:"write"`
-
- MpoolSelect func(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) `perm:"read"`
-
- MpoolPending func(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"`
- MpoolClear func(context.Context, bool) error `perm:"write"`
-
- MpoolPush func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"`
- MpoolPushUntrusted func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"`
-
- MpoolPushMessage func(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error) `perm:"sign"`
- MpoolGetNonce func(context.Context, address.Address) (uint64, error) `perm:"read"`
- MpoolSub func(context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"`
-
- MpoolBatchPush func(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"`
- MpoolBatchPushUntrusted func(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"`
- MpoolBatchPushMessage func(ctx context.Context, msgs []*types.Message, spec *api.MessageSendSpec) ([]*types.SignedMessage, error) `perm:"sign"`
-
- MinerGetBaseInfo func(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"`
- MinerCreateBlock func(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"`
-
- WalletNew func(context.Context, types.KeyType) (address.Address, error) `perm:"write"`
- WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"`
- WalletList func(context.Context) ([]address.Address, error) `perm:"write"`
- WalletBalance func(context.Context, address.Address) (types.BigInt, error) `perm:"read"`
- WalletSign func(context.Context, address.Address, []byte) (*crypto.Signature, error) `perm:"sign"`
- WalletSignMessage func(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) `perm:"sign"`
- WalletVerify func(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) `perm:"read"`
- WalletDefaultAddress func(context.Context) (address.Address, error) `perm:"write"`
- WalletSetDefault func(context.Context, address.Address) error `perm:"admin"`
- WalletExport func(context.Context, address.Address) (*types.KeyInfo, error) `perm:"admin"`
- WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"`
- WalletDelete func(context.Context, address.Address) error `perm:"write"`
- WalletValidateAddress func(context.Context, string) (address.Address, error) `perm:"read"`
-
- ClientImport func(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) `perm:"admin"`
- ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"`
- ClientRemoveImport func(ctx context.Context, importID multistore.StoreID) error `perm:"admin"`
- ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"`
- ClientFindData func(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) `perm:"read"`
- ClientMinerQueryOffer func(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) `perm:"read"`
- ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
- ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"`
- ClientGetDealStatus func(context.Context, uint64) (string, error) `perm:"read"`
- ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"`
- ClientGetDealUpdates func(ctx context.Context) (<-chan api.DealInfo, error) `perm:"read"`
- ClientRetrieve func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error `perm:"admin"`
- ClientRetrieveWithEvents func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
- ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) `perm:"read"`
- ClientDealPieceCID func(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) `perm:"read"`
- ClientCalcCommP func(ctx context.Context, inpath string) (*api.CommPRet, error) `perm:"read"`
- ClientGenCar func(ctx context.Context, ref api.FileRef, outpath string) error `perm:"write"`
- ClientDealSize func(ctx context.Context, root cid.Cid) (api.DataSize, error) `perm:"read"`
- ClientListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"`
- ClientDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`
- ClientRestartDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"`
- ClientCancelDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"`
- ClientRetrieveTryRestartInsufficientFunds func(ctx context.Context, paymentChannel address.Address) error `perm:"write"`
-
- StateNetworkName func(context.Context) (dtypes.NetworkName, error) `perm:"read"`
- StateMinerSectors func(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"`
- StateMinerActiveSectors func(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"`
- StateMinerProvingDeadline func(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) `perm:"read"`
- StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) `perm:"read"`
- StateMinerInfo func(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) `perm:"read"`
- StateMinerDeadlines func(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) `perm:"read"`
- StateMinerPartitions func(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) `perm:"read"`
- StateMinerFaults func(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) `perm:"read"`
- StateAllMinerFaults func(context.Context, abi.ChainEpoch, types.TipSetKey) ([]*api.Fault, error) `perm:"read"`
- StateMinerRecoveries func(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) `perm:"read"`
- StateMinerPreCommitDepositForPower func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"`
- StateMinerInitialPledgeCollateral func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"`
- StateMinerAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
- StateMinerSectorAllocated func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) `perm:"read"`
- StateSectorPreCommitInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"`
- StateSectorGetInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"`
- StateSectorExpiration func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) `perm:"read"`
- StateSectorPartition func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorLocation, error) `perm:"read"`
- StateCall func(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) `perm:"read"`
- StateReplay func(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) `perm:"read"`
- StateGetActor func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) `perm:"read"`
- StateReadState func(context.Context, address.Address, types.TipSetKey) (*api.ActorState, error) `perm:"read"`
- StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) `perm:"read"`
- StateWaitMsgLimited func(context.Context, cid.Cid, uint64, abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"`
- StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"`
- StateListMiners func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"`
- StateListActors func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"`
- StateMarketBalance func(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) `perm:"read"`
- StateMarketParticipants func(context.Context, types.TipSetKey) (map[string]api.MarketBalance, error) `perm:"read"`
- StateMarketDeals func(context.Context, types.TipSetKey) (map[string]api.MarketDeal, error) `perm:"read"`
- StateMarketStorageDeal func(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) `perm:"read"`
- StateLookupID func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) `perm:"read"`
- StateAccountKey func(context.Context, address.Address, types.TipSetKey) (address.Address, error) `perm:"read"`
- StateChangedActors func(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) `perm:"read"`
- StateGetReceipt func(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) `perm:"read"`
- StateMinerSectorCount func(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) `perm:"read"`
- StateListMessages func(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"`
- StateCompute func(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) `perm:"read"`
- StateVerifierStatus func(context.Context, address.Address, types.TipSetKey) (*abi.StoragePower, error) `perm:"read"`
- StateVerifiedClientStatus func(context.Context, address.Address, types.TipSetKey) (*abi.StoragePower, error) `perm:"read"`
- StateVerifiedRegistryRootKey func(ctx context.Context, tsk types.TipSetKey) (address.Address, error) `perm:"read"`
- StateDealProviderCollateralBounds func(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (api.DealCollateralBounds, error) `perm:"read"`
- StateCirculatingSupply func(context.Context, types.TipSetKey) (abi.TokenAmount, error) `perm:"read"`
- StateVMCirculatingSupplyInternal func(context.Context, types.TipSetKey) (api.CirculatingSupply, error) `perm:"read"`
- StateNetworkVersion func(context.Context, types.TipSetKey) (stnetwork.Version, error) `perm:"read"`
-
- MsigGetAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
- MsigGetVestingSchedule func(context.Context, address.Address, types.TipSetKey) (api.MsigVesting, error) `perm:"read"`
- MsigGetVested func(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) `perm:"read"`
- MsigCreate func(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"`
- MsigPropose func(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
- MsigApprove func(context.Context, address.Address, uint64, address.Address) (cid.Cid, error) `perm:"sign"`
- MsigApproveTxnHash func(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
- MsigCancel func(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
- MsigAddPropose func(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) `perm:"sign"`
- MsigAddApprove func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) `perm:"sign"`
- MsigAddCancel func(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) `perm:"sign"`
- MsigSwapPropose func(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
- MsigSwapApprove func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
- MsigSwapCancel func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
- MsigRemoveSigner func(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) `perm:"sign"`
-
- MarketEnsureAvailable func(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"`
-
- PaychGet func(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) `perm:"sign"`
- PaychGetWaitReady func(context.Context, cid.Cid) (address.Address, error) `perm:"sign"`
- PaychAvailableFunds func(context.Context, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
- PaychAvailableFundsByFromTo func(context.Context, address.Address, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
- PaychList func(context.Context) ([]address.Address, error) `perm:"read"`
- PaychStatus func(context.Context, address.Address) (*api.PaychStatus, error) `perm:"read"`
- PaychSettle func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"`
- PaychCollect func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"`
- PaychAllocateLane func(context.Context, address.Address) (uint64, error) `perm:"sign"`
- PaychNewPayment func(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) `perm:"sign"`
- PaychVoucherCheck func(context.Context, *paych.SignedVoucher) error `perm:"read"`
- PaychVoucherCheckValid func(context.Context, address.Address, *paych.SignedVoucher) error `perm:"read"`
- PaychVoucherCheckSpendable func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) `perm:"read"`
- PaychVoucherAdd func(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) `perm:"write"`
- PaychVoucherCreate func(context.Context, address.Address, big.Int, uint64) (*api.VoucherCreateResult, error) `perm:"sign"`
- PaychVoucherList func(context.Context, address.Address) ([]*paych.SignedVoucher, error) `perm:"write"`
- PaychVoucherSubmit func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) `perm:"sign"`
-
- CreateBackup func(ctx context.Context, fpath string) error `perm:"admin"`
- }
-}
-
-func (c *FullNodeStruct) StateMinerSectorCount(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MinerSectors, error) {
- return c.Internal.StateMinerSectorCount(ctx, addr, tsk)
-}
-
-type StorageMinerStruct struct {
- CommonStruct
-
- Internal struct {
- ActorAddress func(context.Context) (address.Address, error) `perm:"read"`
- ActorSectorSize func(context.Context, address.Address) (abi.SectorSize, error) `perm:"read"`
-
- MiningBase func(context.Context) (*types.TipSet, error) `perm:"read"`
-
- MarketImportDealData func(context.Context, cid.Cid, string) error `perm:"write"`
- MarketListDeals func(ctx context.Context) ([]api.MarketDeal, error) `perm:"read"`
- MarketListRetrievalDeals func(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) `perm:"read"`
- MarketGetDealUpdates func(ctx context.Context) (<-chan storagemarket.MinerDeal, error) `perm:"read"`
- MarketListIncompleteDeals func(ctx context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"`
- MarketSetAsk func(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error `perm:"admin"`
- MarketGetAsk func(ctx context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"`
- MarketSetRetrievalAsk func(ctx context.Context, rask *retrievalmarket.Ask) error `perm:"admin"`
- MarketGetRetrievalAsk func(ctx context.Context) (*retrievalmarket.Ask, error) `perm:"read"`
- MarketListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"`
- MarketDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`
- MarketRestartDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"read"`
- MarketCancelDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"read"`
-
- PledgeSector func(context.Context) error `perm:"write"`
-
- SectorsStatus func(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) `perm:"read"`
- SectorsList func(context.Context) ([]abi.SectorNumber, error) `perm:"read"`
- SectorsRefs func(context.Context) (map[string][]api.SealedRef, error) `perm:"read"`
- SectorStartSealing func(context.Context, abi.SectorNumber) error `perm:"write"`
- SectorSetSealDelay func(context.Context, time.Duration) error `perm:"write"`
- SectorGetSealDelay func(context.Context) (time.Duration, error) `perm:"read"`
- SectorSetExpectedSealDuration func(context.Context, time.Duration) error `perm:"write"`
- SectorGetExpectedSealDuration func(context.Context) (time.Duration, error) `perm:"read"`
- SectorsUpdate func(context.Context, abi.SectorNumber, api.SectorState) error `perm:"admin"`
- SectorRemove func(context.Context, abi.SectorNumber) error `perm:"admin"`
- SectorMarkForUpgrade func(ctx context.Context, id abi.SectorNumber) error `perm:"admin"`
-
- WorkerConnect func(context.Context, string) error `perm:"admin" retry:"true"` // TODO: worker perm
- WorkerStats func(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) `perm:"admin"`
- WorkerJobs func(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) `perm:"admin"`
-
- ReturnAddPiece func(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error `perm:"admin" retry:"true"`
- ReturnSealPreCommit1 func(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error `perm:"admin" retry:"true"`
- ReturnSealPreCommit2 func(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error `perm:"admin" retry:"true"`
- ReturnSealCommit1 func(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error `perm:"admin" retry:"true"`
- ReturnSealCommit2 func(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error `perm:"admin" retry:"true"`
- ReturnFinalizeSector func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
- ReturnReleaseUnsealed func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
- ReturnMoveStorage func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
- ReturnUnsealPiece func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
- ReturnReadPiece func(ctx context.Context, callID storiface.CallID, ok bool, err string) error `perm:"admin" retry:"true"`
- ReturnFetch func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"`
-
- SealingSchedDiag func(context.Context) (interface{}, error) `perm:"admin"`
-
- StorageList func(context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"`
- StorageLocal func(context.Context) (map[stores.ID]string, error) `perm:"admin"`
- StorageStat func(context.Context, stores.ID) (fsutil.FsStat, error) `perm:"admin"`
- StorageAttach func(context.Context, stores.StorageInfo, fsutil.FsStat) error `perm:"admin"`
- StorageDeclareSector func(context.Context, stores.ID, abi.SectorID, storiface.SectorFileType, bool) error `perm:"admin"`
- StorageDropSector func(context.Context, stores.ID, abi.SectorID, storiface.SectorFileType) error `perm:"admin"`
- StorageFindSector func(context.Context, abi.SectorID, storiface.SectorFileType, abi.SectorSize, bool) ([]stores.SectorStorageInfo, error) `perm:"admin"`
- StorageInfo func(context.Context, stores.ID) (stores.StorageInfo, error) `perm:"admin"`
- StorageBestAlloc func(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, sealing storiface.PathType) ([]stores.StorageInfo, error) `perm:"admin"`
- StorageReportHealth func(ctx context.Context, id stores.ID, report stores.HealthReport) error `perm:"admin"`
- StorageLock func(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error `perm:"admin"`
- StorageTryLock func(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) `perm:"admin"`
-
- DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"write"`
- DealsList func(ctx context.Context) ([]api.MarketDeal, error) `perm:"read"`
- DealsConsiderOnlineStorageDeals func(context.Context) (bool, error) `perm:"read"`
- DealsSetConsiderOnlineStorageDeals func(context.Context, bool) error `perm:"admin"`
- DealsConsiderOnlineRetrievalDeals func(context.Context) (bool, error) `perm:"read"`
- DealsSetConsiderOnlineRetrievalDeals func(context.Context, bool) error `perm:"admin"`
- DealsConsiderOfflineStorageDeals func(context.Context) (bool, error) `perm:"read"`
- DealsSetConsiderOfflineStorageDeals func(context.Context, bool) error `perm:"admin"`
- DealsConsiderOfflineRetrievalDeals func(context.Context) (bool, error) `perm:"read"`
- DealsSetConsiderOfflineRetrievalDeals func(context.Context, bool) error `perm:"admin"`
- DealsPieceCidBlocklist func(context.Context) ([]cid.Cid, error) `perm:"read"`
- DealsSetPieceCidBlocklist func(context.Context, []cid.Cid) error `perm:"admin"`
-
- StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"`
-
- PiecesListPieces func(ctx context.Context) ([]cid.Cid, error) `perm:"read"`
- PiecesListCidInfos func(ctx context.Context) ([]cid.Cid, error) `perm:"read"`
- PiecesGetPieceInfo func(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) `perm:"read"`
- PiecesGetCIDInfo func(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) `perm:"read"`
-
- CreateBackup func(ctx context.Context, fpath string) error `perm:"admin"`
- }
-}
-
-type WorkerStruct struct {
- Internal struct {
- // TODO: lower perms
-
- Version func(context.Context) (build.Version, error) `perm:"admin"`
-
- TaskTypes func(context.Context) (map[sealtasks.TaskType]struct{}, error) `perm:"admin"`
- Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"`
- Info func(context.Context) (storiface.WorkerInfo, error) `perm:"admin"`
-
- AddPiece func(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) `perm:"admin"`
- SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"`
- SealPreCommit2 func(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) `perm:"admin"`
- SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) `perm:"admin"`
- SealCommit2 func(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) `perm:"admin"`
- FinalizeSector func(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) `perm:"admin"`
- ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) `perm:"admin"`
- MoveStorage func(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"`
- UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) `perm:"admin"`
- ReadPiece func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"`
- Fetch func(context.Context, abi.SectorID, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`
-
- Remove func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
- StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"`
-
- Session func(context.Context) (uuid.UUID, error) `perm:"admin"`
- }
-}
-
-type GatewayStruct struct {
- Internal struct {
- ChainGetBlockMessages func(ctx context.Context, c cid.Cid) (*api.BlockMessages, error)
- ChainGetMessage func(ctx context.Context, mc cid.Cid) (*types.Message, error)
- ChainGetTipSet func(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error)
- ChainGetTipSetByHeight func(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
- ChainHasObj func(context.Context, cid.Cid) (bool, error)
- ChainHead func(ctx context.Context) (*types.TipSet, error)
- ChainNotify func(ctx context.Context) (<-chan []*api.HeadChange, error)
- ChainReadObj func(context.Context, cid.Cid) ([]byte, error)
- GasEstimateMessageGas func(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
- MpoolPush func(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
- MsigGetAvailableBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
- MsigGetVested func(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
- StateAccountKey func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
- StateDealProviderCollateralBounds func(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
- StateGetActor func(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
- StateGetReceipt func(ctx context.Context, c cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error)
- StateLookupID func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
- StateListMiners func(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
- StateMinerInfo func(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error)
- StateMinerProvingDeadline func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
- StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
- StateMarketBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
- StateMarketStorageDeal func(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error)
- StateNetworkVersion func(ctx context.Context, tsk types.TipSetKey) (stnetwork.Version, error)
- StateVerifiedClientStatus func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
- StateWaitMsg func(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error)
- }
-}
-
-type WalletStruct struct {
- Internal struct {
- WalletNew func(context.Context, types.KeyType) (address.Address, error) `perm:"write"`
- WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"`
- WalletList func(context.Context) ([]address.Address, error) `perm:"write"`
- WalletSign func(context.Context, address.Address, []byte, api.MsgMeta) (*crypto.Signature, error) `perm:"sign"`
- WalletExport func(context.Context, address.Address) (*types.KeyInfo, error) `perm:"admin"`
- WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"`
- WalletDelete func(context.Context, address.Address) error `perm:"write"`
- }
-}
-
-// CommonStruct
-
-func (c *CommonStruct) AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) {
- return c.Internal.AuthVerify(ctx, token)
-}
-
-func (c *CommonStruct) AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) {
- return c.Internal.AuthNew(ctx, perms)
-}
-
-func (c *CommonStruct) NetPubsubScores(ctx context.Context) ([]api.PubsubScore, error) {
- return c.Internal.NetPubsubScores(ctx)
-}
-
-func (c *CommonStruct) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) {
- return c.Internal.NetConnectedness(ctx, pid)
-}
-
-func (c *CommonStruct) NetPeers(ctx context.Context) ([]peer.AddrInfo, error) {
- return c.Internal.NetPeers(ctx)
-}
-
-func (c *CommonStruct) NetConnect(ctx context.Context, p peer.AddrInfo) error {
- return c.Internal.NetConnect(ctx, p)
-}
-
-func (c *CommonStruct) NetAddrsListen(ctx context.Context) (peer.AddrInfo, error) {
- return c.Internal.NetAddrsListen(ctx)
-}
-
-func (c *CommonStruct) NetDisconnect(ctx context.Context, p peer.ID) error {
- return c.Internal.NetDisconnect(ctx, p)
-}
-
-func (c *CommonStruct) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) {
- return c.Internal.NetFindPeer(ctx, p)
-}
-
-func (c *CommonStruct) NetAutoNatStatus(ctx context.Context) (api.NatInfo, error) {
- return c.Internal.NetAutoNatStatus(ctx)
-}
-
-func (c *CommonStruct) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) {
- return c.Internal.NetBandwidthStats(ctx)
-}
-
-func (c *CommonStruct) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) {
- return c.Internal.NetBandwidthStatsByPeer(ctx)
-}
-
-func (c *CommonStruct) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) {
- return c.Internal.NetBandwidthStatsByProtocol(ctx)
-}
-
-func (c *CommonStruct) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) {
- return c.Internal.NetAgentVersion(ctx, p)
-}
-
-// ID implements API.ID
-func (c *CommonStruct) ID(ctx context.Context) (peer.ID, error) {
- return c.Internal.ID(ctx)
-}
-
-// Version implements API.Version
-func (c *CommonStruct) Version(ctx context.Context) (api.Version, error) {
- return c.Internal.Version(ctx)
-}
-
-func (c *CommonStruct) LogList(ctx context.Context) ([]string, error) {
- return c.Internal.LogList(ctx)
-}
-
-func (c *CommonStruct) LogSetLevel(ctx context.Context, group, level string) error {
- return c.Internal.LogSetLevel(ctx, group, level)
-}
-
-func (c *CommonStruct) Shutdown(ctx context.Context) error {
- return c.Internal.Shutdown(ctx)
-}
-
-func (c *CommonStruct) Session(ctx context.Context) (uuid.UUID, error) {
- return c.Internal.Session(ctx)
-}
-
-func (c *CommonStruct) Closing(ctx context.Context) (<-chan struct{}, error) {
- return c.Internal.Closing(ctx)
-}
-
-// FullNodeStruct
-
-func (c *FullNodeStruct) ClientListImports(ctx context.Context) ([]api.Import, error) {
- return c.Internal.ClientListImports(ctx)
-}
-
-func (c *FullNodeStruct) ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error {
- return c.Internal.ClientRemoveImport(ctx, importID)
-}
-
-func (c *FullNodeStruct) ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) {
- return c.Internal.ClientImport(ctx, ref)
-}
-
-func (c *FullNodeStruct) ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) {
- return c.Internal.ClientHasLocal(ctx, root)
-}
-
-func (c *FullNodeStruct) ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) {
- return c.Internal.ClientFindData(ctx, root, piece)
-}
-
-func (c *FullNodeStruct) ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) {
- return c.Internal.ClientMinerQueryOffer(ctx, miner, root, piece)
-}
-
-func (c *FullNodeStruct) ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) {
- return c.Internal.ClientStartDeal(ctx, params)
-}
-
-func (c *FullNodeStruct) ClientGetDealInfo(ctx context.Context, deal cid.Cid) (*api.DealInfo, error) {
- return c.Internal.ClientGetDealInfo(ctx, deal)
-}
-
-func (c *FullNodeStruct) ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) {
- return c.Internal.ClientGetDealStatus(ctx, statusCode)
-}
-
-func (c *FullNodeStruct) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) {
- return c.Internal.ClientListDeals(ctx)
-}
-
-func (c *FullNodeStruct) ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) {
- return c.Internal.ClientGetDealUpdates(ctx)
-}
-
-func (c *FullNodeStruct) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error {
- return c.Internal.ClientRetrieve(ctx, order, ref)
-}
-
-func (c *FullNodeStruct) ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
- return c.Internal.ClientRetrieveWithEvents(ctx, order, ref)
-}
-
-func (c *FullNodeStruct) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) {
- return c.Internal.ClientQueryAsk(ctx, p, miner)
-}
-
-func (c *FullNodeStruct) ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) {
- return c.Internal.ClientDealPieceCID(ctx, root)
-}
-
-func (c *FullNodeStruct) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) {
- return c.Internal.ClientCalcCommP(ctx, inpath)
-}
-
-func (c *FullNodeStruct) ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error {
- return c.Internal.ClientGenCar(ctx, ref, outpath)
-}
-
-func (c *FullNodeStruct) ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, error) {
- return c.Internal.ClientDealSize(ctx, root)
-}
-
-func (c *FullNodeStruct) ClientListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) {
- return c.Internal.ClientListDataTransfers(ctx)
-}
-
-func (c *FullNodeStruct) ClientDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) {
- return c.Internal.ClientDataTransferUpdates(ctx)
-}
-
-func (c *FullNodeStruct) ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error {
- return c.Internal.ClientRestartDataTransfer(ctx, transferID, otherPeer, isInitiator)
-}
-
-func (c *FullNodeStruct) ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error {
- return c.Internal.ClientCancelDataTransfer(ctx, transferID, otherPeer, isInitiator)
-}
-
-func (c *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error {
- return c.Internal.ClientRetrieveTryRestartInsufficientFunds(ctx, paymentChannel)
-}
-
-func (c *FullNodeStruct) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) {
- return c.Internal.GasEstimateGasPremium(ctx, nblocksincl, sender, gaslimit, tsk)
-}
-
-func (c *FullNodeStruct) GasEstimateFeeCap(ctx context.Context, msg *types.Message, maxqueueblks int64, tsk types.TipSetKey) (types.BigInt, error) {
- return c.Internal.GasEstimateFeeCap(ctx, msg, maxqueueblks, tsk)
-}
-
-func (c *FullNodeStruct) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) {
- return c.Internal.GasEstimateMessageGas(ctx, msg, spec, tsk)
-}
-
-func (c *FullNodeStruct) GasEstimateGasLimit(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (int64, error) {
- return c.Internal.GasEstimateGasLimit(ctx, msg, tsk)
-}
-
-func (c *FullNodeStruct) MpoolGetConfig(ctx context.Context) (*types.MpoolConfig, error) {
- return c.Internal.MpoolGetConfig(ctx)
-}
-
-func (c *FullNodeStruct) MpoolSetConfig(ctx context.Context, cfg *types.MpoolConfig) error {
- return c.Internal.MpoolSetConfig(ctx, cfg)
-}
-
-func (c *FullNodeStruct) MpoolSelect(ctx context.Context, tsk types.TipSetKey, tq float64) ([]*types.SignedMessage, error) {
- return c.Internal.MpoolSelect(ctx, tsk, tq)
-}
-
-func (c *FullNodeStruct) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) {
- return c.Internal.MpoolPending(ctx, tsk)
-}
-
-func (c *FullNodeStruct) MpoolClear(ctx context.Context, local bool) error {
- return c.Internal.MpoolClear(ctx, local)
-}
-
-func (c *FullNodeStruct) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) {
- return c.Internal.MpoolPush(ctx, smsg)
-}
-
-func (c *FullNodeStruct) MpoolPushUntrusted(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) {
- return c.Internal.MpoolPushUntrusted(ctx, smsg)
-}
-
-func (c *FullNodeStruct) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
- return c.Internal.MpoolPushMessage(ctx, msg, spec)
-}
-
-func (c *FullNodeStruct) MpoolBatchPush(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) {
- return c.Internal.MpoolBatchPush(ctx, smsgs)
-}
-
-func (c *FullNodeStruct) MpoolBatchPushUntrusted(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) {
- return c.Internal.MpoolBatchPushUntrusted(ctx, smsgs)
-}
-
-func (c *FullNodeStruct) MpoolBatchPushMessage(ctx context.Context, msgs []*types.Message, spec *api.MessageSendSpec) ([]*types.SignedMessage, error) {
- return c.Internal.MpoolBatchPushMessage(ctx, msgs, spec)
-}
-
-func (c *FullNodeStruct) MpoolSub(ctx context.Context) (<-chan api.MpoolUpdate, error) {
- return c.Internal.MpoolSub(ctx)
-}
-
-func (c *FullNodeStruct) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) {
- return c.Internal.MinerGetBaseInfo(ctx, maddr, epoch, tsk)
-}
-
-func (c *FullNodeStruct) MinerCreateBlock(ctx context.Context, bt *api.BlockTemplate) (*types.BlockMsg, error) {
- return c.Internal.MinerCreateBlock(ctx, bt)
-}
-
-func (c *FullNodeStruct) ChainHead(ctx context.Context) (*types.TipSet, error) {
- return c.Internal.ChainHead(ctx)
-}
-
-func (c *FullNodeStruct) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
- return c.Internal.ChainGetRandomnessFromTickets(ctx, tsk, personalization, randEpoch, entropy)
-}
-
-func (c *FullNodeStruct) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
- return c.Internal.ChainGetRandomnessFromBeacon(ctx, tsk, personalization, randEpoch, entropy)
-}
-
-func (c *FullNodeStruct) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) {
- return c.Internal.ChainGetTipSetByHeight(ctx, h, tsk)
-}
-
-func (c *FullNodeStruct) WalletNew(ctx context.Context, typ types.KeyType) (address.Address, error) {
- return c.Internal.WalletNew(ctx, typ)
-}
-
-func (c *FullNodeStruct) WalletHas(ctx context.Context, addr address.Address) (bool, error) {
- return c.Internal.WalletHas(ctx, addr)
-}
-
-func (c *FullNodeStruct) WalletList(ctx context.Context) ([]address.Address, error) {
- return c.Internal.WalletList(ctx)
-}
-
-func (c *FullNodeStruct) WalletBalance(ctx context.Context, a address.Address) (types.BigInt, error) {
- return c.Internal.WalletBalance(ctx, a)
-}
-
-func (c *FullNodeStruct) WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error) {
- return c.Internal.WalletSign(ctx, k, msg)
-}
-
-func (c *FullNodeStruct) WalletSignMessage(ctx context.Context, k address.Address, msg *types.Message) (*types.SignedMessage, error) {
- return c.Internal.WalletSignMessage(ctx, k, msg)
-}
-
-func (c *FullNodeStruct) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) (bool, error) {
- return c.Internal.WalletVerify(ctx, k, msg, sig)
-}
-
-func (c *FullNodeStruct) WalletDefaultAddress(ctx context.Context) (address.Address, error) {
- return c.Internal.WalletDefaultAddress(ctx)
-}
-
-func (c *FullNodeStruct) WalletSetDefault(ctx context.Context, a address.Address) error {
- return c.Internal.WalletSetDefault(ctx, a)
-}
-
-func (c *FullNodeStruct) WalletExport(ctx context.Context, a address.Address) (*types.KeyInfo, error) {
- return c.Internal.WalletExport(ctx, a)
-}
-
-func (c *FullNodeStruct) WalletImport(ctx context.Context, ki *types.KeyInfo) (address.Address, error) {
- return c.Internal.WalletImport(ctx, ki)
-}
-
-func (c *FullNodeStruct) WalletDelete(ctx context.Context, addr address.Address) error {
- return c.Internal.WalletDelete(ctx, addr)
-}
-
-func (c *FullNodeStruct) WalletValidateAddress(ctx context.Context, str string) (address.Address, error) {
- return c.Internal.WalletValidateAddress(ctx, str)
-}
-
-func (c *FullNodeStruct) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) {
- return c.Internal.MpoolGetNonce(ctx, addr)
-}
-
-func (c *FullNodeStruct) ChainGetBlock(ctx context.Context, b cid.Cid) (*types.BlockHeader, error) {
- return c.Internal.ChainGetBlock(ctx, b)
-}
-
-func (c *FullNodeStruct) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) {
- return c.Internal.ChainGetTipSet(ctx, key)
-}
-
-func (c *FullNodeStruct) ChainGetBlockMessages(ctx context.Context, b cid.Cid) (*api.BlockMessages, error) {
- return c.Internal.ChainGetBlockMessages(ctx, b)
-}
-
-func (c *FullNodeStruct) ChainGetParentReceipts(ctx context.Context, b cid.Cid) ([]*types.MessageReceipt, error) {
- return c.Internal.ChainGetParentReceipts(ctx, b)
-}
-
-func (c *FullNodeStruct) ChainGetParentMessages(ctx context.Context, b cid.Cid) ([]api.Message, error) {
- return c.Internal.ChainGetParentMessages(ctx, b)
-}
-
-func (c *FullNodeStruct) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) {
- return c.Internal.ChainNotify(ctx)
-}
-
-func (c *FullNodeStruct) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, error) {
- return c.Internal.ChainReadObj(ctx, obj)
-}
-
-func (c *FullNodeStruct) ChainDeleteObj(ctx context.Context, obj cid.Cid) error {
- return c.Internal.ChainDeleteObj(ctx, obj)
-}
-
-func (c *FullNodeStruct) ChainHasObj(ctx context.Context, o cid.Cid) (bool, error) {
- return c.Internal.ChainHasObj(ctx, o)
-}
-
-func (c *FullNodeStruct) ChainStatObj(ctx context.Context, obj, base cid.Cid) (api.ObjStat, error) {
- return c.Internal.ChainStatObj(ctx, obj, base)
-}
-
-func (c *FullNodeStruct) ChainSetHead(ctx context.Context, tsk types.TipSetKey) error {
- return c.Internal.ChainSetHead(ctx, tsk)
-}
-
-func (c *FullNodeStruct) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) {
- return c.Internal.ChainGetGenesis(ctx)
-}
-
-func (c *FullNodeStruct) ChainTipSetWeight(ctx context.Context, tsk types.TipSetKey) (types.BigInt, error) {
- return c.Internal.ChainTipSetWeight(ctx, tsk)
-}
-
-func (c *FullNodeStruct) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) {
- return c.Internal.ChainGetNode(ctx, p)
-}
-
-func (c *FullNodeStruct) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) {
- return c.Internal.ChainGetMessage(ctx, mc)
-}
-
-func (c *FullNodeStruct) ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) {
- return c.Internal.ChainGetPath(ctx, from, to)
-}
-
-func (c *FullNodeStruct) ChainExport(ctx context.Context, nroots abi.ChainEpoch, iom bool, tsk types.TipSetKey) (<-chan []byte, error) {
- return c.Internal.ChainExport(ctx, nroots, iom, tsk)
-}
-
-func (c *FullNodeStruct) BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) {
- return c.Internal.BeaconGetEntry(ctx, epoch)
-}
-
-func (c *FullNodeStruct) SyncState(ctx context.Context) (*api.SyncState, error) {
- return c.Internal.SyncState(ctx)
-}
-
-func (c *FullNodeStruct) SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error {
- return c.Internal.SyncSubmitBlock(ctx, blk)
-}
-
-func (c *FullNodeStruct) SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) {
- return c.Internal.SyncIncomingBlocks(ctx)
-}
-
-func (c *FullNodeStruct) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error {
- return c.Internal.SyncCheckpoint(ctx, tsk)
-}
-
-func (c *FullNodeStruct) SyncMarkBad(ctx context.Context, bcid cid.Cid) error {
- return c.Internal.SyncMarkBad(ctx, bcid)
-}
-
-func (c *FullNodeStruct) SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error {
- return c.Internal.SyncUnmarkBad(ctx, bcid)
-}
-
-func (c *FullNodeStruct) SyncUnmarkAllBad(ctx context.Context) error {
- return c.Internal.SyncUnmarkAllBad(ctx)
-}
-
-func (c *FullNodeStruct) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) {
- return c.Internal.SyncCheckBad(ctx, bcid)
-}
-
-func (c *FullNodeStruct) SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) {
- return c.Internal.SyncValidateTipset(ctx, tsk)
-}
-
-func (c *FullNodeStruct) StateNetworkName(ctx context.Context) (dtypes.NetworkName, error) {
- return c.Internal.StateNetworkName(ctx)
-}
-
-func (c *FullNodeStruct) StateMinerSectors(ctx context.Context, addr address.Address, sectorNos *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
- return c.Internal.StateMinerSectors(ctx, addr, sectorNos, tsk)
-}
-
-func (c *FullNodeStruct) StateMinerActiveSectors(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
- return c.Internal.StateMinerActiveSectors(ctx, addr, tsk)
-}
-
-func (c *FullNodeStruct) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) {
- return c.Internal.StateMinerProvingDeadline(ctx, addr, tsk)
-}
-
-func (c *FullNodeStruct) StateMinerPower(ctx context.Context, a address.Address, tsk types.TipSetKey) (*api.MinerPower, error) {
- return c.Internal.StateMinerPower(ctx, a, tsk)
-}
-
-func (c *FullNodeStruct) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) {
- return c.Internal.StateMinerInfo(ctx, actor, tsk)
-}
-
-func (c *FullNodeStruct) StateMinerDeadlines(ctx context.Context, actor address.Address, tsk types.TipSetKey) ([]api.Deadline, error) {
- return c.Internal.StateMinerDeadlines(ctx, actor, tsk)
-}
-
-func (c *FullNodeStruct) StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) {
- return c.Internal.StateMinerPartitions(ctx, m, dlIdx, tsk)
-}
-
-func (c *FullNodeStruct) StateMinerFaults(ctx context.Context, actor address.Address, tsk types.TipSetKey) (bitfield.BitField, error) {
- return c.Internal.StateMinerFaults(ctx, actor, tsk)
-}
-
-func (c *FullNodeStruct) StateAllMinerFaults(ctx context.Context, cutoff abi.ChainEpoch, endTsk types.TipSetKey) ([]*api.Fault, error) {
- return c.Internal.StateAllMinerFaults(ctx, cutoff, endTsk)
-}
-
-func (c *FullNodeStruct) StateMinerRecoveries(ctx context.Context, actor address.Address, tsk types.TipSetKey) (bitfield.BitField, error) {
- return c.Internal.StateMinerRecoveries(ctx, actor, tsk)
-}
-
-func (c *FullNodeStruct) StateMinerPreCommitDepositForPower(ctx context.Context, maddr address.Address, pci miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) {
- return c.Internal.StateMinerPreCommitDepositForPower(ctx, maddr, pci, tsk)
-}
-
-func (c *FullNodeStruct) StateMinerInitialPledgeCollateral(ctx context.Context, maddr address.Address, pci miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) {
- return c.Internal.StateMinerInitialPledgeCollateral(ctx, maddr, pci, tsk)
-}
-
-func (c *FullNodeStruct) StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (types.BigInt, error) {
- return c.Internal.StateMinerAvailableBalance(ctx, maddr, tsk)
-}
-
-func (c *FullNodeStruct) StateMinerSectorAllocated(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (bool, error) {
- return c.Internal.StateMinerSectorAllocated(ctx, maddr, s, tsk)
-}
-
-func (c *FullNodeStruct) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
- return c.Internal.StateSectorPreCommitInfo(ctx, maddr, n, tsk)
-}
-
-func (c *FullNodeStruct) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) {
- return c.Internal.StateSectorGetInfo(ctx, maddr, n, tsk)
-}
-
-func (c *FullNodeStruct) StateSectorExpiration(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorExpiration, error) {
- return c.Internal.StateSectorExpiration(ctx, maddr, n, tsk)
-}
-
-func (c *FullNodeStruct) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) {
- return c.Internal.StateSectorPartition(ctx, maddr, sectorNumber, tok)
-}
-
-func (c *FullNodeStruct) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error) {
- return c.Internal.StateCall(ctx, msg, tsk)
-}
-
-func (c *FullNodeStruct) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid.Cid) (*api.InvocResult, error) {
- return c.Internal.StateReplay(ctx, tsk, mc)
-}
-
-func (c *FullNodeStruct) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) {
- return c.Internal.StateGetActor(ctx, actor, tsk)
-}
-
-func (c *FullNodeStruct) StateReadState(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.ActorState, error) {
- return c.Internal.StateReadState(ctx, addr, tsk)
-}
-
-func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid, confidence uint64) (*api.MsgLookup, error) {
- return c.Internal.StateWaitMsg(ctx, msgc, confidence)
-}
-
-func (c *FullNodeStruct) StateWaitMsgLimited(ctx context.Context, msgc cid.Cid, confidence uint64, limit abi.ChainEpoch) (*api.MsgLookup, error) {
- return c.Internal.StateWaitMsgLimited(ctx, msgc, confidence, limit)
-}
-
-func (c *FullNodeStruct) StateSearchMsg(ctx context.Context, msgc cid.Cid) (*api.MsgLookup, error) {
- return c.Internal.StateSearchMsg(ctx, msgc)
-}
-
-func (c *FullNodeStruct) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) {
- return c.Internal.StateListMiners(ctx, tsk)
-}
-
-func (c *FullNodeStruct) StateListActors(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) {
- return c.Internal.StateListActors(ctx, tsk)
-}
-
-func (c *FullNodeStruct) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) {
- return c.Internal.StateMarketBalance(ctx, addr, tsk)
-}
-
-func (c *FullNodeStruct) StateMarketParticipants(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketBalance, error) {
- return c.Internal.StateMarketParticipants(ctx, tsk)
-}
-
-func (c *FullNodeStruct) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketDeal, error) {
- return c.Internal.StateMarketDeals(ctx, tsk)
-}
-
-func (c *FullNodeStruct) StateMarketStorageDeal(ctx context.Context, dealid abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) {
- return c.Internal.StateMarketStorageDeal(ctx, dealid, tsk)
-}
-
-func (c *FullNodeStruct) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) {
- return c.Internal.StateLookupID(ctx, addr, tsk)
-}
-
-func (c *FullNodeStruct) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) {
- return c.Internal.StateAccountKey(ctx, addr, tsk)
-}
-
-func (c *FullNodeStruct) StateChangedActors(ctx context.Context, olnstate cid.Cid, newstate cid.Cid) (map[string]types.Actor, error) {
- return c.Internal.StateChangedActors(ctx, olnstate, newstate)
-}
-
-func (c *FullNodeStruct) StateGetReceipt(ctx context.Context, msg cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) {
- return c.Internal.StateGetReceipt(ctx, msg, tsk)
-}
-
-func (c *FullNodeStruct) StateListMessages(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) {
- return c.Internal.StateListMessages(ctx, match, tsk, toht)
-}
-
-func (c *FullNodeStruct) StateCompute(ctx context.Context, height abi.ChainEpoch, msgs []*types.Message, tsk types.TipSetKey) (*api.ComputeStateOutput, error) {
- return c.Internal.StateCompute(ctx, height, msgs, tsk)
-}
-
-func (c *FullNodeStruct) StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) {
- return c.Internal.StateVerifierStatus(ctx, addr, tsk)
-}
-
-func (c *FullNodeStruct) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) {
- return c.Internal.StateVerifiedClientStatus(ctx, addr, tsk)
-}
-
-func (c *FullNodeStruct) StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) {
- return c.Internal.StateVerifiedRegistryRootKey(ctx, tsk)
-}
-
-func (c *FullNodeStruct) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) {
- return c.Internal.StateDealProviderCollateralBounds(ctx, size, verified, tsk)
-}
-
-func (c *FullNodeStruct) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) {
- return c.Internal.StateCirculatingSupply(ctx, tsk)
-}
-
-func (c *FullNodeStruct) StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) {
- return c.Internal.StateVMCirculatingSupplyInternal(ctx, tsk)
-}
-
-func (c *FullNodeStruct) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (stnetwork.Version, error) {
- return c.Internal.StateNetworkVersion(ctx, tsk)
-}
-
-func (c *FullNodeStruct) MsigGetAvailableBalance(ctx context.Context, a address.Address, tsk types.TipSetKey) (types.BigInt, error) {
- return c.Internal.MsigGetAvailableBalance(ctx, a, tsk)
-}
-
-func (c *FullNodeStruct) MsigGetVestingSchedule(ctx context.Context, a address.Address, tsk types.TipSetKey) (api.MsigVesting, error) {
- return c.Internal.MsigGetVestingSchedule(ctx, a, tsk)
-}
-
-func (c *FullNodeStruct) MsigGetVested(ctx context.Context, a address.Address, sTsk types.TipSetKey, eTsk types.TipSetKey) (types.BigInt, error) {
- return c.Internal.MsigGetVested(ctx, a, sTsk, eTsk)
-}
-
-func (c *FullNodeStruct) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) {
- return c.Internal.MsigCreate(ctx, req, addrs, duration, val, src, gp)
-}
-
-func (c *FullNodeStruct) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
- return c.Internal.MsigPropose(ctx, msig, to, amt, src, method, params)
-}
-
-func (c *FullNodeStruct) MsigApprove(ctx context.Context, msig address.Address, txID uint64, signer address.Address) (cid.Cid, error) {
- return c.Internal.MsigApprove(ctx, msig, txID, signer)
-}
-
-func (c *FullNodeStruct) MsigApproveTxnHash(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
- return c.Internal.MsigApproveTxnHash(ctx, msig, txID, proposer, to, amt, src, method, params)
-}
-
-func (c *FullNodeStruct) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
- return c.Internal.MsigCancel(ctx, msig, txID, to, amt, src, method, params)
-}
-
-func (c *FullNodeStruct) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
- return c.Internal.MsigAddPropose(ctx, msig, src, newAdd, inc)
-}
-
-func (c *FullNodeStruct) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
- return c.Internal.MsigAddApprove(ctx, msig, src, txID, proposer, newAdd, inc)
-}
-
-func (c *FullNodeStruct) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) {
- return c.Internal.MsigAddCancel(ctx, msig, src, txID, newAdd, inc)
-}
-
-func (c *FullNodeStruct) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
- return c.Internal.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd)
-}
-
-func (c *FullNodeStruct) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
- return c.Internal.MsigSwapApprove(ctx, msig, src, txID, proposer, oldAdd, newAdd)
-}
-
-func (c *FullNodeStruct) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
- return c.Internal.MsigSwapCancel(ctx, msig, src, txID, oldAdd, newAdd)
-}
-
-func (c *FullNodeStruct) MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) {
- return c.Internal.MsigRemoveSigner(ctx, msig, proposer, toRemove, decrease)
-}
-
-func (c *FullNodeStruct) MarketEnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) {
- return c.Internal.MarketEnsureAvailable(ctx, addr, wallet, amt)
-}
-
-func (c *FullNodeStruct) PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) {
- return c.Internal.PaychGet(ctx, from, to, amt)
-}
-
-func (c *FullNodeStruct) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid) (address.Address, error) {
- return c.Internal.PaychGetWaitReady(ctx, sentinel)
-}
-
-func (c *FullNodeStruct) PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) {
- return c.Internal.PaychAvailableFunds(ctx, ch)
-}
-
-func (c *FullNodeStruct) PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) {
- return c.Internal.PaychAvailableFundsByFromTo(ctx, from, to)
-}
-
-func (c *FullNodeStruct) PaychList(ctx context.Context) ([]address.Address, error) {
- return c.Internal.PaychList(ctx)
-}
-
-func (c *FullNodeStruct) PaychStatus(ctx context.Context, pch address.Address) (*api.PaychStatus, error) {
- return c.Internal.PaychStatus(ctx, pch)
-}
-
-func (c *FullNodeStruct) PaychVoucherCheckValid(ctx context.Context, addr address.Address, sv *paych.SignedVoucher) error {
- return c.Internal.PaychVoucherCheckValid(ctx, addr, sv)
-}
-
-func (c *FullNodeStruct) PaychVoucherCheckSpendable(ctx context.Context, addr address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (bool, error) {
- return c.Internal.PaychVoucherCheckSpendable(ctx, addr, sv, secret, proof)
-}
-
-func (c *FullNodeStruct) PaychVoucherAdd(ctx context.Context, addr address.Address, sv *paych.SignedVoucher, proof []byte, minDelta types.BigInt) (types.BigInt, error) {
- return c.Internal.PaychVoucherAdd(ctx, addr, sv, proof, minDelta)
-}
-
-func (c *FullNodeStruct) PaychVoucherCreate(ctx context.Context, pch address.Address, amt types.BigInt, lane uint64) (*api.VoucherCreateResult, error) {
- return c.Internal.PaychVoucherCreate(ctx, pch, amt, lane)
-}
-
-func (c *FullNodeStruct) PaychVoucherList(ctx context.Context, pch address.Address) ([]*paych.SignedVoucher, error) {
- return c.Internal.PaychVoucherList(ctx, pch)
-}
-
-func (c *FullNodeStruct) PaychSettle(ctx context.Context, a address.Address) (cid.Cid, error) {
- return c.Internal.PaychSettle(ctx, a)
-}
-
-func (c *FullNodeStruct) PaychCollect(ctx context.Context, a address.Address) (cid.Cid, error) {
- return c.Internal.PaychCollect(ctx, a)
-}
-
-func (c *FullNodeStruct) PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) {
- return c.Internal.PaychAllocateLane(ctx, ch)
-}
-
-func (c *FullNodeStruct) PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) {
- return c.Internal.PaychNewPayment(ctx, from, to, vouchers)
-}
-
-func (c *FullNodeStruct) PaychVoucherSubmit(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) {
- return c.Internal.PaychVoucherSubmit(ctx, ch, sv, secret, proof)
-}
-
-func (c *FullNodeStruct) CreateBackup(ctx context.Context, fpath string) error {
- return c.Internal.CreateBackup(ctx, fpath)
-}
-
-// StorageMinerStruct
-
-func (c *StorageMinerStruct) ActorAddress(ctx context.Context) (address.Address, error) {
- return c.Internal.ActorAddress(ctx)
-}
-
-func (c *StorageMinerStruct) MiningBase(ctx context.Context) (*types.TipSet, error) {
- return c.Internal.MiningBase(ctx)
-}
-
-func (c *StorageMinerStruct) ActorSectorSize(ctx context.Context, addr address.Address) (abi.SectorSize, error) {
- return c.Internal.ActorSectorSize(ctx, addr)
-}
-
-func (c *StorageMinerStruct) PledgeSector(ctx context.Context) error {
- return c.Internal.PledgeSector(ctx)
-}
-
-// Get the status of a given sector by ID
-func (c *StorageMinerStruct) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) {
- return c.Internal.SectorsStatus(ctx, sid, showOnChainInfo)
-}
-
-// List all staged sectors
-func (c *StorageMinerStruct) SectorsList(ctx context.Context) ([]abi.SectorNumber, error) {
- return c.Internal.SectorsList(ctx)
-}
-
-func (c *StorageMinerStruct) SectorsRefs(ctx context.Context) (map[string][]api.SealedRef, error) {
- return c.Internal.SectorsRefs(ctx)
-}
-
-func (c *StorageMinerStruct) SectorStartSealing(ctx context.Context, number abi.SectorNumber) error {
- return c.Internal.SectorStartSealing(ctx, number)
-}
-
-func (c *StorageMinerStruct) SectorSetSealDelay(ctx context.Context, delay time.Duration) error {
- return c.Internal.SectorSetSealDelay(ctx, delay)
-}
-
-func (c *StorageMinerStruct) SectorGetSealDelay(ctx context.Context) (time.Duration, error) {
- return c.Internal.SectorGetSealDelay(ctx)
-}
-
-func (c *StorageMinerStruct) SectorSetExpectedSealDuration(ctx context.Context, delay time.Duration) error {
- return c.Internal.SectorSetExpectedSealDuration(ctx, delay)
-}
-
-func (c *StorageMinerStruct) SectorGetExpectedSealDuration(ctx context.Context) (time.Duration, error) {
- return c.Internal.SectorGetExpectedSealDuration(ctx)
-}
-
-func (c *StorageMinerStruct) SectorsUpdate(ctx context.Context, id abi.SectorNumber, state api.SectorState) error {
- return c.Internal.SectorsUpdate(ctx, id, state)
-}
-
-func (c *StorageMinerStruct) SectorRemove(ctx context.Context, number abi.SectorNumber) error {
- return c.Internal.SectorRemove(ctx, number)
-}
-
-func (c *StorageMinerStruct) SectorMarkForUpgrade(ctx context.Context, number abi.SectorNumber) error {
- return c.Internal.SectorMarkForUpgrade(ctx, number)
-}
-
-func (c *StorageMinerStruct) WorkerConnect(ctx context.Context, url string) error {
- return c.Internal.WorkerConnect(ctx, url)
-}
-
-func (c *StorageMinerStruct) WorkerStats(ctx context.Context) (map[uuid.UUID]storiface.WorkerStats, error) {
- return c.Internal.WorkerStats(ctx)
-}
-
-func (c *StorageMinerStruct) WorkerJobs(ctx context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) {
- return c.Internal.WorkerJobs(ctx)
-}
-
-func (c *StorageMinerStruct) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error {
- return c.Internal.ReturnAddPiece(ctx, callID, pi, err)
-}
-
-func (c *StorageMinerStruct) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error {
- return c.Internal.ReturnSealPreCommit1(ctx, callID, p1o, err)
-}
-
-func (c *StorageMinerStruct) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error {
- return c.Internal.ReturnSealPreCommit2(ctx, callID, sealed, err)
-}
-
-func (c *StorageMinerStruct) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error {
- return c.Internal.ReturnSealCommit1(ctx, callID, out, err)
-}
-
-func (c *StorageMinerStruct) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error {
- return c.Internal.ReturnSealCommit2(ctx, callID, proof, err)
-}
-
-func (c *StorageMinerStruct) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error {
- return c.Internal.ReturnFinalizeSector(ctx, callID, err)
-}
-
-func (c *StorageMinerStruct) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error {
- return c.Internal.ReturnReleaseUnsealed(ctx, callID, err)
-}
-
-func (c *StorageMinerStruct) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error {
- return c.Internal.ReturnMoveStorage(ctx, callID, err)
-}
-
-func (c *StorageMinerStruct) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error {
- return c.Internal.ReturnUnsealPiece(ctx, callID, err)
-}
-
-func (c *StorageMinerStruct) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error {
- return c.Internal.ReturnReadPiece(ctx, callID, ok, err)
-}
-
-func (c *StorageMinerStruct) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error {
- return c.Internal.ReturnFetch(ctx, callID, err)
-}
-
-func (c *StorageMinerStruct) SealingSchedDiag(ctx context.Context) (interface{}, error) {
- return c.Internal.SealingSchedDiag(ctx)
-}
-
-func (c *StorageMinerStruct) StorageAttach(ctx context.Context, si stores.StorageInfo, st fsutil.FsStat) error {
- return c.Internal.StorageAttach(ctx, si, st)
-}
-
-func (c *StorageMinerStruct) StorageDeclareSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error {
- return c.Internal.StorageDeclareSector(ctx, storageId, s, ft, primary)
-}
-
-func (c *StorageMinerStruct) StorageDropSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft storiface.SectorFileType) error {
- return c.Internal.StorageDropSector(ctx, storageId, s, ft)
-}
-
-func (c *StorageMinerStruct) StorageFindSector(ctx context.Context, si abi.SectorID, types storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) {
- return c.Internal.StorageFindSector(ctx, si, types, ssize, allowFetch)
-}
-
-func (c *StorageMinerStruct) StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) {
- return c.Internal.StorageList(ctx)
-}
-
-func (c *StorageMinerStruct) StorageLocal(ctx context.Context) (map[stores.ID]string, error) {
- return c.Internal.StorageLocal(ctx)
-}
-
-func (c *StorageMinerStruct) StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) {
- return c.Internal.StorageStat(ctx, id)
-}
-
-func (c *StorageMinerStruct) StorageInfo(ctx context.Context, id stores.ID) (stores.StorageInfo, error) {
- return c.Internal.StorageInfo(ctx, id)
-}
-
-func (c *StorageMinerStruct) StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pt storiface.PathType) ([]stores.StorageInfo, error) {
- return c.Internal.StorageBestAlloc(ctx, allocate, ssize, pt)
-}
-
-func (c *StorageMinerStruct) StorageReportHealth(ctx context.Context, id stores.ID, report stores.HealthReport) error {
- return c.Internal.StorageReportHealth(ctx, id, report)
-}
-
-func (c *StorageMinerStruct) StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error {
- return c.Internal.StorageLock(ctx, sector, read, write)
-}
-
-func (c *StorageMinerStruct) StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) {
- return c.Internal.StorageTryLock(ctx, sector, read, write)
-}
-
-func (c *StorageMinerStruct) MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error {
- return c.Internal.MarketImportDealData(ctx, propcid, path)
-}
-
-func (c *StorageMinerStruct) MarketListDeals(ctx context.Context) ([]api.MarketDeal, error) {
- return c.Internal.MarketListDeals(ctx)
-}
-
-func (c *StorageMinerStruct) MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) {
- return c.Internal.MarketListRetrievalDeals(ctx)
-}
-
-func (c *StorageMinerStruct) MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) {
- return c.Internal.MarketGetDealUpdates(ctx)
-}
-
-func (c *StorageMinerStruct) MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) {
- return c.Internal.MarketListIncompleteDeals(ctx)
-}
-
-func (c *StorageMinerStruct) MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error {
- return c.Internal.MarketSetAsk(ctx, price, verifiedPrice, duration, minPieceSize, maxPieceSize)
-}
-
-func (c *StorageMinerStruct) MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) {
- return c.Internal.MarketGetAsk(ctx)
-}
-
-func (c *StorageMinerStruct) MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error {
- return c.Internal.MarketSetRetrievalAsk(ctx, rask)
-}
-
-func (c *StorageMinerStruct) MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) {
- return c.Internal.MarketGetRetrievalAsk(ctx)
-}
-
-func (c *StorageMinerStruct) MarketListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) {
- return c.Internal.MarketListDataTransfers(ctx)
-}
-
-func (c *StorageMinerStruct) MarketDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) {
- return c.Internal.MarketDataTransferUpdates(ctx)
-}
-
-func (c *StorageMinerStruct) MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error {
- return c.Internal.MarketRestartDataTransfer(ctx, transferID, otherPeer, isInitiator)
-}
-
-func (c *StorageMinerStruct) MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error {
- return c.Internal.MarketCancelDataTransfer(ctx, transferID, otherPeer, isInitiator)
-}
-
-func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error {
- return c.Internal.DealsImportData(ctx, dealPropCid, file)
-}
-
-func (c *StorageMinerStruct) DealsList(ctx context.Context) ([]api.MarketDeal, error) {
- return c.Internal.DealsList(ctx)
-}
-
-func (c *StorageMinerStruct) DealsConsiderOnlineStorageDeals(ctx context.Context) (bool, error) {
- return c.Internal.DealsConsiderOnlineStorageDeals(ctx)
-}
-
-func (c *StorageMinerStruct) DealsSetConsiderOnlineStorageDeals(ctx context.Context, b bool) error {
- return c.Internal.DealsSetConsiderOnlineStorageDeals(ctx, b)
-}
-
-func (c *StorageMinerStruct) DealsConsiderOnlineRetrievalDeals(ctx context.Context) (bool, error) {
- return c.Internal.DealsConsiderOnlineRetrievalDeals(ctx)
-}
-
-func (c *StorageMinerStruct) DealsSetConsiderOnlineRetrievalDeals(ctx context.Context, b bool) error {
- return c.Internal.DealsSetConsiderOnlineRetrievalDeals(ctx, b)
-}
-
-func (c *StorageMinerStruct) DealsPieceCidBlocklist(ctx context.Context) ([]cid.Cid, error) {
- return c.Internal.DealsPieceCidBlocklist(ctx)
-}
-
-func (c *StorageMinerStruct) DealsSetPieceCidBlocklist(ctx context.Context, cids []cid.Cid) error {
- return c.Internal.DealsSetPieceCidBlocklist(ctx, cids)
-}
-
-func (c *StorageMinerStruct) DealsConsiderOfflineStorageDeals(ctx context.Context) (bool, error) {
- return c.Internal.DealsConsiderOfflineStorageDeals(ctx)
-}
-
-func (c *StorageMinerStruct) DealsSetConsiderOfflineStorageDeals(ctx context.Context, b bool) error {
- return c.Internal.DealsSetConsiderOfflineStorageDeals(ctx, b)
-}
-
-func (c *StorageMinerStruct) DealsConsiderOfflineRetrievalDeals(ctx context.Context) (bool, error) {
- return c.Internal.DealsConsiderOfflineRetrievalDeals(ctx)
-}
-
-func (c *StorageMinerStruct) DealsSetConsiderOfflineRetrievalDeals(ctx context.Context, b bool) error {
- return c.Internal.DealsSetConsiderOfflineRetrievalDeals(ctx, b)
-}
-
-func (c *StorageMinerStruct) StorageAddLocal(ctx context.Context, path string) error {
- return c.Internal.StorageAddLocal(ctx, path)
-}
-
-func (c *StorageMinerStruct) PiecesListPieces(ctx context.Context) ([]cid.Cid, error) {
- return c.Internal.PiecesListPieces(ctx)
-}
-
-func (c *StorageMinerStruct) PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) {
- return c.Internal.PiecesListCidInfos(ctx)
-}
-
-func (c *StorageMinerStruct) PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) {
- return c.Internal.PiecesGetPieceInfo(ctx, pieceCid)
-}
-
-func (c *StorageMinerStruct) PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) {
- return c.Internal.PiecesGetCIDInfo(ctx, payloadCid)
-}
-
-func (c *StorageMinerStruct) CreateBackup(ctx context.Context, fpath string) error {
- return c.Internal.CreateBackup(ctx, fpath)
-}
-
-// WorkerStruct
-
-func (w *WorkerStruct) Version(ctx context.Context) (build.Version, error) {
- return w.Internal.Version(ctx)
-}
-
-func (w *WorkerStruct) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) {
- return w.Internal.TaskTypes(ctx)
-}
-
-func (w *WorkerStruct) Paths(ctx context.Context) ([]stores.StoragePath, error) {
- return w.Internal.Paths(ctx)
-}
-
-func (w *WorkerStruct) Info(ctx context.Context) (storiface.WorkerInfo, error) {
- return w.Internal.Info(ctx)
-}
-
-func (w *WorkerStruct) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
- return w.Internal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)
-}
-
-func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
- return w.Internal.SealPreCommit1(ctx, sector, ticket, pieces)
-}
-
-func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) {
- return w.Internal.SealPreCommit2(ctx, sector, pc1o)
-}
-
-func (w *WorkerStruct) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) {
- return w.Internal.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
-}
-
-func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) {
- return w.Internal.SealCommit2(ctx, sector, c1o)
-}
-
-func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) {
- return w.Internal.FinalizeSector(ctx, sector, keepUnsealed)
-}
-
-func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) {
- return w.Internal.ReleaseUnsealed(ctx, sector, safeToFree)
-}
-
-func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) {
- return w.Internal.MoveStorage(ctx, sector, types)
-}
-
-func (w *WorkerStruct) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, c cid.Cid) (storiface.CallID, error) {
- return w.Internal.UnsealPiece(ctx, sector, offset, size, ticket, c)
-}
-
-func (w *WorkerStruct) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) {
- return w.Internal.ReadPiece(ctx, sink, sector, offset, size)
-}
-
-func (w *WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) {
- return w.Internal.Fetch(ctx, id, fileType, ptype, am)
-}
-
-func (w *WorkerStruct) Remove(ctx context.Context, sector abi.SectorID) error {
- return w.Internal.Remove(ctx, sector)
-}
-
-func (w *WorkerStruct) StorageAddLocal(ctx context.Context, path string) error {
- return w.Internal.StorageAddLocal(ctx, path)
-}
-
-func (w *WorkerStruct) Session(ctx context.Context) (uuid.UUID, error) {
- return w.Internal.Session(ctx)
-}
-
-func (g GatewayStruct) ChainGetBlockMessages(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) {
- return g.Internal.ChainGetBlockMessages(ctx, c)
-}
-
-func (g GatewayStruct) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) {
- return g.Internal.ChainGetMessage(ctx, mc)
-}
-
-func (g GatewayStruct) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
- return g.Internal.ChainGetTipSet(ctx, tsk)
-}
-
-func (g GatewayStruct) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) {
- return g.Internal.ChainGetTipSetByHeight(ctx, h, tsk)
-}
-
-func (g GatewayStruct) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) {
- return g.Internal.ChainHasObj(ctx, c)
-}
-
-func (g GatewayStruct) ChainHead(ctx context.Context) (*types.TipSet, error) {
- return g.Internal.ChainHead(ctx)
-}
-
-func (g GatewayStruct) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) {
- return g.Internal.ChainNotify(ctx)
-}
-
-func (g GatewayStruct) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) {
- return g.Internal.ChainReadObj(ctx, c)
-}
-
-func (g GatewayStruct) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) {
- return g.Internal.GasEstimateMessageGas(ctx, msg, spec, tsk)
-}
-
-func (g GatewayStruct) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) {
- return g.Internal.MpoolPush(ctx, sm)
-}
-
-func (g GatewayStruct) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) {
- return g.Internal.MsigGetAvailableBalance(ctx, addr, tsk)
-}
-
-func (g GatewayStruct) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) {
- return g.Internal.MsigGetVested(ctx, addr, start, end)
-}
-
-func (g GatewayStruct) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) {
- return g.Internal.StateAccountKey(ctx, addr, tsk)
-}
-
-func (g GatewayStruct) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) {
- return g.Internal.StateDealProviderCollateralBounds(ctx, size, verified, tsk)
-}
-
-func (g GatewayStruct) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) {
- return g.Internal.StateGetActor(ctx, actor, ts)
-}
-
-func (g GatewayStruct) StateGetReceipt(ctx context.Context, c cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) {
- return g.Internal.StateGetReceipt(ctx, c, tsk)
-}
-
-func (g GatewayStruct) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) {
- return g.Internal.StateLookupID(ctx, addr, tsk)
-}
-
-func (g GatewayStruct) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) {
- return g.Internal.StateListMiners(ctx, tsk)
-}
-
-func (g GatewayStruct) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) {
- return g.Internal.StateMarketBalance(ctx, addr, tsk)
-}
-
-func (g GatewayStruct) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) {
- return g.Internal.StateMarketStorageDeal(ctx, dealId, tsk)
-}
-
-func (g GatewayStruct) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) {
- return g.Internal.StateMinerInfo(ctx, actor, tsk)
-}
-
-func (g GatewayStruct) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) {
- return g.Internal.StateMinerProvingDeadline(ctx, addr, tsk)
-}
-
-func (g GatewayStruct) StateMinerPower(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.MinerPower, error) {
- return g.Internal.StateMinerPower(ctx, addr, tsk)
-}
-
-func (g GatewayStruct) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (stnetwork.Version, error) {
- return g.Internal.StateNetworkVersion(ctx, tsk)
-}
-
-func (g GatewayStruct) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) {
- return g.Internal.StateVerifiedClientStatus(ctx, addr, tsk)
-}
-
-func (g GatewayStruct) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) {
- return g.Internal.StateWaitMsg(ctx, msg, confidence)
-}
-
-func (c *WalletStruct) WalletNew(ctx context.Context, typ types.KeyType) (address.Address, error) {
- return c.Internal.WalletNew(ctx, typ)
-}
-
-func (c *WalletStruct) WalletHas(ctx context.Context, addr address.Address) (bool, error) {
- return c.Internal.WalletHas(ctx, addr)
-}
-
-func (c *WalletStruct) WalletList(ctx context.Context) ([]address.Address, error) {
- return c.Internal.WalletList(ctx)
-}
-
-func (c *WalletStruct) WalletSign(ctx context.Context, k address.Address, msg []byte, meta api.MsgMeta) (*crypto.Signature, error) {
- return c.Internal.WalletSign(ctx, k, msg, meta)
-}
-
-func (c *WalletStruct) WalletExport(ctx context.Context, a address.Address) (*types.KeyInfo, error) {
- return c.Internal.WalletExport(ctx, a)
-}
-
-func (c *WalletStruct) WalletImport(ctx context.Context, ki *types.KeyInfo) (address.Address, error) {
- return c.Internal.WalletImport(ctx, ki)
-}
-
-func (c *WalletStruct) WalletDelete(ctx context.Context, addr address.Address) error {
- return c.Internal.WalletDelete(ctx, addr)
-}
-
-var _ api.Common = &CommonStruct{}
-var _ api.FullNode = &FullNodeStruct{}
-var _ api.StorageMiner = &StorageMinerStruct{}
-var _ api.WorkerAPI = &WorkerStruct{}
-var _ api.GatewayAPI = &GatewayStruct{}
-var _ api.WalletAPI = &WalletStruct{}
diff --git a/api/apistruct/struct_test.go b/api/apistruct/struct_test.go
deleted file mode 100644
index 9f5f5836015..00000000000
--- a/api/apistruct/struct_test.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package apistruct
-
-import "testing"
-
-func TestPermTags(t *testing.T) {
- _ = PermissionedFullAPI(&FullNodeStruct{})
- _ = PermissionedStorMinerAPI(&StorageMinerStruct{})
- _ = PermissionedWorkerAPI(&WorkerStruct{})
-}
diff --git a/api/cbor_gen.go b/api/cbor_gen.go
index 7ab575b287d..4434b45ede9 100644
--- a/api/cbor_gen.go
+++ b/api/cbor_gen.go
@@ -5,14 +5,19 @@ package api
import (
"fmt"
"io"
+ "sort"
abi "github.com/filecoin-project/go-state-types/abi"
+ market "github.com/filecoin-project/specs-actors/actors/builtin/market"
paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
+ cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
)
var _ = xerrors.Errorf
+var _ = cid.Undef
+var _ = sort.Sort
func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
if t == nil {
@@ -171,7 +176,8 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error {
}
default:
- return fmt.Errorf("unknown struct field %d: '%s'", i, name)
+ // Field doesn't exist on this type, so ignore it
+ cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
@@ -319,7 +325,8 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) error {
}
default:
- return fmt.Errorf("unknown struct field %d: '%s'", i, name)
+ // Field doesn't exist on this type, so ignore it
+ cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
@@ -427,7 +434,8 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) error {
}
default:
- return fmt.Errorf("unknown struct field %d: '%s'", i, name)
+ // Field doesn't exist on this type, so ignore it
+ cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
@@ -575,7 +583,8 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) error {
}
default:
- return fmt.Errorf("unknown struct field %d: '%s'", i, name)
+ // Field doesn't exist on this type, so ignore it
+ cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
@@ -723,7 +732,386 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) error {
}
default:
- return fmt.Errorf("unknown struct field %d: '%s'", i, name)
+ // Field doesn't exist on this type, so ignore it
+ cbg.ScanForLinks(r, func(cid.Cid) {})
+ }
+ }
+
+ return nil
+}
+func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+ if _, err := w.Write([]byte{165}); err != nil {
+ return err
+ }
+
+ scratch := make([]byte, 9)
+
+ // t.PublishCid (cid.Cid) (struct)
+ if len("PublishCid") > cbg.MaxLength {
+ return xerrors.Errorf("Value in field \"PublishCid\" was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, string("PublishCid")); err != nil {
+ return err
+ }
+
+ if t.PublishCid == nil {
+ if _, err := w.Write(cbg.CborNull); err != nil {
+ return err
+ }
+ } else {
+ if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil {
+ return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err)
+ }
+ }
+
+ // t.DealID (abi.DealID) (uint64)
+ if len("DealID") > cbg.MaxLength {
+ return xerrors.Errorf("Value in field \"DealID\" was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, string("DealID")); err != nil {
+ return err
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
+ return err
+ }
+
+ // t.DealProposal (market.DealProposal) (struct)
+ if len("DealProposal") > cbg.MaxLength {
+ return xerrors.Errorf("Value in field \"DealProposal\" was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, string("DealProposal")); err != nil {
+ return err
+ }
+
+ if err := t.DealProposal.MarshalCBOR(w); err != nil {
+ return err
+ }
+
+ // t.DealSchedule (api.DealSchedule) (struct)
+ if len("DealSchedule") > cbg.MaxLength {
+ return xerrors.Errorf("Value in field \"DealSchedule\" was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealSchedule"))); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, string("DealSchedule")); err != nil {
+ return err
+ }
+
+ if err := t.DealSchedule.MarshalCBOR(w); err != nil {
+ return err
+ }
+
+ // t.KeepUnsealed (bool) (bool)
+ if len("KeepUnsealed") > cbg.MaxLength {
+ return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil {
+ return err
+ }
+
+ if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) error {
+ *t = PieceDealInfo{}
+
+ br := cbg.GetPeeker(r)
+ scratch := make([]byte, 8)
+
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajMap {
+ return fmt.Errorf("cbor input should be of type map")
+ }
+
+ if extra > cbg.MaxLength {
+ return fmt.Errorf("PieceDealInfo: map struct too large (%d)", extra)
+ }
+
+ var name string
+ n := extra
+
+ for i := uint64(0); i < n; i++ {
+
+ {
+ sval, err := cbg.ReadStringBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+
+ name = string(sval)
+ }
+
+ switch name {
+ // t.PublishCid (cid.Cid) (struct)
+ case "PublishCid":
+
+ {
+
+ b, err := br.ReadByte()
+ if err != nil {
+ return err
+ }
+ if b != cbg.CborNull[0] {
+ if err := br.UnreadByte(); err != nil {
+ return err
+ }
+
+ c, err := cbg.ReadCid(br)
+ if err != nil {
+ return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err)
+ }
+
+ t.PublishCid = &c
+ }
+
+ }
+ // t.DealID (abi.DealID) (uint64)
+ case "DealID":
+
+ {
+
+ maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajUnsignedInt {
+ return fmt.Errorf("wrong type for uint64 field")
+ }
+ t.DealID = abi.DealID(extra)
+
+ }
+ // t.DealProposal (market.DealProposal) (struct)
+ case "DealProposal":
+
+ {
+
+ b, err := br.ReadByte()
+ if err != nil {
+ return err
+ }
+ if b != cbg.CborNull[0] {
+ if err := br.UnreadByte(); err != nil {
+ return err
+ }
+ t.DealProposal = new(market.DealProposal)
+ if err := t.DealProposal.UnmarshalCBOR(br); err != nil {
+ return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err)
+ }
+ }
+
+ }
+ // t.DealSchedule (api.DealSchedule) (struct)
+ case "DealSchedule":
+
+ {
+
+ if err := t.DealSchedule.UnmarshalCBOR(br); err != nil {
+ return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err)
+ }
+
+ }
+ // t.KeepUnsealed (bool) (bool)
+ case "KeepUnsealed":
+
+ maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajOther {
+ return fmt.Errorf("booleans must be major type 7")
+ }
+ switch extra {
+ case 20:
+ t.KeepUnsealed = false
+ case 21:
+ t.KeepUnsealed = true
+ default:
+ return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
+ }
+
+ default:
+ // Field doesn't exist on this type, so ignore it
+ cbg.ScanForLinks(r, func(cid.Cid) {})
+ }
+ }
+
+ return nil
+}
+func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+ if _, err := w.Write([]byte{162}); err != nil {
+ return err
+ }
+
+ scratch := make([]byte, 9)
+
+ // t.StartEpoch (abi.ChainEpoch) (int64)
+ if len("StartEpoch") > cbg.MaxLength {
+ return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, string("StartEpoch")); err != nil {
+ return err
+ }
+
+ if t.StartEpoch >= 0 {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
+ return err
+ }
+ } else {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
+ return err
+ }
+ }
+
+ // t.EndEpoch (abi.ChainEpoch) (int64)
+ if len("EndEpoch") > cbg.MaxLength {
+ return xerrors.Errorf("Value in field \"EndEpoch\" was too long")
+ }
+
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("EndEpoch"))); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, string("EndEpoch")); err != nil {
+ return err
+ }
+
+ if t.EndEpoch >= 0 {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil {
+ return err
+ }
+ } else {
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error {
+ *t = DealSchedule{}
+
+ br := cbg.GetPeeker(r)
+ scratch := make([]byte, 8)
+
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajMap {
+ return fmt.Errorf("cbor input should be of type map")
+ }
+
+ if extra > cbg.MaxLength {
+ return fmt.Errorf("DealSchedule: map struct too large (%d)", extra)
+ }
+
+ var name string
+ n := extra
+
+ for i := uint64(0); i < n; i++ {
+
+ {
+ sval, err := cbg.ReadStringBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+
+ name = string(sval)
+ }
+
+ switch name {
+ // t.StartEpoch (abi.ChainEpoch) (int64)
+ case "StartEpoch":
+ {
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ var extraI int64
+ if err != nil {
+ return err
+ }
+ switch maj {
+ case cbg.MajUnsignedInt:
+ extraI = int64(extra)
+ if extraI < 0 {
+ return fmt.Errorf("int64 positive overflow")
+ }
+ case cbg.MajNegativeInt:
+ extraI = int64(extra)
+ if extraI < 0 {
+ return fmt.Errorf("int64 negative oveflow")
+ }
+ extraI = -1 - extraI
+ default:
+ return fmt.Errorf("wrong type for int64 field: %d", maj)
+ }
+
+ t.StartEpoch = abi.ChainEpoch(extraI)
+ }
+ // t.EndEpoch (abi.ChainEpoch) (int64)
+ case "EndEpoch":
+ {
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ var extraI int64
+ if err != nil {
+ return err
+ }
+ switch maj {
+ case cbg.MajUnsignedInt:
+ extraI = int64(extra)
+ if extraI < 0 {
+ return fmt.Errorf("int64 positive overflow")
+ }
+ case cbg.MajNegativeInt:
+ extraI = int64(extra)
+ if extraI < 0 {
+ return fmt.Errorf("int64 negative oveflow")
+ }
+ extraI = -1 - extraI
+ default:
+ return fmt.Errorf("wrong type for int64 field: %d", maj)
+ }
+
+ t.EndEpoch = abi.ChainEpoch(extraI)
+ }
+
+ default:
+ // Field doesn't exist on this type, so ignore it
+ cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
diff --git a/api/checkstatuscode_string.go b/api/checkstatuscode_string.go
new file mode 100644
index 00000000000..072f7798975
--- /dev/null
+++ b/api/checkstatuscode_string.go
@@ -0,0 +1,35 @@
+// Code generated by "stringer -type=CheckStatusCode -trimprefix=CheckStatus"; DO NOT EDIT.
+
+package api
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[CheckStatusMessageSerialize-1]
+ _ = x[CheckStatusMessageSize-2]
+ _ = x[CheckStatusMessageValidity-3]
+ _ = x[CheckStatusMessageMinGas-4]
+ _ = x[CheckStatusMessageMinBaseFee-5]
+ _ = x[CheckStatusMessageBaseFee-6]
+ _ = x[CheckStatusMessageBaseFeeLowerBound-7]
+ _ = x[CheckStatusMessageBaseFeeUpperBound-8]
+ _ = x[CheckStatusMessageGetStateNonce-9]
+ _ = x[CheckStatusMessageNonce-10]
+ _ = x[CheckStatusMessageGetStateBalance-11]
+ _ = x[CheckStatusMessageBalance-12]
+}
+
+const _CheckStatusCode_name = "MessageSerializeMessageSizeMessageValidityMessageMinGasMessageMinBaseFeeMessageBaseFeeMessageBaseFeeLowerBoundMessageBaseFeeUpperBoundMessageGetStateNonceMessageNonceMessageGetStateBalanceMessageBalance"
+
+var _CheckStatusCode_index = [...]uint8{0, 16, 27, 42, 55, 72, 86, 110, 134, 154, 166, 188, 202}
+
+func (i CheckStatusCode) String() string {
+ i -= 1
+ if i < 0 || i >= CheckStatusCode(len(_CheckStatusCode_index)-1) {
+ return "CheckStatusCode(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _CheckStatusCode_name[_CheckStatusCode_index[i]:_CheckStatusCode_index[i+1]]
+}
diff --git a/api/client/client.go b/api/client/client.go
index 7d8a466d333..669c58f278b 100644
--- a/api/client/client.go
+++ b/api/client/client.go
@@ -10,72 +10,84 @@ import (
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/api/apistruct"
+ "github.com/filecoin-project/lotus/api/v0api"
+ "github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/lib/rpcenc"
)
-// NewCommonRPC creates a new http jsonrpc client.
-func NewCommonRPC(ctx context.Context, addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) {
- var res apistruct.CommonStruct
+// NewCommonRPCV0 creates a new http jsonrpc client.
+func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) {
+ var res v0api.CommonNetStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
- []interface{}{
- &res.Internal,
- },
- requestHeader,
- )
+ api.GetInternalStructs(&res), requestHeader)
return &res, closer, err
}
-// NewFullNodeRPC creates a new http jsonrpc client.
-func NewFullNodeRPC(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
- var res apistruct.FullNodeStruct
+// NewFullNodeRPCV0 creates a new http jsonrpc client.
+func NewFullNodeRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.FullNode, jsonrpc.ClientCloser, error) {
+ var res v0api.FullNodeStruct
+
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
- []interface{}{
- &res.CommonStruct.Internal,
- &res.Internal,
- }, requestHeader)
+ api.GetInternalStructs(&res), requestHeader)
return &res, closer, err
}
-// NewStorageMinerRPC creates a new http jsonrpc client for miner
-func NewStorageMinerRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) {
- var res apistruct.StorageMinerStruct
+// NewFullNodeRPCV1 creates a new http jsonrpc client.
+func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
+ var res v1api.FullNodeStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
- []interface{}{
- &res.CommonStruct.Internal,
- &res.Internal,
- },
- requestHeader,
- opts...,
- )
+ api.GetInternalStructs(&res), requestHeader)
return &res, closer, err
}
-func NewWorkerRPC(ctx context.Context, addr string, requestHeader http.Header) (api.WorkerAPI, jsonrpc.ClientCloser, error) {
- u, err := url.Parse(addr)
+func getPushUrl(addr string) (string, error) {
+ pushUrl, err := url.Parse(addr)
if err != nil {
- return nil, nil, err
+ return "", err
}
- switch u.Scheme {
+ switch pushUrl.Scheme {
case "ws":
- u.Scheme = "http"
+ pushUrl.Scheme = "http"
case "wss":
- u.Scheme = "https"
+ pushUrl.Scheme = "https"
}
///rpc/v0 -> /rpc/streams/v0/push
- u.Path = path.Join(u.Path, "../streams/v0/push")
+ pushUrl.Path = path.Join(pushUrl.Path, "../streams/v0/push")
+ return pushUrl.String(), nil
+}
- var res apistruct.WorkerStruct
+// NewStorageMinerRPCV0 creates a new http jsonrpc client for miner
+func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.StorageMiner, jsonrpc.ClientCloser, error) {
+ pushUrl, err := getPushUrl(addr)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var res v0api.StorageMinerStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
- []interface{}{
- &res.Internal,
- },
+ api.GetInternalStructs(&res), requestHeader,
+ append([]jsonrpc.Option{
+ rpcenc.ReaderParamEncoder(pushUrl),
+ }, opts...)...)
+
+ return &res, closer, err
+}
+
+func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.Worker, jsonrpc.ClientCloser, error) {
+ pushUrl, err := getPushUrl(addr)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var res api.WorkerStruct
+ closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
+ api.GetInternalStructs(&res),
requestHeader,
- rpcenc.ReaderParamEncoder(u.String()),
+ rpcenc.ReaderParamEncoder(pushUrl),
jsonrpc.WithNoReconnect(),
jsonrpc.WithTimeout(30*time.Second),
)
@@ -83,13 +95,23 @@ func NewWorkerRPC(ctx context.Context, addr string, requestHeader http.Header) (
return &res, closer, err
}
-// NewGatewayRPC creates a new http jsonrpc client for a gateway node.
-func NewGatewayRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.GatewayAPI, jsonrpc.ClientCloser, error) {
- var res apistruct.GatewayStruct
+// NewGatewayRPCV1 creates a new http jsonrpc client for a gateway node.
+func NewGatewayRPCV1(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.Gateway, jsonrpc.ClientCloser, error) {
+ var res api.GatewayStruct
+ closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
+ api.GetInternalStructs(&res),
+ requestHeader,
+ opts...,
+ )
+
+ return &res, closer, err
+}
+
+// NewGatewayRPCV0 creates a new http jsonrpc client for a gateway node.
+func NewGatewayRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.Gateway, jsonrpc.ClientCloser, error) {
+ var res v0api.GatewayStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
- []interface{}{
- &res.Internal,
- },
+ api.GetInternalStructs(&res),
requestHeader,
opts...,
)
@@ -97,12 +119,10 @@ func NewGatewayRPC(ctx context.Context, addr string, requestHeader http.Header,
return &res, closer, err
}
-func NewWalletRPC(ctx context.Context, addr string, requestHeader http.Header) (api.WalletAPI, jsonrpc.ClientCloser, error) {
- var res apistruct.WalletStruct
+func NewWalletRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Wallet, jsonrpc.ClientCloser, error) {
+ var res api.WalletStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
- []interface{}{
- &res.Internal,
- },
+ api.GetInternalStructs(&res),
requestHeader,
)
diff --git a/api/docgen-openrpc/cmd/docgen_openrpc.go b/api/docgen-openrpc/cmd/docgen_openrpc.go
new file mode 100644
index 00000000000..cc5e9f0cda5
--- /dev/null
+++ b/api/docgen-openrpc/cmd/docgen_openrpc.go
@@ -0,0 +1,74 @@
+package main
+
+import (
+ "compress/gzip"
+ "encoding/json"
+ "io"
+ "log"
+ "os"
+
+ "github.com/filecoin-project/lotus/api/docgen"
+
+ docgen_openrpc "github.com/filecoin-project/lotus/api/docgen-openrpc"
+)
+
+/*
+main defines a small program that writes an OpenRPC document describing
+a Lotus API to stdout.
+
+The API to document is selected by the arguments (os.Args[1] through os.Args[4]):
+a source file path plus the API type name, e.g. "api/api_storage.go" "StorageMiner".
+
+Use:
+
+ go run ./api/docgen-openrpc/cmd ["api/api_full.go"|"api/api_storage.go"|"api/api_worker.go"] ["FullNode"|"StorageMiner"|"Worker"]
+
+ With gzip compression: a '-gzip' flag is made available as an optional fifth argument. Note that position matters.
+
+ go run ./api/docgen-openrpc/cmd ["api/api_full.go"|"api/api_storage.go"|"api/api_worker.go"] ["FullNode"|"StorageMiner"|"Worker"] -gzip
+
+*/
+
+func main() {
+ Comments, GroupDocs := docgen.ParseApiASTInfo(os.Args[1], os.Args[2], os.Args[3], os.Args[4])
+
+ doc := docgen_openrpc.NewLotusOpenRPCDocument(Comments, GroupDocs)
+
+ i, _, _ := docgen.GetAPIType(os.Args[2], os.Args[3])
+ doc.RegisterReceiverName("Filecoin", i)
+
+ out, err := doc.Discover()
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ var jsonOut []byte
+ var writer io.WriteCloser
+
+ // Use os.Args to handle a somewhat hacky flag for the gzip option.
+ // Could use flags package to handle this more cleanly, but that requires changes elsewhere
+ // the scope of which just isn't warranted by this one use case which will usually be run
+ // programmatically anyways.
+ if len(os.Args) > 5 && os.Args[5] == "-gzip" {
+ jsonOut, err = json.Marshal(out)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ writer = gzip.NewWriter(os.Stdout)
+ } else {
+ jsonOut, err = json.MarshalIndent(out, "", " ")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ writer = os.Stdout
+ }
+
+ _, err = writer.Write(jsonOut)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ err = writer.Close()
+ if err != nil {
+ log.Fatalln(err)
+ }
+}
diff --git a/api/docgen-openrpc/openrpc.go b/api/docgen-openrpc/openrpc.go
new file mode 100644
index 00000000000..271b43ac607
--- /dev/null
+++ b/api/docgen-openrpc/openrpc.go
@@ -0,0 +1,161 @@
+package docgenopenrpc
+
+import (
+ "encoding/json"
+ "go/ast"
+ "net"
+ "reflect"
+
+ "github.com/alecthomas/jsonschema"
+ go_openrpc_reflect "github.com/etclabscore/go-openrpc-reflect"
+ "github.com/filecoin-project/lotus/api/docgen"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/ipfs/go-cid"
+ meta_schema "github.com/open-rpc/meta-schema"
+)
+
+// schemaDictEntry represents a type association passed to the jsonschema reflector.
+type schemaDictEntry struct {
+ example interface{}
+ rawJson string
+}
+
+const integerD = `{
+ "title": "number",
+ "type": "number",
+ "description": "Number is a number"
+ }`
+
+const cidCidD = `{"title": "Content Identifier", "type": "string", "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash."}`
+
+func OpenRPCSchemaTypeMapper(ty reflect.Type) *jsonschema.Type {
+ unmarshalJSONToJSONSchemaType := func(input string) *jsonschema.Type {
+ var js jsonschema.Type
+ err := json.Unmarshal([]byte(input), &js)
+ if err != nil {
+ panic(err)
+ }
+ return &js
+ }
+
+ if ty.Kind() == reflect.Ptr {
+ ty = ty.Elem()
+ }
+
+ if ty == reflect.TypeOf((*interface{})(nil)).Elem() {
+ return &jsonschema.Type{Type: "object", AdditionalProperties: []byte("true")}
+ }
+
+ // Second, handle other types.
+ // Use a slice instead of a map because it preserves order, as a logic safeguard/fallback.
+ dict := []schemaDictEntry{
+ {cid.Cid{}, cidCidD},
+ }
+
+ for _, d := range dict {
+ if reflect.TypeOf(d.example) == ty {
+ tt := unmarshalJSONToJSONSchemaType(d.rawJson)
+
+ return tt
+ }
+ }
+
+ // Handle primitive types in case there are generic cases
+ // specific to our services.
+ switch ty.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ // Return all integer types as the generic number schema defined by integerD.
+ ret := unmarshalJSONToJSONSchemaType(integerD)
+ return ret
+ case reflect.Uintptr:
+ return &jsonschema.Type{Type: "number", Title: "uintptr-title"}
+ case reflect.Struct:
+ case reflect.Map:
+ case reflect.Slice, reflect.Array:
+ case reflect.Float32, reflect.Float64:
+ case reflect.Bool:
+ case reflect.String:
+ case reflect.Ptr, reflect.Interface:
+ default:
+ }
+
+ return nil
+}
+
+// NewLotusOpenRPCDocument defines application-specific documentation and configuration for its OpenRPC document.
+func NewLotusOpenRPCDocument(Comments, GroupDocs map[string]string) *go_openrpc_reflect.Document {
+ d := &go_openrpc_reflect.Document{}
+
+ // Register "Meta" document fields.
+ // These include getters for
+ // - Servers object
+ // - Info object
+ // - ExternalDocs object
+ //
+ // These objects represent server-specific data that cannot be
+ // reflected.
+ d.WithMeta(&go_openrpc_reflect.MetaT{
+ GetServersFn: func() func(listeners []net.Listener) (*meta_schema.Servers, error) {
+ return func(listeners []net.Listener) (*meta_schema.Servers, error) {
+ return nil, nil
+ }
+ },
+ GetInfoFn: func() (info *meta_schema.InfoObject) {
+ info = &meta_schema.InfoObject{}
+ title := "Lotus RPC API"
+ info.Title = (*meta_schema.InfoObjectProperties)(&title)
+
+ version := build.BuildVersion
+ info.Version = (*meta_schema.InfoObjectVersion)(&version)
+ return info
+ },
+ GetExternalDocsFn: func() (exdocs *meta_schema.ExternalDocumentationObject) {
+ return nil // FIXME
+ },
+ })
+
+ // Use a provided Ethereum default configuration as a base.
+ appReflector := &go_openrpc_reflect.EthereumReflectorT{}
+
+ // Install overrides for the json schema->type map fn used by the jsonschema reflect package.
+ appReflector.FnSchemaTypeMap = func() func(ty reflect.Type) *jsonschema.Type {
+ return OpenRPCSchemaTypeMapper
+ }
+
+ appReflector.FnIsMethodEligible = func(m reflect.Method) bool {
+ for i := 0; i < m.Func.Type().NumOut(); i++ {
+ if m.Func.Type().Out(i).Kind() == reflect.Chan {
+ return false
+ }
+ }
+ return go_openrpc_reflect.EthereumReflector.IsMethodEligible(m)
+ }
+ appReflector.FnGetMethodName = func(moduleName string, r reflect.Value, m reflect.Method, funcDecl *ast.FuncDecl) (string, error) {
+ if m.Name == "ID" {
+ return moduleName + "_ID", nil
+ }
+ if moduleName == "rpc" && m.Name == "Discover" {
+ return "rpc.discover", nil
+ }
+
+ return moduleName + "." + m.Name, nil
+ }
+
+ appReflector.FnGetMethodSummary = func(r reflect.Value, m reflect.Method, funcDecl *ast.FuncDecl) (string, error) {
+ if v, ok := Comments[m.Name]; ok {
+ return v, nil
+ }
+ return "", nil // noComment
+ }
+
+ appReflector.FnSchemaExamples = func(ty reflect.Type) (examples *meta_schema.Examples, err error) {
+ v := docgen.ExampleValue("unknown", ty, ty) // This isn't ideal, but seems to work well enough.
+ return &meta_schema.Examples{
+ meta_schema.AlwaysTrue(v),
+ }, nil
+ }
+
+ // Finally, register the configured reflector to the document.
+ d.WithReflector(appReflector)
+ return d
+}
diff --git a/api/docgen/cmd/docgen.go b/api/docgen/cmd/docgen.go
new file mode 100644
index 00000000000..9ae2df2e707
--- /dev/null
+++ b/api/docgen/cmd/docgen.go
@@ -0,0 +1,121 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/filecoin-project/lotus/api/docgen"
+)
+
+func main() {
+ comments, groupComments := docgen.ParseApiASTInfo(os.Args[1], os.Args[2], os.Args[3], os.Args[4])
+
+ groups := make(map[string]*docgen.MethodGroup)
+
+ _, t, permStruct := docgen.GetAPIType(os.Args[2], os.Args[3])
+
+ for i := 0; i < t.NumMethod(); i++ {
+ m := t.Method(i)
+
+ groupName := docgen.MethodGroupFromName(m.Name)
+
+ g, ok := groups[groupName]
+ if !ok {
+ g = new(docgen.MethodGroup)
+ g.Header = groupComments[groupName]
+ g.GroupName = groupName
+ groups[groupName] = g
+ }
+
+ var args []interface{}
+ ft := m.Func.Type()
+ for j := 2; j < ft.NumIn(); j++ {
+ inp := ft.In(j)
+ args = append(args, docgen.ExampleValue(m.Name, inp, nil))
+ }
+
+ v, err := json.MarshalIndent(args, "", " ")
+ if err != nil {
+ panic(err)
+ }
+
+ outv := docgen.ExampleValue(m.Name, ft.Out(0), nil)
+
+ ov, err := json.MarshalIndent(outv, "", " ")
+ if err != nil {
+ panic(err)
+ }
+
+ g.Methods = append(g.Methods, &docgen.Method{
+ Name: m.Name,
+ Comment: comments[m.Name],
+ InputExample: string(v),
+ ResponseExample: string(ov),
+ })
+ }
+
+ var groupslice []*docgen.MethodGroup
+ for _, g := range groups {
+ groupslice = append(groupslice, g)
+ }
+
+ sort.Slice(groupslice, func(i, j int) bool {
+ return groupslice[i].GroupName < groupslice[j].GroupName
+ })
+
+ fmt.Printf("# Groups\n")
+
+ for _, g := range groupslice {
+ fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName)
+ for _, method := range g.Methods {
+ fmt.Printf(" * [%s](#%s)\n", method.Name, method.Name)
+ }
+ }
+
+ for _, g := range groupslice {
+ g := g
+ fmt.Printf("## %s\n", g.GroupName)
+ fmt.Printf("%s\n\n", g.Header)
+
+ sort.Slice(g.Methods, func(i, j int) bool {
+ return g.Methods[i].Name < g.Methods[j].Name
+ })
+
+ for _, m := range g.Methods {
+ fmt.Printf("### %s\n", m.Name)
+ fmt.Printf("%s\n\n", m.Comment)
+
+ var meth reflect.StructField
+ var ok bool
+ for _, ps := range permStruct {
+ meth, ok = ps.FieldByName(m.Name)
+ if ok {
+ break
+ }
+ }
+ if !ok {
+ panic("no perms for method: " + m.Name)
+ }
+
+ perms := meth.Tag.Get("perm")
+
+ fmt.Printf("Perms: %s\n\n", perms)
+
+ if strings.Count(m.InputExample, "\n") > 0 {
+ fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample)
+ } else {
+ fmt.Printf("Inputs: `%s`\n\n", m.InputExample)
+ }
+
+ if strings.Count(m.ResponseExample, "\n") > 0 {
+ fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample)
+ } else {
+ fmt.Printf("Response: `%s`\n\n", m.ResponseExample)
+ }
+ }
+ }
+}
diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go
index dc60041211f..39980023f0a 100644
--- a/api/docgen/docgen.go
+++ b/api/docgen/docgen.go
@@ -1,17 +1,19 @@
-package main
+package docgen
import (
- "encoding/json"
"fmt"
"go/ast"
"go/parser"
"go/token"
+ "path/filepath"
"reflect"
- "sort"
"strings"
"time"
"unicode"
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ "github.com/google/uuid"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-filestore"
metrics "github.com/libp2p/go-libp2p-core/metrics"
@@ -21,9 +23,8 @@ import (
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/multiformats/go-multiaddr"
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
+ filestore2 "github.com/filecoin-project/go-fil-markets/filestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-multistore"
@@ -33,9 +34,14 @@ import (
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/api/apistruct"
+ apitypes "github.com/filecoin-project/lotus/api/types"
+ "github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
+ "github.com/filecoin-project/lotus/extern/sector-storage/stores"
+ "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
+ sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
@@ -82,8 +88,10 @@ func init() {
addExample(pid)
addExample(&pid)
+ multistoreIDExample := multistore.StoreID(50)
+
addExample(bitfield.NewFromSet([]uint64{5}))
- addExample(abi.RegisteredSealProof_StackedDrg32GiBV1)
+ addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1)
addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1)
addExample(abi.ChainEpoch(10101))
addExample(crypto.SigTypeBLS)
@@ -106,28 +114,31 @@ func init() {
addExample(network.Connected)
addExample(dtypes.NetworkName("lotus"))
addExample(api.SyncStateStage(1))
- addExample(build.FullAPIVersion)
+ addExample(api.FullAPIVersion1)
addExample(api.PCHInbound)
addExample(time.Minute)
addExample(datatransfer.TransferID(3))
addExample(datatransfer.Ongoing)
- addExample(multistore.StoreID(50))
+ addExample(multistoreIDExample)
+ addExample(&multistoreIDExample)
addExample(retrievalmarket.ClientEventDealAccepted)
addExample(retrievalmarket.DealStatusNew)
addExample(network.ReachabilityPublic)
addExample(build.NewestNetworkVersion)
+ addExample(map[string]int{"name": 42})
+ addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()})
addExample(&types.ExecutionTrace{
- Msg: exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message),
- MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
+ Msg: ExampleValue("init", reflect.TypeOf(&types.Message{}), nil).(*types.Message),
+ MsgRct: ExampleValue("init", reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
})
addExample(map[string]types.Actor{
- "t01236": exampleValue(reflect.TypeOf(types.Actor{}), nil).(types.Actor),
+ "t01236": ExampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor),
})
addExample(map[string]api.MarketDeal{
- "t026363": exampleValue(reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
+ "t026363": ExampleValue("init", reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
})
addExample(map[string]api.MarketBalance{
- "t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
+ "t026363": ExampleValue("init", reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
})
addExample(map[string]*pubsub.TopicScoreSnapshot{
"/blocks": {
@@ -162,9 +173,139 @@ func init() {
// because reflect.TypeOf(maddr) returns the concrete type...
ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr
+ // miner specific
+ addExample(filestore2.Path(".lotusminer/fstmp123"))
+ si := multistore.StoreID(12)
+ addExample(&si)
+ addExample(retrievalmarket.DealID(5))
+ addExample(abi.ActorID(1000))
+ addExample(map[string][]api.SealedRef{
+ "98000": {
+ api.SealedRef{
+ SectorID: 100,
+ Offset: 10 << 20,
+ Size: 1 << 20,
+ },
+ },
+ })
+ addExample(api.SectorState(sealing.Proving))
+ addExample(stores.ID("76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8"))
+ addExample(storiface.FTUnsealed)
+ addExample(storiface.PathSealing)
+ addExample(map[stores.ID][]stores.Decl{
+ "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": {
+ {
+ SectorID: abi.SectorID{Miner: 1000, Number: 100},
+ SectorFileType: storiface.FTSealed,
+ },
+ },
+ })
+ addExample(map[stores.ID]string{
+ "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path",
+ })
+ addExample(map[uuid.UUID][]storiface.WorkerJob{
+ uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): {
+ {
+ ID: storiface.CallID{
+ Sector: abi.SectorID{Miner: 1000, Number: 100},
+ ID: uuid.MustParse("76081ba0-61bd-45a5-bc08-af05f1c26e5d"),
+ },
+ Sector: abi.SectorID{Miner: 1000, Number: 100},
+ Task: sealtasks.TTPreCommit2,
+ RunWait: 0,
+ Start: time.Unix(1605172927, 0).UTC(),
+ Hostname: "host",
+ },
+ },
+ })
+ addExample(map[uuid.UUID]storiface.WorkerStats{
+ uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): {
+ Info: storiface.WorkerInfo{
+ Hostname: "host",
+ Resources: storiface.WorkerResources{
+ MemPhysical: 256 << 30,
+ MemSwap: 120 << 30,
+ MemReserved: 2 << 30,
+ CPUs: 64,
+ GPUs: []string{"aGPU 1337"},
+ },
+ },
+ Enabled: true,
+ MemUsedMin: 0,
+ MemUsedMax: 0,
+ GpuUsed: false,
+ CpuUse: 0,
+ },
+ })
+ addExample(storiface.ErrorCode(0))
+ addExample(map[abi.SectorNumber]string{
+ 123: "can't acquire read lock",
+ })
+ addExample(map[api.SectorState]int{
+ api.SectorState(sealing.Proving): 120,
+ })
+ addExample([]abi.SectorNumber{123, 124})
+
+ // worker specific
+ addExample(storiface.AcquireMove)
+ addExample(storiface.UnpaddedByteIndex(abi.PaddedPieceSize(1 << 20).Unpadded()))
+ addExample(map[sealtasks.TaskType]struct{}{
+ sealtasks.TTPreCommit2: {},
+ })
+ addExample(sealtasks.TTCommit2)
+ addExample(apitypes.OpenRPCDocument{
+ "openrpc": "1.2.6",
+ "info": map[string]interface{}{
+ "title": "Lotus RPC API",
+ "version": "1.2.1/generated=2020-11-22T08:22:42-06:00",
+ },
+ "methods": []interface{}{}},
+ )
+
+ addExample(api.CheckStatusCode(0))
+ addExample(map[string]interface{}{"abc": 123})
}
-func exampleValue(t, parent reflect.Type) interface{} {
+func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
+
+ switch pkg {
+ case "api": // latest
+ switch name {
+ case "FullNode":
+ i = &api.FullNodeStruct{}
+ t = reflect.TypeOf(new(struct{ api.FullNode })).Elem()
+ permStruct = append(permStruct, reflect.TypeOf(api.FullNodeStruct{}.Internal))
+ permStruct = append(permStruct, reflect.TypeOf(api.CommonStruct{}.Internal))
+ permStruct = append(permStruct, reflect.TypeOf(api.NetStruct{}.Internal))
+ case "StorageMiner":
+ i = &api.StorageMinerStruct{}
+ t = reflect.TypeOf(new(struct{ api.StorageMiner })).Elem()
+ permStruct = append(permStruct, reflect.TypeOf(api.StorageMinerStruct{}.Internal))
+ permStruct = append(permStruct, reflect.TypeOf(api.CommonStruct{}.Internal))
+ permStruct = append(permStruct, reflect.TypeOf(api.NetStruct{}.Internal))
+ case "Worker":
+ i = &api.WorkerStruct{}
+ t = reflect.TypeOf(new(struct{ api.Worker })).Elem()
+ permStruct = append(permStruct, reflect.TypeOf(api.WorkerStruct{}.Internal))
+ default:
+ panic("unknown type")
+ }
+ case "v0api":
+ switch name {
+ case "FullNode":
+ i = v0api.FullNodeStruct{}
+ t = reflect.TypeOf(new(struct{ v0api.FullNode })).Elem()
+ permStruct = append(permStruct, reflect.TypeOf(v0api.FullNodeStruct{}.Internal))
+ permStruct = append(permStruct, reflect.TypeOf(v0api.CommonStruct{}.Internal))
+ permStruct = append(permStruct, reflect.TypeOf(v0api.NetStruct{}.Internal))
+ default:
+ panic("unknown type")
+ }
+ }
+ return
+}
+
+func ExampleValue(method string, t, parent reflect.Type) interface{} {
v, ok := ExampleValues[t]
if ok {
return v
@@ -173,25 +314,25 @@ func exampleValue(t, parent reflect.Type) interface{} {
switch t.Kind() {
case reflect.Slice:
out := reflect.New(t).Elem()
- reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem(), t)))
+ reflect.Append(out, reflect.ValueOf(ExampleValue(method, t.Elem(), t)))
return out.Interface()
case reflect.Chan:
- return exampleValue(t.Elem(), nil)
+ return ExampleValue(method, t.Elem(), nil)
case reflect.Struct:
- es := exampleStruct(t, parent)
+ es := exampleStruct(method, t, parent)
v := reflect.ValueOf(es).Elem().Interface()
ExampleValues[t] = v
return v
case reflect.Array:
out := reflect.New(t).Elem()
for i := 0; i < t.Len(); i++ {
- out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem(), t)))
+ out.Index(i).Set(reflect.ValueOf(ExampleValue(method, t.Elem(), t)))
}
return out.Interface()
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct {
- es := exampleStruct(t.Elem(), t)
+ es := exampleStruct(method, t.Elem(), t)
//ExampleValues[t] = es
return es
}
@@ -199,10 +340,10 @@ func exampleValue(t, parent reflect.Type) interface{} {
return struct{}{}
}
- panic(fmt.Sprintf("No example value for type: %s", t))
+ panic(fmt.Sprintf("No example value for type: %s (method '%s')", t, method))
}
-func exampleStruct(t, parent reflect.Type) interface{} {
+func exampleStruct(method string, t, parent reflect.Type) interface{} {
ns := reflect.New(t)
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
@@ -210,7 +351,7 @@ func exampleStruct(t, parent reflect.Type) interface{} {
continue
}
if strings.Title(f.Name) == f.Name {
- ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type, t)))
+ ns.Elem().Field(i).Set(reflect.ValueOf(ExampleValue(method, f.Type, t)))
}
}
@@ -218,6 +359,7 @@ func exampleStruct(t, parent reflect.Type) interface{} {
}
type Visitor struct {
+ Root string
Methods map[string]ast.Node
}
@@ -227,7 +369,7 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor {
return v
}
- if st.Name.Name != "FullNode" {
+ if st.Name.Name != v.Root {
return nil
}
@@ -241,32 +383,43 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor {
return v
}
-const noComment = "There are not yet any comments for this method."
+const NoComment = "There are not yet any comments for this method."
-func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint
+func ParseApiASTInfo(apiFile, iface, pkg, dir string) (comments map[string]string, groupDocs map[string]string) { //nolint:golint
fset := token.NewFileSet()
- pkgs, err := parser.ParseDir(fset, "./api", nil, parser.AllErrors|parser.ParseComments)
+ apiDir, err := filepath.Abs(dir)
+ if err != nil {
+ fmt.Println("./api filepath absolute error: ", err)
+ return
+ }
+ apiFile, err = filepath.Abs(apiFile)
+ if err != nil {
+ fmt.Println("filepath absolute error: ", err, "file:", apiFile)
+ return
+ }
+ pkgs, err := parser.ParseDir(fset, apiDir, nil, parser.AllErrors|parser.ParseComments)
if err != nil {
fmt.Println("parse error: ", err)
+ return
}
- ap := pkgs["api"]
+ ap := pkgs[pkg]
- f := ap.Files["api/api_full.go"]
+ f := ap.Files[apiFile]
cmap := ast.NewCommentMap(fset, f, f.Comments)
- v := &Visitor{make(map[string]ast.Node)}
- ast.Walk(v, pkgs["api"])
+ v := &Visitor{iface, make(map[string]ast.Node)}
+ ast.Walk(v, ap)
- groupDocs := make(map[string]string)
- out := make(map[string]string)
+ comments = make(map[string]string)
+ groupDocs = make(map[string]string)
for mn, node := range v.Methods {
- cs := cmap.Filter(node).Comments()
- if len(cs) == 0 {
- out[mn] = noComment
+ filteredComments := cmap.Filter(node).Comments()
+ if len(filteredComments) == 0 {
+ comments[mn] = NoComment
} else {
- for _, c := range cs {
+ for _, c := range filteredComments {
if strings.HasPrefix(c.Text(), "MethodGroup:") {
parts := strings.Split(c.Text(), "\n")
groupName := strings.TrimSpace(parts[0][12:])
@@ -277,15 +430,19 @@ func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint
}
}
- last := cs[len(cs)-1].Text()
+ l := len(filteredComments) - 1
+ if len(filteredComments) > 1 {
+ l = len(filteredComments) - 2
+ }
+ last := filteredComments[l].Text()
if !strings.HasPrefix(last, "MethodGroup:") {
- out[mn] = last
+ comments[mn] = last
} else {
- out[mn] = noComment
+ comments[mn] = NoComment
}
}
}
- return out, groupDocs
+ return comments, groupDocs
}
type MethodGroup struct {
@@ -301,7 +458,7 @@ type Method struct {
ResponseExample string
}
-func methodGroupFromName(mn string) string {
+func MethodGroupFromName(mn string) string {
i := strings.IndexFunc(mn[1:], func(r rune) bool {
return unicode.IsUpper(r)
})
@@ -310,112 +467,3 @@ func methodGroupFromName(mn string) string {
}
return mn[:i+1]
}
-
-func main() {
-
- comments, groupComments := parseApiASTInfo()
-
- groups := make(map[string]*MethodGroup)
-
- var api struct{ api.FullNode }
- t := reflect.TypeOf(api)
- for i := 0; i < t.NumMethod(); i++ {
- m := t.Method(i)
-
- groupName := methodGroupFromName(m.Name)
-
- g, ok := groups[groupName]
- if !ok {
- g = new(MethodGroup)
- g.Header = groupComments[groupName]
- g.GroupName = groupName
- groups[groupName] = g
- }
-
- var args []interface{}
- ft := m.Func.Type()
- for j := 2; j < ft.NumIn(); j++ {
- inp := ft.In(j)
- args = append(args, exampleValue(inp, nil))
- }
-
- v, err := json.MarshalIndent(args, "", " ")
- if err != nil {
- panic(err)
- }
-
- outv := exampleValue(ft.Out(0), nil)
-
- ov, err := json.MarshalIndent(outv, "", " ")
- if err != nil {
- panic(err)
- }
-
- g.Methods = append(g.Methods, &Method{
- Name: m.Name,
- Comment: comments[m.Name],
- InputExample: string(v),
- ResponseExample: string(ov),
- })
- }
-
- var groupslice []*MethodGroup
- for _, g := range groups {
- groupslice = append(groupslice, g)
- }
-
- sort.Slice(groupslice, func(i, j int) bool {
- return groupslice[i].GroupName < groupslice[j].GroupName
- })
-
- fmt.Printf("# Groups\n")
-
- for _, g := range groupslice {
- fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName)
- for _, method := range g.Methods {
- fmt.Printf(" * [%s](#%s)\n", method.Name, method.Name)
- }
- }
-
- permStruct := reflect.TypeOf(apistruct.FullNodeStruct{}.Internal)
- commonPermStruct := reflect.TypeOf(apistruct.CommonStruct{}.Internal)
-
- for _, g := range groupslice {
- g := g
- fmt.Printf("## %s\n", g.GroupName)
- fmt.Printf("%s\n\n", g.Header)
-
- sort.Slice(g.Methods, func(i, j int) bool {
- return g.Methods[i].Name < g.Methods[j].Name
- })
-
- for _, m := range g.Methods {
- fmt.Printf("### %s\n", m.Name)
- fmt.Printf("%s\n\n", m.Comment)
-
- meth, ok := permStruct.FieldByName(m.Name)
- if !ok {
- meth, ok = commonPermStruct.FieldByName(m.Name)
- if !ok {
- panic("no perms for method: " + m.Name)
- }
- }
-
- perms := meth.Tag.Get("perm")
-
- fmt.Printf("Perms: %s\n\n", perms)
-
- if strings.Count(m.InputExample, "\n") > 0 {
- fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample)
- } else {
- fmt.Printf("Inputs: `%s`\n\n", m.InputExample)
- }
-
- if strings.Count(m.ResponseExample, "\n") > 0 {
- fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample)
- } else {
- fmt.Printf("Response: `%s`\n\n", m.ResponseExample)
- }
- }
- }
-}
diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go
new file mode 100644
index 00000000000..69f315be948
--- /dev/null
+++ b/api/mocks/mock_full.go
@@ -0,0 +1,3094 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/filecoin-project/lotus/api (interfaces: FullNode)
+
+// Package mocks is a generated GoMock package.
+package mocks
+
+import (
+ context "context"
+ reflect "reflect"
+
+ address "github.com/filecoin-project/go-address"
+ bitfield "github.com/filecoin-project/go-bitfield"
+ datatransfer "github.com/filecoin-project/go-data-transfer"
+ retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
+ storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket"
+ auth "github.com/filecoin-project/go-jsonrpc/auth"
+ multistore "github.com/filecoin-project/go-multistore"
+ abi "github.com/filecoin-project/go-state-types/abi"
+ big "github.com/filecoin-project/go-state-types/big"
+ crypto "github.com/filecoin-project/go-state-types/crypto"
+ dline "github.com/filecoin-project/go-state-types/dline"
+ network "github.com/filecoin-project/go-state-types/network"
+ api "github.com/filecoin-project/lotus/api"
+ apitypes "github.com/filecoin-project/lotus/api/types"
+ miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ types "github.com/filecoin-project/lotus/chain/types"
+ marketevents "github.com/filecoin-project/lotus/markets/loggers"
+ dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
+ gomock "github.com/golang/mock/gomock"
+ uuid "github.com/google/uuid"
+ cid "github.com/ipfs/go-cid"
+ metrics "github.com/libp2p/go-libp2p-core/metrics"
+ network0 "github.com/libp2p/go-libp2p-core/network"
+ peer "github.com/libp2p/go-libp2p-core/peer"
+ protocol "github.com/libp2p/go-libp2p-core/protocol"
+)
+
+// MockFullNode is a mock of FullNode interface.
+type MockFullNode struct {
+ ctrl *gomock.Controller
+ recorder *MockFullNodeMockRecorder
+}
+
+// MockFullNodeMockRecorder is the mock recorder for MockFullNode.
+type MockFullNodeMockRecorder struct {
+ mock *MockFullNode
+}
+
+// NewMockFullNode creates a new mock instance.
+func NewMockFullNode(ctrl *gomock.Controller) *MockFullNode {
+ mock := &MockFullNode{ctrl: ctrl}
+ mock.recorder = &MockFullNodeMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockFullNode) EXPECT() *MockFullNodeMockRecorder {
+ return m.recorder
+}
+
+// AuthNew mocks base method.
+func (m *MockFullNode) AuthNew(arg0 context.Context, arg1 []auth.Permission) ([]byte, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AuthNew", arg0, arg1)
+ ret0, _ := ret[0].([]byte)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AuthNew indicates an expected call of AuthNew.
+func (mr *MockFullNodeMockRecorder) AuthNew(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthNew", reflect.TypeOf((*MockFullNode)(nil).AuthNew), arg0, arg1)
+}
+
+// AuthVerify mocks base method.
+func (m *MockFullNode) AuthVerify(arg0 context.Context, arg1 string) ([]auth.Permission, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AuthVerify", arg0, arg1)
+ ret0, _ := ret[0].([]auth.Permission)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AuthVerify indicates an expected call of AuthVerify.
+func (mr *MockFullNodeMockRecorder) AuthVerify(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthVerify", reflect.TypeOf((*MockFullNode)(nil).AuthVerify), arg0, arg1)
+}
+
+// BeaconGetEntry mocks base method.
+func (m *MockFullNode) BeaconGetEntry(arg0 context.Context, arg1 abi.ChainEpoch) (*types.BeaconEntry, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BeaconGetEntry", arg0, arg1)
+ ret0, _ := ret[0].(*types.BeaconEntry)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BeaconGetEntry indicates an expected call of BeaconGetEntry.
+func (mr *MockFullNodeMockRecorder) BeaconGetEntry(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeaconGetEntry", reflect.TypeOf((*MockFullNode)(nil).BeaconGetEntry), arg0, arg1)
+}
+
+// ChainDeleteObj mocks base method.
+func (m *MockFullNode) ChainDeleteObj(arg0 context.Context, arg1 cid.Cid) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainDeleteObj", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ChainDeleteObj indicates an expected call of ChainDeleteObj.
+func (mr *MockFullNodeMockRecorder) ChainDeleteObj(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainDeleteObj", reflect.TypeOf((*MockFullNode)(nil).ChainDeleteObj), arg0, arg1)
+}
+
+// ChainExport mocks base method.
+func (m *MockFullNode) ChainExport(arg0 context.Context, arg1 abi.ChainEpoch, arg2 bool, arg3 types.TipSetKey) (<-chan []byte, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainExport", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(<-chan []byte)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainExport indicates an expected call of ChainExport.
+func (mr *MockFullNodeMockRecorder) ChainExport(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExport", reflect.TypeOf((*MockFullNode)(nil).ChainExport), arg0, arg1, arg2, arg3)
+}
+
+// ChainGetBlock mocks base method.
+func (m *MockFullNode) ChainGetBlock(arg0 context.Context, arg1 cid.Cid) (*types.BlockHeader, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetBlock", arg0, arg1)
+ ret0, _ := ret[0].(*types.BlockHeader)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetBlock indicates an expected call of ChainGetBlock.
+func (mr *MockFullNodeMockRecorder) ChainGetBlock(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlock", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlock), arg0, arg1)
+}
+
+// ChainGetBlockMessages mocks base method.
+func (m *MockFullNode) ChainGetBlockMessages(arg0 context.Context, arg1 cid.Cid) (*api.BlockMessages, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetBlockMessages", arg0, arg1)
+ ret0, _ := ret[0].(*api.BlockMessages)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetBlockMessages indicates an expected call of ChainGetBlockMessages.
+func (mr *MockFullNodeMockRecorder) ChainGetBlockMessages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlockMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlockMessages), arg0, arg1)
+}
+
+// ChainGetGenesis mocks base method.
+func (m *MockFullNode) ChainGetGenesis(arg0 context.Context) (*types.TipSet, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetGenesis", arg0)
+ ret0, _ := ret[0].(*types.TipSet)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetGenesis indicates an expected call of ChainGetGenesis.
+func (mr *MockFullNodeMockRecorder) ChainGetGenesis(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetGenesis", reflect.TypeOf((*MockFullNode)(nil).ChainGetGenesis), arg0)
+}
+
+// ChainGetMessage mocks base method.
+func (m *MockFullNode) ChainGetMessage(arg0 context.Context, arg1 cid.Cid) (*types.Message, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetMessage", arg0, arg1)
+ ret0, _ := ret[0].(*types.Message)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetMessage indicates an expected call of ChainGetMessage.
+func (mr *MockFullNodeMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1)
+}
+
+// ChainGetMessagesInTipset mocks base method.
+func (m *MockFullNode) ChainGetMessagesInTipset(arg0 context.Context, arg1 types.TipSetKey) ([]api.Message, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetMessagesInTipset", arg0, arg1)
+ ret0, _ := ret[0].([]api.Message)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetMessagesInTipset indicates an expected call of ChainGetMessagesInTipset.
+func (mr *MockFullNodeMockRecorder) ChainGetMessagesInTipset(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessagesInTipset", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessagesInTipset), arg0, arg1)
+}
+
+// ChainGetNode mocks base method.
+func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.IpldObject, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetNode", arg0, arg1)
+ ret0, _ := ret[0].(*api.IpldObject)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetNode indicates an expected call of ChainGetNode.
+func (mr *MockFullNodeMockRecorder) ChainGetNode(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetNode", reflect.TypeOf((*MockFullNode)(nil).ChainGetNode), arg0, arg1)
+}
+
+// ChainGetParentMessages mocks base method.
+func (m *MockFullNode) ChainGetParentMessages(arg0 context.Context, arg1 cid.Cid) ([]api.Message, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetParentMessages", arg0, arg1)
+ ret0, _ := ret[0].([]api.Message)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetParentMessages indicates an expected call of ChainGetParentMessages.
+func (mr *MockFullNodeMockRecorder) ChainGetParentMessages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentMessages), arg0, arg1)
+}
+
+// ChainGetParentReceipts mocks base method.
+func (m *MockFullNode) ChainGetParentReceipts(arg0 context.Context, arg1 cid.Cid) ([]*types.MessageReceipt, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetParentReceipts", arg0, arg1)
+ ret0, _ := ret[0].([]*types.MessageReceipt)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetParentReceipts indicates an expected call of ChainGetParentReceipts.
+func (mr *MockFullNodeMockRecorder) ChainGetParentReceipts(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentReceipts", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentReceipts), arg0, arg1)
+}
+
+// ChainGetPath mocks base method.
+func (m *MockFullNode) ChainGetPath(arg0 context.Context, arg1, arg2 types.TipSetKey) ([]*api.HeadChange, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetPath", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*api.HeadChange)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetPath indicates an expected call of ChainGetPath.
+func (mr *MockFullNodeMockRecorder) ChainGetPath(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetPath", reflect.TypeOf((*MockFullNode)(nil).ChainGetPath), arg0, arg1, arg2)
+}
+
+// ChainGetRandomnessFromBeacon mocks base method.
+func (m *MockFullNode) ChainGetRandomnessFromBeacon(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetRandomnessFromBeacon", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(abi.Randomness)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetRandomnessFromBeacon indicates an expected call of ChainGetRandomnessFromBeacon.
+func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromBeacon(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromBeacon", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromBeacon), arg0, arg1, arg2, arg3, arg4)
+}
+
+// ChainGetRandomnessFromTickets mocks base method.
+func (m *MockFullNode) ChainGetRandomnessFromTickets(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetRandomnessFromTickets", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(abi.Randomness)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetRandomnessFromTickets indicates an expected call of ChainGetRandomnessFromTickets.
+func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromTickets(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromTickets", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromTickets), arg0, arg1, arg2, arg3, arg4)
+}
+
+// ChainGetTipSet mocks base method.
+func (m *MockFullNode) ChainGetTipSet(arg0 context.Context, arg1 types.TipSetKey) (*types.TipSet, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetTipSet", arg0, arg1)
+ ret0, _ := ret[0].(*types.TipSet)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetTipSet indicates an expected call of ChainGetTipSet.
+func (mr *MockFullNodeMockRecorder) ChainGetTipSet(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSet", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSet), arg0, arg1)
+}
+
+// ChainGetTipSetByHeight mocks base method.
+func (m *MockFullNode) ChainGetTipSetByHeight(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) (*types.TipSet, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetTipSetByHeight", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*types.TipSet)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetTipSetByHeight indicates an expected call of ChainGetTipSetByHeight.
+func (mr *MockFullNodeMockRecorder) ChainGetTipSetByHeight(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSetByHeight", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSetByHeight), arg0, arg1, arg2)
+}
+
+// ChainHasObj mocks base method.
+func (m *MockFullNode) ChainHasObj(arg0 context.Context, arg1 cid.Cid) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainHasObj", arg0, arg1)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainHasObj indicates an expected call of ChainHasObj.
+func (mr *MockFullNodeMockRecorder) ChainHasObj(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHasObj", reflect.TypeOf((*MockFullNode)(nil).ChainHasObj), arg0, arg1)
+}
+
+// ChainHead mocks base method.
+func (m *MockFullNode) ChainHead(arg0 context.Context) (*types.TipSet, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainHead", arg0)
+ ret0, _ := ret[0].(*types.TipSet)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainHead indicates an expected call of ChainHead.
+func (mr *MockFullNodeMockRecorder) ChainHead(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockFullNode)(nil).ChainHead), arg0)
+}
+
+// ChainNotify mocks base method.
+func (m *MockFullNode) ChainNotify(arg0 context.Context) (<-chan []*api.HeadChange, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainNotify", arg0)
+ ret0, _ := ret[0].(<-chan []*api.HeadChange)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainNotify indicates an expected call of ChainNotify.
+func (mr *MockFullNodeMockRecorder) ChainNotify(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainNotify", reflect.TypeOf((*MockFullNode)(nil).ChainNotify), arg0)
+}
+
+// ChainReadObj mocks base method.
+func (m *MockFullNode) ChainReadObj(arg0 context.Context, arg1 cid.Cid) ([]byte, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainReadObj", arg0, arg1)
+ ret0, _ := ret[0].([]byte)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainReadObj indicates an expected call of ChainReadObj.
+func (mr *MockFullNodeMockRecorder) ChainReadObj(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainReadObj", reflect.TypeOf((*MockFullNode)(nil).ChainReadObj), arg0, arg1)
+}
+
+// ChainSetHead mocks base method.
+func (m *MockFullNode) ChainSetHead(arg0 context.Context, arg1 types.TipSetKey) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainSetHead", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ChainSetHead indicates an expected call of ChainSetHead.
+func (mr *MockFullNodeMockRecorder) ChainSetHead(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainSetHead", reflect.TypeOf((*MockFullNode)(nil).ChainSetHead), arg0, arg1)
+}
+
+// ChainStatObj mocks base method.
+func (m *MockFullNode) ChainStatObj(arg0 context.Context, arg1, arg2 cid.Cid) (api.ObjStat, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainStatObj", arg0, arg1, arg2)
+ ret0, _ := ret[0].(api.ObjStat)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainStatObj indicates an expected call of ChainStatObj.
+func (mr *MockFullNodeMockRecorder) ChainStatObj(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainStatObj", reflect.TypeOf((*MockFullNode)(nil).ChainStatObj), arg0, arg1, arg2)
+}
+
+// ChainTipSetWeight mocks base method.
+func (m *MockFullNode) ChainTipSetWeight(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainTipSetWeight", arg0, arg1)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainTipSetWeight indicates an expected call of ChainTipSetWeight.
+func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1)
+}
+
+// ClientCalcCommP mocks base method.
+func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api.CommPRet, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientCalcCommP", arg0, arg1)
+ ret0, _ := ret[0].(*api.CommPRet)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientCalcCommP indicates an expected call of ClientCalcCommP.
+func (mr *MockFullNodeMockRecorder) ClientCalcCommP(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCalcCommP", reflect.TypeOf((*MockFullNode)(nil).ClientCalcCommP), arg0, arg1)
+}
+
+// ClientCancelDataTransfer mocks base method.
+func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientCancelDataTransfer", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer.
+func (mr *MockFullNodeMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3)
+}
+
+// ClientCancelRetrievalDeal mocks base method.
+func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retrievalmarket.DealID) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientCancelRetrievalDeal", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal.
+func (mr *MockFullNodeMockRecorder) ClientCancelRetrievalDeal(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelRetrievalDeal", reflect.TypeOf((*MockFullNode)(nil).ClientCancelRetrievalDeal), arg0, arg1)
+}
+
+// ClientDataTransferUpdates mocks base method.
+func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan api.DataTransferChannel, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientDataTransferUpdates", arg0)
+ ret0, _ := ret[0].(<-chan api.DataTransferChannel)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates.
+func (mr *MockFullNodeMockRecorder) ClientDataTransferUpdates(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDataTransferUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientDataTransferUpdates), arg0)
+}
+
+// ClientDealPieceCID mocks base method.
+func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (api.DataCIDSize, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientDealPieceCID", arg0, arg1)
+ ret0, _ := ret[0].(api.DataCIDSize)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientDealPieceCID indicates an expected call of ClientDealPieceCID.
+func (mr *MockFullNodeMockRecorder) ClientDealPieceCID(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealPieceCID", reflect.TypeOf((*MockFullNode)(nil).ClientDealPieceCID), arg0, arg1)
+}
+
+// ClientDealSize mocks base method.
+func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.DataSize, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientDealSize", arg0, arg1)
+ ret0, _ := ret[0].(api.DataSize)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientDealSize indicates an expected call of ClientDealSize.
+func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1)
+}
+
+// ClientFindData mocks base method.
+func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientFindData", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]api.QueryOffer)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientFindData indicates an expected call of ClientFindData.
+func (mr *MockFullNodeMockRecorder) ClientFindData(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientFindData", reflect.TypeOf((*MockFullNode)(nil).ClientFindData), arg0, arg1, arg2)
+}
+
+// ClientGenCar mocks base method.
+func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientGenCar", arg0, arg1, arg2)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ClientGenCar indicates an expected call of ClientGenCar.
+func (mr *MockFullNodeMockRecorder) ClientGenCar(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGenCar", reflect.TypeOf((*MockFullNode)(nil).ClientGenCar), arg0, arg1, arg2)
+}
+
+// ClientGetDealInfo mocks base method.
+func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*api.DealInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientGetDealInfo", arg0, arg1)
+ ret0, _ := ret[0].(*api.DealInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientGetDealInfo indicates an expected call of ClientGetDealInfo.
+func (mr *MockFullNodeMockRecorder) ClientGetDealInfo(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealInfo", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealInfo), arg0, arg1)
+}
+
+// ClientGetDealStatus mocks base method.
+func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientGetDealStatus", arg0, arg1)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientGetDealStatus indicates an expected call of ClientGetDealStatus.
+func (mr *MockFullNodeMockRecorder) ClientGetDealStatus(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealStatus", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealStatus), arg0, arg1)
+}
+
+// ClientGetDealUpdates mocks base method.
+func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.DealInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientGetDealUpdates", arg0)
+ ret0, _ := ret[0].(<-chan api.DealInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates.
+func (mr *MockFullNodeMockRecorder) ClientGetDealUpdates(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealUpdates), arg0)
+}
+
+// ClientGetRetrievalUpdates mocks base method.
+func (m *MockFullNode) ClientGetRetrievalUpdates(arg0 context.Context) (<-chan api.RetrievalInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientGetRetrievalUpdates", arg0)
+ ret0, _ := ret[0].(<-chan api.RetrievalInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientGetRetrievalUpdates indicates an expected call of ClientGetRetrievalUpdates.
+func (mr *MockFullNodeMockRecorder) ClientGetRetrievalUpdates(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetRetrievalUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetRetrievalUpdates), arg0)
+}
+
+// ClientHasLocal mocks base method.
+func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientHasLocal", arg0, arg1)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientHasLocal indicates an expected call of ClientHasLocal.
+func (mr *MockFullNodeMockRecorder) ClientHasLocal(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientHasLocal", reflect.TypeOf((*MockFullNode)(nil).ClientHasLocal), arg0, arg1)
+}
+
+// ClientImport mocks base method.
+func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*api.ImportRes, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientImport", arg0, arg1)
+ ret0, _ := ret[0].(*api.ImportRes)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientImport indicates an expected call of ClientImport.
+func (mr *MockFullNodeMockRecorder) ClientImport(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientImport", reflect.TypeOf((*MockFullNode)(nil).ClientImport), arg0, arg1)
+}
+
+// ClientListDataTransfers mocks base method.
+func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.DataTransferChannel, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientListDataTransfers", arg0)
+ ret0, _ := ret[0].([]api.DataTransferChannel)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientListDataTransfers indicates an expected call of ClientListDataTransfers.
+func (mr *MockFullNodeMockRecorder) ClientListDataTransfers(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDataTransfers", reflect.TypeOf((*MockFullNode)(nil).ClientListDataTransfers), arg0)
+}
+
+// ClientListDeals mocks base method.
+func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientListDeals", arg0)
+ ret0, _ := ret[0].([]api.DealInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientListDeals indicates an expected call of ClientListDeals.
+func (mr *MockFullNodeMockRecorder) ClientListDeals(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDeals", reflect.TypeOf((*MockFullNode)(nil).ClientListDeals), arg0)
+}
+
+// ClientListImports mocks base method.
+func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientListImports", arg0)
+ ret0, _ := ret[0].([]api.Import)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientListImports indicates an expected call of ClientListImports.
+func (mr *MockFullNodeMockRecorder) ClientListImports(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListImports", reflect.TypeOf((*MockFullNode)(nil).ClientListImports), arg0)
+}
+
+// ClientListRetrievals mocks base method.
+func (m *MockFullNode) ClientListRetrievals(arg0 context.Context) ([]api.RetrievalInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientListRetrievals", arg0)
+ ret0, _ := ret[0].([]api.RetrievalInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientListRetrievals indicates an expected call of ClientListRetrievals.
+func (mr *MockFullNodeMockRecorder) ClientListRetrievals(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListRetrievals", reflect.TypeOf((*MockFullNode)(nil).ClientListRetrievals), arg0)
+}
+
+// ClientMinerQueryOffer mocks base method.
+func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 *cid.Cid) (api.QueryOffer, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientMinerQueryOffer", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(api.QueryOffer)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer.
+func (mr *MockFullNodeMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientMinerQueryOffer", reflect.TypeOf((*MockFullNode)(nil).ClientMinerQueryOffer), arg0, arg1, arg2, arg3)
+}
+
+// ClientQueryAsk mocks base method.
+func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*storagemarket.StorageAsk, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*storagemarket.StorageAsk)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientQueryAsk indicates an expected call of ClientQueryAsk.
+func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientQueryAsk", reflect.TypeOf((*MockFullNode)(nil).ClientQueryAsk), arg0, arg1, arg2)
+}
+
+// ClientRemoveImport mocks base method.
+func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 multistore.StoreID) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ClientRemoveImport indicates an expected call of ClientRemoveImport.
+func (mr *MockFullNodeMockRecorder) ClientRemoveImport(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRemoveImport", reflect.TypeOf((*MockFullNode)(nil).ClientRemoveImport), arg0, arg1)
+}
+
+// ClientRestartDataTransfer mocks base method.
+func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientRestartDataTransfer", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer.
+func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRestartDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientRestartDataTransfer), arg0, arg1, arg2, arg3)
+}
+
+// ClientRetrieve mocks base method.
+func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ClientRetrieve indicates an expected call of ClientRetrieve.
+func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2)
+}
+
+// ClientRetrieveTryRestartInsufficientFunds mocks base method.
+func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Context, arg1 address.Address) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientRetrieveTryRestartInsufficientFunds", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds.
+func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1)
+}
+
+// ClientRetrieveWithEvents mocks base method.
+func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2)
+ ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents.
+func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2)
+}
+
+// ClientStartDeal mocks base method.
+func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientStartDeal", arg0, arg1)
+ ret0, _ := ret[0].(*cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientStartDeal indicates an expected call of ClientStartDeal.
+func (mr *MockFullNodeMockRecorder) ClientStartDeal(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStartDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStartDeal), arg0, arg1)
+}
+
+// ClientStatelessDeal mocks base method.
+func (m *MockFullNode) ClientStatelessDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientStatelessDeal", arg0, arg1)
+ ret0, _ := ret[0].(*cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientStatelessDeal indicates an expected call of ClientStatelessDeal.
+func (mr *MockFullNodeMockRecorder) ClientStatelessDeal(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStatelessDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStatelessDeal), arg0, arg1)
+}
+
+// Closing mocks base method.
+func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Closing", arg0)
+ ret0, _ := ret[0].(<-chan struct{})
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Closing indicates an expected call of Closing.
+func (mr *MockFullNodeMockRecorder) Closing(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Closing", reflect.TypeOf((*MockFullNode)(nil).Closing), arg0)
+}
+
+// CreateBackup mocks base method.
+func (m *MockFullNode) CreateBackup(arg0 context.Context, arg1 string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateBackup", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// CreateBackup indicates an expected call of CreateBackup.
+func (mr *MockFullNodeMockRecorder) CreateBackup(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBackup", reflect.TypeOf((*MockFullNode)(nil).CreateBackup), arg0, arg1)
+}
+
+// Discover mocks base method.
+func (m *MockFullNode) Discover(arg0 context.Context) (apitypes.OpenRPCDocument, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Discover", arg0)
+ ret0, _ := ret[0].(apitypes.OpenRPCDocument)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Discover indicates an expected call of Discover.
+func (mr *MockFullNodeMockRecorder) Discover(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discover", reflect.TypeOf((*MockFullNode)(nil).Discover), arg0)
+}
+
+// GasEstimateFeeCap mocks base method.
+func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *types.Message, arg2 int64, arg3 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GasEstimateFeeCap", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GasEstimateFeeCap indicates an expected call of GasEstimateFeeCap.
+func (mr *MockFullNodeMockRecorder) GasEstimateFeeCap(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateFeeCap", reflect.TypeOf((*MockFullNode)(nil).GasEstimateFeeCap), arg0, arg1, arg2, arg3)
+}
+
+// GasEstimateGasLimit mocks base method.
+func (m *MockFullNode) GasEstimateGasLimit(arg0 context.Context, arg1 *types.Message, arg2 types.TipSetKey) (int64, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GasEstimateGasLimit", arg0, arg1, arg2)
+ ret0, _ := ret[0].(int64)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GasEstimateGasLimit indicates an expected call of GasEstimateGasLimit.
+func (mr *MockFullNodeMockRecorder) GasEstimateGasLimit(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasLimit", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasLimit), arg0, arg1, arg2)
+}
+
+// GasEstimateGasPremium mocks base method.
+func (m *MockFullNode) GasEstimateGasPremium(arg0 context.Context, arg1 uint64, arg2 address.Address, arg3 int64, arg4 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GasEstimateGasPremium", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GasEstimateGasPremium indicates an expected call of GasEstimateGasPremium.
+func (mr *MockFullNodeMockRecorder) GasEstimateGasPremium(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasPremium", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasPremium), arg0, arg1, arg2, arg3, arg4)
+}
+
+// GasEstimateMessageGas mocks base method.
+func (m *MockFullNode) GasEstimateMessageGas(arg0 context.Context, arg1 *types.Message, arg2 *api.MessageSendSpec, arg3 types.TipSetKey) (*types.Message, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GasEstimateMessageGas", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*types.Message)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GasEstimateMessageGas indicates an expected call of GasEstimateMessageGas.
+func (mr *MockFullNodeMockRecorder) GasEstimateMessageGas(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3)
+}
+
+// ID mocks base method.
+func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ID", arg0)
+ ret0, _ := ret[0].(peer.ID)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ID indicates an expected call of ID.
+func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockFullNode)(nil).ID), arg0)
+}
+
+// LogList mocks base method.
+func (m *MockFullNode) LogList(arg0 context.Context) ([]string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "LogList", arg0)
+ ret0, _ := ret[0].([]string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// LogList indicates an expected call of LogList.
+func (mr *MockFullNodeMockRecorder) LogList(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogList", reflect.TypeOf((*MockFullNode)(nil).LogList), arg0)
+}
+
+// LogSetLevel mocks base method.
+func (m *MockFullNode) LogSetLevel(arg0 context.Context, arg1, arg2 string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "LogSetLevel", arg0, arg1, arg2)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// LogSetLevel indicates an expected call of LogSetLevel.
+func (mr *MockFullNodeMockRecorder) LogSetLevel(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogSetLevel", reflect.TypeOf((*MockFullNode)(nil).LogSetLevel), arg0, arg1, arg2)
+}
+
+// MarketAddBalance mocks base method.
+func (m *MockFullNode) MarketAddBalance(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MarketAddBalance", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MarketAddBalance indicates an expected call of MarketAddBalance.
+func (mr *MockFullNodeMockRecorder) MarketAddBalance(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketAddBalance", reflect.TypeOf((*MockFullNode)(nil).MarketAddBalance), arg0, arg1, arg2, arg3)
+}
+
+// MarketGetReserved mocks base method.
+func (m *MockFullNode) MarketGetReserved(arg0 context.Context, arg1 address.Address) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MarketGetReserved", arg0, arg1)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MarketGetReserved indicates an expected call of MarketGetReserved.
+func (mr *MockFullNodeMockRecorder) MarketGetReserved(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketGetReserved", reflect.TypeOf((*MockFullNode)(nil).MarketGetReserved), arg0, arg1)
+}
+
+// MarketReleaseFunds mocks base method.
+func (m *MockFullNode) MarketReleaseFunds(arg0 context.Context, arg1 address.Address, arg2 big.Int) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MarketReleaseFunds", arg0, arg1, arg2)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// MarketReleaseFunds indicates an expected call of MarketReleaseFunds.
+func (mr *MockFullNodeMockRecorder) MarketReleaseFunds(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReleaseFunds", reflect.TypeOf((*MockFullNode)(nil).MarketReleaseFunds), arg0, arg1, arg2)
+}
+
+// MarketReserveFunds mocks base method.
+func (m *MockFullNode) MarketReserveFunds(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MarketReserveFunds", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MarketReserveFunds indicates an expected call of MarketReserveFunds.
+func (mr *MockFullNodeMockRecorder) MarketReserveFunds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReserveFunds", reflect.TypeOf((*MockFullNode)(nil).MarketReserveFunds), arg0, arg1, arg2, arg3)
+}
+
+// MarketWithdraw mocks base method.
+func (m *MockFullNode) MarketWithdraw(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MarketWithdraw", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MarketWithdraw indicates an expected call of MarketWithdraw.
+func (mr *MockFullNodeMockRecorder) MarketWithdraw(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketWithdraw", reflect.TypeOf((*MockFullNode)(nil).MarketWithdraw), arg0, arg1, arg2, arg3)
+}
+
+// MinerCreateBlock mocks base method.
+func (m *MockFullNode) MinerCreateBlock(arg0 context.Context, arg1 *api.BlockTemplate) (*types.BlockMsg, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MinerCreateBlock", arg0, arg1)
+ ret0, _ := ret[0].(*types.BlockMsg)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MinerCreateBlock indicates an expected call of MinerCreateBlock.
+func (mr *MockFullNodeMockRecorder) MinerCreateBlock(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerCreateBlock", reflect.TypeOf((*MockFullNode)(nil).MinerCreateBlock), arg0, arg1)
+}
+
+// MinerGetBaseInfo mocks base method.
+func (m *MockFullNode) MinerGetBaseInfo(arg0 context.Context, arg1 address.Address, arg2 abi.ChainEpoch, arg3 types.TipSetKey) (*api.MiningBaseInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MinerGetBaseInfo", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*api.MiningBaseInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MinerGetBaseInfo indicates an expected call of MinerGetBaseInfo.
+func (mr *MockFullNodeMockRecorder) MinerGetBaseInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerGetBaseInfo", reflect.TypeOf((*MockFullNode)(nil).MinerGetBaseInfo), arg0, arg1, arg2, arg3)
+}
+
+// MpoolBatchPush mocks base method.
+func (m *MockFullNode) MpoolBatchPush(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolBatchPush", arg0, arg1)
+ ret0, _ := ret[0].([]cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolBatchPush indicates an expected call of MpoolBatchPush.
+func (mr *MockFullNodeMockRecorder) MpoolBatchPush(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPush", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPush), arg0, arg1)
+}
+
+// MpoolBatchPushMessage mocks base method.
+func (m *MockFullNode) MpoolBatchPushMessage(arg0 context.Context, arg1 []*types.Message, arg2 *api.MessageSendSpec) ([]*types.SignedMessage, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolBatchPushMessage", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*types.SignedMessage)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolBatchPushMessage indicates an expected call of MpoolBatchPushMessage.
+func (mr *MockFullNodeMockRecorder) MpoolBatchPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushMessage), arg0, arg1, arg2)
+}
+
+// MpoolBatchPushUntrusted mocks base method.
+func (m *MockFullNode) MpoolBatchPushUntrusted(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolBatchPushUntrusted", arg0, arg1)
+ ret0, _ := ret[0].([]cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolBatchPushUntrusted indicates an expected call of MpoolBatchPushUntrusted.
+func (mr *MockFullNodeMockRecorder) MpoolBatchPushUntrusted(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushUntrusted), arg0, arg1)
+}
+
+// MpoolCheckMessages mocks base method.
+func (m *MockFullNode) MpoolCheckMessages(arg0 context.Context, arg1 []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolCheckMessages", arg0, arg1)
+ ret0, _ := ret[0].([][]api.MessageCheckStatus)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolCheckMessages indicates an expected call of MpoolCheckMessages.
+func (mr *MockFullNodeMockRecorder) MpoolCheckMessages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckMessages", reflect.TypeOf((*MockFullNode)(nil).MpoolCheckMessages), arg0, arg1)
+}
+
+// MpoolCheckPendingMessages mocks base method.
+func (m *MockFullNode) MpoolCheckPendingMessages(arg0 context.Context, arg1 address.Address) ([][]api.MessageCheckStatus, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolCheckPendingMessages", arg0, arg1)
+ ret0, _ := ret[0].([][]api.MessageCheckStatus)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolCheckPendingMessages indicates an expected call of MpoolCheckPendingMessages.
+func (mr *MockFullNodeMockRecorder) MpoolCheckPendingMessages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckPendingMessages", reflect.TypeOf((*MockFullNode)(nil).MpoolCheckPendingMessages), arg0, arg1)
+}
+
+// MpoolCheckReplaceMessages mocks base method.
+func (m *MockFullNode) MpoolCheckReplaceMessages(arg0 context.Context, arg1 []*types.Message) ([][]api.MessageCheckStatus, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolCheckReplaceMessages", arg0, arg1)
+ ret0, _ := ret[0].([][]api.MessageCheckStatus)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolCheckReplaceMessages indicates an expected call of MpoolCheckReplaceMessages.
+func (mr *MockFullNodeMockRecorder) MpoolCheckReplaceMessages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckReplaceMessages", reflect.TypeOf((*MockFullNode)(nil).MpoolCheckReplaceMessages), arg0, arg1)
+}
+
+// MpoolClear mocks base method.
+func (m *MockFullNode) MpoolClear(arg0 context.Context, arg1 bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolClear", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// MpoolClear indicates an expected call of MpoolClear.
+func (mr *MockFullNodeMockRecorder) MpoolClear(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolClear", reflect.TypeOf((*MockFullNode)(nil).MpoolClear), arg0, arg1)
+}
+
+// MpoolGetConfig mocks base method.
+func (m *MockFullNode) MpoolGetConfig(arg0 context.Context) (*types.MpoolConfig, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolGetConfig", arg0)
+ ret0, _ := ret[0].(*types.MpoolConfig)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolGetConfig indicates an expected call of MpoolGetConfig.
+func (mr *MockFullNodeMockRecorder) MpoolGetConfig(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolGetConfig), arg0)
+}
+
+// MpoolGetNonce mocks base method.
+func (m *MockFullNode) MpoolGetNonce(arg0 context.Context, arg1 address.Address) (uint64, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolGetNonce", arg0, arg1)
+ ret0, _ := ret[0].(uint64)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolGetNonce indicates an expected call of MpoolGetNonce.
+func (mr *MockFullNodeMockRecorder) MpoolGetNonce(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetNonce", reflect.TypeOf((*MockFullNode)(nil).MpoolGetNonce), arg0, arg1)
+}
+
+// MpoolPending mocks base method.
+func (m *MockFullNode) MpoolPending(arg0 context.Context, arg1 types.TipSetKey) ([]*types.SignedMessage, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolPending", arg0, arg1)
+ ret0, _ := ret[0].([]*types.SignedMessage)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolPending indicates an expected call of MpoolPending.
+func (mr *MockFullNodeMockRecorder) MpoolPending(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPending", reflect.TypeOf((*MockFullNode)(nil).MpoolPending), arg0, arg1)
+}
+
+// MpoolPush mocks base method.
+func (m *MockFullNode) MpoolPush(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolPush", arg0, arg1)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolPush indicates an expected call of MpoolPush.
+func (mr *MockFullNodeMockRecorder) MpoolPush(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPush", reflect.TypeOf((*MockFullNode)(nil).MpoolPush), arg0, arg1)
+}
+
+// MpoolPushMessage mocks base method.
+func (m *MockFullNode) MpoolPushMessage(arg0 context.Context, arg1 *types.Message, arg2 *api.MessageSendSpec) (*types.SignedMessage, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolPushMessage", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*types.SignedMessage)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolPushMessage indicates an expected call of MpoolPushMessage.
+func (mr *MockFullNodeMockRecorder) MpoolPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolPushMessage), arg0, arg1, arg2)
+}
+
+// MpoolPushUntrusted mocks base method.
+func (m *MockFullNode) MpoolPushUntrusted(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolPushUntrusted", arg0, arg1)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolPushUntrusted indicates an expected call of MpoolPushUntrusted.
+func (mr *MockFullNodeMockRecorder) MpoolPushUntrusted(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolPushUntrusted), arg0, arg1)
+}
+
+// MpoolSelect mocks base method.
+func (m *MockFullNode) MpoolSelect(arg0 context.Context, arg1 types.TipSetKey, arg2 float64) ([]*types.SignedMessage, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolSelect", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*types.SignedMessage)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolSelect indicates an expected call of MpoolSelect.
+func (mr *MockFullNodeMockRecorder) MpoolSelect(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSelect", reflect.TypeOf((*MockFullNode)(nil).MpoolSelect), arg0, arg1, arg2)
+}
+
+// MpoolSetConfig mocks base method.
+func (m *MockFullNode) MpoolSetConfig(arg0 context.Context, arg1 *types.MpoolConfig) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolSetConfig", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// MpoolSetConfig indicates an expected call of MpoolSetConfig.
+func (mr *MockFullNodeMockRecorder) MpoolSetConfig(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolSetConfig), arg0, arg1)
+}
+
+// MpoolSub mocks base method.
+func (m *MockFullNode) MpoolSub(arg0 context.Context) (<-chan api.MpoolUpdate, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolSub", arg0)
+ ret0, _ := ret[0].(<-chan api.MpoolUpdate)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolSub indicates an expected call of MpoolSub.
+func (mr *MockFullNodeMockRecorder) MpoolSub(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSub", reflect.TypeOf((*MockFullNode)(nil).MpoolSub), arg0)
+}
+
+// MsigAddApprove mocks base method.
+func (m *MockFullNode) MsigAddApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address, arg6 bool) (*api.MessagePrototype, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigAddApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+ ret0, _ := ret[0].(*api.MessagePrototype)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigAddApprove indicates an expected call of MsigAddApprove.
+func (mr *MockFullNodeMockRecorder) MsigAddApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddApprove", reflect.TypeOf((*MockFullNode)(nil).MsigAddApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+}
+
+// MsigAddCancel mocks base method.
+func (m *MockFullNode) MsigAddCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4 address.Address, arg5 bool) (*api.MessagePrototype, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigAddCancel", arg0, arg1, arg2, arg3, arg4, arg5)
+ ret0, _ := ret[0].(*api.MessagePrototype)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigAddCancel indicates an expected call of MsigAddCancel.
+func (mr *MockFullNodeMockRecorder) MsigAddCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddCancel", reflect.TypeOf((*MockFullNode)(nil).MsigAddCancel), arg0, arg1, arg2, arg3, arg4, arg5)
+}
+
+// MsigAddPropose mocks base method.
+func (m *MockFullNode) MsigAddPropose(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (*api.MessagePrototype, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigAddPropose", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(*api.MessagePrototype)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigAddPropose indicates an expected call of MsigAddPropose.
+func (mr *MockFullNodeMockRecorder) MsigAddPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddPropose", reflect.TypeOf((*MockFullNode)(nil).MsigAddPropose), arg0, arg1, arg2, arg3, arg4)
+}
+
+// MsigApprove mocks base method.
+func (m *MockFullNode) MsigApprove(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (*api.MessagePrototype, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigApprove", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*api.MessagePrototype)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigApprove indicates an expected call of MsigApprove.
+func (mr *MockFullNodeMockRecorder) MsigApprove(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApprove", reflect.TypeOf((*MockFullNode)(nil).MsigApprove), arg0, arg1, arg2, arg3)
+}
+
+// MsigApproveTxnHash mocks base method.
+func (m *MockFullNode) MsigApproveTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3, arg4 address.Address, arg5 big.Int, arg6 address.Address, arg7 uint64, arg8 []byte) (*api.MessagePrototype, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigApproveTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
+ ret0, _ := ret[0].(*api.MessagePrototype)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigApproveTxnHash indicates an expected call of MsigApproveTxnHash.
+func (mr *MockFullNodeMockRecorder) MsigApproveTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApproveTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigApproveTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
+}
+
+// MsigCancel mocks base method.
+func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (*api.MessagePrototype, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
+ ret0, _ := ret[0].(*api.MessagePrototype)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigCancel indicates an expected call of MsigCancel.
+func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
+}
+
+// MsigCreate mocks base method.
+func (m *MockFullNode) MsigCreate(arg0 context.Context, arg1 uint64, arg2 []address.Address, arg3 abi.ChainEpoch, arg4 big.Int, arg5 address.Address, arg6 big.Int) (*api.MessagePrototype, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigCreate", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+ ret0, _ := ret[0].(*api.MessagePrototype)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigCreate indicates an expected call of MsigCreate.
+func (mr *MockFullNodeMockRecorder) MsigCreate(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCreate", reflect.TypeOf((*MockFullNode)(nil).MsigCreate), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+}
+
+// MsigGetAvailableBalance mocks base method.
+func (m *MockFullNode) MsigGetAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigGetAvailableBalance", arg0, arg1, arg2)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigGetAvailableBalance indicates an expected call of MsigGetAvailableBalance.
+func (mr *MockFullNodeMockRecorder) MsigGetAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).MsigGetAvailableBalance), arg0, arg1, arg2)
+}
+
+// MsigGetPending mocks base method.
+func (m *MockFullNode) MsigGetPending(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*api.MsigTransaction, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigGetPending", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*api.MsigTransaction)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigGetPending indicates an expected call of MsigGetPending.
+func (mr *MockFullNodeMockRecorder) MsigGetPending(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetPending", reflect.TypeOf((*MockFullNode)(nil).MsigGetPending), arg0, arg1, arg2)
+}
+
+// MsigGetVested mocks base method.
+func (m *MockFullNode) MsigGetVested(arg0 context.Context, arg1 address.Address, arg2, arg3 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigGetVested", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigGetVested indicates an expected call of MsigGetVested.
+func (mr *MockFullNodeMockRecorder) MsigGetVested(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVested", reflect.TypeOf((*MockFullNode)(nil).MsigGetVested), arg0, arg1, arg2, arg3)
+}
+
+// MsigGetVestingSchedule mocks base method.
+func (m *MockFullNode) MsigGetVestingSchedule(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MsigVesting, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigGetVestingSchedule", arg0, arg1, arg2)
+ ret0, _ := ret[0].(api.MsigVesting)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigGetVestingSchedule indicates an expected call of MsigGetVestingSchedule.
+func (mr *MockFullNodeMockRecorder) MsigGetVestingSchedule(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVestingSchedule", reflect.TypeOf((*MockFullNode)(nil).MsigGetVestingSchedule), arg0, arg1, arg2)
+}
+
+// MsigPropose mocks base method.
+func (m *MockFullNode) MsigPropose(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int, arg4 address.Address, arg5 uint64, arg6 []byte) (*api.MessagePrototype, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigPropose", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+ ret0, _ := ret[0].(*api.MessagePrototype)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigPropose indicates an expected call of MsigPropose.
+func (mr *MockFullNodeMockRecorder) MsigPropose(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigPropose", reflect.TypeOf((*MockFullNode)(nil).MsigPropose), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+}
+
+// MsigRemoveSigner mocks base method.
+func (m *MockFullNode) MsigRemoveSigner(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (*api.MessagePrototype, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigRemoveSigner", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(*api.MessagePrototype)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigRemoveSigner indicates an expected call of MsigRemoveSigner.
+func (mr *MockFullNodeMockRecorder) MsigRemoveSigner(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigRemoveSigner", reflect.TypeOf((*MockFullNode)(nil).MsigRemoveSigner), arg0, arg1, arg2, arg3, arg4)
+}
+
+// MsigSwapApprove mocks base method.
+func (m *MockFullNode) MsigSwapApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5, arg6 address.Address) (*api.MessagePrototype, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigSwapApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+ ret0, _ := ret[0].(*api.MessagePrototype)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigSwapApprove indicates an expected call of MsigSwapApprove.
+func (mr *MockFullNodeMockRecorder) MsigSwapApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapApprove", reflect.TypeOf((*MockFullNode)(nil).MsigSwapApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+}
+
+// MsigSwapCancel mocks base method.
+func (m *MockFullNode) MsigSwapCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address) (*api.MessagePrototype, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigSwapCancel", arg0, arg1, arg2, arg3, arg4, arg5)
+ ret0, _ := ret[0].(*api.MessagePrototype)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigSwapCancel indicates an expected call of MsigSwapCancel.
+func (mr *MockFullNodeMockRecorder) MsigSwapCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapCancel", reflect.TypeOf((*MockFullNode)(nil).MsigSwapCancel), arg0, arg1, arg2, arg3, arg4, arg5)
+}
+
+// MsigSwapPropose mocks base method.
+func (m *MockFullNode) MsigSwapPropose(arg0 context.Context, arg1, arg2, arg3, arg4 address.Address) (*api.MessagePrototype, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigSwapPropose", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(*api.MessagePrototype)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigSwapPropose indicates an expected call of MsigSwapPropose.
+func (mr *MockFullNodeMockRecorder) MsigSwapPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapPropose", reflect.TypeOf((*MockFullNode)(nil).MsigSwapPropose), arg0, arg1, arg2, arg3, arg4)
+}
+
+// NetAddrsListen mocks base method.
+func (m *MockFullNode) NetAddrsListen(arg0 context.Context) (peer.AddrInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetAddrsListen", arg0)
+ ret0, _ := ret[0].(peer.AddrInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetAddrsListen indicates an expected call of NetAddrsListen.
+func (mr *MockFullNodeMockRecorder) NetAddrsListen(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAddrsListen", reflect.TypeOf((*MockFullNode)(nil).NetAddrsListen), arg0)
+}
+
+// NetAgentVersion mocks base method.
+func (m *MockFullNode) NetAgentVersion(arg0 context.Context, arg1 peer.ID) (string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetAgentVersion", arg0, arg1)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetAgentVersion indicates an expected call of NetAgentVersion.
+func (mr *MockFullNodeMockRecorder) NetAgentVersion(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAgentVersion", reflect.TypeOf((*MockFullNode)(nil).NetAgentVersion), arg0, arg1)
+}
+
+// NetAutoNatStatus mocks base method.
+func (m *MockFullNode) NetAutoNatStatus(arg0 context.Context) (api.NatInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetAutoNatStatus", arg0)
+ ret0, _ := ret[0].(api.NatInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetAutoNatStatus indicates an expected call of NetAutoNatStatus.
+func (mr *MockFullNodeMockRecorder) NetAutoNatStatus(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAutoNatStatus", reflect.TypeOf((*MockFullNode)(nil).NetAutoNatStatus), arg0)
+}
+
+// NetBandwidthStats mocks base method.
+func (m *MockFullNode) NetBandwidthStats(arg0 context.Context) (metrics.Stats, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetBandwidthStats", arg0)
+ ret0, _ := ret[0].(metrics.Stats)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetBandwidthStats indicates an expected call of NetBandwidthStats.
+func (mr *MockFullNodeMockRecorder) NetBandwidthStats(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStats", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStats), arg0)
+}
+
+// NetBandwidthStatsByPeer mocks base method.
+func (m *MockFullNode) NetBandwidthStatsByPeer(arg0 context.Context) (map[string]metrics.Stats, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetBandwidthStatsByPeer", arg0)
+ ret0, _ := ret[0].(map[string]metrics.Stats)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetBandwidthStatsByPeer indicates an expected call of NetBandwidthStatsByPeer.
+func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByPeer(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByPeer", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByPeer), arg0)
+}
+
+// NetBandwidthStatsByProtocol mocks base method.
+func (m *MockFullNode) NetBandwidthStatsByProtocol(arg0 context.Context) (map[protocol.ID]metrics.Stats, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetBandwidthStatsByProtocol", arg0)
+ ret0, _ := ret[0].(map[protocol.ID]metrics.Stats)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetBandwidthStatsByProtocol indicates an expected call of NetBandwidthStatsByProtocol.
+func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByProtocol(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByProtocol", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByProtocol), arg0)
+}
+
+// NetBlockAdd mocks base method.
+func (m *MockFullNode) NetBlockAdd(arg0 context.Context, arg1 api.NetBlockList) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetBlockAdd", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// NetBlockAdd indicates an expected call of NetBlockAdd.
+func (mr *MockFullNodeMockRecorder) NetBlockAdd(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockAdd", reflect.TypeOf((*MockFullNode)(nil).NetBlockAdd), arg0, arg1)
+}
+
+// NetBlockList mocks base method.
+func (m *MockFullNode) NetBlockList(arg0 context.Context) (api.NetBlockList, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetBlockList", arg0)
+ ret0, _ := ret[0].(api.NetBlockList)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetBlockList indicates an expected call of NetBlockList.
+func (mr *MockFullNodeMockRecorder) NetBlockList(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockList", reflect.TypeOf((*MockFullNode)(nil).NetBlockList), arg0)
+}
+
+// NetBlockRemove mocks base method.
+func (m *MockFullNode) NetBlockRemove(arg0 context.Context, arg1 api.NetBlockList) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetBlockRemove", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// NetBlockRemove indicates an expected call of NetBlockRemove.
+func (mr *MockFullNodeMockRecorder) NetBlockRemove(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockRemove", reflect.TypeOf((*MockFullNode)(nil).NetBlockRemove), arg0, arg1)
+}
+
+// NetConnect mocks base method.
+func (m *MockFullNode) NetConnect(arg0 context.Context, arg1 peer.AddrInfo) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetConnect", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// NetConnect indicates an expected call of NetConnect.
+func (mr *MockFullNodeMockRecorder) NetConnect(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnect", reflect.TypeOf((*MockFullNode)(nil).NetConnect), arg0, arg1)
+}
+
+// NetConnectedness mocks base method.
+func (m *MockFullNode) NetConnectedness(arg0 context.Context, arg1 peer.ID) (network0.Connectedness, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetConnectedness", arg0, arg1)
+ ret0, _ := ret[0].(network0.Connectedness)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetConnectedness indicates an expected call of NetConnectedness.
+func (mr *MockFullNodeMockRecorder) NetConnectedness(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnectedness", reflect.TypeOf((*MockFullNode)(nil).NetConnectedness), arg0, arg1)
+}
+
+// NetDisconnect mocks base method.
+func (m *MockFullNode) NetDisconnect(arg0 context.Context, arg1 peer.ID) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetDisconnect", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// NetDisconnect indicates an expected call of NetDisconnect.
+func (mr *MockFullNodeMockRecorder) NetDisconnect(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetDisconnect", reflect.TypeOf((*MockFullNode)(nil).NetDisconnect), arg0, arg1)
+}
+
+// NetFindPeer mocks base method.
+func (m *MockFullNode) NetFindPeer(arg0 context.Context, arg1 peer.ID) (peer.AddrInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetFindPeer", arg0, arg1)
+ ret0, _ := ret[0].(peer.AddrInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetFindPeer indicates an expected call of NetFindPeer.
+func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1)
+}
+
+// NetPeerInfo mocks base method.
+func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetPeerInfo", arg0, arg1)
+ ret0, _ := ret[0].(*api.ExtendedPeerInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetPeerInfo indicates an expected call of NetPeerInfo.
+func (mr *MockFullNodeMockRecorder) NetPeerInfo(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeerInfo", reflect.TypeOf((*MockFullNode)(nil).NetPeerInfo), arg0, arg1)
+}
+
+// NetPeers mocks base method.
+func (m *MockFullNode) NetPeers(arg0 context.Context) ([]peer.AddrInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetPeers", arg0)
+ ret0, _ := ret[0].([]peer.AddrInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetPeers indicates an expected call of NetPeers.
+func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0)
+}
+
+// NetPubsubScores mocks base method.
+func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetPubsubScores", arg0)
+ ret0, _ := ret[0].([]api.PubsubScore)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetPubsubScores indicates an expected call of NetPubsubScores.
+func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0)
+}
+
+// NodeStatus mocks base method.
+func (m *MockFullNode) NodeStatus(arg0 context.Context, arg1 bool) (api.NodeStatus, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NodeStatus", arg0, arg1)
+ ret0, _ := ret[0].(api.NodeStatus)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NodeStatus indicates an expected call of NodeStatus.
+func (mr *MockFullNodeMockRecorder) NodeStatus(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStatus", reflect.TypeOf((*MockFullNode)(nil).NodeStatus), arg0, arg1)
+}
+
+// PaychAllocateLane mocks base method.
+func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Address) (uint64, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychAllocateLane", arg0, arg1)
+ ret0, _ := ret[0].(uint64)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychAllocateLane indicates an expected call of PaychAllocateLane.
+func (mr *MockFullNodeMockRecorder) PaychAllocateLane(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAllocateLane", reflect.TypeOf((*MockFullNode)(nil).PaychAllocateLane), arg0, arg1)
+}
+
+// PaychAvailableFunds mocks base method.
+func (m *MockFullNode) PaychAvailableFunds(arg0 context.Context, arg1 address.Address) (*api.ChannelAvailableFunds, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychAvailableFunds", arg0, arg1)
+ ret0, _ := ret[0].(*api.ChannelAvailableFunds)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychAvailableFunds indicates an expected call of PaychAvailableFunds.
+func (mr *MockFullNodeMockRecorder) PaychAvailableFunds(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFunds", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFunds), arg0, arg1)
+}
+
+// PaychAvailableFundsByFromTo mocks base method.
+func (m *MockFullNode) PaychAvailableFundsByFromTo(arg0 context.Context, arg1, arg2 address.Address) (*api.ChannelAvailableFunds, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychAvailableFundsByFromTo", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*api.ChannelAvailableFunds)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychAvailableFundsByFromTo indicates an expected call of PaychAvailableFundsByFromTo.
+func (mr *MockFullNodeMockRecorder) PaychAvailableFundsByFromTo(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFundsByFromTo", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFundsByFromTo), arg0, arg1, arg2)
+}
+
+// PaychCollect mocks base method.
+func (m *MockFullNode) PaychCollect(arg0 context.Context, arg1 address.Address) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychCollect", arg0, arg1)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychCollect indicates an expected call of PaychCollect.
+func (mr *MockFullNodeMockRecorder) PaychCollect(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychCollect", reflect.TypeOf((*MockFullNode)(nil).PaychCollect), arg0, arg1)
+}
+
+// PaychGet mocks base method.
+func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (*api.ChannelInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychGet", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*api.ChannelInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychGet indicates an expected call of PaychGet.
+func (mr *MockFullNodeMockRecorder) PaychGet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGet", reflect.TypeOf((*MockFullNode)(nil).PaychGet), arg0, arg1, arg2, arg3)
+}
+
+// PaychGetWaitReady mocks base method.
+func (m *MockFullNode) PaychGetWaitReady(arg0 context.Context, arg1 cid.Cid) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychGetWaitReady", arg0, arg1)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychGetWaitReady indicates an expected call of PaychGetWaitReady.
+func (mr *MockFullNodeMockRecorder) PaychGetWaitReady(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGetWaitReady", reflect.TypeOf((*MockFullNode)(nil).PaychGetWaitReady), arg0, arg1)
+}
+
+// PaychList mocks base method.
+func (m *MockFullNode) PaychList(arg0 context.Context) ([]address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychList", arg0)
+ ret0, _ := ret[0].([]address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychList indicates an expected call of PaychList.
+func (mr *MockFullNodeMockRecorder) PaychList(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychList", reflect.TypeOf((*MockFullNode)(nil).PaychList), arg0)
+}
+
+// PaychNewPayment mocks base method.
+func (m *MockFullNode) PaychNewPayment(arg0 context.Context, arg1, arg2 address.Address, arg3 []api.VoucherSpec) (*api.PaymentInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychNewPayment", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*api.PaymentInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychNewPayment indicates an expected call of PaychNewPayment.
+func (mr *MockFullNodeMockRecorder) PaychNewPayment(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychNewPayment", reflect.TypeOf((*MockFullNode)(nil).PaychNewPayment), arg0, arg1, arg2, arg3)
+}
+
+// PaychSettle mocks base method.
+func (m *MockFullNode) PaychSettle(arg0 context.Context, arg1 address.Address) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychSettle", arg0, arg1)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychSettle indicates an expected call of PaychSettle.
+func (mr *MockFullNodeMockRecorder) PaychSettle(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychSettle", reflect.TypeOf((*MockFullNode)(nil).PaychSettle), arg0, arg1)
+}
+
+// PaychStatus mocks base method.
+func (m *MockFullNode) PaychStatus(arg0 context.Context, arg1 address.Address) (*api.PaychStatus, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychStatus", arg0, arg1)
+ ret0, _ := ret[0].(*api.PaychStatus)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychStatus indicates an expected call of PaychStatus.
+func (mr *MockFullNodeMockRecorder) PaychStatus(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychStatus", reflect.TypeOf((*MockFullNode)(nil).PaychStatus), arg0, arg1)
+}
+
+// PaychVoucherAdd mocks base method.
+func (m *MockFullNode) PaychVoucherAdd(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3 []byte, arg4 big.Int) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychVoucherAdd", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychVoucherAdd indicates an expected call of PaychVoucherAdd.
+func (mr *MockFullNodeMockRecorder) PaychVoucherAdd(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherAdd", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherAdd), arg0, arg1, arg2, arg3, arg4)
+}
+
+// PaychVoucherCheckSpendable mocks base method.
+func (m *MockFullNode) PaychVoucherCheckSpendable(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychVoucherCheckSpendable", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychVoucherCheckSpendable indicates an expected call of PaychVoucherCheckSpendable.
+func (mr *MockFullNodeMockRecorder) PaychVoucherCheckSpendable(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckSpendable", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckSpendable), arg0, arg1, arg2, arg3, arg4)
+}
+
+// PaychVoucherCheckValid mocks base method.
+func (m *MockFullNode) PaychVoucherCheckValid(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychVoucherCheckValid", arg0, arg1, arg2)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// PaychVoucherCheckValid indicates an expected call of PaychVoucherCheckValid.
+func (mr *MockFullNodeMockRecorder) PaychVoucherCheckValid(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckValid", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckValid), arg0, arg1, arg2)
+}
+
+// PaychVoucherCreate mocks base method.
+func (m *MockFullNode) PaychVoucherCreate(arg0 context.Context, arg1 address.Address, arg2 big.Int, arg3 uint64) (*api.VoucherCreateResult, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychVoucherCreate", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*api.VoucherCreateResult)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychVoucherCreate indicates an expected call of PaychVoucherCreate.
+func (mr *MockFullNodeMockRecorder) PaychVoucherCreate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCreate", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCreate), arg0, arg1, arg2, arg3)
+}
+
+// PaychVoucherList mocks base method.
+func (m *MockFullNode) PaychVoucherList(arg0 context.Context, arg1 address.Address) ([]*paych.SignedVoucher, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychVoucherList", arg0, arg1)
+ ret0, _ := ret[0].([]*paych.SignedVoucher)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychVoucherList indicates an expected call of PaychVoucherList.
+func (mr *MockFullNodeMockRecorder) PaychVoucherList(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherList", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherList), arg0, arg1)
+}
+
+// PaychVoucherSubmit mocks base method.
+func (m *MockFullNode) PaychVoucherSubmit(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychVoucherSubmit", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychVoucherSubmit indicates an expected call of PaychVoucherSubmit.
+func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4)
+}
+
+// Session mocks base method.
+func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Session", arg0)
+ ret0, _ := ret[0].(uuid.UUID)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Session indicates an expected call of Session.
+func (mr *MockFullNodeMockRecorder) Session(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Session", reflect.TypeOf((*MockFullNode)(nil).Session), arg0)
+}
+
+// Shutdown mocks base method.
+func (m *MockFullNode) Shutdown(arg0 context.Context) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Shutdown", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Shutdown indicates an expected call of Shutdown.
+func (mr *MockFullNodeMockRecorder) Shutdown(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockFullNode)(nil).Shutdown), arg0)
+}
+
+// StateAccountKey mocks base method.
+func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateAccountKey", arg0, arg1, arg2)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateAccountKey indicates an expected call of StateAccountKey.
+func (mr *MockFullNodeMockRecorder) StateAccountKey(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", reflect.TypeOf((*MockFullNode)(nil).StateAccountKey), arg0, arg1, arg2)
+}
+
+// StateAllMinerFaults mocks base method.
+func (m *MockFullNode) StateAllMinerFaults(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) ([]*api.Fault, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateAllMinerFaults", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*api.Fault)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateAllMinerFaults indicates an expected call of StateAllMinerFaults.
+func (mr *MockFullNodeMockRecorder) StateAllMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAllMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateAllMinerFaults), arg0, arg1, arg2)
+}
+
+// StateCall mocks base method.
+func (m *MockFullNode) StateCall(arg0 context.Context, arg1 *types.Message, arg2 types.TipSetKey) (*api.InvocResult, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateCall", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*api.InvocResult)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateCall indicates an expected call of StateCall.
+func (mr *MockFullNodeMockRecorder) StateCall(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCall", reflect.TypeOf((*MockFullNode)(nil).StateCall), arg0, arg1, arg2)
+}
+
+// StateChangedActors mocks base method.
+func (m *MockFullNode) StateChangedActors(arg0 context.Context, arg1, arg2 cid.Cid) (map[string]types.Actor, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateChangedActors", arg0, arg1, arg2)
+ ret0, _ := ret[0].(map[string]types.Actor)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateChangedActors indicates an expected call of StateChangedActors.
+func (mr *MockFullNodeMockRecorder) StateChangedActors(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateChangedActors", reflect.TypeOf((*MockFullNode)(nil).StateChangedActors), arg0, arg1, arg2)
+}
+
+// StateCirculatingSupply mocks base method.
+func (m *MockFullNode) StateCirculatingSupply(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateCirculatingSupply", arg0, arg1)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateCirculatingSupply indicates an expected call of StateCirculatingSupply.
+func (mr *MockFullNodeMockRecorder) StateCirculatingSupply(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCirculatingSupply", reflect.TypeOf((*MockFullNode)(nil).StateCirculatingSupply), arg0, arg1)
+}
+
+// StateCompute mocks base method.
+func (m *MockFullNode) StateCompute(arg0 context.Context, arg1 abi.ChainEpoch, arg2 []*types.Message, arg3 types.TipSetKey) (*api.ComputeStateOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateCompute", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*api.ComputeStateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateCompute indicates an expected call of StateCompute.
+func (mr *MockFullNodeMockRecorder) StateCompute(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCompute", reflect.TypeOf((*MockFullNode)(nil).StateCompute), arg0, arg1, arg2, arg3)
+}
+
+// StateDealProviderCollateralBounds mocks base method.
+func (m *MockFullNode) StateDealProviderCollateralBounds(arg0 context.Context, arg1 abi.PaddedPieceSize, arg2 bool, arg3 types.TipSetKey) (api.DealCollateralBounds, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateDealProviderCollateralBounds", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(api.DealCollateralBounds)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateDealProviderCollateralBounds indicates an expected call of StateDealProviderCollateralBounds.
+func (mr *MockFullNodeMockRecorder) StateDealProviderCollateralBounds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDealProviderCollateralBounds", reflect.TypeOf((*MockFullNode)(nil).StateDealProviderCollateralBounds), arg0, arg1, arg2, arg3)
+}
+
+// StateDecodeParams mocks base method.
+func (m *MockFullNode) StateDecodeParams(arg0 context.Context, arg1 address.Address, arg2 abi.MethodNum, arg3 []byte, arg4 types.TipSetKey) (interface{}, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateDecodeParams", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(interface{})
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateDecodeParams indicates an expected call of StateDecodeParams.
+func (mr *MockFullNodeMockRecorder) StateDecodeParams(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDecodeParams", reflect.TypeOf((*MockFullNode)(nil).StateDecodeParams), arg0, arg1, arg2, arg3, arg4)
+}
+
+// StateGetActor mocks base method.
+func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.Actor, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateGetActor", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*types.Actor)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateGetActor indicates an expected call of StateGetActor.
+func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2)
+}
+
+// StateListActors mocks base method.
+func (m *MockFullNode) StateListActors(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateListActors", arg0, arg1)
+ ret0, _ := ret[0].([]address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateListActors indicates an expected call of StateListActors.
+func (mr *MockFullNodeMockRecorder) StateListActors(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListActors", reflect.TypeOf((*MockFullNode)(nil).StateListActors), arg0, arg1)
+}
+
+// StateListMessages mocks base method.
+func (m *MockFullNode) StateListMessages(arg0 context.Context, arg1 *api.MessageMatch, arg2 types.TipSetKey, arg3 abi.ChainEpoch) ([]cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateListMessages", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].([]cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateListMessages indicates an expected call of StateListMessages.
+func (mr *MockFullNodeMockRecorder) StateListMessages(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMessages", reflect.TypeOf((*MockFullNode)(nil).StateListMessages), arg0, arg1, arg2, arg3)
+}
+
+// StateListMiners mocks base method.
+func (m *MockFullNode) StateListMiners(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateListMiners", arg0, arg1)
+ ret0, _ := ret[0].([]address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateListMiners indicates an expected call of StateListMiners.
+func (mr *MockFullNodeMockRecorder) StateListMiners(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMiners", reflect.TypeOf((*MockFullNode)(nil).StateListMiners), arg0, arg1)
+}
+
+// StateLookupID mocks base method.
+func (m *MockFullNode) StateLookupID(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateLookupID", arg0, arg1, arg2)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateLookupID indicates an expected call of StateLookupID.
+func (mr *MockFullNodeMockRecorder) StateLookupID(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateLookupID", reflect.TypeOf((*MockFullNode)(nil).StateLookupID), arg0, arg1, arg2)
+}
+
+// StateMarketBalance mocks base method.
+func (m *MockFullNode) StateMarketBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MarketBalance, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMarketBalance", arg0, arg1, arg2)
+ ret0, _ := ret[0].(api.MarketBalance)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMarketBalance indicates an expected call of StateMarketBalance.
+func (mr *MockFullNodeMockRecorder) StateMarketBalance(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketBalance", reflect.TypeOf((*MockFullNode)(nil).StateMarketBalance), arg0, arg1, arg2)
+}
+
+// StateMarketDeals mocks base method.
+func (m *MockFullNode) StateMarketDeals(arg0 context.Context, arg1 types.TipSetKey) (map[string]api.MarketDeal, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMarketDeals", arg0, arg1)
+ ret0, _ := ret[0].(map[string]api.MarketDeal)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMarketDeals indicates an expected call of StateMarketDeals.
+func (mr *MockFullNodeMockRecorder) StateMarketDeals(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketDeals", reflect.TypeOf((*MockFullNode)(nil).StateMarketDeals), arg0, arg1)
+}
+
+// StateMarketParticipants mocks base method.
+func (m *MockFullNode) StateMarketParticipants(arg0 context.Context, arg1 types.TipSetKey) (map[string]api.MarketBalance, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMarketParticipants", arg0, arg1)
+ ret0, _ := ret[0].(map[string]api.MarketBalance)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMarketParticipants indicates an expected call of StateMarketParticipants.
+func (mr *MockFullNodeMockRecorder) StateMarketParticipants(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketParticipants", reflect.TypeOf((*MockFullNode)(nil).StateMarketParticipants), arg0, arg1)
+}
+
+// StateMarketStorageDeal mocks base method.
+func (m *MockFullNode) StateMarketStorageDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*api.MarketDeal, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMarketStorageDeal", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*api.MarketDeal)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMarketStorageDeal indicates an expected call of StateMarketStorageDeal.
+func (mr *MockFullNodeMockRecorder) StateMarketStorageDeal(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketStorageDeal", reflect.TypeOf((*MockFullNode)(nil).StateMarketStorageDeal), arg0, arg1, arg2)
+}
+
+// StateMinerActiveSectors mocks base method.
+func (m *MockFullNode) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerActiveSectors", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*miner.SectorOnChainInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors.
+func (mr *MockFullNodeMockRecorder) StateMinerActiveSectors(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerActiveSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerActiveSectors), arg0, arg1, arg2)
+}
+
+// StateMinerAvailableBalance mocks base method.
+func (m *MockFullNode) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance.
+func (mr *MockFullNodeMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).StateMinerAvailableBalance), arg0, arg1, arg2)
+}
+
+// StateMinerDeadlines mocks base method.
+func (m *MockFullNode) StateMinerDeadlines(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]api.Deadline, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerDeadlines", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]api.Deadline)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerDeadlines indicates an expected call of StateMinerDeadlines.
+func (mr *MockFullNodeMockRecorder) StateMinerDeadlines(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerDeadlines", reflect.TypeOf((*MockFullNode)(nil).StateMinerDeadlines), arg0, arg1, arg2)
+}
+
+// StateMinerFaults mocks base method.
+func (m *MockFullNode) StateMinerFaults(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerFaults", arg0, arg1, arg2)
+ ret0, _ := ret[0].(bitfield.BitField)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerFaults indicates an expected call of StateMinerFaults.
+func (mr *MockFullNodeMockRecorder) StateMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateMinerFaults), arg0, arg1, arg2)
+}
+
+// StateMinerInfo mocks base method.
+func (m *MockFullNode) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (miner.MinerInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2)
+ ret0, _ := ret[0].(miner.MinerInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerInfo indicates an expected call of StateMinerInfo.
+func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockFullNode)(nil).StateMinerInfo), arg0, arg1, arg2)
+}
+
+// StateMinerInitialPledgeCollateral mocks base method.
+func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral.
+func (mr *MockFullNodeMockRecorder) StateMinerInitialPledgeCollateral(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInitialPledgeCollateral", reflect.TypeOf((*MockFullNode)(nil).StateMinerInitialPledgeCollateral), arg0, arg1, arg2, arg3)
+}
+
+// StateMinerPartitions mocks base method.
+func (m *MockFullNode) StateMinerPartitions(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 types.TipSetKey) ([]api.Partition, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerPartitions", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].([]api.Partition)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerPartitions indicates an expected call of StateMinerPartitions.
+func (mr *MockFullNodeMockRecorder) StateMinerPartitions(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPartitions", reflect.TypeOf((*MockFullNode)(nil).StateMinerPartitions), arg0, arg1, arg2, arg3)
+}
+
+// StateMinerPower mocks base method.
+func (m *MockFullNode) StateMinerPower(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*api.MinerPower, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerPower", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*api.MinerPower)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerPower indicates an expected call of StateMinerPower.
+func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPower), arg0, arg1, arg2)
+}
+
+// StateMinerPreCommitDepositForPower mocks base method.
+func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerPreCommitDepositForPower indicates an expected call of StateMinerPreCommitDepositForPower.
+func (mr *MockFullNodeMockRecorder) StateMinerPreCommitDepositForPower(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPreCommitDepositForPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPreCommitDepositForPower), arg0, arg1, arg2, arg3)
+}
+
+// StateMinerProvingDeadline mocks base method.
+func (m *MockFullNode) StateMinerProvingDeadline(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*dline.Info, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerProvingDeadline", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*dline.Info)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerProvingDeadline indicates an expected call of StateMinerProvingDeadline.
+func (mr *MockFullNodeMockRecorder) StateMinerProvingDeadline(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerProvingDeadline", reflect.TypeOf((*MockFullNode)(nil).StateMinerProvingDeadline), arg0, arg1, arg2)
+}
+
+// StateMinerRecoveries mocks base method.
+func (m *MockFullNode) StateMinerRecoveries(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerRecoveries", arg0, arg1, arg2)
+ ret0, _ := ret[0].(bitfield.BitField)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerRecoveries indicates an expected call of StateMinerRecoveries.
+func (mr *MockFullNodeMockRecorder) StateMinerRecoveries(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerRecoveries", reflect.TypeOf((*MockFullNode)(nil).StateMinerRecoveries), arg0, arg1, arg2)
+}
+
+// StateMinerSectorAllocated mocks base method.
+func (m *MockFullNode) StateMinerSectorAllocated(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerSectorAllocated", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerSectorAllocated indicates an expected call of StateMinerSectorAllocated.
+func (mr *MockFullNodeMockRecorder) StateMinerSectorAllocated(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorAllocated", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorAllocated), arg0, arg1, arg2, arg3)
+}
+
+// StateMinerSectorCount mocks base method.
+func (m *MockFullNode) StateMinerSectorCount(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MinerSectors, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerSectorCount", arg0, arg1, arg2)
+ ret0, _ := ret[0].(api.MinerSectors)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerSectorCount indicates an expected call of StateMinerSectorCount.
+func (mr *MockFullNodeMockRecorder) StateMinerSectorCount(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorCount", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorCount), arg0, arg1, arg2)
+}
+
+// StateMinerSectors mocks base method.
+func (m *MockFullNode) StateMinerSectors(arg0 context.Context, arg1 address.Address, arg2 *bitfield.BitField, arg3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerSectors", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].([]*miner.SectorOnChainInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerSectors indicates an expected call of StateMinerSectors.
+func (mr *MockFullNodeMockRecorder) StateMinerSectors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectors), arg0, arg1, arg2, arg3)
+}
+
+// StateNetworkName mocks base method.
+func (m *MockFullNode) StateNetworkName(arg0 context.Context) (dtypes.NetworkName, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateNetworkName", arg0)
+ ret0, _ := ret[0].(dtypes.NetworkName)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateNetworkName indicates an expected call of StateNetworkName.
+func (mr *MockFullNodeMockRecorder) StateNetworkName(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkName", reflect.TypeOf((*MockFullNode)(nil).StateNetworkName), arg0)
+}
+
+// StateNetworkVersion mocks base method.
+func (m *MockFullNode) StateNetworkVersion(arg0 context.Context, arg1 types.TipSetKey) (network.Version, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateNetworkVersion", arg0, arg1)
+ ret0, _ := ret[0].(network.Version)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateNetworkVersion indicates an expected call of StateNetworkVersion.
+func (mr *MockFullNodeMockRecorder) StateNetworkVersion(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkVersion", reflect.TypeOf((*MockFullNode)(nil).StateNetworkVersion), arg0, arg1)
+}
+
+// StateReadState mocks base method.
+func (m *MockFullNode) StateReadState(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*api.ActorState, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateReadState", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*api.ActorState)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateReadState indicates an expected call of StateReadState.
+func (mr *MockFullNodeMockRecorder) StateReadState(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReadState", reflect.TypeOf((*MockFullNode)(nil).StateReadState), arg0, arg1, arg2)
+}
+
+// StateReplay mocks base method.
+func (m *MockFullNode) StateReplay(arg0 context.Context, arg1 types.TipSetKey, arg2 cid.Cid) (*api.InvocResult, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateReplay", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*api.InvocResult)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateReplay indicates an expected call of StateReplay.
+func (mr *MockFullNodeMockRecorder) StateReplay(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReplay", reflect.TypeOf((*MockFullNode)(nil).StateReplay), arg0, arg1, arg2)
+}
+
+// StateSearchMsg mocks base method.
+func (m *MockFullNode) StateSearchMsg(arg0 context.Context, arg1 types.TipSetKey, arg2 cid.Cid, arg3 abi.ChainEpoch, arg4 bool) (*api.MsgLookup, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateSearchMsg", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(*api.MsgLookup)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateSearchMsg indicates an expected call of StateSearchMsg.
+func (mr *MockFullNodeMockRecorder) StateSearchMsg(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsg", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsg), arg0, arg1, arg2, arg3, arg4)
+}
+
+// StateSectorExpiration mocks base method.
+func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorExpiration, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*miner.SectorExpiration)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateSectorExpiration indicates an expected call of StateSectorExpiration.
+func (mr *MockFullNodeMockRecorder) StateSectorExpiration(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorExpiration", reflect.TypeOf((*MockFullNode)(nil).StateSectorExpiration), arg0, arg1, arg2, arg3)
+}
+
+// StateSectorGetInfo mocks base method.
+func (m *MockFullNode) StateSectorGetInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorOnChainInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateSectorGetInfo", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*miner.SectorOnChainInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateSectorGetInfo indicates an expected call of StateSectorGetInfo.
+func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorGetInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorGetInfo), arg0, arg1, arg2, arg3)
+}
+
+// StateSectorPartition mocks base method.
+func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorLocation, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*miner.SectorLocation)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateSectorPartition indicates an expected call of StateSectorPartition.
+func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPartition", reflect.TypeOf((*MockFullNode)(nil).StateSectorPartition), arg0, arg1, arg2, arg3)
+}
+
+// StateSectorPreCommitInfo mocks base method.
+func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(miner.SectorPreCommitOnChainInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo.
+func (mr *MockFullNodeMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3)
+}
+
+// StateVMCirculatingSupplyInternal mocks base method.
+func (m *MockFullNode) StateVMCirculatingSupplyInternal(arg0 context.Context, arg1 types.TipSetKey) (api.CirculatingSupply, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateVMCirculatingSupplyInternal", arg0, arg1)
+ ret0, _ := ret[0].(api.CirculatingSupply)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateVMCirculatingSupplyInternal indicates an expected call of StateVMCirculatingSupplyInternal.
+func (mr *MockFullNodeMockRecorder) StateVMCirculatingSupplyInternal(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVMCirculatingSupplyInternal", reflect.TypeOf((*MockFullNode)(nil).StateVMCirculatingSupplyInternal), arg0, arg1)
+}
+
+// StateVerifiedClientStatus mocks base method.
+func (m *MockFullNode) StateVerifiedClientStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateVerifiedClientStatus", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateVerifiedClientStatus indicates an expected call of StateVerifiedClientStatus.
+func (mr *MockFullNodeMockRecorder) StateVerifiedClientStatus(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedClientStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedClientStatus), arg0, arg1, arg2)
+}
+
+// StateVerifiedRegistryRootKey mocks base method.
+func (m *MockFullNode) StateVerifiedRegistryRootKey(arg0 context.Context, arg1 types.TipSetKey) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateVerifiedRegistryRootKey", arg0, arg1)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateVerifiedRegistryRootKey indicates an expected call of StateVerifiedRegistryRootKey.
+func (mr *MockFullNodeMockRecorder) StateVerifiedRegistryRootKey(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedRegistryRootKey", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedRegistryRootKey), arg0, arg1)
+}
+
+// StateVerifierStatus mocks base method.
+func (m *MockFullNode) StateVerifierStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateVerifierStatus", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateVerifierStatus indicates an expected call of StateVerifierStatus.
+func (mr *MockFullNodeMockRecorder) StateVerifierStatus(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifierStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifierStatus), arg0, arg1, arg2)
+}
+
+// StateWaitMsg mocks base method.
+func (m *MockFullNode) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch, arg4 bool) (*api.MsgLookup, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateWaitMsg", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(*api.MsgLookup)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateWaitMsg indicates an expected call of StateWaitMsg.
+func (mr *MockFullNodeMockRecorder) StateWaitMsg(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2, arg3, arg4)
+}
+
+// SyncCheckBad mocks base method.
+func (m *MockFullNode) SyncCheckBad(arg0 context.Context, arg1 cid.Cid) (string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncCheckBad", arg0, arg1)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SyncCheckBad indicates an expected call of SyncCheckBad.
+func (mr *MockFullNodeMockRecorder) SyncCheckBad(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCheckBad", reflect.TypeOf((*MockFullNode)(nil).SyncCheckBad), arg0, arg1)
+}
+
+// SyncCheckpoint mocks base method.
+func (m *MockFullNode) SyncCheckpoint(arg0 context.Context, arg1 types.TipSetKey) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncCheckpoint", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SyncCheckpoint indicates an expected call of SyncCheckpoint.
+func (mr *MockFullNodeMockRecorder) SyncCheckpoint(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCheckpoint", reflect.TypeOf((*MockFullNode)(nil).SyncCheckpoint), arg0, arg1)
+}
+
+// SyncIncomingBlocks mocks base method.
+func (m *MockFullNode) SyncIncomingBlocks(arg0 context.Context) (<-chan *types.BlockHeader, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncIncomingBlocks", arg0)
+ ret0, _ := ret[0].(<-chan *types.BlockHeader)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SyncIncomingBlocks indicates an expected call of SyncIncomingBlocks.
+func (mr *MockFullNodeMockRecorder) SyncIncomingBlocks(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncIncomingBlocks", reflect.TypeOf((*MockFullNode)(nil).SyncIncomingBlocks), arg0)
+}
+
+// SyncMarkBad mocks base method.
+func (m *MockFullNode) SyncMarkBad(arg0 context.Context, arg1 cid.Cid) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncMarkBad", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SyncMarkBad indicates an expected call of SyncMarkBad.
+func (mr *MockFullNodeMockRecorder) SyncMarkBad(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncMarkBad", reflect.TypeOf((*MockFullNode)(nil).SyncMarkBad), arg0, arg1)
+}
+
+// SyncState mocks base method.
+func (m *MockFullNode) SyncState(arg0 context.Context) (*api.SyncState, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncState", arg0)
+ ret0, _ := ret[0].(*api.SyncState)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SyncState indicates an expected call of SyncState.
+func (mr *MockFullNodeMockRecorder) SyncState(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncState", reflect.TypeOf((*MockFullNode)(nil).SyncState), arg0)
+}
+
+// SyncSubmitBlock mocks base method.
+func (m *MockFullNode) SyncSubmitBlock(arg0 context.Context, arg1 *types.BlockMsg) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncSubmitBlock", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SyncSubmitBlock indicates an expected call of SyncSubmitBlock.
+func (mr *MockFullNodeMockRecorder) SyncSubmitBlock(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncSubmitBlock", reflect.TypeOf((*MockFullNode)(nil).SyncSubmitBlock), arg0, arg1)
+}
+
+// SyncUnmarkAllBad mocks base method.
+func (m *MockFullNode) SyncUnmarkAllBad(arg0 context.Context) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncUnmarkAllBad", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SyncUnmarkAllBad indicates an expected call of SyncUnmarkAllBad.
+func (mr *MockFullNodeMockRecorder) SyncUnmarkAllBad(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUnmarkAllBad", reflect.TypeOf((*MockFullNode)(nil).SyncUnmarkAllBad), arg0)
+}
+
+// SyncUnmarkBad mocks base method.
+func (m *MockFullNode) SyncUnmarkBad(arg0 context.Context, arg1 cid.Cid) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncUnmarkBad", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SyncUnmarkBad indicates an expected call of SyncUnmarkBad.
+func (mr *MockFullNodeMockRecorder) SyncUnmarkBad(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUnmarkBad", reflect.TypeOf((*MockFullNode)(nil).SyncUnmarkBad), arg0, arg1)
+}
+
+// SyncValidateTipset mocks base method.
+func (m *MockFullNode) SyncValidateTipset(arg0 context.Context, arg1 types.TipSetKey) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncValidateTipset", arg0, arg1)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SyncValidateTipset indicates an expected call of SyncValidateTipset.
+func (mr *MockFullNodeMockRecorder) SyncValidateTipset(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncValidateTipset", reflect.TypeOf((*MockFullNode)(nil).SyncValidateTipset), arg0, arg1)
+}
+
+// Version mocks base method.
+func (m *MockFullNode) Version(arg0 context.Context) (api.APIVersion, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Version", arg0)
+ ret0, _ := ret[0].(api.APIVersion)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Version indicates an expected call of Version.
+func (mr *MockFullNodeMockRecorder) Version(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockFullNode)(nil).Version), arg0)
+}
+
+// WalletBalance mocks base method.
+func (m *MockFullNode) WalletBalance(arg0 context.Context, arg1 address.Address) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletBalance", arg0, arg1)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletBalance indicates an expected call of WalletBalance.
+func (mr *MockFullNodeMockRecorder) WalletBalance(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletBalance", reflect.TypeOf((*MockFullNode)(nil).WalletBalance), arg0, arg1)
+}
+
+// WalletDefaultAddress mocks base method.
+func (m *MockFullNode) WalletDefaultAddress(arg0 context.Context) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletDefaultAddress", arg0)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletDefaultAddress indicates an expected call of WalletDefaultAddress.
+func (mr *MockFullNodeMockRecorder) WalletDefaultAddress(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDefaultAddress", reflect.TypeOf((*MockFullNode)(nil).WalletDefaultAddress), arg0)
+}
+
+// WalletDelete mocks base method.
+func (m *MockFullNode) WalletDelete(arg0 context.Context, arg1 address.Address) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletDelete", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WalletDelete indicates an expected call of WalletDelete.
+func (mr *MockFullNodeMockRecorder) WalletDelete(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDelete", reflect.TypeOf((*MockFullNode)(nil).WalletDelete), arg0, arg1)
+}
+
+// WalletExport mocks base method.
+func (m *MockFullNode) WalletExport(arg0 context.Context, arg1 address.Address) (*types.KeyInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletExport", arg0, arg1)
+ ret0, _ := ret[0].(*types.KeyInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletExport indicates an expected call of WalletExport.
+func (mr *MockFullNodeMockRecorder) WalletExport(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletExport", reflect.TypeOf((*MockFullNode)(nil).WalletExport), arg0, arg1)
+}
+
+// WalletHas mocks base method.
+func (m *MockFullNode) WalletHas(arg0 context.Context, arg1 address.Address) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletHas", arg0, arg1)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletHas indicates an expected call of WalletHas.
+func (mr *MockFullNodeMockRecorder) WalletHas(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletHas", reflect.TypeOf((*MockFullNode)(nil).WalletHas), arg0, arg1)
+}
+
+// WalletImport mocks base method.
+func (m *MockFullNode) WalletImport(arg0 context.Context, arg1 *types.KeyInfo) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletImport", arg0, arg1)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletImport indicates an expected call of WalletImport.
+func (mr *MockFullNodeMockRecorder) WalletImport(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletImport", reflect.TypeOf((*MockFullNode)(nil).WalletImport), arg0, arg1)
+}
+
+// WalletList mocks base method.
+func (m *MockFullNode) WalletList(arg0 context.Context) ([]address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletList", arg0)
+ ret0, _ := ret[0].([]address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletList indicates an expected call of WalletList.
+func (mr *MockFullNodeMockRecorder) WalletList(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletList", reflect.TypeOf((*MockFullNode)(nil).WalletList), arg0)
+}
+
+// WalletNew mocks base method.
+func (m *MockFullNode) WalletNew(arg0 context.Context, arg1 types.KeyType) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletNew", arg0, arg1)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletNew indicates an expected call of WalletNew.
+func (mr *MockFullNodeMockRecorder) WalletNew(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletNew", reflect.TypeOf((*MockFullNode)(nil).WalletNew), arg0, arg1)
+}
+
+// WalletSetDefault mocks base method.
+func (m *MockFullNode) WalletSetDefault(arg0 context.Context, arg1 address.Address) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletSetDefault", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WalletSetDefault indicates an expected call of WalletSetDefault.
+func (mr *MockFullNodeMockRecorder) WalletSetDefault(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSetDefault", reflect.TypeOf((*MockFullNode)(nil).WalletSetDefault), arg0, arg1)
+}
+
+// WalletSign mocks base method.
+func (m *MockFullNode) WalletSign(arg0 context.Context, arg1 address.Address, arg2 []byte) (*crypto.Signature, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletSign", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*crypto.Signature)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletSign indicates an expected call of WalletSign.
+func (mr *MockFullNodeMockRecorder) WalletSign(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSign", reflect.TypeOf((*MockFullNode)(nil).WalletSign), arg0, arg1, arg2)
+}
+
+// WalletSignMessage mocks base method.
+func (m *MockFullNode) WalletSignMessage(arg0 context.Context, arg1 address.Address, arg2 *types.Message) (*types.SignedMessage, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletSignMessage", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*types.SignedMessage)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletSignMessage indicates an expected call of WalletSignMessage.
+func (mr *MockFullNodeMockRecorder) WalletSignMessage(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSignMessage", reflect.TypeOf((*MockFullNode)(nil).WalletSignMessage), arg0, arg1, arg2)
+}
+
+// WalletValidateAddress mocks base method.
+func (m *MockFullNode) WalletValidateAddress(arg0 context.Context, arg1 string) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletValidateAddress", arg0, arg1)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletValidateAddress indicates an expected call of WalletValidateAddress.
+func (mr *MockFullNodeMockRecorder) WalletValidateAddress(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletValidateAddress", reflect.TypeOf((*MockFullNode)(nil).WalletValidateAddress), arg0, arg1)
+}
+
+// WalletVerify mocks base method.
+func (m *MockFullNode) WalletVerify(arg0 context.Context, arg1 address.Address, arg2 []byte, arg3 *crypto.Signature) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletVerify", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletVerify indicates an expected call of WalletVerify.
+func (mr *MockFullNodeMockRecorder) WalletVerify(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletVerify", reflect.TypeOf((*MockFullNode)(nil).WalletVerify), arg0, arg1, arg2, arg3)
+}
diff --git a/api/permissioned.go b/api/permissioned.go
new file mode 100644
index 00000000000..72d2239ee3c
--- /dev/null
+++ b/api/permissioned.go
@@ -0,0 +1,48 @@
+package api
+
+import (
+ "github.com/filecoin-project/go-jsonrpc/auth"
+)
+
+const (
+ // When changing these, update docs/API.md too
+
+ PermRead auth.Permission = "read" // default
+ PermWrite auth.Permission = "write"
+ PermSign auth.Permission = "sign" // Use wallet keys for signing
+ PermAdmin auth.Permission = "admin" // Manage permissions
+)
+
+var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin}
+var DefaultPerms = []auth.Permission{PermRead}
+
+func permissionedProxies(in, out interface{}) {
+ outs := GetInternalStructs(out)
+ for _, o := range outs {
+ auth.PermissionedProxy(AllPermissions, DefaultPerms, in, o)
+ }
+}
+
+func PermissionedStorMinerAPI(a StorageMiner) StorageMiner {
+ var out StorageMinerStruct
+ permissionedProxies(a, &out)
+ return &out
+}
+
+func PermissionedFullAPI(a FullNode) FullNode {
+ var out FullNodeStruct
+ permissionedProxies(a, &out)
+ return &out
+}
+
+func PermissionedWorkerAPI(a Worker) Worker {
+ var out WorkerStruct
+ permissionedProxies(a, &out)
+ return &out
+}
+
+func PermissionedWalletAPI(a Wallet) Wallet {
+ var out WalletStruct
+ permissionedProxies(a, &out)
+ return &out
+}
diff --git a/api/proxy_gen.go b/api/proxy_gen.go
new file mode 100644
index 00000000000..fb645eb4800
--- /dev/null
+++ b/api/proxy_gen.go
@@ -0,0 +1,3763 @@
+// Code generated by github.com/filecoin-project/lotus/gen/api. DO NOT EDIT.
+
+package api
+
+import (
+ "context"
+ "time"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ datatransfer "github.com/filecoin-project/go-data-transfer"
+ "github.com/filecoin-project/go-fil-markets/piecestore"
+ "github.com/filecoin-project/go-fil-markets/retrievalmarket"
+ "github.com/filecoin-project/go-fil-markets/storagemarket"
+ "github.com/filecoin-project/go-jsonrpc/auth"
+ "github.com/filecoin-project/go-multistore"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/dline"
+ apitypes "github.com/filecoin-project/lotus/api/types"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
+ "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
+ "github.com/filecoin-project/lotus/extern/sector-storage/stores"
+ "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
+ "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
+ marketevents "github.com/filecoin-project/lotus/markets/loggers"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+ "github.com/filecoin-project/specs-storage/storage"
+ "github.com/google/uuid"
+ "github.com/ipfs/go-cid"
+ metrics "github.com/libp2p/go-libp2p-core/metrics"
+ "github.com/libp2p/go-libp2p-core/network"
+ "github.com/libp2p/go-libp2p-core/peer"
+ "github.com/libp2p/go-libp2p-core/protocol"
+ xerrors "golang.org/x/xerrors"
+)
+
+type ChainIOStruct struct {
+ Internal struct {
+ ChainHasObj func(p0 context.Context, p1 cid.Cid) (bool, error) ``
+
+ ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) ``
+ }
+}
+
+type ChainIOStub struct {
+}
+
+type CommonStruct struct {
+ Internal struct {
+ AuthNew func(p0 context.Context, p1 []auth.Permission) ([]byte, error) `perm:"admin"`
+
+ AuthVerify func(p0 context.Context, p1 string) ([]auth.Permission, error) `perm:"read"`
+
+ Closing func(p0 context.Context) (<-chan struct{}, error) `perm:"read"`
+
+ Discover func(p0 context.Context) (apitypes.OpenRPCDocument, error) `perm:"read"`
+
+ LogList func(p0 context.Context) ([]string, error) `perm:"write"`
+
+ LogSetLevel func(p0 context.Context, p1 string, p2 string) error `perm:"write"`
+
+ Session func(p0 context.Context) (uuid.UUID, error) `perm:"read"`
+
+ Shutdown func(p0 context.Context) error `perm:"admin"`
+
+ Version func(p0 context.Context) (APIVersion, error) `perm:"read"`
+ }
+}
+
+type CommonStub struct {
+}
+
+type CommonNetStruct struct {
+ CommonStruct
+
+ NetStruct
+
+ Internal struct {
+ }
+}
+
+type CommonNetStub struct {
+ CommonStub
+
+ NetStub
+}
+
+type FullNodeStruct struct {
+ CommonStruct
+
+ NetStruct
+
+ Internal struct {
+ BeaconGetEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
+
+ ChainDeleteObj func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`
+
+ ChainExport func(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) `perm:"read"`
+
+ ChainGetBlock func(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) `perm:"read"`
+
+ ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) `perm:"read"`
+
+ ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) `perm:"read"`
+
+ ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `perm:"read"`
+
+ ChainGetMessagesInTipset func(p0 context.Context, p1 types.TipSetKey) ([]Message, error) `perm:"read"`
+
+ ChainGetNode func(p0 context.Context, p1 string) (*IpldObject, error) `perm:"read"`
+
+ ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]Message, error) `perm:"read"`
+
+ ChainGetParentReceipts func(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"`
+
+ ChainGetPath func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) `perm:"read"`
+
+ ChainGetRandomnessFromBeacon func(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) `perm:"read"`
+
+ ChainGetRandomnessFromTickets func(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) `perm:"read"`
+
+ ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) `perm:"read"`
+
+ ChainGetTipSetByHeight func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) `perm:"read"`
+
+ ChainHasObj func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"read"`
+
+ ChainHead func(p0 context.Context) (*types.TipSet, error) `perm:"read"`
+
+ ChainNotify func(p0 context.Context) (<-chan []*HeadChange, error) `perm:"read"`
+
+ ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) `perm:"read"`
+
+ ChainSetHead func(p0 context.Context, p1 types.TipSetKey) error `perm:"admin"`
+
+ ChainStatObj func(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (ObjStat, error) `perm:"read"`
+
+ ChainTipSetWeight func(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ ClientCalcCommP func(p0 context.Context, p1 string) (*CommPRet, error) `perm:"write"`
+
+ ClientCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
+
+ ClientCancelRetrievalDeal func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"write"`
+
+ ClientDataTransferUpdates func(p0 context.Context) (<-chan DataTransferChannel, error) `perm:"write"`
+
+ ClientDealPieceCID func(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) `perm:"read"`
+
+ ClientDealSize func(p0 context.Context, p1 cid.Cid) (DataSize, error) `perm:"read"`
+
+ ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) `perm:"read"`
+
+ ClientGenCar func(p0 context.Context, p1 FileRef, p2 string) error `perm:"write"`
+
+ ClientGetDealInfo func(p0 context.Context, p1 cid.Cid) (*DealInfo, error) `perm:"read"`
+
+ ClientGetDealStatus func(p0 context.Context, p1 uint64) (string, error) `perm:"read"`
+
+ ClientGetDealUpdates func(p0 context.Context) (<-chan DealInfo, error) `perm:"write"`
+
+ ClientGetRetrievalUpdates func(p0 context.Context) (<-chan RetrievalInfo, error) `perm:"write"`
+
+ ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"`
+
+ ClientImport func(p0 context.Context, p1 FileRef) (*ImportRes, error) `perm:"admin"`
+
+ ClientListDataTransfers func(p0 context.Context) ([]DataTransferChannel, error) `perm:"write"`
+
+ ClientListDeals func(p0 context.Context) ([]DealInfo, error) `perm:"write"`
+
+ ClientListImports func(p0 context.Context) ([]Import, error) `perm:"write"`
+
+ ClientListRetrievals func(p0 context.Context) ([]RetrievalInfo, error) `perm:"write"`
+
+ ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) `perm:"read"`
+
+ ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"`
+
+ ClientRemoveImport func(p0 context.Context, p1 multistore.StoreID) error `perm:"admin"`
+
+ ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
+
+ ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error `perm:"admin"`
+
+ ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"`
+
+ ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
+
+ ClientStartDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"admin"`
+
+ ClientStatelessDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"write"`
+
+ CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`
+
+ GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ GasEstimateGasLimit func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) `perm:"read"`
+
+ GasEstimateGasPremium func(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `perm:"read"`
+
+ MarketAddBalance func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"`
+
+ MarketGetReserved func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"sign"`
+
+ MarketReleaseFunds func(p0 context.Context, p1 address.Address, p2 types.BigInt) error `perm:"sign"`
+
+ MarketReserveFunds func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"`
+
+ MarketWithdraw func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"`
+
+ MinerCreateBlock func(p0 context.Context, p1 *BlockTemplate) (*types.BlockMsg, error) `perm:"write"`
+
+ MinerGetBaseInfo func(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) `perm:"read"`
+
+ MpoolBatchPush func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"`
+
+ MpoolBatchPushMessage func(p0 context.Context, p1 []*types.Message, p2 *MessageSendSpec) ([]*types.SignedMessage, error) `perm:"sign"`
+
+ MpoolBatchPushUntrusted func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"`
+
+ MpoolCheckMessages func(p0 context.Context, p1 []*MessagePrototype) ([][]MessageCheckStatus, error) `perm:"read"`
+
+ MpoolCheckPendingMessages func(p0 context.Context, p1 address.Address) ([][]MessageCheckStatus, error) `perm:"read"`
+
+ MpoolCheckReplaceMessages func(p0 context.Context, p1 []*types.Message) ([][]MessageCheckStatus, error) `perm:"read"`
+
+ MpoolClear func(p0 context.Context, p1 bool) error `perm:"write"`
+
+ MpoolGetConfig func(p0 context.Context) (*types.MpoolConfig, error) `perm:"read"`
+
+ MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"read"`
+
+ MpoolPending func(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"`
+
+ MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `perm:"write"`
+
+ MpoolPushMessage func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec) (*types.SignedMessage, error) `perm:"sign"`
+
+ MpoolPushUntrusted func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `perm:"write"`
+
+ MpoolSelect func(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) `perm:"read"`
+
+ MpoolSetConfig func(p0 context.Context, p1 *types.MpoolConfig) error `perm:"admin"`
+
+ MpoolSub func(p0 context.Context) (<-chan MpoolUpdate, error) `perm:"read"`
+
+ MsigAddApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*MessagePrototype, error) `perm:"sign"`
+
+ MsigAddCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (*MessagePrototype, error) `perm:"sign"`
+
+ MsigAddPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) `perm:"sign"`
+
+ MsigApprove func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) `perm:"sign"`
+
+ MsigApproveTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) `perm:"sign"`
+
+ MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) `perm:"sign"`
+
+ MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) `perm:"sign"`
+
+ MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ MsigGetPending func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) `perm:"read"`
+
+ MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ MsigGetVestingSchedule func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) `perm:"read"`
+
+ MsigPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (*MessagePrototype, error) `perm:"sign"`
+
+ MsigRemoveSigner func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) `perm:"sign"`
+
+ MsigSwapApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*MessagePrototype, error) `perm:"sign"`
+
+ MsigSwapCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*MessagePrototype, error) `perm:"sign"`
+
+ MsigSwapPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*MessagePrototype, error) `perm:"sign"`
+
+ NodeStatus func(p0 context.Context, p1 bool) (NodeStatus, error) `perm:"read"`
+
+ PaychAllocateLane func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"sign"`
+
+ PaychAvailableFunds func(p0 context.Context, p1 address.Address) (*ChannelAvailableFunds, error) `perm:"sign"`
+
+ PaychAvailableFundsByFromTo func(p0 context.Context, p1 address.Address, p2 address.Address) (*ChannelAvailableFunds, error) `perm:"sign"`
+
+ PaychCollect func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"`
+
+ PaychGet func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) `perm:"sign"`
+
+ PaychGetWaitReady func(p0 context.Context, p1 cid.Cid) (address.Address, error) `perm:"sign"`
+
+ PaychList func(p0 context.Context) ([]address.Address, error) `perm:"read"`
+
+ PaychNewPayment func(p0 context.Context, p1 address.Address, p2 address.Address, p3 []VoucherSpec) (*PaymentInfo, error) `perm:"sign"`
+
+ PaychSettle func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"`
+
+ PaychStatus func(p0 context.Context, p1 address.Address) (*PaychStatus, error) `perm:"read"`
+
+ PaychVoucherAdd func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) `perm:"write"`
+
+ PaychVoucherCheckSpendable func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) `perm:"read"`
+
+ PaychVoucherCheckValid func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error `perm:"read"`
+
+ PaychVoucherCreate func(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*VoucherCreateResult, error) `perm:"sign"`
+
+ PaychVoucherList func(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) `perm:"write"`
+
+ PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"`
+
+ StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
+
+ StateAllMinerFaults func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*Fault, error) `perm:"read"`
+
+ StateCall func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*InvocResult, error) `perm:"read"`
+
+ StateChangedActors func(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) `perm:"read"`
+
+ StateCirculatingSupply func(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) `perm:"read"`
+
+ StateCompute func(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*ComputeStateOutput, error) `perm:"read"`
+
+ StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) `perm:"read"`
+
+ StateDecodeParams func(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) `perm:"read"`
+
+ StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"`
+
+ StateListActors func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `perm:"read"`
+
+ StateListMessages func(p0 context.Context, p1 *MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"`
+
+ StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `perm:"read"`
+
+ StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
+
+ StateMarketBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) `perm:"read"`
+
+ StateMarketDeals func(p0 context.Context, p1 types.TipSetKey) (map[string]MarketDeal, error) `perm:"read"`
+
+ StateMarketParticipants func(p0 context.Context, p1 types.TipSetKey) (map[string]MarketBalance, error) `perm:"read"`
+
+ StateMarketStorageDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) `perm:"read"`
+
+ StateMinerActiveSectors func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"`
+
+ StateMinerAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ StateMinerDeadlines func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]Deadline, error) `perm:"read"`
+
+ StateMinerFaults func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) `perm:"read"`
+
+ StateMinerInfo func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) `perm:"read"`
+
+ StateMinerInitialPledgeCollateral func(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ StateMinerPartitions func(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]Partition, error) `perm:"read"`
+
+ StateMinerPower func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) `perm:"read"`
+
+ StateMinerPreCommitDepositForPower func(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) `perm:"read"`
+
+ StateMinerRecoveries func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) `perm:"read"`
+
+ StateMinerSectorAllocated func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) `perm:"read"`
+
+ StateMinerSectorCount func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerSectors, error) `perm:"read"`
+
+ StateMinerSectors func(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"`
+
+ StateNetworkName func(p0 context.Context) (dtypes.NetworkName, error) `perm:"read"`
+
+ StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) `perm:"read"`
+
+ StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) `perm:"read"`
+
+ StateReplay func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*InvocResult, error) `perm:"read"`
+
+ StateSearchMsg func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `perm:"read"`
+
+ StateSectorExpiration func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) `perm:"read"`
+
+ StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"`
+
+ StateSectorPartition func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) `perm:"read"`
+
+ StateSectorPreCommitInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"`
+
+ StateVMCirculatingSupplyInternal func(p0 context.Context, p1 types.TipSetKey) (CirculatingSupply, error) `perm:"read"`
+
+ StateVerifiedClientStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) `perm:"read"`
+
+ StateVerifiedRegistryRootKey func(p0 context.Context, p1 types.TipSetKey) (address.Address, error) `perm:"read"`
+
+ StateVerifierStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) `perm:"read"`
+
+ StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `perm:"read"`
+
+ SyncCheckBad func(p0 context.Context, p1 cid.Cid) (string, error) `perm:"read"`
+
+ SyncCheckpoint func(p0 context.Context, p1 types.TipSetKey) error `perm:"admin"`
+
+ SyncIncomingBlocks func(p0 context.Context) (<-chan *types.BlockHeader, error) `perm:"read"`
+
+ SyncMarkBad func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`
+
+ SyncState func(p0 context.Context) (*SyncState, error) `perm:"read"`
+
+ SyncSubmitBlock func(p0 context.Context, p1 *types.BlockMsg) error `perm:"write"`
+
+ SyncUnmarkAllBad func(p0 context.Context) error `perm:"admin"`
+
+ SyncUnmarkBad func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`
+
+ SyncValidateTipset func(p0 context.Context, p1 types.TipSetKey) (bool, error) `perm:"read"`
+
+ WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"read"`
+
+ WalletDefaultAddress func(p0 context.Context) (address.Address, error) `perm:"write"`
+
+ WalletDelete func(p0 context.Context, p1 address.Address) error `perm:"admin"`
+
+ WalletExport func(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) `perm:"admin"`
+
+ WalletHas func(p0 context.Context, p1 address.Address) (bool, error) `perm:"write"`
+
+ WalletImport func(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) `perm:"admin"`
+
+ WalletList func(p0 context.Context) ([]address.Address, error) `perm:"write"`
+
+ WalletNew func(p0 context.Context, p1 types.KeyType) (address.Address, error) `perm:"write"`
+
+ WalletSetDefault func(p0 context.Context, p1 address.Address) error `perm:"write"`
+
+ WalletSign func(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) `perm:"sign"`
+
+ WalletSignMessage func(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) `perm:"sign"`
+
+ WalletValidateAddress func(p0 context.Context, p1 string) (address.Address, error) `perm:"read"`
+
+ WalletVerify func(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) `perm:"read"`
+ }
+}
+
+type FullNodeStub struct {
+ CommonStub
+
+ NetStub
+}
+
+type GatewayStruct struct {
+ Internal struct {
+ ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) ``
+
+ ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) ``
+
+ ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) ``
+
+ ChainGetTipSetByHeight func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) ``
+
+ ChainHasObj func(p0 context.Context, p1 cid.Cid) (bool, error) ``
+
+ ChainHead func(p0 context.Context) (*types.TipSet, error) ``
+
+ ChainNotify func(p0 context.Context) (<-chan []*HeadChange, error) ``
+
+ ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) ``
+
+ GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``
+
+ MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) ``
+
+ MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) ``
+
+ MsigGetPending func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) ``
+
+ MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) ``
+
+ StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``
+
+ StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) ``
+
+ StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``
+
+ StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) ``
+
+ StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``
+
+ StateMarketBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) ``
+
+ StateMarketStorageDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) ``
+
+ StateMinerInfo func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) ``
+
+ StateMinerPower func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) ``
+
+ StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) ``
+
+ StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) ``
+
+ StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) ``
+
+ StateSearchMsg func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) ``
+
+ StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) ``
+
+ StateVerifiedClientStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) ``
+
+ StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) ``
+
+ Version func(p0 context.Context) (APIVersion, error) ``
+
+ WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) ``
+ }
+}
+
+type GatewayStub struct {
+}
+
+type NetStruct struct {
+ Internal struct {
+ ID func(p0 context.Context) (peer.ID, error) `perm:"read"`
+
+ NetAddrsListen func(p0 context.Context) (peer.AddrInfo, error) `perm:"read"`
+
+ NetAgentVersion func(p0 context.Context, p1 peer.ID) (string, error) `perm:"read"`
+
+ NetAutoNatStatus func(p0 context.Context) (NatInfo, error) `perm:"read"`
+
+ NetBandwidthStats func(p0 context.Context) (metrics.Stats, error) `perm:"read"`
+
+ NetBandwidthStatsByPeer func(p0 context.Context) (map[string]metrics.Stats, error) `perm:"read"`
+
+ NetBandwidthStatsByProtocol func(p0 context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"`
+
+ NetBlockAdd func(p0 context.Context, p1 NetBlockList) error `perm:"admin"`
+
+ NetBlockList func(p0 context.Context) (NetBlockList, error) `perm:"read"`
+
+ NetBlockRemove func(p0 context.Context, p1 NetBlockList) error `perm:"admin"`
+
+ NetConnect func(p0 context.Context, p1 peer.AddrInfo) error `perm:"write"`
+
+ NetConnectedness func(p0 context.Context, p1 peer.ID) (network.Connectedness, error) `perm:"read"`
+
+ NetDisconnect func(p0 context.Context, p1 peer.ID) error `perm:"write"`
+
+ NetFindPeer func(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) `perm:"read"`
+
+ NetPeerInfo func(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) `perm:"read"`
+
+ NetPeers func(p0 context.Context) ([]peer.AddrInfo, error) `perm:"read"`
+
+ NetPubsubScores func(p0 context.Context) ([]PubsubScore, error) `perm:"read"`
+ }
+}
+
+type NetStub struct {
+}
+
+type SignableStruct struct {
+ Internal struct {
+ Sign func(p0 context.Context, p1 SignFunc) error ``
+ }
+}
+
+type SignableStub struct {
+}
+
+type StorageMinerStruct struct {
+ CommonStruct
+
+ NetStruct
+
+ Internal struct {
+ ActorAddress func(p0 context.Context) (address.Address, error) `perm:"read"`
+
+ ActorAddressConfig func(p0 context.Context) (AddressConfig, error) `perm:"read"`
+
+ ActorSectorSize func(p0 context.Context, p1 address.Address) (abi.SectorSize, error) `perm:"read"`
+
+ CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
+
+ ComputeProof func(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) `perm:"read"`
+
+ CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`
+
+ DealsConsiderOfflineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"`
+
+ DealsConsiderOfflineStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"`
+
+ DealsConsiderOnlineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"`
+
+ DealsConsiderOnlineStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"`
+
+ DealsConsiderUnverifiedStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"`
+
+ DealsConsiderVerifiedStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"`
+
+ DealsImportData func(p0 context.Context, p1 cid.Cid, p2 string) error `perm:"admin"`
+
+ DealsList func(p0 context.Context) ([]MarketDeal, error) `perm:"admin"`
+
+ DealsPieceCidBlocklist func(p0 context.Context) ([]cid.Cid, error) `perm:"admin"`
+
+ DealsSetConsiderOfflineRetrievalDeals func(p0 context.Context, p1 bool) error `perm:"admin"`
+
+ DealsSetConsiderOfflineStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"`
+
+ DealsSetConsiderOnlineRetrievalDeals func(p0 context.Context, p1 bool) error `perm:"admin"`
+
+ DealsSetConsiderOnlineStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"`
+
+ DealsSetConsiderUnverifiedStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"`
+
+ DealsSetConsiderVerifiedStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"`
+
+ DealsSetPieceCidBlocklist func(p0 context.Context, p1 []cid.Cid) error `perm:"admin"`
+
+ MarketCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
+
+ MarketDataTransferUpdates func(p0 context.Context) (<-chan DataTransferChannel, error) `perm:"write"`
+
+ MarketGetAsk func(p0 context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"`
+
+ MarketGetDealUpdates func(p0 context.Context) (<-chan storagemarket.MinerDeal, error) `perm:"read"`
+
+ MarketGetRetrievalAsk func(p0 context.Context) (*retrievalmarket.Ask, error) `perm:"read"`
+
+ MarketImportDealData func(p0 context.Context, p1 cid.Cid, p2 string) error `perm:"write"`
+
+ MarketListDataTransfers func(p0 context.Context) ([]DataTransferChannel, error) `perm:"write"`
+
+ MarketListDeals func(p0 context.Context) ([]MarketDeal, error) `perm:"read"`
+
+ MarketListIncompleteDeals func(p0 context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"`
+
+ MarketListRetrievalDeals func(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) `perm:"read"`
+
+ MarketPendingDeals func(p0 context.Context) (PendingDealInfo, error) `perm:"write"`
+
+ MarketPublishPendingDeals func(p0 context.Context) error `perm:"admin"`
+
+ MarketRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
+
+ MarketSetAsk func(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error `perm:"admin"`
+
+ MarketSetRetrievalAsk func(p0 context.Context, p1 *retrievalmarket.Ask) error `perm:"admin"`
+
+ MiningBase func(p0 context.Context) (*types.TipSet, error) `perm:"read"`
+
+ PiecesGetCIDInfo func(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) `perm:"read"`
+
+ PiecesGetPieceInfo func(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) `perm:"read"`
+
+ PiecesListCidInfos func(p0 context.Context) ([]cid.Cid, error) `perm:"read"`
+
+ PiecesListPieces func(p0 context.Context) ([]cid.Cid, error) `perm:"read"`
+
+ PledgeSector func(p0 context.Context) (abi.SectorID, error) `perm:"write"`
+
+ ReturnAddPiece func(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error `perm:"admin"`
+
+ ReturnFetch func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
+
+ ReturnFinalizeSector func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
+
+ ReturnMoveStorage func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
+
+ ReturnReadPiece func(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error `perm:"admin"`
+
+ ReturnReleaseUnsealed func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
+
+ ReturnSealCommit1 func(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error `perm:"admin"`
+
+ ReturnSealCommit2 func(p0 context.Context, p1 storiface.CallID, p2 storage.Proof, p3 *storiface.CallError) error `perm:"admin"`
+
+ ReturnSealPreCommit1 func(p0 context.Context, p1 storiface.CallID, p2 storage.PreCommit1Out, p3 *storiface.CallError) error `perm:"admin"`
+
+ ReturnSealPreCommit2 func(p0 context.Context, p1 storiface.CallID, p2 storage.SectorCids, p3 *storiface.CallError) error `perm:"admin"`
+
+ ReturnUnsealPiece func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
+
+ SealingAbort func(p0 context.Context, p1 storiface.CallID) error `perm:"admin"`
+
+ SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"`
+
+ SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"`
+
+ SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"`
+
+ SectorCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"`
+
+ SectorGetExpectedSealDuration func(p0 context.Context) (time.Duration, error) `perm:"read"`
+
+ SectorGetSealDelay func(p0 context.Context) (time.Duration, error) `perm:"read"`
+
+ SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
+
+ SectorPreCommitFlush func(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) `perm:"admin"`
+
+ SectorPreCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"`
+
+ SectorRemove func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
+
+ SectorSetExpectedSealDuration func(p0 context.Context, p1 time.Duration) error `perm:"write"`
+
+ SectorSetSealDelay func(p0 context.Context, p1 time.Duration) error `perm:"write"`
+
+ SectorStartSealing func(p0 context.Context, p1 abi.SectorNumber) error `perm:"write"`
+
+ SectorTerminate func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
+
+ SectorTerminateFlush func(p0 context.Context) (*cid.Cid, error) `perm:"admin"`
+
+ SectorTerminatePending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"`
+
+ SectorsList func(p0 context.Context) ([]abi.SectorNumber, error) `perm:"read"`
+
+ SectorsListInStates func(p0 context.Context, p1 []SectorState) ([]abi.SectorNumber, error) `perm:"read"`
+
+ SectorsRefs func(p0 context.Context) (map[string][]SealedRef, error) `perm:"read"`
+
+ SectorsStatus func(p0 context.Context, p1 abi.SectorNumber, p2 bool) (SectorInfo, error) `perm:"read"`
+
+ SectorsSummary func(p0 context.Context) (map[SectorState]int, error) `perm:"read"`
+
+ SectorsUnsealPiece func(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error `perm:"admin"`
+
+ SectorsUpdate func(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error `perm:"admin"`
+
+ StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"`
+
+ StorageAttach func(p0 context.Context, p1 stores.StorageInfo, p2 fsutil.FsStat) error `perm:"admin"`
+
+ StorageBestAlloc func(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]stores.StorageInfo, error) `perm:"admin"`
+
+ StorageDeclareSector func(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error `perm:"admin"`
+
+ StorageDropSector func(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error `perm:"admin"`
+
+ StorageFindSector func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) `perm:"admin"`
+
+ StorageInfo func(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) `perm:"admin"`
+
+ StorageList func(p0 context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"`
+
+ StorageLocal func(p0 context.Context) (map[stores.ID]string, error) `perm:"admin"`
+
+ StorageLock func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) error `perm:"admin"`
+
+ StorageReportHealth func(p0 context.Context, p1 stores.ID, p2 stores.HealthReport) error `perm:"admin"`
+
+ StorageStat func(p0 context.Context, p1 stores.ID) (fsutil.FsStat, error) `perm:"admin"`
+
+ StorageTryLock func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) (bool, error) `perm:"admin"`
+
+ WorkerConnect func(p0 context.Context, p1 string) error `perm:"admin"`
+
+ WorkerJobs func(p0 context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) `perm:"admin"`
+
+ WorkerStats func(p0 context.Context) (map[uuid.UUID]storiface.WorkerStats, error) `perm:"admin"`
+ }
+}
+
+type StorageMinerStub struct {
+ CommonStub
+
+ NetStub
+}
+
+type WalletStruct struct {
+ Internal struct {
+ WalletDelete func(p0 context.Context, p1 address.Address) error `perm:"admin"`
+
+ WalletExport func(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) `perm:"admin"`
+
+ WalletHas func(p0 context.Context, p1 address.Address) (bool, error) `perm:"admin"`
+
+ WalletImport func(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) `perm:"admin"`
+
+ WalletList func(p0 context.Context) ([]address.Address, error) `perm:"admin"`
+
+ WalletNew func(p0 context.Context, p1 types.KeyType) (address.Address, error) `perm:"admin"`
+
+ WalletSign func(p0 context.Context, p1 address.Address, p2 []byte, p3 MsgMeta) (*crypto.Signature, error) `perm:"admin"`
+ }
+}
+
+type WalletStub struct {
+}
+
+type WorkerStruct struct {
+ Internal struct {
+ AddPiece func(p0 context.Context, p1 storage.SectorRef, p2 []abi.UnpaddedPieceSize, p3 abi.UnpaddedPieceSize, p4 storage.Data) (storiface.CallID, error) `perm:"admin"`
+
+ Enabled func(p0 context.Context) (bool, error) `perm:"admin"`
+
+ Fetch func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`
+
+ FinalizeSector func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`
+
+ Info func(p0 context.Context) (storiface.WorkerInfo, error) `perm:"admin"`
+
+ MoveStorage func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"`
+
+ Paths func(p0 context.Context) ([]stores.StoragePath, error) `perm:"admin"`
+
+ ProcessSession func(p0 context.Context) (uuid.UUID, error) `perm:"admin"`
+
+ ReleaseUnsealed func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`
+
+ Remove func(p0 context.Context, p1 abi.SectorID) error `perm:"admin"`
+
+ SealCommit1 func(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) `perm:"admin"`
+
+ SealCommit2 func(p0 context.Context, p1 storage.SectorRef, p2 storage.Commit1Out) (storiface.CallID, error) `perm:"admin"`
+
+ SealPreCommit1 func(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"`
+
+ SealPreCommit2 func(p0 context.Context, p1 storage.SectorRef, p2 storage.PreCommit1Out) (storiface.CallID, error) `perm:"admin"`
+
+ Session func(p0 context.Context) (uuid.UUID, error) `perm:"admin"`
+
+ SetEnabled func(p0 context.Context, p1 bool) error `perm:"admin"`
+
+ StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"`
+
+ TaskDisable func(p0 context.Context, p1 sealtasks.TaskType) error `perm:"admin"`
+
+ TaskEnable func(p0 context.Context, p1 sealtasks.TaskType) error `perm:"admin"`
+
+ TaskTypes func(p0 context.Context) (map[sealtasks.TaskType]struct{}, error) `perm:"admin"`
+
+ UnsealPiece func(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 cid.Cid) (storiface.CallID, error) `perm:"admin"`
+
+ Version func(p0 context.Context) (Version, error) `perm:"admin"`
+
+ WaitQuiet func(p0 context.Context) error `perm:"admin"`
+ }
+}
+
+type WorkerStub struct {
+}
+
+func (s *ChainIOStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) {
+ return s.Internal.ChainHasObj(p0, p1)
+}
+
+func (s *ChainIOStub) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *ChainIOStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) {
+ return s.Internal.ChainReadObj(p0, p1)
+}
+
+func (s *ChainIOStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) {
+ return *new([]byte), xerrors.New("method not supported")
+}
+
+func (s *CommonStruct) AuthNew(p0 context.Context, p1 []auth.Permission) ([]byte, error) {
+ return s.Internal.AuthNew(p0, p1)
+}
+
+func (s *CommonStub) AuthNew(p0 context.Context, p1 []auth.Permission) ([]byte, error) {
+ return *new([]byte), xerrors.New("method not supported")
+}
+
+func (s *CommonStruct) AuthVerify(p0 context.Context, p1 string) ([]auth.Permission, error) {
+ return s.Internal.AuthVerify(p0, p1)
+}
+
+func (s *CommonStub) AuthVerify(p0 context.Context, p1 string) ([]auth.Permission, error) {
+ return *new([]auth.Permission), xerrors.New("method not supported")
+}
+
+func (s *CommonStruct) Closing(p0 context.Context) (<-chan struct{}, error) {
+ return s.Internal.Closing(p0)
+}
+
+func (s *CommonStub) Closing(p0 context.Context) (<-chan struct{}, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *CommonStruct) Discover(p0 context.Context) (apitypes.OpenRPCDocument, error) {
+ return s.Internal.Discover(p0)
+}
+
+func (s *CommonStub) Discover(p0 context.Context) (apitypes.OpenRPCDocument, error) {
+ return *new(apitypes.OpenRPCDocument), xerrors.New("method not supported")
+}
+
+func (s *CommonStruct) LogList(p0 context.Context) ([]string, error) {
+ return s.Internal.LogList(p0)
+}
+
+func (s *CommonStub) LogList(p0 context.Context) ([]string, error) {
+ return *new([]string), xerrors.New("method not supported")
+}
+
+func (s *CommonStruct) LogSetLevel(p0 context.Context, p1 string, p2 string) error {
+ return s.Internal.LogSetLevel(p0, p1, p2)
+}
+
+func (s *CommonStub) LogSetLevel(p0 context.Context, p1 string, p2 string) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *CommonStruct) Session(p0 context.Context) (uuid.UUID, error) {
+ return s.Internal.Session(p0)
+}
+
+func (s *CommonStub) Session(p0 context.Context) (uuid.UUID, error) {
+ return *new(uuid.UUID), xerrors.New("method not supported")
+}
+
+func (s *CommonStruct) Shutdown(p0 context.Context) error {
+ return s.Internal.Shutdown(p0)
+}
+
+func (s *CommonStub) Shutdown(p0 context.Context) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *CommonStruct) Version(p0 context.Context) (APIVersion, error) {
+ return s.Internal.Version(p0)
+}
+
+func (s *CommonStub) Version(p0 context.Context) (APIVersion, error) {
+ return *new(APIVersion), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) BeaconGetEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) {
+ return s.Internal.BeaconGetEntry(p0, p1)
+}
+
+func (s *FullNodeStub) BeaconGetEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainDeleteObj(p0 context.Context, p1 cid.Cid) error {
+ return s.Internal.ChainDeleteObj(p0, p1)
+}
+
+func (s *FullNodeStub) ChainDeleteObj(p0 context.Context, p1 cid.Cid) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) {
+ return s.Internal.ChainExport(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) {
+ return s.Internal.ChainGetBlock(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) {
+ return s.Internal.ChainGetBlockMessages(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) {
+ return s.Internal.ChainGetGenesis(p0)
+}
+
+func (s *FullNodeStub) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) {
+ return s.Internal.ChainGetMessage(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]Message, error) {
+ return s.Internal.ChainGetMessagesInTipset(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]Message, error) {
+ return *new([]Message), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetNode(p0 context.Context, p1 string) (*IpldObject, error) {
+ return s.Internal.ChainGetNode(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetNode(p0 context.Context, p1 string) (*IpldObject, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) {
+ return s.Internal.ChainGetParentMessages(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) {
+ return *new([]Message), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) {
+ return s.Internal.ChainGetParentReceipts(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) {
+ return *new([]*types.MessageReceipt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) {
+ return s.Internal.ChainGetPath(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) {
+ return *new([]*HeadChange), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetRandomnessFromBeacon(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) {
+ return s.Internal.ChainGetRandomnessFromBeacon(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) ChainGetRandomnessFromBeacon(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) {
+ return *new(abi.Randomness), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) {
+ return s.Internal.ChainGetRandomnessFromTickets(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) {
+ return *new(abi.Randomness), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) {
+ return s.Internal.ChainGetTipSet(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) {
+ return s.Internal.ChainGetTipSetByHeight(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) {
+ return s.Internal.ChainHasObj(p0, p1)
+}
+
+func (s *FullNodeStub) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainHead(p0 context.Context) (*types.TipSet, error) {
+ return s.Internal.ChainHead(p0)
+}
+
+func (s *FullNodeStub) ChainHead(p0 context.Context) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainNotify(p0 context.Context) (<-chan []*HeadChange, error) {
+ return s.Internal.ChainNotify(p0)
+}
+
+func (s *FullNodeStub) ChainNotify(p0 context.Context) (<-chan []*HeadChange, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) {
+ return s.Internal.ChainReadObj(p0, p1)
+}
+
+func (s *FullNodeStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) {
+ return *new([]byte), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainSetHead(p0 context.Context, p1 types.TipSetKey) error {
+ return s.Internal.ChainSetHead(p0, p1)
+}
+
+func (s *FullNodeStub) ChainSetHead(p0 context.Context, p1 types.TipSetKey) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainStatObj(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (ObjStat, error) {
+ return s.Internal.ChainStatObj(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ChainStatObj(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (ObjStat, error) {
+ return *new(ObjStat), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.ChainTipSetWeight(p0, p1)
+}
+
+func (s *FullNodeStub) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientCalcCommP(p0 context.Context, p1 string) (*CommPRet, error) {
+ return s.Internal.ClientCalcCommP(p0, p1)
+}
+
+func (s *FullNodeStub) ClientCalcCommP(p0 context.Context, p1 string) (*CommPRet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+ return s.Internal.ClientCancelDataTransfer(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error {
+ return s.Internal.ClientCancelRetrievalDeal(p0, p1)
+}
+
+func (s *FullNodeStub) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) {
+ return s.Internal.ClientDataTransferUpdates(p0)
+}
+
+func (s *FullNodeStub) ClientDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) {
+ return s.Internal.ClientDealPieceCID(p0, p1)
+}
+
+func (s *FullNodeStub) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) {
+ return *new(DataCIDSize), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize, error) {
+ return s.Internal.ClientDealSize(p0, p1)
+}
+
+func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize, error) {
+ return *new(DataSize), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) {
+ return s.Internal.ClientFindData(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) {
+ return *new([]QueryOffer), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientGenCar(p0 context.Context, p1 FileRef, p2 string) error {
+ return s.Internal.ClientGenCar(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientGenCar(p0 context.Context, p1 FileRef, p2 string) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*DealInfo, error) {
+ return s.Internal.ClientGetDealInfo(p0, p1)
+}
+
+func (s *FullNodeStub) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*DealInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) {
+ return s.Internal.ClientGetDealStatus(p0, p1)
+}
+
+func (s *FullNodeStub) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) {
+ return "", xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientGetDealUpdates(p0 context.Context) (<-chan DealInfo, error) {
+ return s.Internal.ClientGetDealUpdates(p0)
+}
+
+func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan DealInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan RetrievalInfo, error) {
+ return s.Internal.ClientGetRetrievalUpdates(p0)
+}
+
+func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan RetrievalInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {
+ return s.Internal.ClientHasLocal(p0, p1)
+}
+
+func (s *FullNodeStub) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientImport(p0 context.Context, p1 FileRef) (*ImportRes, error) {
+ return s.Internal.ClientImport(p0, p1)
+}
+
+func (s *FullNodeStub) ClientImport(p0 context.Context, p1 FileRef) (*ImportRes, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) {
+ return s.Internal.ClientListDataTransfers(p0)
+}
+
+func (s *FullNodeStub) ClientListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) {
+ return *new([]DataTransferChannel), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientListDeals(p0 context.Context) ([]DealInfo, error) {
+ return s.Internal.ClientListDeals(p0)
+}
+
+func (s *FullNodeStub) ClientListDeals(p0 context.Context) ([]DealInfo, error) {
+ return *new([]DealInfo), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientListImports(p0 context.Context) ([]Import, error) {
+ return s.Internal.ClientListImports(p0)
+}
+
+func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]Import, error) {
+ return *new([]Import), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) {
+ return s.Internal.ClientListRetrievals(p0)
+}
+
+func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) {
+ return *new([]RetrievalInfo), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) {
+ return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) {
+ return *new(QueryOffer), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) {
+ return s.Internal.ClientQueryAsk(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error {
+ return s.Internal.ClientRemoveImport(p0, p1)
+}
+
+func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+ return s.Internal.ClientRestartDataTransfer(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error {
+ return s.Internal.ClientRetrieve(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error {
+ return s.Internal.ClientRetrieveTryRestartInsufficientFunds(p0, p1)
+}
+
+func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) {
+ return s.Internal.ClientRetrieveWithEvents(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) {
+ return s.Internal.ClientStartDeal(p0, p1)
+}
+
+func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) {
+ return s.Internal.ClientStatelessDeal(p0, p1)
+}
+
+func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error {
+ return s.Internal.CreateBackup(p0, p1)
+}
+
+func (s *FullNodeStub) CreateBackup(p0 context.Context, p1 string) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) GasEstimateFeeCap(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.GasEstimateFeeCap(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) GasEstimateFeeCap(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) GasEstimateGasLimit(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) {
+ return s.Internal.GasEstimateGasLimit(p0, p1, p2)
+}
+
+func (s *FullNodeStub) GasEstimateGasLimit(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) {
+ return 0, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.GasEstimateGasPremium(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) {
+ return s.Internal.GasEstimateMessageGas(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) {
+ return s.Internal.MarketAddBalance(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MarketGetReserved(p0 context.Context, p1 address.Address) (types.BigInt, error) {
+ return s.Internal.MarketGetReserved(p0, p1)
+}
+
+func (s *FullNodeStub) MarketGetReserved(p0 context.Context, p1 address.Address) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MarketReleaseFunds(p0 context.Context, p1 address.Address, p2 types.BigInt) error {
+ return s.Internal.MarketReleaseFunds(p0, p1, p2)
+}
+
+func (s *FullNodeStub) MarketReleaseFunds(p0 context.Context, p1 address.Address, p2 types.BigInt) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MarketReserveFunds(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) {
+ return s.Internal.MarketReserveFunds(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) MarketReserveFunds(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MarketWithdraw(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) {
+ return s.Internal.MarketWithdraw(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) MarketWithdraw(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MinerCreateBlock(p0 context.Context, p1 *BlockTemplate) (*types.BlockMsg, error) {
+ return s.Internal.MinerCreateBlock(p0, p1)
+}
+
+func (s *FullNodeStub) MinerCreateBlock(p0 context.Context, p1 *BlockTemplate) (*types.BlockMsg, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) {
+ return s.Internal.MinerGetBaseInfo(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolBatchPush(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) {
+ return s.Internal.MpoolBatchPush(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolBatchPush(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) {
+ return *new([]cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolBatchPushMessage(p0 context.Context, p1 []*types.Message, p2 *MessageSendSpec) ([]*types.SignedMessage, error) {
+ return s.Internal.MpoolBatchPushMessage(p0, p1, p2)
+}
+
+func (s *FullNodeStub) MpoolBatchPushMessage(p0 context.Context, p1 []*types.Message, p2 *MessageSendSpec) ([]*types.SignedMessage, error) {
+ return *new([]*types.SignedMessage), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) {
+ return s.Internal.MpoolBatchPushUntrusted(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) {
+ return *new([]cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolCheckMessages(p0 context.Context, p1 []*MessagePrototype) ([][]MessageCheckStatus, error) {
+ return s.Internal.MpoolCheckMessages(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolCheckMessages(p0 context.Context, p1 []*MessagePrototype) ([][]MessageCheckStatus, error) {
+ return *new([][]MessageCheckStatus), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolCheckPendingMessages(p0 context.Context, p1 address.Address) ([][]MessageCheckStatus, error) {
+ return s.Internal.MpoolCheckPendingMessages(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolCheckPendingMessages(p0 context.Context, p1 address.Address) ([][]MessageCheckStatus, error) {
+ return *new([][]MessageCheckStatus), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolCheckReplaceMessages(p0 context.Context, p1 []*types.Message) ([][]MessageCheckStatus, error) {
+ return s.Internal.MpoolCheckReplaceMessages(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolCheckReplaceMessages(p0 context.Context, p1 []*types.Message) ([][]MessageCheckStatus, error) {
+ return *new([][]MessageCheckStatus), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolClear(p0 context.Context, p1 bool) error {
+ return s.Internal.MpoolClear(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolClear(p0 context.Context, p1 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolGetConfig(p0 context.Context) (*types.MpoolConfig, error) {
+ return s.Internal.MpoolGetConfig(p0)
+}
+
+func (s *FullNodeStub) MpoolGetConfig(p0 context.Context) (*types.MpoolConfig, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
+ return s.Internal.MpoolGetNonce(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
+ return 0, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) {
+ return s.Internal.MpoolPending(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) {
+ return *new([]*types.SignedMessage), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
+ return s.Internal.MpoolPush(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolPushMessage(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec) (*types.SignedMessage, error) {
+ return s.Internal.MpoolPushMessage(p0, p1, p2)
+}
+
+func (s *FullNodeStub) MpoolPushMessage(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec) (*types.SignedMessage, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolPushUntrusted(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
+ return s.Internal.MpoolPushUntrusted(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolPushUntrusted(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolSelect(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) {
+ return s.Internal.MpoolSelect(p0, p1, p2)
+}
+
+func (s *FullNodeStub) MpoolSelect(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) {
+ return *new([]*types.SignedMessage), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolSetConfig(p0 context.Context, p1 *types.MpoolConfig) error {
+ return s.Internal.MpoolSetConfig(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolSetConfig(p0 context.Context, p1 *types.MpoolConfig) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolSub(p0 context.Context) (<-chan MpoolUpdate, error) {
+ return s.Internal.MpoolSub(p0)
+}
+
+func (s *FullNodeStub) MpoolSub(p0 context.Context) (<-chan MpoolUpdate, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*MessagePrototype, error) {
+ return s.Internal.MsigAddApprove(p0, p1, p2, p3, p4, p5, p6)
+}
+
+func (s *FullNodeStub) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*MessagePrototype, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (*MessagePrototype, error) {
+ return s.Internal.MsigAddCancel(p0, p1, p2, p3, p4, p5)
+}
+
+func (s *FullNodeStub) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (*MessagePrototype, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) {
+ return s.Internal.MsigAddPropose(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) {
+ return s.Internal.MsigApprove(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) {
+ return s.Internal.MsigApproveTxnHash(p0, p1, p2, p3, p4, p5, p6, p7, p8)
+}
+
+func (s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) {
+ return s.Internal.MsigCancel(p0, p1, p2, p3, p4, p5, p6, p7)
+}
+
+func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) {
+ return s.Internal.MsigCreate(p0, p1, p2, p3, p4, p5, p6)
+}
+
+func (s *FullNodeStub) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.MsigGetAvailableBalance(p0, p1, p2)
+}
+
+func (s *FullNodeStub) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) {
+ return s.Internal.MsigGetPending(p0, p1, p2)
+}
+
+func (s *FullNodeStub) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) {
+ return *new([]*MsigTransaction), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.MsigGetVested(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) {
+ return s.Internal.MsigGetVestingSchedule(p0, p1, p2)
+}
+
+func (s *FullNodeStub) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) {
+ return *new(MsigVesting), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (*MessagePrototype, error) {
+ return s.Internal.MsigPropose(p0, p1, p2, p3, p4, p5, p6)
+}
+
+func (s *FullNodeStub) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (*MessagePrototype, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) {
+ return s.Internal.MsigRemoveSigner(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*MessagePrototype, error) {
+ return s.Internal.MsigSwapApprove(p0, p1, p2, p3, p4, p5, p6)
+}
+
+func (s *FullNodeStub) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*MessagePrototype, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*MessagePrototype, error) {
+ return s.Internal.MsigSwapCancel(p0, p1, p2, p3, p4, p5)
+}
+
+func (s *FullNodeStub) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*MessagePrototype, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*MessagePrototype, error) {
+ return s.Internal.MsigSwapPropose(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*MessagePrototype, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) NodeStatus(p0 context.Context, p1 bool) (NodeStatus, error) {
+ return s.Internal.NodeStatus(p0, p1)
+}
+
+func (s *FullNodeStub) NodeStatus(p0 context.Context, p1 bool) (NodeStatus, error) {
+ return *new(NodeStatus), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) {
+ return s.Internal.PaychAllocateLane(p0, p1)
+}
+
+func (s *FullNodeStub) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) {
+ return 0, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychAvailableFunds(p0 context.Context, p1 address.Address) (*ChannelAvailableFunds, error) {
+ return s.Internal.PaychAvailableFunds(p0, p1)
+}
+
+func (s *FullNodeStub) PaychAvailableFunds(p0 context.Context, p1 address.Address) (*ChannelAvailableFunds, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychAvailableFundsByFromTo(p0 context.Context, p1 address.Address, p2 address.Address) (*ChannelAvailableFunds, error) {
+ return s.Internal.PaychAvailableFundsByFromTo(p0, p1, p2)
+}
+
+func (s *FullNodeStub) PaychAvailableFundsByFromTo(p0 context.Context, p1 address.Address, p2 address.Address) (*ChannelAvailableFunds, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychCollect(p0 context.Context, p1 address.Address) (cid.Cid, error) {
+ return s.Internal.PaychCollect(p0, p1)
+}
+
+func (s *FullNodeStub) PaychCollect(p0 context.Context, p1 address.Address) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) {
+ return s.Internal.PaychGet(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychGetWaitReady(p0 context.Context, p1 cid.Cid) (address.Address, error) {
+ return s.Internal.PaychGetWaitReady(p0, p1)
+}
+
+func (s *FullNodeStub) PaychGetWaitReady(p0 context.Context, p1 cid.Cid) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychList(p0 context.Context) ([]address.Address, error) {
+ return s.Internal.PaychList(p0)
+}
+
+func (s *FullNodeStub) PaychList(p0 context.Context) ([]address.Address, error) {
+ return *new([]address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychNewPayment(p0 context.Context, p1 address.Address, p2 address.Address, p3 []VoucherSpec) (*PaymentInfo, error) {
+ return s.Internal.PaychNewPayment(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) PaychNewPayment(p0 context.Context, p1 address.Address, p2 address.Address, p3 []VoucherSpec) (*PaymentInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychSettle(p0 context.Context, p1 address.Address) (cid.Cid, error) {
+ return s.Internal.PaychSettle(p0, p1)
+}
+
+func (s *FullNodeStub) PaychSettle(p0 context.Context, p1 address.Address) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychStatus(p0 context.Context, p1 address.Address) (*PaychStatus, error) {
+ return s.Internal.PaychStatus(p0, p1)
+}
+
+func (s *FullNodeStub) PaychStatus(p0 context.Context, p1 address.Address) (*PaychStatus, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychVoucherAdd(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) {
+ return s.Internal.PaychVoucherAdd(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) PaychVoucherAdd(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychVoucherCheckSpendable(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) {
+ return s.Internal.PaychVoucherCheckSpendable(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) PaychVoucherCheckSpendable(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychVoucherCheckValid(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error {
+ return s.Internal.PaychVoucherCheckValid(p0, p1, p2)
+}
+
+func (s *FullNodeStub) PaychVoucherCheckValid(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychVoucherCreate(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*VoucherCreateResult, error) {
+ return s.Internal.PaychVoucherCreate(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) PaychVoucherCreate(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*VoucherCreateResult, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) {
+ return s.Internal.PaychVoucherList(p0, p1)
+}
+
+func (s *FullNodeStub) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) {
+ return *new([]*paych.SignedVoucher), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychVoucherSubmit(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) {
+ return s.Internal.PaychVoucherSubmit(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) PaychVoucherSubmit(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return s.Internal.StateAccountKey(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateAllMinerFaults(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*Fault, error) {
+ return s.Internal.StateAllMinerFaults(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateAllMinerFaults(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*Fault, error) {
+ return *new([]*Fault), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*InvocResult, error) {
+ return s.Internal.StateCall(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*InvocResult, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateChangedActors(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) {
+ return s.Internal.StateChangedActors(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateChangedActors(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) {
+ return *new(map[string]types.Actor), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) {
+ return s.Internal.StateCirculatingSupply(p0, p1)
+}
+
+func (s *FullNodeStub) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) {
+ return *new(abi.TokenAmount), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateCompute(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*ComputeStateOutput, error) {
+ return s.Internal.StateCompute(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateCompute(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*ComputeStateOutput, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) {
+ return s.Internal.StateDealProviderCollateralBounds(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) {
+ return *new(DealCollateralBounds), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) {
+ return s.Internal.StateDecodeParams(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) {
+ return s.Internal.StateGetActor(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateListActors(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
+ return s.Internal.StateListActors(p0, p1)
+}
+
+func (s *FullNodeStub) StateListActors(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
+ return *new([]address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateListMessages(p0 context.Context, p1 *MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) {
+ return s.Internal.StateListMessages(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateListMessages(p0 context.Context, p1 *MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) {
+ return *new([]cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
+ return s.Internal.StateListMiners(p0, p1)
+}
+
+func (s *FullNodeStub) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
+ return *new([]address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return s.Internal.StateLookupID(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) {
+ return s.Internal.StateMarketBalance(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) {
+ return *new(MarketBalance), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMarketDeals(p0 context.Context, p1 types.TipSetKey) (map[string]MarketDeal, error) {
+ return s.Internal.StateMarketDeals(p0, p1)
+}
+
+func (s *FullNodeStub) StateMarketDeals(p0 context.Context, p1 types.TipSetKey) (map[string]MarketDeal, error) {
+ return *new(map[string]MarketDeal), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMarketParticipants(p0 context.Context, p1 types.TipSetKey) (map[string]MarketBalance, error) {
+ return s.Internal.StateMarketParticipants(p0, p1)
+}
+
+func (s *FullNodeStub) StateMarketParticipants(p0 context.Context, p1 types.TipSetKey) (map[string]MarketBalance, error) {
+ return *new(map[string]MarketBalance), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) {
+ return s.Internal.StateMarketStorageDeal(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerActiveSectors(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
+ return s.Internal.StateMinerActiveSectors(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerActiveSectors(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
+ return *new([]*miner.SectorOnChainInfo), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.StateMinerAvailableBalance(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]Deadline, error) {
+ return s.Internal.StateMinerDeadlines(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]Deadline, error) {
+ return *new([]Deadline), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerFaults(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) {
+ return s.Internal.StateMinerFaults(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerFaults(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) {
+ return *new(bitfield.BitField), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) {
+ return s.Internal.StateMinerInfo(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) {
+ return *new(miner.MinerInfo), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerInitialPledgeCollateral(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.StateMinerInitialPledgeCollateral(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateMinerInitialPledgeCollateral(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerPartitions(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]Partition, error) {
+ return s.Internal.StateMinerPartitions(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateMinerPartitions(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]Partition, error) {
+ return *new([]Partition), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) {
+ return s.Internal.StateMinerPower(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerPreCommitDepositForPower(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.StateMinerPreCommitDepositForPower(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateMinerPreCommitDepositForPower(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) {
+ return s.Internal.StateMinerProvingDeadline(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerRecoveries(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) {
+ return s.Internal.StateMinerRecoveries(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerRecoveries(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) {
+ return *new(bitfield.BitField), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerSectorAllocated(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) {
+ return s.Internal.StateMinerSectorAllocated(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateMinerSectorAllocated(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerSectors, error) {
+ return s.Internal.StateMinerSectorCount(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerSectors, error) {
+ return *new(MinerSectors), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerSectors(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
+ return s.Internal.StateMinerSectors(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateMinerSectors(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
+ return *new([]*miner.SectorOnChainInfo), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
+ return s.Internal.StateNetworkName(p0)
+}
+
+func (s *FullNodeStub) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
+ return *new(dtypes.NetworkName), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) {
+ return s.Internal.StateNetworkVersion(p0, p1)
+}
+
+func (s *FullNodeStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) {
+ return *new(apitypes.NetworkVersion), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) {
+ return s.Internal.StateReadState(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*InvocResult, error) {
+ return s.Internal.StateReplay(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*InvocResult, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) {
+ return s.Internal.StateSearchMsg(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) {
+ return s.Internal.StateSectorExpiration(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) {
+ return s.Internal.StateSectorGetInfo(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) {
+ return s.Internal.StateSectorPartition(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateSectorPreCommitInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
+ return s.Internal.StateSectorPreCommitInfo(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateSectorPreCommitInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
+ return *new(miner.SectorPreCommitOnChainInfo), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateVMCirculatingSupplyInternal(p0 context.Context, p1 types.TipSetKey) (CirculatingSupply, error) {
+ return s.Internal.StateVMCirculatingSupplyInternal(p0, p1)
+}
+
+func (s *FullNodeStub) StateVMCirculatingSupplyInternal(p0 context.Context, p1 types.TipSetKey) (CirculatingSupply, error) {
+ return *new(CirculatingSupply), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
+ return s.Internal.StateVerifiedClientStatus(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateVerifiedRegistryRootKey(p0 context.Context, p1 types.TipSetKey) (address.Address, error) {
+ return s.Internal.StateVerifiedRegistryRootKey(p0, p1)
+}
+
+func (s *FullNodeStub) StateVerifiedRegistryRootKey(p0 context.Context, p1 types.TipSetKey) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
+ return s.Internal.StateVerifierStatus(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) {
+ return s.Internal.StateWaitMsg(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) {
+ return s.Internal.SyncCheckBad(p0, p1)
+}
+
+func (s *FullNodeStub) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) {
+ return "", xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncCheckpoint(p0 context.Context, p1 types.TipSetKey) error {
+ return s.Internal.SyncCheckpoint(p0, p1)
+}
+
+func (s *FullNodeStub) SyncCheckpoint(p0 context.Context, p1 types.TipSetKey) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncIncomingBlocks(p0 context.Context) (<-chan *types.BlockHeader, error) {
+ return s.Internal.SyncIncomingBlocks(p0)
+}
+
+func (s *FullNodeStub) SyncIncomingBlocks(p0 context.Context) (<-chan *types.BlockHeader, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncMarkBad(p0 context.Context, p1 cid.Cid) error {
+ return s.Internal.SyncMarkBad(p0, p1)
+}
+
+func (s *FullNodeStub) SyncMarkBad(p0 context.Context, p1 cid.Cid) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncState(p0 context.Context) (*SyncState, error) {
+ return s.Internal.SyncState(p0)
+}
+
+func (s *FullNodeStub) SyncState(p0 context.Context) (*SyncState, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncSubmitBlock(p0 context.Context, p1 *types.BlockMsg) error {
+ return s.Internal.SyncSubmitBlock(p0, p1)
+}
+
+func (s *FullNodeStub) SyncSubmitBlock(p0 context.Context, p1 *types.BlockMsg) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncUnmarkAllBad(p0 context.Context) error {
+ return s.Internal.SyncUnmarkAllBad(p0)
+}
+
+func (s *FullNodeStub) SyncUnmarkAllBad(p0 context.Context) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncUnmarkBad(p0 context.Context, p1 cid.Cid) error {
+ return s.Internal.SyncUnmarkBad(p0, p1)
+}
+
+func (s *FullNodeStub) SyncUnmarkBad(p0 context.Context, p1 cid.Cid) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncValidateTipset(p0 context.Context, p1 types.TipSetKey) (bool, error) {
+ return s.Internal.SyncValidateTipset(p0, p1)
+}
+
+func (s *FullNodeStub) SyncValidateTipset(p0 context.Context, p1 types.TipSetKey) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) {
+ return s.Internal.WalletBalance(p0, p1)
+}
+
+func (s *FullNodeStub) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletDefaultAddress(p0 context.Context) (address.Address, error) {
+ return s.Internal.WalletDefaultAddress(p0)
+}
+
+func (s *FullNodeStub) WalletDefaultAddress(p0 context.Context) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletDelete(p0 context.Context, p1 address.Address) error {
+ return s.Internal.WalletDelete(p0, p1)
+}
+
+func (s *FullNodeStub) WalletDelete(p0 context.Context, p1 address.Address) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) {
+ return s.Internal.WalletExport(p0, p1)
+}
+
+func (s *FullNodeStub) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletHas(p0 context.Context, p1 address.Address) (bool, error) {
+ return s.Internal.WalletHas(p0, p1)
+}
+
+func (s *FullNodeStub) WalletHas(p0 context.Context, p1 address.Address) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) {
+ return s.Internal.WalletImport(p0, p1)
+}
+
+func (s *FullNodeStub) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletList(p0 context.Context) ([]address.Address, error) {
+ return s.Internal.WalletList(p0)
+}
+
+func (s *FullNodeStub) WalletList(p0 context.Context) ([]address.Address, error) {
+ return *new([]address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) {
+ return s.Internal.WalletNew(p0, p1)
+}
+
+func (s *FullNodeStub) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletSetDefault(p0 context.Context, p1 address.Address) error {
+ return s.Internal.WalletSetDefault(p0, p1)
+}
+
+func (s *FullNodeStub) WalletSetDefault(p0 context.Context, p1 address.Address) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletSign(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) {
+ return s.Internal.WalletSign(p0, p1, p2)
+}
+
+func (s *FullNodeStub) WalletSign(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletSignMessage(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) {
+ return s.Internal.WalletSignMessage(p0, p1, p2)
+}
+
+func (s *FullNodeStub) WalletSignMessage(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletValidateAddress(p0 context.Context, p1 string) (address.Address, error) {
+ return s.Internal.WalletValidateAddress(p0, p1)
+}
+
+func (s *FullNodeStub) WalletValidateAddress(p0 context.Context, p1 string) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletVerify(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) {
+ return s.Internal.WalletVerify(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) WalletVerify(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) {
+ return s.Internal.ChainGetBlockMessages(p0, p1)
+}
+
+func (s *GatewayStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) {
+ return s.Internal.ChainGetMessage(p0, p1)
+}
+
+func (s *GatewayStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) {
+ return s.Internal.ChainGetTipSet(p0, p1)
+}
+
+func (s *GatewayStub) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) {
+ return s.Internal.ChainGetTipSetByHeight(p0, p1, p2)
+}
+
+func (s *GatewayStub) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) {
+ return s.Internal.ChainHasObj(p0, p1)
+}
+
+func (s *GatewayStub) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainHead(p0 context.Context) (*types.TipSet, error) {
+ return s.Internal.ChainHead(p0)
+}
+
+func (s *GatewayStub) ChainHead(p0 context.Context) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainNotify(p0 context.Context) (<-chan []*HeadChange, error) {
+ return s.Internal.ChainNotify(p0)
+}
+
+func (s *GatewayStub) ChainNotify(p0 context.Context) (<-chan []*HeadChange, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) {
+ return s.Internal.ChainReadObj(p0, p1)
+}
+
+func (s *GatewayStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) {
+ return *new([]byte), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) {
+ return s.Internal.GasEstimateMessageGas(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
+ return s.Internal.MpoolPush(p0, p1)
+}
+
+func (s *GatewayStub) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.MsigGetAvailableBalance(p0, p1, p2)
+}
+
+func (s *GatewayStub) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) {
+ return s.Internal.MsigGetPending(p0, p1, p2)
+}
+
+func (s *GatewayStub) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) {
+ return *new([]*MsigTransaction), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.MsigGetVested(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return s.Internal.StateAccountKey(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) {
+ return s.Internal.StateDealProviderCollateralBounds(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) {
+ return *new(DealCollateralBounds), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) {
+ return s.Internal.StateGetActor(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
+ return s.Internal.StateListMiners(p0, p1)
+}
+
+func (s *GatewayStub) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
+ return *new([]address.Address), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return s.Internal.StateLookupID(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) {
+ return s.Internal.StateMarketBalance(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) {
+ return *new(MarketBalance), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) {
+ return s.Internal.StateMarketStorageDeal(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) {
+ return s.Internal.StateMinerInfo(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) {
+ return *new(miner.MinerInfo), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) {
+ return s.Internal.StateMinerPower(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) {
+ return s.Internal.StateMinerProvingDeadline(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) {
+ return s.Internal.StateNetworkVersion(p0, p1)
+}
+
+func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) {
+ return *new(apitypes.NetworkVersion), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) {
+ return s.Internal.StateReadState(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) {
+ return s.Internal.StateSearchMsg(p0, p1, p2, p3, p4)
+}
+
+func (s *GatewayStub) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) {
+ return s.Internal.StateSectorGetInfo(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
+ return s.Internal.StateVerifiedClientStatus(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) {
+ return s.Internal.StateWaitMsg(p0, p1, p2, p3, p4)
+}
+
+func (s *GatewayStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) Version(p0 context.Context) (APIVersion, error) {
+ return s.Internal.Version(p0)
+}
+
+func (s *GatewayStub) Version(p0 context.Context) (APIVersion, error) {
+ return *new(APIVersion), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) {
+ return s.Internal.WalletBalance(p0, p1)
+}
+
+func (s *GatewayStub) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *NetStruct) ID(p0 context.Context) (peer.ID, error) {
+ return s.Internal.ID(p0)
+}
+
+func (s *NetStub) ID(p0 context.Context) (peer.ID, error) {
+ return *new(peer.ID), xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) {
+ return s.Internal.NetAddrsListen(p0)
+}
+
+func (s *NetStub) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) {
+ return *new(peer.AddrInfo), xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) {
+ return s.Internal.NetAgentVersion(p0, p1)
+}
+
+func (s *NetStub) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) {
+ return "", xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetAutoNatStatus(p0 context.Context) (NatInfo, error) {
+ return s.Internal.NetAutoNatStatus(p0)
+}
+
+func (s *NetStub) NetAutoNatStatus(p0 context.Context) (NatInfo, error) {
+ return *new(NatInfo), xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) {
+ return s.Internal.NetBandwidthStats(p0)
+}
+
+func (s *NetStub) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) {
+ return *new(metrics.Stats), xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) {
+ return s.Internal.NetBandwidthStatsByPeer(p0)
+}
+
+func (s *NetStub) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) {
+ return *new(map[string]metrics.Stats), xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) {
+ return s.Internal.NetBandwidthStatsByProtocol(p0)
+}
+
+func (s *NetStub) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) {
+ return *new(map[protocol.ID]metrics.Stats), xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetBlockAdd(p0 context.Context, p1 NetBlockList) error {
+ return s.Internal.NetBlockAdd(p0, p1)
+}
+
+func (s *NetStub) NetBlockAdd(p0 context.Context, p1 NetBlockList) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetBlockList(p0 context.Context) (NetBlockList, error) {
+ return s.Internal.NetBlockList(p0)
+}
+
+func (s *NetStub) NetBlockList(p0 context.Context) (NetBlockList, error) {
+ return *new(NetBlockList), xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetBlockRemove(p0 context.Context, p1 NetBlockList) error {
+ return s.Internal.NetBlockRemove(p0, p1)
+}
+
+func (s *NetStub) NetBlockRemove(p0 context.Context, p1 NetBlockList) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetConnect(p0 context.Context, p1 peer.AddrInfo) error {
+ return s.Internal.NetConnect(p0, p1)
+}
+
+func (s *NetStub) NetConnect(p0 context.Context, p1 peer.AddrInfo) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) {
+ return s.Internal.NetConnectedness(p0, p1)
+}
+
+func (s *NetStub) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) {
+ return *new(network.Connectedness), xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetDisconnect(p0 context.Context, p1 peer.ID) error {
+ return s.Internal.NetDisconnect(p0, p1)
+}
+
+func (s *NetStub) NetDisconnect(p0 context.Context, p1 peer.ID) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) {
+ return s.Internal.NetFindPeer(p0, p1)
+}
+
+func (s *NetStub) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) {
+ return *new(peer.AddrInfo), xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) {
+ return s.Internal.NetPeerInfo(p0, p1)
+}
+
+func (s *NetStub) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) {
+ return s.Internal.NetPeers(p0)
+}
+
+func (s *NetStub) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) {
+ return *new([]peer.AddrInfo), xerrors.New("method not supported")
+}
+
+func (s *NetStruct) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) {
+ return s.Internal.NetPubsubScores(p0)
+}
+
+func (s *NetStub) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) {
+ return *new([]PubsubScore), xerrors.New("method not supported")
+}
+
+func (s *SignableStruct) Sign(p0 context.Context, p1 SignFunc) error {
+ return s.Internal.Sign(p0, p1)
+}
+
+func (s *SignableStub) Sign(p0 context.Context, p1 SignFunc) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ActorAddress(p0 context.Context) (address.Address, error) {
+ return s.Internal.ActorAddress(p0)
+}
+
+func (s *StorageMinerStub) ActorAddress(p0 context.Context) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ActorAddressConfig(p0 context.Context) (AddressConfig, error) {
+ return s.Internal.ActorAddressConfig(p0)
+}
+
+func (s *StorageMinerStub) ActorAddressConfig(p0 context.Context) (AddressConfig, error) {
+ return *new(AddressConfig), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ActorSectorSize(p0 context.Context, p1 address.Address) (abi.SectorSize, error) {
+ return s.Internal.ActorSectorSize(p0, p1)
+}
+
+func (s *StorageMinerStub) ActorSectorSize(p0 context.Context, p1 address.Address) (abi.SectorSize, error) {
+ return *new(abi.SectorSize), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
+ return s.Internal.CheckProvable(p0, p1, p2, p3)
+}
+
+func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
+ return *new(map[abi.SectorNumber]string), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) {
+ return s.Internal.ComputeProof(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) {
+ return *new([]builtin.PoStProof), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) CreateBackup(p0 context.Context, p1 string) error {
+ return s.Internal.CreateBackup(p0, p1)
+}
+
+func (s *StorageMinerStub) CreateBackup(p0 context.Context, p1 string) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) {
+ return s.Internal.DealsConsiderOfflineRetrievalDeals(p0)
+}
+
+func (s *StorageMinerStub) DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsConsiderOfflineStorageDeals(p0 context.Context) (bool, error) {
+ return s.Internal.DealsConsiderOfflineStorageDeals(p0)
+}
+
+func (s *StorageMinerStub) DealsConsiderOfflineStorageDeals(p0 context.Context) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsConsiderOnlineRetrievalDeals(p0 context.Context) (bool, error) {
+ return s.Internal.DealsConsiderOnlineRetrievalDeals(p0)
+}
+
+func (s *StorageMinerStub) DealsConsiderOnlineRetrievalDeals(p0 context.Context) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsConsiderOnlineStorageDeals(p0 context.Context) (bool, error) {
+ return s.Internal.DealsConsiderOnlineStorageDeals(p0)
+}
+
+func (s *StorageMinerStub) DealsConsiderOnlineStorageDeals(p0 context.Context) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsConsiderUnverifiedStorageDeals(p0 context.Context) (bool, error) {
+ return s.Internal.DealsConsiderUnverifiedStorageDeals(p0)
+}
+
+func (s *StorageMinerStub) DealsConsiderUnverifiedStorageDeals(p0 context.Context) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsConsiderVerifiedStorageDeals(p0 context.Context) (bool, error) {
+ return s.Internal.DealsConsiderVerifiedStorageDeals(p0)
+}
+
+func (s *StorageMinerStub) DealsConsiderVerifiedStorageDeals(p0 context.Context) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsImportData(p0 context.Context, p1 cid.Cid, p2 string) error {
+ return s.Internal.DealsImportData(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) DealsImportData(p0 context.Context, p1 cid.Cid, p2 string) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsList(p0 context.Context) ([]MarketDeal, error) {
+ return s.Internal.DealsList(p0)
+}
+
+func (s *StorageMinerStub) DealsList(p0 context.Context) ([]MarketDeal, error) {
+ return *new([]MarketDeal), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsPieceCidBlocklist(p0 context.Context) ([]cid.Cid, error) {
+ return s.Internal.DealsPieceCidBlocklist(p0)
+}
+
+func (s *StorageMinerStub) DealsPieceCidBlocklist(p0 context.Context) ([]cid.Cid, error) {
+ return *new([]cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsSetConsiderOfflineRetrievalDeals(p0 context.Context, p1 bool) error {
+ return s.Internal.DealsSetConsiderOfflineRetrievalDeals(p0, p1)
+}
+
+func (s *StorageMinerStub) DealsSetConsiderOfflineRetrievalDeals(p0 context.Context, p1 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsSetConsiderOfflineStorageDeals(p0 context.Context, p1 bool) error {
+ return s.Internal.DealsSetConsiderOfflineStorageDeals(p0, p1)
+}
+
+func (s *StorageMinerStub) DealsSetConsiderOfflineStorageDeals(p0 context.Context, p1 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsSetConsiderOnlineRetrievalDeals(p0 context.Context, p1 bool) error {
+ return s.Internal.DealsSetConsiderOnlineRetrievalDeals(p0, p1)
+}
+
+func (s *StorageMinerStub) DealsSetConsiderOnlineRetrievalDeals(p0 context.Context, p1 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsSetConsiderOnlineStorageDeals(p0 context.Context, p1 bool) error {
+ return s.Internal.DealsSetConsiderOnlineStorageDeals(p0, p1)
+}
+
+func (s *StorageMinerStub) DealsSetConsiderOnlineStorageDeals(p0 context.Context, p1 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsSetConsiderUnverifiedStorageDeals(p0 context.Context, p1 bool) error {
+ return s.Internal.DealsSetConsiderUnverifiedStorageDeals(p0, p1)
+}
+
+func (s *StorageMinerStub) DealsSetConsiderUnverifiedStorageDeals(p0 context.Context, p1 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsSetConsiderVerifiedStorageDeals(p0 context.Context, p1 bool) error {
+ return s.Internal.DealsSetConsiderVerifiedStorageDeals(p0, p1)
+}
+
+func (s *StorageMinerStub) DealsSetConsiderVerifiedStorageDeals(p0 context.Context, p1 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) DealsSetPieceCidBlocklist(p0 context.Context, p1 []cid.Cid) error {
+ return s.Internal.DealsSetPieceCidBlocklist(p0, p1)
+}
+
+func (s *StorageMinerStub) DealsSetPieceCidBlocklist(p0 context.Context, p1 []cid.Cid) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+ return s.Internal.MarketCancelDataTransfer(p0, p1, p2, p3)
+}
+
+func (s *StorageMinerStub) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) {
+ return s.Internal.MarketDataTransferUpdates(p0)
+}
+
+func (s *StorageMinerStub) MarketDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketGetAsk(p0 context.Context) (*storagemarket.SignedStorageAsk, error) {
+ return s.Internal.MarketGetAsk(p0)
+}
+
+func (s *StorageMinerStub) MarketGetAsk(p0 context.Context) (*storagemarket.SignedStorageAsk, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketGetDealUpdates(p0 context.Context) (<-chan storagemarket.MinerDeal, error) {
+ return s.Internal.MarketGetDealUpdates(p0)
+}
+
+func (s *StorageMinerStub) MarketGetDealUpdates(p0 context.Context) (<-chan storagemarket.MinerDeal, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketGetRetrievalAsk(p0 context.Context) (*retrievalmarket.Ask, error) {
+ return s.Internal.MarketGetRetrievalAsk(p0)
+}
+
+func (s *StorageMinerStub) MarketGetRetrievalAsk(p0 context.Context) (*retrievalmarket.Ask, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketImportDealData(p0 context.Context, p1 cid.Cid, p2 string) error {
+ return s.Internal.MarketImportDealData(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) MarketImportDealData(p0 context.Context, p1 cid.Cid, p2 string) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) {
+ return s.Internal.MarketListDataTransfers(p0)
+}
+
+func (s *StorageMinerStub) MarketListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) {
+ return *new([]DataTransferChannel), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketListDeals(p0 context.Context) ([]MarketDeal, error) {
+ return s.Internal.MarketListDeals(p0)
+}
+
+func (s *StorageMinerStub) MarketListDeals(p0 context.Context) ([]MarketDeal, error) {
+ return *new([]MarketDeal), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketListIncompleteDeals(p0 context.Context) ([]storagemarket.MinerDeal, error) {
+ return s.Internal.MarketListIncompleteDeals(p0)
+}
+
+func (s *StorageMinerStub) MarketListIncompleteDeals(p0 context.Context) ([]storagemarket.MinerDeal, error) {
+ return *new([]storagemarket.MinerDeal), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketListRetrievalDeals(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) {
+ return s.Internal.MarketListRetrievalDeals(p0)
+}
+
+func (s *StorageMinerStub) MarketListRetrievalDeals(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) {
+ return *new([]retrievalmarket.ProviderDealState), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketPendingDeals(p0 context.Context) (PendingDealInfo, error) {
+ return s.Internal.MarketPendingDeals(p0)
+}
+
+func (s *StorageMinerStub) MarketPendingDeals(p0 context.Context) (PendingDealInfo, error) {
+ return *new(PendingDealInfo), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketPublishPendingDeals(p0 context.Context) error {
+ return s.Internal.MarketPublishPendingDeals(p0)
+}
+
+func (s *StorageMinerStub) MarketPublishPendingDeals(p0 context.Context) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+ return s.Internal.MarketRestartDataTransfer(p0, p1, p2, p3)
+}
+
+func (s *StorageMinerStub) MarketRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error {
+ return s.Internal.MarketSetAsk(p0, p1, p2, p3, p4, p5)
+}
+
+func (s *StorageMinerStub) MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MarketSetRetrievalAsk(p0 context.Context, p1 *retrievalmarket.Ask) error {
+ return s.Internal.MarketSetRetrievalAsk(p0, p1)
+}
+
+func (s *StorageMinerStub) MarketSetRetrievalAsk(p0 context.Context, p1 *retrievalmarket.Ask) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) MiningBase(p0 context.Context) (*types.TipSet, error) {
+ return s.Internal.MiningBase(p0)
+}
+
+func (s *StorageMinerStub) MiningBase(p0 context.Context) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) PiecesGetCIDInfo(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) {
+ return s.Internal.PiecesGetCIDInfo(p0, p1)
+}
+
+func (s *StorageMinerStub) PiecesGetCIDInfo(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) PiecesGetPieceInfo(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) {
+ return s.Internal.PiecesGetPieceInfo(p0, p1)
+}
+
+func (s *StorageMinerStub) PiecesGetPieceInfo(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) PiecesListCidInfos(p0 context.Context) ([]cid.Cid, error) {
+ return s.Internal.PiecesListCidInfos(p0)
+}
+
+func (s *StorageMinerStub) PiecesListCidInfos(p0 context.Context) ([]cid.Cid, error) {
+ return *new([]cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) PiecesListPieces(p0 context.Context) ([]cid.Cid, error) {
+ return s.Internal.PiecesListPieces(p0)
+}
+
+func (s *StorageMinerStub) PiecesListPieces(p0 context.Context) ([]cid.Cid, error) {
+ return *new([]cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) PledgeSector(p0 context.Context) (abi.SectorID, error) {
+ return s.Internal.PledgeSector(p0)
+}
+
+func (s *StorageMinerStub) PledgeSector(p0 context.Context) (abi.SectorID, error) {
+ return *new(abi.SectorID), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ReturnAddPiece(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error {
+ return s.Internal.ReturnAddPiece(p0, p1, p2, p3)
+}
+
+func (s *StorageMinerStub) ReturnAddPiece(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ReturnFetch(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+ return s.Internal.ReturnFetch(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) ReturnFetch(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ReturnFinalizeSector(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+ return s.Internal.ReturnFinalizeSector(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) ReturnFinalizeSector(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ReturnMoveStorage(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+ return s.Internal.ReturnMoveStorage(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) ReturnMoveStorage(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ReturnReadPiece(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error {
+ return s.Internal.ReturnReadPiece(p0, p1, p2, p3)
+}
+
+func (s *StorageMinerStub) ReturnReadPiece(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ReturnReleaseUnsealed(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+ return s.Internal.ReturnReleaseUnsealed(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) ReturnReleaseUnsealed(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ReturnSealCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error {
+ return s.Internal.ReturnSealCommit1(p0, p1, p2, p3)
+}
+
+func (s *StorageMinerStub) ReturnSealCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ReturnSealCommit2(p0 context.Context, p1 storiface.CallID, p2 storage.Proof, p3 *storiface.CallError) error {
+ return s.Internal.ReturnSealCommit2(p0, p1, p2, p3)
+}
+
+func (s *StorageMinerStub) ReturnSealCommit2(p0 context.Context, p1 storiface.CallID, p2 storage.Proof, p3 *storiface.CallError) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ReturnSealPreCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.PreCommit1Out, p3 *storiface.CallError) error {
+ return s.Internal.ReturnSealPreCommit1(p0, p1, p2, p3)
+}
+
+func (s *StorageMinerStub) ReturnSealPreCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.PreCommit1Out, p3 *storiface.CallError) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ReturnSealPreCommit2(p0 context.Context, p1 storiface.CallID, p2 storage.SectorCids, p3 *storiface.CallError) error {
+ return s.Internal.ReturnSealPreCommit2(p0, p1, p2, p3)
+}
+
+func (s *StorageMinerStub) ReturnSealPreCommit2(p0 context.Context, p1 storiface.CallID, p2 storage.SectorCids, p3 *storiface.CallError) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) ReturnUnsealPiece(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+ return s.Internal.ReturnUnsealPiece(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) ReturnUnsealPiece(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SealingAbort(p0 context.Context, p1 storiface.CallID) error {
+ return s.Internal.SealingAbort(p0, p1)
+}
+
+func (s *StorageMinerStub) SealingAbort(p0 context.Context, p1 storiface.CallID) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SealingSchedDiag(p0 context.Context, p1 bool) (interface{}, error) {
+ return s.Internal.SealingSchedDiag(p0, p1)
+}
+
+func (s *StorageMinerStub) SealingSchedDiag(p0 context.Context, p1 bool) (interface{}, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) {
+ return s.Internal.SectorAddPieceToAny(p0, p1, p2, p3)
+}
+
+func (s *StorageMinerStub) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) {
+ return *new(SectorOffset), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorCommitFlush(p0 context.Context) ([]sealiface.CommitBatchRes, error) {
+ return s.Internal.SectorCommitFlush(p0)
+}
+
+func (s *StorageMinerStub) SectorCommitFlush(p0 context.Context) ([]sealiface.CommitBatchRes, error) {
+ return *new([]sealiface.CommitBatchRes), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorCommitPending(p0 context.Context) ([]abi.SectorID, error) {
+ return s.Internal.SectorCommitPending(p0)
+}
+
+func (s *StorageMinerStub) SectorCommitPending(p0 context.Context) ([]abi.SectorID, error) {
+ return *new([]abi.SectorID), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorGetExpectedSealDuration(p0 context.Context) (time.Duration, error) {
+ return s.Internal.SectorGetExpectedSealDuration(p0)
+}
+
+func (s *StorageMinerStub) SectorGetExpectedSealDuration(p0 context.Context) (time.Duration, error) {
+ return *new(time.Duration), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorGetSealDelay(p0 context.Context) (time.Duration, error) {
+ return s.Internal.SectorGetSealDelay(p0)
+}
+
+func (s *StorageMinerStub) SectorGetSealDelay(p0 context.Context) (time.Duration, error) {
+ return *new(time.Duration), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
+ return s.Internal.SectorMarkForUpgrade(p0, p1)
+}
+
+func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorPreCommitFlush(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) {
+ return s.Internal.SectorPreCommitFlush(p0)
+}
+
+func (s *StorageMinerStub) SectorPreCommitFlush(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) {
+ return *new([]sealiface.PreCommitBatchRes), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorPreCommitPending(p0 context.Context) ([]abi.SectorID, error) {
+ return s.Internal.SectorPreCommitPending(p0)
+}
+
+func (s *StorageMinerStub) SectorPreCommitPending(p0 context.Context) ([]abi.SectorID, error) {
+ return *new([]abi.SectorID), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorRemove(p0 context.Context, p1 abi.SectorNumber) error {
+ return s.Internal.SectorRemove(p0, p1)
+}
+
+func (s *StorageMinerStub) SectorRemove(p0 context.Context, p1 abi.SectorNumber) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorSetExpectedSealDuration(p0 context.Context, p1 time.Duration) error {
+ return s.Internal.SectorSetExpectedSealDuration(p0, p1)
+}
+
+func (s *StorageMinerStub) SectorSetExpectedSealDuration(p0 context.Context, p1 time.Duration) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorSetSealDelay(p0 context.Context, p1 time.Duration) error {
+ return s.Internal.SectorSetSealDelay(p0, p1)
+}
+
+func (s *StorageMinerStub) SectorSetSealDelay(p0 context.Context, p1 time.Duration) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorStartSealing(p0 context.Context, p1 abi.SectorNumber) error {
+ return s.Internal.SectorStartSealing(p0, p1)
+}
+
+func (s *StorageMinerStub) SectorStartSealing(p0 context.Context, p1 abi.SectorNumber) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorTerminate(p0 context.Context, p1 abi.SectorNumber) error {
+ return s.Internal.SectorTerminate(p0, p1)
+}
+
+func (s *StorageMinerStub) SectorTerminate(p0 context.Context, p1 abi.SectorNumber) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorTerminateFlush(p0 context.Context) (*cid.Cid, error) {
+ return s.Internal.SectorTerminateFlush(p0)
+}
+
+func (s *StorageMinerStub) SectorTerminateFlush(p0 context.Context) (*cid.Cid, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorTerminatePending(p0 context.Context) ([]abi.SectorID, error) {
+ return s.Internal.SectorTerminatePending(p0)
+}
+
+func (s *StorageMinerStub) SectorTerminatePending(p0 context.Context) ([]abi.SectorID, error) {
+ return *new([]abi.SectorID), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorsList(p0 context.Context) ([]abi.SectorNumber, error) {
+ return s.Internal.SectorsList(p0)
+}
+
+func (s *StorageMinerStub) SectorsList(p0 context.Context) ([]abi.SectorNumber, error) {
+ return *new([]abi.SectorNumber), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorsListInStates(p0 context.Context, p1 []SectorState) ([]abi.SectorNumber, error) {
+ return s.Internal.SectorsListInStates(p0, p1)
+}
+
+func (s *StorageMinerStub) SectorsListInStates(p0 context.Context, p1 []SectorState) ([]abi.SectorNumber, error) {
+ return *new([]abi.SectorNumber), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorsRefs(p0 context.Context) (map[string][]SealedRef, error) {
+ return s.Internal.SectorsRefs(p0)
+}
+
+func (s *StorageMinerStub) SectorsRefs(p0 context.Context) (map[string][]SealedRef, error) {
+ return *new(map[string][]SealedRef), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorsStatus(p0 context.Context, p1 abi.SectorNumber, p2 bool) (SectorInfo, error) {
+ return s.Internal.SectorsStatus(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) SectorsStatus(p0 context.Context, p1 abi.SectorNumber, p2 bool) (SectorInfo, error) {
+ return *new(SectorInfo), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorsSummary(p0 context.Context) (map[SectorState]int, error) {
+ return s.Internal.SectorsSummary(p0)
+}
+
+func (s *StorageMinerStub) SectorsSummary(p0 context.Context) (map[SectorState]int, error) {
+ return *new(map[SectorState]int), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorsUnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error {
+ return s.Internal.SectorsUnsealPiece(p0, p1, p2, p3, p4, p5)
+}
+
+func (s *StorageMinerStub) SectorsUnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) SectorsUpdate(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error {
+ return s.Internal.SectorsUpdate(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) SectorsUpdate(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) StorageAddLocal(p0 context.Context, p1 string) error {
+ return s.Internal.StorageAddLocal(p0, p1)
+}
+
+func (s *StorageMinerStub) StorageAddLocal(p0 context.Context, p1 string) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) StorageAttach(p0 context.Context, p1 stores.StorageInfo, p2 fsutil.FsStat) error {
+ return s.Internal.StorageAttach(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) StorageAttach(p0 context.Context, p1 stores.StorageInfo, p2 fsutil.FsStat) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]stores.StorageInfo, error) {
+ return s.Internal.StorageBestAlloc(p0, p1, p2, p3)
+}
+
+func (s *StorageMinerStub) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]stores.StorageInfo, error) {
+ return *new([]stores.StorageInfo), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) StorageDeclareSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error {
+ return s.Internal.StorageDeclareSector(p0, p1, p2, p3, p4)
+}
+
+func (s *StorageMinerStub) StorageDeclareSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) StorageDropSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error {
+ return s.Internal.StorageDropSector(p0, p1, p2, p3)
+}
+
+func (s *StorageMinerStub) StorageDropSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) {
+ return s.Internal.StorageFindSector(p0, p1, p2, p3, p4)
+}
+
+func (s *StorageMinerStub) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) {
+ return *new([]stores.SectorStorageInfo), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) StorageInfo(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) {
+ return s.Internal.StorageInfo(p0, p1)
+}
+
+func (s *StorageMinerStub) StorageInfo(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) {
+ return *new(stores.StorageInfo), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) StorageList(p0 context.Context) (map[stores.ID][]stores.Decl, error) {
+ return s.Internal.StorageList(p0)
+}
+
+func (s *StorageMinerStub) StorageList(p0 context.Context) (map[stores.ID][]stores.Decl, error) {
+ return *new(map[stores.ID][]stores.Decl), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) StorageLocal(p0 context.Context) (map[stores.ID]string, error) {
+ return s.Internal.StorageLocal(p0)
+}
+
+func (s *StorageMinerStub) StorageLocal(p0 context.Context) (map[stores.ID]string, error) {
+ return *new(map[stores.ID]string), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) StorageLock(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) error {
+ return s.Internal.StorageLock(p0, p1, p2, p3)
+}
+
+func (s *StorageMinerStub) StorageLock(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) StorageReportHealth(p0 context.Context, p1 stores.ID, p2 stores.HealthReport) error {
+ return s.Internal.StorageReportHealth(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) StorageReportHealth(p0 context.Context, p1 stores.ID, p2 stores.HealthReport) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) StorageStat(p0 context.Context, p1 stores.ID) (fsutil.FsStat, error) {
+ return s.Internal.StorageStat(p0, p1)
+}
+
+func (s *StorageMinerStub) StorageStat(p0 context.Context, p1 stores.ID) (fsutil.FsStat, error) {
+ return *new(fsutil.FsStat), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) StorageTryLock(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) (bool, error) {
+ return s.Internal.StorageTryLock(p0, p1, p2, p3)
+}
+
+func (s *StorageMinerStub) StorageTryLock(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) WorkerConnect(p0 context.Context, p1 string) error {
+ return s.Internal.WorkerConnect(p0, p1)
+}
+
+func (s *StorageMinerStub) WorkerConnect(p0 context.Context, p1 string) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) WorkerJobs(p0 context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) {
+ return s.Internal.WorkerJobs(p0)
+}
+
+func (s *StorageMinerStub) WorkerJobs(p0 context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) {
+ return *new(map[uuid.UUID][]storiface.WorkerJob), xerrors.New("method not supported")
+}
+
+func (s *StorageMinerStruct) WorkerStats(p0 context.Context) (map[uuid.UUID]storiface.WorkerStats, error) {
+ return s.Internal.WorkerStats(p0)
+}
+
+func (s *StorageMinerStub) WorkerStats(p0 context.Context) (map[uuid.UUID]storiface.WorkerStats, error) {
+ return *new(map[uuid.UUID]storiface.WorkerStats), xerrors.New("method not supported")
+}
+
+func (s *WalletStruct) WalletDelete(p0 context.Context, p1 address.Address) error {
+ return s.Internal.WalletDelete(p0, p1)
+}
+
+func (s *WalletStub) WalletDelete(p0 context.Context, p1 address.Address) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *WalletStruct) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) {
+ return s.Internal.WalletExport(p0, p1)
+}
+
+func (s *WalletStub) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *WalletStruct) WalletHas(p0 context.Context, p1 address.Address) (bool, error) {
+ return s.Internal.WalletHas(p0, p1)
+}
+
+func (s *WalletStub) WalletHas(p0 context.Context, p1 address.Address) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *WalletStruct) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) {
+ return s.Internal.WalletImport(p0, p1)
+}
+
+func (s *WalletStub) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *WalletStruct) WalletList(p0 context.Context) ([]address.Address, error) {
+ return s.Internal.WalletList(p0)
+}
+
+func (s *WalletStub) WalletList(p0 context.Context) ([]address.Address, error) {
+ return *new([]address.Address), xerrors.New("method not supported")
+}
+
+func (s *WalletStruct) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) {
+ return s.Internal.WalletNew(p0, p1)
+}
+
+func (s *WalletStub) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *WalletStruct) WalletSign(p0 context.Context, p1 address.Address, p2 []byte, p3 MsgMeta) (*crypto.Signature, error) {
+ return s.Internal.WalletSign(p0, p1, p2, p3)
+}
+
+func (s *WalletStub) WalletSign(p0 context.Context, p1 address.Address, p2 []byte, p3 MsgMeta) (*crypto.Signature, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) AddPiece(p0 context.Context, p1 storage.SectorRef, p2 []abi.UnpaddedPieceSize, p3 abi.UnpaddedPieceSize, p4 storage.Data) (storiface.CallID, error) {
+ return s.Internal.AddPiece(p0, p1, p2, p3, p4)
+}
+
+func (s *WorkerStub) AddPiece(p0 context.Context, p1 storage.SectorRef, p2 []abi.UnpaddedPieceSize, p3 abi.UnpaddedPieceSize, p4 storage.Data) (storiface.CallID, error) {
+ return *new(storiface.CallID), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) Enabled(p0 context.Context) (bool, error) {
+ return s.Internal.Enabled(p0)
+}
+
+func (s *WorkerStub) Enabled(p0 context.Context) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) Fetch(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) {
+ return s.Internal.Fetch(p0, p1, p2, p3, p4)
+}
+
+func (s *WorkerStub) Fetch(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) {
+ return *new(storiface.CallID), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
+ return s.Internal.FinalizeSector(p0, p1, p2)
+}
+
+func (s *WorkerStub) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
+ return *new(storiface.CallID), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) Info(p0 context.Context) (storiface.WorkerInfo, error) {
+ return s.Internal.Info(p0)
+}
+
+func (s *WorkerStub) Info(p0 context.Context) (storiface.WorkerInfo, error) {
+ return *new(storiface.WorkerInfo), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) MoveStorage(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) {
+ return s.Internal.MoveStorage(p0, p1, p2)
+}
+
+func (s *WorkerStub) MoveStorage(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) {
+ return *new(storiface.CallID), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) Paths(p0 context.Context) ([]stores.StoragePath, error) {
+ return s.Internal.Paths(p0)
+}
+
+func (s *WorkerStub) Paths(p0 context.Context) ([]stores.StoragePath, error) {
+ return *new([]stores.StoragePath), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) ProcessSession(p0 context.Context) (uuid.UUID, error) {
+ return s.Internal.ProcessSession(p0)
+}
+
+func (s *WorkerStub) ProcessSession(p0 context.Context) (uuid.UUID, error) {
+ return *new(uuid.UUID), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) ReleaseUnsealed(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
+ return s.Internal.ReleaseUnsealed(p0, p1, p2)
+}
+
+func (s *WorkerStub) ReleaseUnsealed(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
+ return *new(storiface.CallID), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) Remove(p0 context.Context, p1 abi.SectorID) error {
+ return s.Internal.Remove(p0, p1)
+}
+
+func (s *WorkerStub) Remove(p0 context.Context, p1 abi.SectorID) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) SealCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) {
+ return s.Internal.SealCommit1(p0, p1, p2, p3, p4, p5)
+}
+
+func (s *WorkerStub) SealCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) {
+ return *new(storiface.CallID), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) SealCommit2(p0 context.Context, p1 storage.SectorRef, p2 storage.Commit1Out) (storiface.CallID, error) {
+ return s.Internal.SealCommit2(p0, p1, p2)
+}
+
+func (s *WorkerStub) SealCommit2(p0 context.Context, p1 storage.SectorRef, p2 storage.Commit1Out) (storiface.CallID, error) {
+ return *new(storiface.CallID), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) SealPreCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 []abi.PieceInfo) (storiface.CallID, error) {
+ return s.Internal.SealPreCommit1(p0, p1, p2, p3)
+}
+
+func (s *WorkerStub) SealPreCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 []abi.PieceInfo) (storiface.CallID, error) {
+ return *new(storiface.CallID), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) SealPreCommit2(p0 context.Context, p1 storage.SectorRef, p2 storage.PreCommit1Out) (storiface.CallID, error) {
+ return s.Internal.SealPreCommit2(p0, p1, p2)
+}
+
+func (s *WorkerStub) SealPreCommit2(p0 context.Context, p1 storage.SectorRef, p2 storage.PreCommit1Out) (storiface.CallID, error) {
+ return *new(storiface.CallID), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) Session(p0 context.Context) (uuid.UUID, error) {
+ return s.Internal.Session(p0)
+}
+
+func (s *WorkerStub) Session(p0 context.Context) (uuid.UUID, error) {
+ return *new(uuid.UUID), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) SetEnabled(p0 context.Context, p1 bool) error {
+ return s.Internal.SetEnabled(p0, p1)
+}
+
+func (s *WorkerStub) SetEnabled(p0 context.Context, p1 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) StorageAddLocal(p0 context.Context, p1 string) error {
+ return s.Internal.StorageAddLocal(p0, p1)
+}
+
+func (s *WorkerStub) StorageAddLocal(p0 context.Context, p1 string) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) TaskDisable(p0 context.Context, p1 sealtasks.TaskType) error {
+ return s.Internal.TaskDisable(p0, p1)
+}
+
+func (s *WorkerStub) TaskDisable(p0 context.Context, p1 sealtasks.TaskType) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) TaskEnable(p0 context.Context, p1 sealtasks.TaskType) error {
+ return s.Internal.TaskEnable(p0, p1)
+}
+
+func (s *WorkerStub) TaskEnable(p0 context.Context, p1 sealtasks.TaskType) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) TaskTypes(p0 context.Context) (map[sealtasks.TaskType]struct{}, error) {
+ return s.Internal.TaskTypes(p0)
+}
+
+func (s *WorkerStub) TaskTypes(p0 context.Context) (map[sealtasks.TaskType]struct{}, error) {
+ return *new(map[sealtasks.TaskType]struct{}), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) UnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 cid.Cid) (storiface.CallID, error) {
+ return s.Internal.UnsealPiece(p0, p1, p2, p3, p4, p5)
+}
+
+func (s *WorkerStub) UnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 cid.Cid) (storiface.CallID, error) {
+ return *new(storiface.CallID), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) Version(p0 context.Context) (Version, error) {
+ return s.Internal.Version(p0)
+}
+
+func (s *WorkerStub) Version(p0 context.Context) (Version, error) {
+ return *new(Version), xerrors.New("method not supported")
+}
+
+func (s *WorkerStruct) WaitQuiet(p0 context.Context) error {
+ return s.Internal.WaitQuiet(p0)
+}
+
+func (s *WorkerStub) WaitQuiet(p0 context.Context) error {
+ return xerrors.New("method not supported")
+}
+
+var _ ChainIO = new(ChainIOStruct)
+var _ Common = new(CommonStruct)
+var _ CommonNet = new(CommonNetStruct)
+var _ FullNode = new(FullNodeStruct)
+var _ Gateway = new(GatewayStruct)
+var _ Net = new(NetStruct)
+var _ Signable = new(SignableStruct)
+var _ StorageMiner = new(StorageMinerStruct)
+var _ Wallet = new(WalletStruct)
+var _ Worker = new(WorkerStruct)
diff --git a/api/proxy_util.go b/api/proxy_util.go
new file mode 100644
index 00000000000..ba94a9e5dce
--- /dev/null
+++ b/api/proxy_util.go
@@ -0,0 +1,30 @@
+package api
+
+import "reflect"
+
+var _internalField = "Internal"
+
+// GetInternalStructs extracts all pointers to 'Internal' sub-structs from the provided pointer to a proxy struct
+func GetInternalStructs(in interface{}) []interface{} {
+ return getInternalStructs(reflect.ValueOf(in).Elem())
+}
+
+func getInternalStructs(rv reflect.Value) []interface{} {
+ var out []interface{}
+
+ internal := rv.FieldByName(_internalField)
+ ii := internal.Addr().Interface()
+ out = append(out, ii)
+
+ for i := 0; i < rv.NumField(); i++ {
+ if rv.Type().Field(i).Name == _internalField {
+ continue
+ }
+
+ sub := getInternalStructs(rv.Field(i))
+
+ out = append(out, sub...)
+ }
+
+ return out
+}
diff --git a/api/proxy_util_test.go b/api/proxy_util_test.go
new file mode 100644
index 00000000000..3cbc466b6a4
--- /dev/null
+++ b/api/proxy_util_test.go
@@ -0,0 +1,62 @@
+package api
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+type StrA struct {
+ StrB
+
+ Internal struct {
+ A int
+ }
+}
+
+type StrB struct {
+ Internal struct {
+ B int
+ }
+}
+
+type StrC struct {
+ Internal struct {
+ Internal struct {
+ C int
+ }
+ }
+}
+
+func TestGetInternalStructs(t *testing.T) {
+ var proxy StrA
+
+ sts := GetInternalStructs(&proxy)
+ require.Len(t, sts, 2)
+
+ sa := sts[0].(*struct{ A int })
+ sa.A = 3
+ sb := sts[1].(*struct{ B int })
+ sb.B = 4
+
+ require.Equal(t, 3, proxy.Internal.A)
+ require.Equal(t, 4, proxy.StrB.Internal.B)
+}
+
+func TestNestedInternalStructs(t *testing.T) {
+ var proxy StrC
+
+ // check that only the top-level internal struct gets picked up
+
+ sts := GetInternalStructs(&proxy)
+ require.Len(t, sts, 1)
+
+ sa := sts[0].(*struct {
+ Internal struct {
+ C int
+ }
+ })
+ sa.Internal.C = 5
+
+ require.Equal(t, 5, proxy.Internal.Internal.C)
+}
diff --git a/api/test/blockminer.go b/api/test/blockminer.go
deleted file mode 100644
index 6b28a579416..00000000000
--- a/api/test/blockminer.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package test
-
-import (
- "context"
- "fmt"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/lotus/miner"
-)
-
-type BlockMiner struct {
- ctx context.Context
- t *testing.T
- miner TestStorageNode
- blocktime time.Duration
- mine int64
- nulls int64
- done chan struct{}
-}
-
-func NewBlockMiner(ctx context.Context, t *testing.T, miner TestStorageNode, blocktime time.Duration) *BlockMiner {
- return &BlockMiner{
- ctx: ctx,
- t: t,
- miner: miner,
- blocktime: blocktime,
- mine: int64(1),
- done: make(chan struct{}),
- }
-}
-
-func (bm *BlockMiner) MineBlocks() {
- time.Sleep(time.Second)
- go func() {
- defer close(bm.done)
- for atomic.LoadInt64(&bm.mine) == 1 {
- time.Sleep(bm.blocktime)
- nulls := atomic.SwapInt64(&bm.nulls, 0)
- if err := bm.miner.MineOne(bm.ctx, miner.MineReq{
- InjectNulls: abi.ChainEpoch(nulls),
- Done: func(bool, abi.ChainEpoch, error) {},
- }); err != nil {
- bm.t.Error(err)
- }
- }
- }()
-}
-
-func (bm *BlockMiner) Stop() {
- atomic.AddInt64(&bm.mine, -1)
- fmt.Println("shutting down mining")
- <-bm.done
-}
diff --git a/api/test/ccupgrade.go b/api/test/ccupgrade.go
deleted file mode 100644
index 75f72d86157..00000000000
--- a/api/test/ccupgrade.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package test
-
-import (
- "context"
- "fmt"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/filecoin-project/go-state-types/abi"
-
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/node/impl"
-)
-
-func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
- for _, height := range []abi.ChainEpoch{
- 1, // before
- 162, // while sealing
- 520, // after upgrade deal
- 5000, // after
- } {
- height := height // make linters happy by copying
- t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
- testCCUpgrade(t, b, blocktime, height)
- })
- }
-}
-
-func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
- ctx := context.Background()
- n, sn := b(t, []FullNodeOpts{FullNodeWithUpgradeAt(upgradeHeight)}, OneMiner)
- client := n[0].FullNode.(*impl.FullNodeAPI)
- miner := sn[0]
-
- addrinfo, err := client.NetAddrsListen(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := miner.NetConnect(ctx, addrinfo); err != nil {
- t.Fatal(err)
- }
- time.Sleep(time.Second)
-
- mine := int64(1)
- done := make(chan struct{})
- go func() {
- defer close(done)
- for atomic.LoadInt64(&mine) == 1 {
- time.Sleep(blocktime)
- if err := sn[0].MineOne(ctx, MineNext); err != nil {
- t.Error(err)
- }
- }
- }()
-
- maddr, err := miner.ActorAddress(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- CC := abi.SectorNumber(GenesisPreseals + 1)
- Upgraded := CC + 1
-
- pledgeSectors(t, ctx, miner, 1, 0, nil)
-
- sl, err := miner.SectorsList(ctx)
- if err != nil {
- t.Fatal(err)
- }
- if len(sl) != 1 {
- t.Fatal("expected 1 sector")
- }
-
- if sl[0] != CC {
- t.Fatal("bad")
- }
-
- {
- si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK)
- require.NoError(t, err)
- require.Less(t, 50000, int(si.Expiration))
- }
-
- if err := miner.SectorMarkForUpgrade(ctx, sl[0]); err != nil {
- t.Fatal(err)
- }
-
- MakeDeal(t, ctx, 6, client, miner, false, false)
-
- // Validate upgrade
-
- {
- exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK)
- require.NoError(t, err)
- require.NotNil(t, exp)
- require.Greater(t, 50000, int(exp.OnTime))
- }
- {
- exp, err := client.StateSectorExpiration(ctx, maddr, Upgraded, types.EmptyTSK)
- require.NoError(t, err)
- require.Less(t, 50000, int(exp.OnTime))
- }
-
- dlInfo, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
- require.NoError(t, err)
-
- // Sector should expire.
- for {
- // Wait for the sector to expire.
- status, err := miner.SectorsStatus(ctx, CC, true)
- require.NoError(t, err)
- if status.OnTime == 0 && status.Early == 0 {
- break
- }
- t.Log("waiting for sector to expire")
- // wait one deadline per loop.
- time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blocktime)
- }
-
- fmt.Println("shutting down mining")
- atomic.AddInt64(&mine, -1)
- <-done
-}
diff --git a/api/test/deals.go b/api/test/deals.go
deleted file mode 100644
index b81099d9015..00000000000
--- a/api/test/deals.go
+++ /dev/null
@@ -1,458 +0,0 @@
-package test
-
-import (
- "bytes"
- "context"
- "fmt"
- "io/ioutil"
- "math/rand"
- "os"
- "path/filepath"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/ipfs/go-cid"
- files "github.com/ipfs/go-ipfs-files"
- "github.com/ipld/go-car"
-
- "github.com/filecoin-project/go-fil-markets/storagemarket"
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/build"
- sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
- dag "github.com/ipfs/go-merkledag"
- dstest "github.com/ipfs/go-merkledag/test"
- unixfile "github.com/ipfs/go-unixfs/file"
-
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/node/impl"
- ipld "github.com/ipfs/go-ipld-format"
-)
-
-func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool) {
-
- ctx := context.Background()
- n, sn := b(t, OneFull, OneMiner)
- client := n[0].FullNode.(*impl.FullNodeAPI)
- miner := sn[0]
-
- addrinfo, err := client.NetAddrsListen(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := miner.NetConnect(ctx, addrinfo); err != nil {
- t.Fatal(err)
- }
- time.Sleep(time.Second)
-
- mine := int64(1)
- done := make(chan struct{})
- go func() {
- defer close(done)
- for atomic.LoadInt64(&mine) == 1 {
- time.Sleep(blocktime)
- if err := sn[0].MineOne(ctx, MineNext); err != nil {
- t.Error(err)
- }
- }
- }()
-
- MakeDeal(t, ctx, 6, client, miner, carExport, fastRet)
-
- atomic.AddInt64(&mine, -1)
- fmt.Println("shutting down mining")
- <-done
-}
-
-func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
-
- ctx := context.Background()
- n, sn := b(t, OneFull, OneMiner)
- client := n[0].FullNode.(*impl.FullNodeAPI)
- miner := sn[0]
-
- addrinfo, err := client.NetAddrsListen(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := miner.NetConnect(ctx, addrinfo); err != nil {
- t.Fatal(err)
- }
- time.Sleep(time.Second)
-
- mine := int64(1)
- done := make(chan struct{})
-
- go func() {
- defer close(done)
- for atomic.LoadInt64(&mine) == 1 {
- time.Sleep(blocktime)
- if err := sn[0].MineOne(ctx, MineNext); err != nil {
- t.Error(err)
- }
- }
- }()
-
- MakeDeal(t, ctx, 6, client, miner, false, false)
- MakeDeal(t, ctx, 7, client, miner, false, false)
-
- atomic.AddInt64(&mine, -1)
- fmt.Println("shutting down mining")
- <-done
-}
-
-func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool) {
- res, data, err := CreateClientFile(ctx, client, rseed)
- if err != nil {
- t.Fatal(err)
- }
-
- fcid := res.Root
- fmt.Println("FILE CID: ", fcid)
-
- deal := startDeal(t, ctx, miner, client, fcid, fastRet)
-
- // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
- time.Sleep(time.Second)
- waitDealSealed(t, ctx, miner, client, deal, false)
-
- // Retrieval
- info, err := client.ClientGetDealInfo(ctx, *deal)
- require.NoError(t, err)
-
- testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data)
-}
-
-func CreateClientFile(ctx context.Context, client api.FullNode, rseed int) (*api.ImportRes, []byte, error) {
- data := make([]byte, 1600)
- rand.New(rand.NewSource(int64(rseed))).Read(data)
-
- dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-")
- if err != nil {
- return nil, nil, err
- }
-
- path := filepath.Join(dir, "sourcefile.dat")
- err = ioutil.WriteFile(path, data, 0644)
- if err != nil {
- return nil, nil, err
- }
-
- res, err := client.ClientImport(ctx, api.FileRef{Path: path})
- if err != nil {
- return nil, nil, err
- }
- return res, data, nil
-}
-
-func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
-
- ctx := context.Background()
- n, sn := b(t, OneFull, OneMiner)
- client := n[0].FullNode.(*impl.FullNodeAPI)
- miner := sn[0]
-
- addrinfo, err := client.NetAddrsListen(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := miner.NetConnect(ctx, addrinfo); err != nil {
- t.Fatal(err)
- }
- time.Sleep(time.Second)
-
- mine := int64(1)
- done := make(chan struct{})
- go func() {
- defer close(done)
- for atomic.LoadInt64(&mine) == 1 {
- time.Sleep(blocktime)
- if err := sn[0].MineOne(ctx, MineNext); err != nil {
- t.Error(err)
- }
- }
- }()
-
- data := make([]byte, 1600)
- rand.New(rand.NewSource(int64(8))).Read(data)
-
- r := bytes.NewReader(data)
- fcid, err := client.ClientImportLocal(ctx, r)
- if err != nil {
- t.Fatal(err)
- }
-
- fmt.Println("FILE CID: ", fcid)
-
- deal := startDeal(t, ctx, miner, client, fcid, true)
-
- waitDealPublished(t, ctx, miner, deal)
- fmt.Println("deal published, retrieving")
- // Retrieval
- info, err := client.ClientGetDealInfo(ctx, *deal)
- require.NoError(t, err)
-
- testRetrieval(t, ctx, client, fcid, &info.PieceCID, false, data)
- atomic.AddInt64(&mine, -1)
- fmt.Println("shutting down mining")
- <-done
-}
-
-func TestSenondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) {
-
- ctx := context.Background()
- n, sn := b(t, OneFull, OneMiner)
- client := n[0].FullNode.(*impl.FullNodeAPI)
- miner := sn[0]
-
- addrinfo, err := client.NetAddrsListen(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := miner.NetConnect(ctx, addrinfo); err != nil {
- t.Fatal(err)
- }
- time.Sleep(time.Second)
-
- mine := int64(1)
- done := make(chan struct{})
-
- go func() {
- defer close(done)
- for atomic.LoadInt64(&mine) == 1 {
- time.Sleep(blocktime)
- if err := sn[0].MineOne(ctx, MineNext); err != nil {
- t.Error(err)
- }
- }
- }()
-
- {
- data1 := make([]byte, 800)
- rand.New(rand.NewSource(int64(3))).Read(data1)
- r := bytes.NewReader(data1)
-
- fcid1, err := client.ClientImportLocal(ctx, r)
- if err != nil {
- t.Fatal(err)
- }
-
- data2 := make([]byte, 800)
- rand.New(rand.NewSource(int64(9))).Read(data2)
- r2 := bytes.NewReader(data2)
-
- fcid2, err := client.ClientImportLocal(ctx, r2)
- if err != nil {
- t.Fatal(err)
- }
-
- deal1 := startDeal(t, ctx, miner, client, fcid1, true)
-
- // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
- time.Sleep(time.Second)
- waitDealSealed(t, ctx, miner, client, deal1, true)
-
- deal2 := startDeal(t, ctx, miner, client, fcid2, true)
-
- time.Sleep(time.Second)
- waitDealSealed(t, ctx, miner, client, deal2, false)
-
- // Retrieval
- info, err := client.ClientGetDealInfo(ctx, *deal2)
- require.NoError(t, err)
-
- rf, _ := miner.SectorsRefs(ctx)
- fmt.Printf("refs: %+v\n", rf)
-
- testRetrieval(t, ctx, client, fcid2, &info.PieceCID, false, data2)
- }
-
- atomic.AddInt64(&mine, -1)
- fmt.Println("shutting down mining")
- <-done
-}
-
-func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool) *cid.Cid {
- maddr, err := miner.ActorAddress(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- addr, err := client.WalletDefaultAddress(ctx)
- if err != nil {
- t.Fatal(err)
- }
- deal, err := client.ClientStartDeal(ctx, &api.StartDealParams{
- Data: &storagemarket.DataRef{
- TransferType: storagemarket.TTGraphsync,
- Root: fcid,
- },
- Wallet: addr,
- Miner: maddr,
- EpochPrice: types.NewInt(1000000),
- MinBlocksDuration: uint64(build.MinDealDuration),
- FastRetrieval: fastRet,
- })
- if err != nil {
- t.Fatalf("%+v", err)
- }
- return deal
-}
-
-func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, deal *cid.Cid, noseal bool) {
-loop:
- for {
- di, err := client.ClientGetDealInfo(ctx, *deal)
- if err != nil {
- t.Fatal(err)
- }
- switch di.State {
- case storagemarket.StorageDealSealing:
- if noseal {
- return
- }
- startSealingWaiting(t, ctx, miner)
- case storagemarket.StorageDealProposalRejected:
- t.Fatal("deal rejected")
- case storagemarket.StorageDealFailing:
- t.Fatal("deal failed")
- case storagemarket.StorageDealError:
- t.Fatal("deal errored", di.Message)
- case storagemarket.StorageDealActive:
- fmt.Println("COMPLETE", di)
- break loop
- }
- fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
- time.Sleep(time.Second / 2)
- }
-}
-
-func waitDealPublished(t *testing.T, ctx context.Context, miner TestStorageNode, deal *cid.Cid) {
- subCtx, cancel := context.WithCancel(ctx)
- defer cancel()
- updates, err := miner.MarketGetDealUpdates(subCtx)
- if err != nil {
- t.Fatal(err)
- }
- for {
- select {
- case <-ctx.Done():
- t.Fatal("context timeout")
- case di := <-updates:
- if deal.Equals(di.ProposalCid) {
- switch di.State {
- case storagemarket.StorageDealProposalRejected:
- t.Fatal("deal rejected")
- case storagemarket.StorageDealFailing:
- t.Fatal("deal failed")
- case storagemarket.StorageDealError:
- t.Fatal("deal errored", di.Message)
- case storagemarket.StorageDealFinalizing, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
- fmt.Println("COMPLETE", di)
- return
- }
- fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
- }
- }
- }
-}
-
-func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNode) {
- snums, err := miner.SectorsList(ctx)
- require.NoError(t, err)
-
- for _, snum := range snums {
- si, err := miner.SectorsStatus(ctx, snum, false)
- require.NoError(t, err)
-
- t.Logf("Sector state: %s", si.State)
- if si.State == api.SectorState(sealing.WaitDeals) {
- require.NoError(t, miner.SectorStartSealing(ctx, snum))
- }
- }
-}
-
-func testRetrieval(t *testing.T, ctx context.Context, client api.FullNode, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) {
- offers, err := client.ClientFindData(ctx, fcid, piece)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(offers) < 1 {
- t.Fatal("no offers")
- }
-
- rpath, err := ioutil.TempDir("", "lotus-retrieve-test-")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(rpath) //nolint:errcheck
-
- caddr, err := client.WalletDefaultAddress(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- ref := &api.FileRef{
- Path: filepath.Join(rpath, "ret"),
- IsCAR: carExport,
- }
- updates, err := client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
- if err != nil {
- t.Fatal(err)
- }
- for update := range updates {
- if update.Err != "" {
- t.Fatalf("retrieval failed: %s", update.Err)
- }
- }
-
- rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))
- if err != nil {
- t.Fatal(err)
- }
-
- if carExport {
- rdata = extractCarData(t, ctx, rdata, rpath)
- }
-
- if !bytes.Equal(rdata, data) {
- t.Fatal("wrong data retrieved")
- }
-}
-
-func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath string) []byte {
- bserv := dstest.Bserv()
- ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
- if err != nil {
- t.Fatal(err)
- }
- b, err := bserv.GetBlock(ctx, ch.Roots[0])
- if err != nil {
- t.Fatal(err)
- }
- nd, err := ipld.Decode(b)
- if err != nil {
- t.Fatal(err)
- }
- dserv := dag.NewDAGService(bserv)
- fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
- if err != nil {
- t.Fatal(err)
- }
- outPath := filepath.Join(rpath, "retLoadedCAR")
- if err := files.WriteTo(fil, outPath); err != nil {
- t.Fatal(err)
- }
- rdata, err = ioutil.ReadFile(outPath)
- if err != nil {
- t.Fatal(err)
- }
- return rdata
-}
diff --git a/api/test/mining.go b/api/test/mining.go
deleted file mode 100644
index 11953b95d70..00000000000
--- a/api/test/mining.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package test
-
-import (
- "bytes"
- "context"
- "fmt"
- "math/rand"
- "sync/atomic"
- "testing"
- "time"
-
- logging "github.com/ipfs/go-log/v2"
-
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/stretchr/testify/require"
-
- "github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/miner"
- "github.com/filecoin-project/lotus/node/impl"
-)
-
-//nolint:deadcode,varcheck
-var log = logging.Logger("apitest")
-
-func (ts *testSuite) testMining(t *testing.T) {
- ctx := context.Background()
- apis, sn := ts.makeNodes(t, OneFull, OneMiner)
- api := apis[0]
-
- newHeads, err := api.ChainNotify(ctx)
- require.NoError(t, err)
- initHead := (<-newHeads)[0]
- baseHeight := initHead.Val.Height()
-
- h1, err := api.ChainHead(ctx)
- require.NoError(t, err)
- require.Equal(t, int64(h1.Height()), int64(baseHeight))
-
- MineUntilBlock(ctx, t, apis[0], sn[0], nil)
- require.NoError(t, err)
-
- <-newHeads
-
- h2, err := api.ChainHead(ctx)
- require.NoError(t, err)
- require.Greater(t, int64(h2.Height()), int64(h1.Height()))
-}
-
-func (ts *testSuite) testMiningReal(t *testing.T) {
- build.InsecurePoStValidation = false
- defer func() {
- build.InsecurePoStValidation = true
- }()
-
- ctx := context.Background()
- apis, sn := ts.makeNodes(t, OneFull, OneMiner)
- api := apis[0]
-
- newHeads, err := api.ChainNotify(ctx)
- require.NoError(t, err)
- at := (<-newHeads)[0].Val.Height()
-
- h1, err := api.ChainHead(ctx)
- require.NoError(t, err)
- require.Equal(t, int64(at), int64(h1.Height()))
-
- MineUntilBlock(ctx, t, apis[0], sn[0], nil)
- require.NoError(t, err)
-
- <-newHeads
-
- h2, err := api.ChainHead(ctx)
- require.NoError(t, err)
- require.Greater(t, int64(h2.Height()), int64(h1.Height()))
-
- MineUntilBlock(ctx, t, apis[0], sn[0], nil)
- require.NoError(t, err)
-
- <-newHeads
-
- h3, err := api.ChainHead(ctx)
- require.NoError(t, err)
- require.Greater(t, int64(h3.Height()), int64(h2.Height()))
-}
-
-func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) {
- // test making a deal with a fresh miner, and see if it starts to mine
-
- ctx := context.Background()
- n, sn := b(t, OneFull, []StorageMiner{
- {Full: 0, Preseal: PresealGenesis},
- {Full: 0, Preseal: 0}, // TODO: Add support for miners on non-first full node
- })
- client := n[0].FullNode.(*impl.FullNodeAPI)
- provider := sn[1]
- genesisMiner := sn[0]
-
- addrinfo, err := client.NetAddrsListen(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := provider.NetConnect(ctx, addrinfo); err != nil {
- t.Fatal(err)
- }
-
- if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil {
- t.Fatal(err)
- }
-
- time.Sleep(time.Second)
-
- data := make([]byte, 600)
- rand.New(rand.NewSource(5)).Read(data)
-
- r := bytes.NewReader(data)
- fcid, err := client.ClientImportLocal(ctx, r)
- if err != nil {
- t.Fatal(err)
- }
-
- fmt.Println("FILE CID: ", fcid)
-
- var mine int32 = 1
- done := make(chan struct{})
- minedTwo := make(chan struct{})
-
- m2addr, err := sn[1].ActorAddress(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- go func() {
- defer close(done)
-
- complChan := minedTwo
- for atomic.LoadInt32(&mine) != 0 {
- wait := make(chan int)
- mdone := func(mined bool, _ abi.ChainEpoch, err error) {
- n := 0
- if mined {
- n = 1
- }
- wait <- n
- }
-
- if err := sn[0].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
- t.Error(err)
- }
-
- if err := sn[1].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
- t.Error(err)
- }
-
- expect := <-wait
- expect += <-wait
-
- time.Sleep(blocktime)
- if expect == 0 {
- // null block
- continue
- }
-
- var nodeOneMined bool
- for _, node := range sn {
- mb, err := node.MiningBase(ctx)
- if err != nil {
- t.Error(err)
- return
- }
-
- for _, b := range mb.Blocks() {
- if b.Miner == m2addr {
- nodeOneMined = true
- break
- }
- }
-
- }
-
- if nodeOneMined && complChan != nil {
- close(complChan)
- complChan = nil
- }
-
- }
- }()
-
- deal := startDeal(t, ctx, provider, client, fcid, false)
-
- // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
- time.Sleep(time.Second)
-
- waitDealSealed(t, ctx, provider, client, deal, false)
-
- <-minedTwo
-
- atomic.StoreInt32(&mine, 0)
- fmt.Println("shutting down mining")
- <-done
-}
diff --git a/api/test/tape.go b/api/test/tape.go
deleted file mode 100644
index 466bdd829a5..00000000000
--- a/api/test/tape.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package test
-
-import (
- "context"
- "fmt"
- "testing"
- "time"
-
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/network"
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/chain/stmgr"
- sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
- "github.com/filecoin-project/lotus/node"
- "github.com/filecoin-project/lotus/node/impl"
- "github.com/stretchr/testify/require"
-)
-
-func TestTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration) {
- t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) })
- t.Run("after", func(t *testing.T) { testTapeFix(t, b, blocktime, true) })
-}
-func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- upgradeSchedule := stmgr.UpgradeSchedule{{
- Network: build.ActorUpgradeNetworkVersion,
- Height: 1,
- Migration: stmgr.UpgradeActorsV2,
- }}
- if after {
- upgradeSchedule = append(upgradeSchedule, stmgr.Upgrade{
- Network: network.Version5,
- Height: 2,
- })
- }
-
- n, sn := b(t, []FullNodeOpts{{Opts: func(_ []TestNode) node.Option {
- return node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule)
- }}}, OneMiner)
-
- client := n[0].FullNode.(*impl.FullNodeAPI)
- miner := sn[0]
-
- addrinfo, err := client.NetAddrsListen(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := miner.NetConnect(ctx, addrinfo); err != nil {
- t.Fatal(err)
- }
- build.Clock.Sleep(time.Second)
-
- done := make(chan struct{})
- go func() {
- defer close(done)
- for ctx.Err() == nil {
- build.Clock.Sleep(blocktime)
- if err := sn[0].MineOne(ctx, MineNext); err != nil {
- if ctx.Err() != nil {
- // context was canceled, ignore the error.
- return
- }
- t.Error(err)
- }
- }
- }()
- defer func() {
- cancel()
- <-done
- }()
-
- err = miner.PledgeSector(ctx)
- require.NoError(t, err)
-
- // Wait till done.
- var sectorNo abi.SectorNumber
- for {
- s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
- require.NoError(t, err)
- fmt.Printf("Sectors: %d\n", len(s))
- if len(s) == 1 {
- sectorNo = s[0]
- break
- }
-
- build.Clock.Sleep(100 * time.Millisecond)
- }
-
- fmt.Printf("All sectors is fsm\n")
-
- // If before, we expect the precommit to fail
- successState := api.SectorState(sealing.CommitFailed)
- failureState := api.SectorState(sealing.Proving)
- if after {
- // otherwise, it should succeed.
- successState, failureState = failureState, successState
- }
-
- for {
- st, err := miner.SectorsStatus(ctx, sectorNo, false)
- require.NoError(t, err)
- if st.State == successState {
- break
- }
- require.NotEqual(t, failureState, st.State)
- build.Clock.Sleep(100 * time.Millisecond)
- fmt.Println("WaitSeal")
- }
-
-}
diff --git a/api/test/test.go b/api/test/test.go
deleted file mode 100644
index bae3d520ef8..00000000000
--- a/api/test/test.go
+++ /dev/null
@@ -1,243 +0,0 @@
-package test
-
-import (
- "context"
- "fmt"
- "os"
- "testing"
- "time"
-
- "github.com/filecoin-project/lotus/chain/stmgr"
- "github.com/filecoin-project/lotus/chain/types"
-
- logging "github.com/ipfs/go-log/v2"
- "github.com/multiformats/go-multiaddr"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/big"
- "github.com/filecoin-project/go-state-types/network"
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/miner"
- "github.com/filecoin-project/lotus/node"
-)
-
-func init() {
- logging.SetAllLoggers(logging.LevelInfo)
- err := os.Setenv("BELLMAN_NO_GPU", "1")
- if err != nil {
- panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
- }
- build.InsecurePoStValidation = true
-}
-
-type TestNode struct {
- api.FullNode
- // ListenAddr is the address on which an API server is listening, if an
- // API server is created for this Node
- ListenAddr multiaddr.Multiaddr
-}
-
-type TestStorageNode struct {
- api.StorageMiner
- // ListenAddr is the address on which an API server is listening, if an
- // API server is created for this Node
- ListenAddr multiaddr.Multiaddr
-
- MineOne func(context.Context, miner.MineReq) error
-}
-
-var PresealGenesis = -1
-
-const GenesisPreseals = 2
-
-// Options for setting up a mock storage miner
-type StorageMiner struct {
- Full int
- Preseal int
-}
-
-type OptionGenerator func([]TestNode) node.Option
-
-// Options for setting up a mock full node
-type FullNodeOpts struct {
- Lite bool // run node in "lite" mode
- Opts OptionGenerator // generate dependency injection options
-}
-
-// APIBuilder is a function which is invoked in test suite to provide
-// test nodes and networks
-//
-// fullOpts array defines options for each full node
-// storage array defines storage nodes, numbers in the array specify full node
-// index the storage node 'belongs' to
-type APIBuilder func(t *testing.T, full []FullNodeOpts, storage []StorageMiner) ([]TestNode, []TestStorageNode)
-type testSuite struct {
- makeNodes APIBuilder
-}
-
-// TestApis is the entry point to API test suite
-func TestApis(t *testing.T, b APIBuilder) {
- ts := testSuite{
- makeNodes: b,
- }
-
- t.Run("version", ts.testVersion)
- t.Run("id", ts.testID)
- t.Run("testConnectTwo", ts.testConnectTwo)
- t.Run("testMining", ts.testMining)
- t.Run("testMiningReal", ts.testMiningReal)
- t.Run("testSearchMsg", ts.testSearchMsg)
-}
-
-func DefaultFullOpts(nFull int) []FullNodeOpts {
- full := make([]FullNodeOpts, nFull)
- for i := range full {
- full[i] = FullNodeOpts{
- Opts: func(nodes []TestNode) node.Option {
- return node.Options()
- },
- }
- }
- return full
-}
-
-var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
-var OneFull = DefaultFullOpts(1)
-var TwoFull = DefaultFullOpts(2)
-
-var FullNodeWithUpgradeAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
- return FullNodeOpts{
- Opts: func(nodes []TestNode) node.Option {
- return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
- // Skip directly to tape height so precommits work.
- Network: network.Version5,
- Height: upgradeHeight,
- Migration: stmgr.UpgradeActorsV2,
- }})
- },
- }
-}
-
-var MineNext = miner.MineReq{
- InjectNulls: 0,
- Done: func(bool, abi.ChainEpoch, error) {},
-}
-
-func (ts *testSuite) testVersion(t *testing.T) {
- build.RunningNodeType = build.NodeFull
-
- ctx := context.Background()
- apis, _ := ts.makeNodes(t, OneFull, OneMiner)
- api := apis[0]
-
- v, err := api.Version(ctx)
- if err != nil {
- t.Fatal(err)
- }
- require.Equal(t, v.Version, build.BuildVersion)
-}
-
-func (ts *testSuite) testSearchMsg(t *testing.T) {
- apis, miners := ts.makeNodes(t, OneFull, OneMiner)
-
- api := apis[0]
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- senderAddr, err := api.WalletDefaultAddress(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- msg := &types.Message{
- From: senderAddr,
- To: senderAddr,
- Value: big.Zero(),
- }
- bm := NewBlockMiner(ctx, t, miners[0], 100*time.Millisecond)
- bm.MineBlocks()
- defer bm.Stop()
-
- sm, err := api.MpoolPushMessage(ctx, msg, nil)
- if err != nil {
- t.Fatal(err)
- }
- res, err := api.StateWaitMsg(ctx, sm.Cid(), 1)
- if err != nil {
- t.Fatal(err)
- }
- if res.Receipt.ExitCode != 0 {
- t.Fatal("did not successfully send message")
- }
-
- searchRes, err := api.StateSearchMsg(ctx, sm.Cid())
- if err != nil {
- t.Fatal(err)
- }
-
- if searchRes.TipSet != res.TipSet {
- t.Fatalf("search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
- }
-
-}
-
-func (ts *testSuite) testID(t *testing.T) {
- ctx := context.Background()
- apis, _ := ts.makeNodes(t, OneFull, OneMiner)
- api := apis[0]
-
- id, err := api.ID(ctx)
- if err != nil {
- t.Fatal(err)
- }
- assert.Regexp(t, "^12", id.Pretty())
-}
-
-func (ts *testSuite) testConnectTwo(t *testing.T) {
- ctx := context.Background()
- apis, _ := ts.makeNodes(t, TwoFull, OneMiner)
-
- p, err := apis[0].NetPeers(ctx)
- if err != nil {
- t.Fatal(err)
- }
- if len(p) != 0 {
- t.Error("Node 0 has a peer")
- }
-
- p, err = apis[1].NetPeers(ctx)
- if err != nil {
- t.Fatal(err)
- }
- if len(p) != 0 {
- t.Error("Node 1 has a peer")
- }
-
- addrs, err := apis[1].NetAddrsListen(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := apis[0].NetConnect(ctx, addrs); err != nil {
- t.Fatal(err)
- }
-
- p, err = apis[0].NetPeers(ctx)
- if err != nil {
- t.Fatal(err)
- }
- if len(p) != 1 {
- t.Error("Node 0 doesn't have 1 peer")
- }
-
- p, err = apis[1].NetPeers(ctx)
- if err != nil {
- t.Fatal(err)
- }
- if len(p) != 1 {
- t.Error("Node 0 doesn't have 1 peer")
- }
-}
diff --git a/api/test/util.go b/api/test/util.go
deleted file mode 100644
index 8695e2e2efb..00000000000
--- a/api/test/util.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package test
-
-import (
- "context"
- "testing"
- "time"
-
- "github.com/filecoin-project/go-state-types/abi"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/miner"
-)
-
-func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.Address, amount abi.TokenAmount) {
- senderAddr, err := sender.WalletDefaultAddress(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- msg := &types.Message{
- From: senderAddr,
- To: addr,
- Value: amount,
- }
-
- sm, err := sender.MpoolPushMessage(ctx, msg, nil)
- if err != nil {
- t.Fatal(err)
- }
- res, err := sender.StateWaitMsg(ctx, sm.Cid(), 1)
- if err != nil {
- t.Fatal(err)
- }
- if res.Receipt.ExitCode != 0 {
- t.Fatal("did not successfully send money")
- }
-}
-
-func MineUntilBlock(ctx context.Context, t *testing.T, fn TestNode, sn TestStorageNode, cb func(abi.ChainEpoch)) {
- for i := 0; i < 1000; i++ {
- var success bool
- var err error
- var epoch abi.ChainEpoch
- wait := make(chan struct{})
- mineErr := sn.MineOne(ctx, miner.MineReq{
- Done: func(win bool, ep abi.ChainEpoch, e error) {
- success = win
- err = e
- epoch = ep
- wait <- struct{}{}
- },
- })
- if mineErr != nil {
- t.Fatal(mineErr)
- }
- <-wait
- if err != nil {
- t.Fatal(err)
- }
- if success {
- // Wait until it shows up on the given full nodes ChainHead
- nloops := 50
- for i := 0; i < nloops; i++ {
- ts, err := fn.ChainHead(ctx)
- if err != nil {
- t.Fatal(err)
- }
- if ts.Height() == epoch {
- break
- }
- if i == nloops-1 {
- t.Fatal("block never managed to sync to node")
- }
- time.Sleep(time.Millisecond * 10)
- }
-
- if cb != nil {
- cb(epoch)
- }
- return
- }
- t.Log("did not mine block, trying again", i)
- }
- t.Fatal("failed to mine 1000 times in a row...")
-}
diff --git a/api/test/window_post.go b/api/test/window_post.go
deleted file mode 100644
index 55fc4ad7044..00000000000
--- a/api/test/window_post.go
+++ /dev/null
@@ -1,336 +0,0 @@
-package test
-
-import (
- "context"
- "fmt"
- "sync/atomic"
-
- "strings"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/lotus/extern/sector-storage/mock"
- sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
-
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/chain/types"
- bminer "github.com/filecoin-project/lotus/miner"
- "github.com/filecoin-project/lotus/node/impl"
-)
-
-func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- n, sn := b(t, OneFull, OneMiner)
- client := n[0].FullNode.(*impl.FullNodeAPI)
- miner := sn[0]
-
- addrinfo, err := client.NetAddrsListen(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := miner.NetConnect(ctx, addrinfo); err != nil {
- t.Fatal(err)
- }
- build.Clock.Sleep(time.Second)
-
- mine := int64(1)
- done := make(chan struct{})
- go func() {
- defer close(done)
- for atomic.LoadInt64(&mine) != 0 {
- build.Clock.Sleep(blocktime)
- if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
-
- }}); err != nil {
- t.Error(err)
- }
- }
- }()
-
- pledgeSectors(t, ctx, miner, nSectors, 0, nil)
-
- atomic.StoreInt64(&mine, 0)
- <-done
-}
-
-func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) {
- for i := 0; i < n; i++ {
- err := miner.PledgeSector(ctx)
- require.NoError(t, err)
- if i%3 == 0 && blockNotif != nil {
- <-blockNotif
- }
- }
-
- for {
- s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
- require.NoError(t, err)
- fmt.Printf("Sectors: %d\n", len(s))
- if len(s) >= n+existing {
- break
- }
-
- build.Clock.Sleep(100 * time.Millisecond)
- }
-
- fmt.Printf("All sectors is fsm\n")
-
- s, err := miner.SectorsList(ctx)
- require.NoError(t, err)
-
- toCheck := map[abi.SectorNumber]struct{}{}
- for _, number := range s {
- toCheck[number] = struct{}{}
- }
-
- for len(toCheck) > 0 {
- for n := range toCheck {
- st, err := miner.SectorsStatus(ctx, n, false)
- require.NoError(t, err)
- if st.State == api.SectorState(sealing.Proving) {
- delete(toCheck, n)
- }
- if strings.Contains(string(st.State), "Fail") {
- t.Fatal("sector in a failed state", st.State)
- }
- }
-
- build.Clock.Sleep(100 * time.Millisecond)
- fmt.Printf("WaitSeal: %d\n", len(s))
- }
-}
-
-func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
- for _, height := range []abi.ChainEpoch{
- 1, // before
- 162, // while sealing
- 5000, // while proving
- } {
- height := height // copy to satisfy lints
- t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
- testWindowPostUpgrade(t, b, blocktime, nSectors, height)
- })
- }
-
-}
-func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int,
- upgradeHeight abi.ChainEpoch) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- n, sn := b(t, []FullNodeOpts{FullNodeWithUpgradeAt(upgradeHeight)}, OneMiner)
-
- client := n[0].FullNode.(*impl.FullNodeAPI)
- miner := sn[0]
-
- addrinfo, err := client.NetAddrsListen(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := miner.NetConnect(ctx, addrinfo); err != nil {
- t.Fatal(err)
- }
- build.Clock.Sleep(time.Second)
-
- done := make(chan struct{})
- go func() {
- defer close(done)
- for ctx.Err() == nil {
- build.Clock.Sleep(blocktime)
- if err := sn[0].MineOne(ctx, MineNext); err != nil {
- if ctx.Err() != nil {
- // context was canceled, ignore the error.
- return
- }
- t.Error(err)
- }
- }
- }()
- defer func() {
- cancel()
- <-done
- }()
-
- pledgeSectors(t, ctx, miner, nSectors, 0, nil)
-
- maddr, err := miner.ActorAddress(ctx)
- require.NoError(t, err)
-
- di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
- require.NoError(t, err)
-
- mid, err := address.IDFromAddress(maddr)
- require.NoError(t, err)
-
- fmt.Printf("Running one proving period\n")
- fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
-
- for {
- head, err := client.ChainHead(ctx)
- require.NoError(t, err)
-
- if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
- fmt.Printf("Now head.Height = %d\n", head.Height())
- break
- }
- build.Clock.Sleep(blocktime)
- }
-
- p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
- require.NoError(t, err)
-
- ssz, err := miner.ActorSectorSize(ctx, maddr)
- require.NoError(t, err)
-
- require.Equal(t, p.MinerPower, p.TotalPower)
- require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+GenesisPreseals)))
-
- fmt.Printf("Drop some sectors\n")
-
- // Drop 2 sectors from deadline 2 partition 0 (full partition / deadline)
- {
- parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK)
- require.NoError(t, err)
- require.Greater(t, len(parts), 0)
-
- secs := parts[0].AllSectors
- n, err := secs.Count()
- require.NoError(t, err)
- require.Equal(t, uint64(2), n)
-
- // Drop the partition
- err = secs.ForEach(func(sid uint64) error {
- return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(abi.SectorID{
- Miner: abi.ActorID(mid),
- Number: abi.SectorNumber(sid),
- }, true)
- })
- require.NoError(t, err)
- }
-
- var s abi.SectorID
-
- // Drop 1 sectors from deadline 3 partition 0
- {
- parts, err := client.StateMinerPartitions(ctx, maddr, 3, types.EmptyTSK)
- require.NoError(t, err)
- require.Greater(t, len(parts), 0)
-
- secs := parts[0].AllSectors
- n, err := secs.Count()
- require.NoError(t, err)
- require.Equal(t, uint64(2), n)
-
- // Drop the sector
- sn, err := secs.First()
- require.NoError(t, err)
-
- all, err := secs.All(2)
- require.NoError(t, err)
- fmt.Println("the sectors", all)
-
- s = abi.SectorID{
- Miner: abi.ActorID(mid),
- Number: abi.SectorNumber(sn),
- }
-
- err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true)
- require.NoError(t, err)
- }
-
- di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
- require.NoError(t, err)
-
- fmt.Printf("Go through another PP, wait for sectors to become faulty\n")
- fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
-
- for {
- head, err := client.ChainHead(ctx)
- require.NoError(t, err)
-
- if head.Height() > di.PeriodStart+(di.WPoStProvingPeriod)+2 {
- fmt.Printf("Now head.Height = %d\n", head.Height())
- break
- }
-
- build.Clock.Sleep(blocktime)
- }
-
- p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
- require.NoError(t, err)
-
- require.Equal(t, p.MinerPower, p.TotalPower)
-
- sectors := p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
- require.Equal(t, nSectors+GenesisPreseals-3, int(sectors)) // -3 just removed sectors
-
- fmt.Printf("Recover one sector\n")
-
- err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false)
- require.NoError(t, err)
-
- di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
- require.NoError(t, err)
-
- fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
-
- for {
- head, err := client.ChainHead(ctx)
- require.NoError(t, err)
-
- if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
- fmt.Printf("Now head.Height = %d\n", head.Height())
- break
- }
-
- build.Clock.Sleep(blocktime)
- }
-
- p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
- require.NoError(t, err)
-
- require.Equal(t, p.MinerPower, p.TotalPower)
-
- sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
- require.Equal(t, nSectors+GenesisPreseals-2, int(sectors)) // -2 not recovered sectors
-
- // pledge a sector after recovery
-
- pledgeSectors(t, ctx, miner, 1, nSectors, nil)
-
- {
- // Wait until proven.
- di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
- require.NoError(t, err)
-
- waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
- fmt.Printf("End for head.Height > %d\n", waitUntil)
-
- for {
- head, err := client.ChainHead(ctx)
- require.NoError(t, err)
-
- if head.Height() > waitUntil {
- fmt.Printf("Now head.Height = %d\n", head.Height())
- break
- }
- }
- }
-
- p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
- require.NoError(t, err)
-
- require.Equal(t, p.MinerPower, p.TotalPower)
-
- sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
- require.Equal(t, nSectors+GenesisPreseals-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged
-}
diff --git a/api/types.go b/api/types.go
index a69aa28d99e..9d887b0a117 100644
--- a/api/types.go
+++ b/api/types.go
@@ -3,10 +3,13 @@ package api
import (
"encoding/json"
"fmt"
+ "time"
+
+ "github.com/filecoin-project/go-fil-markets/retrievalmarket"
+ "github.com/filecoin-project/lotus/chain/types"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/lotus/build"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
@@ -51,19 +54,6 @@ type MessageSendSpec struct {
MaxFee abi.TokenAmount
}
-var DefaultMessageSendSpec = MessageSendSpec{
- // MaxFee of 0.1FIL
- MaxFee: abi.NewTokenAmount(int64(build.FilecoinPrecision) / 10),
-}
-
-func (ms *MessageSendSpec) Get() MessageSendSpec {
- if ms == nil {
- return DefaultMessageSendSpec
- }
-
- return *ms
-}
-
type DataTransferChannel struct {
TransferID datatransfer.TransferID
Status datatransfer.Status
@@ -74,6 +64,7 @@ type DataTransferChannel struct {
Message string
OtherPeer peer.ID
Transferred uint64
+ Stages *datatransfer.ChannelStages
}
// NewDataTransferChannel constructs an API DataTransferChannel type from full channel state snapshot and a host id
@@ -107,3 +98,100 @@ func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelSta
}
return channel
}
+
+type NetBlockList struct {
+ Peers []peer.ID
+ IPAddrs []string
+ IPSubnets []string
+}
+
+type ExtendedPeerInfo struct {
+ ID peer.ID
+ Agent string
+ Addrs []string
+ Protocols []string
+ ConnMgrMeta *ConnMgrInfo
+}
+
+type ConnMgrInfo struct {
+ FirstSeen time.Time
+ Value int
+ Tags map[string]int
+ Conns map[string]time.Time
+}
+
+type NodeStatus struct {
+ SyncStatus NodeSyncStatus
+ PeerStatus NodePeerStatus
+ ChainStatus NodeChainStatus
+}
+
+type NodeSyncStatus struct {
+ Epoch uint64
+ Behind uint64
+}
+
+type NodePeerStatus struct {
+ PeersToPublishMsgs int
+ PeersToPublishBlocks int
+}
+
+type NodeChainStatus struct {
+ BlocksPerTipsetLast100 float64
+ BlocksPerTipsetLastFinality float64
+}
+
+type CheckStatusCode int
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=CheckStatusCode -trimprefix=CheckStatus
+const (
+ _ CheckStatusCode = iota
+ // Message Checks
+ CheckStatusMessageSerialize
+ CheckStatusMessageSize
+ CheckStatusMessageValidity
+ CheckStatusMessageMinGas
+ CheckStatusMessageMinBaseFee
+ CheckStatusMessageBaseFee
+ CheckStatusMessageBaseFeeLowerBound
+ CheckStatusMessageBaseFeeUpperBound
+ CheckStatusMessageGetStateNonce
+ CheckStatusMessageNonce
+ CheckStatusMessageGetStateBalance
+ CheckStatusMessageBalance
+)
+
+type CheckStatus struct {
+ Code CheckStatusCode
+ OK bool
+ Err string
+ Hint map[string]interface{}
+}
+
+type MessageCheckStatus struct {
+ Cid cid.Cid
+ CheckStatus
+}
+
+type MessagePrototype struct {
+ Message types.Message
+ ValidNonce bool
+}
+
+type RetrievalInfo struct {
+ PayloadCID cid.Cid
+ ID retrievalmarket.DealID
+ PieceCID *cid.Cid
+ PricePerByte abi.TokenAmount
+ UnsealPrice abi.TokenAmount
+
+ Status retrievalmarket.DealStatus
+ Message string // more information about deal state, particularly errors
+ Provider peer.ID
+ BytesReceived uint64
+ BytesPaidFor uint64
+ TotalPaid abi.TokenAmount
+
+ TransferChannelID *datatransfer.ChannelID
+ DataTransfer *DataTransferChannel
+}
diff --git a/api/types/actors.go b/api/types/actors.go
new file mode 100644
index 00000000000..d55ef3e107a
--- /dev/null
+++ b/api/types/actors.go
@@ -0,0 +1,5 @@
+package apitypes
+
+import "github.com/filecoin-project/go-state-types/network"
+
+type NetworkVersion = network.Version
diff --git a/api/types/openrpc.go b/api/types/openrpc.go
new file mode 100644
index 00000000000..7d65cbde63c
--- /dev/null
+++ b/api/types/openrpc.go
@@ -0,0 +1,3 @@
+package apitypes
+
+type OpenRPCDocument map[string]interface{}
diff --git a/api/v0api/full.go b/api/v0api/full.go
new file mode 100644
index 00000000000..b152c6cbb84
--- /dev/null
+++ b/api/v0api/full.go
@@ -0,0 +1,711 @@
+package v0api
+
+import (
+ "context"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ datatransfer "github.com/filecoin-project/go-data-transfer"
+ "github.com/filecoin-project/go-fil-markets/retrievalmarket"
+ "github.com/filecoin-project/go-fil-markets/storagemarket"
+ "github.com/filecoin-project/go-multistore"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p-core/peer"
+
+ "github.com/filecoin-project/lotus/api"
+ apitypes "github.com/filecoin-project/lotus/api/types"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
+ "github.com/filecoin-project/lotus/chain/types"
+ marketevents "github.com/filecoin-project/lotus/markets/loggers"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+)
+
+//go:generate go run github.com/golang/mock/mockgen -destination=v0mocks/mock_full.go -package=v0mocks . FullNode
+
+// MODIFYING THE API INTERFACE
+//
+// NOTE: This is the V0 (Stable) API - when adding methods to this interface,
+// you'll need to make sure they are also present on the V1 (Unstable) API
+//
+// This API is implemented in `v1_wrapper.go` as a compatibility layer backed
+// by the V1 api
+//
+// When adding / changing methods in this file:
+// * Do the change here
+// * Adjust implementation in `node/impl/`
+// * Run `make gen` - this will:
+// * Generate proxy structs
+// * Generate mocks
+// * Generate markdown docs
+// * Generate openrpc blobs
+
+// FullNode API is a low-level interface to the Filecoin network full node
+type FullNode interface {
+ Common
+ Net
+
+ // MethodGroup: Chain
+ // The Chain method group contains methods for interacting with the
+ // blockchain, but that do not require any form of state computation.
+
+ // ChainNotify returns channel with chain head updates.
+ // First message is guaranteed to be of len == 1, and type == 'current'.
+ ChainNotify(context.Context) (<-chan []*api.HeadChange, error) //perm:read
+
+ // ChainHead returns the current head of the chain.
+ ChainHead(context.Context) (*types.TipSet, error) //perm:read
+
+ // ChainGetRandomnessFromTickets is used to sample the chain for randomness.
+ ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read
+
+ // ChainGetRandomnessFromBeacon is used to sample the beacon for randomness.
+ ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read
+
+ // ChainGetBlock returns the block specified by the given CID.
+ ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) //perm:read
+ // ChainGetTipSet returns the tipset specified by the given TipSetKey.
+ ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) //perm:read
+
+ // ChainGetBlockMessages returns messages stored in the specified block.
+ //
+ // Note: If there are multiple blocks in a tipset, it's likely that some
+ // messages will be duplicated. It's also possible for blocks in a tipset to have
+ // different messages from the same sender at the same nonce. When that happens,
+ // only the first message (in a block with lowest ticket) will be considered
+ // for execution
+ //
+ // NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK
+ //
+ // DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET
+ // Use ChainGetParentMessages, which will perform correct message deduplication
+ ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*api.BlockMessages, error) //perm:read
+
+ // ChainGetParentReceipts returns receipts for messages in parent tipset of
+ // the specified block. The receipts in the list returned is one-to-one with the
+ // messages returned by a call to ChainGetParentMessages with the same blockCid.
+ ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) //perm:read
+
+ // ChainGetParentMessages returns messages stored in parent tipset of the
+ // specified block.
+ ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]api.Message, error) //perm:read
+
+ // ChainGetMessagesInTipset returns message stores in current tipset
+ ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]api.Message, error) //perm:read
+
+ // ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
+ // If there are no blocks at the specified epoch, a tipset at an earlier epoch
+ // will be returned.
+ ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) //perm:read
+
+ // ChainReadObj reads ipld nodes referenced by the specified CID from chain
+ // blockstore and returns raw bytes.
+ ChainReadObj(context.Context, cid.Cid) ([]byte, error) //perm:read
+
+ // ChainDeleteObj deletes node referenced by the given CID
+ ChainDeleteObj(context.Context, cid.Cid) error //perm:admin
+
+ // ChainHasObj checks if a given CID exists in the chain blockstore.
+ ChainHasObj(context.Context, cid.Cid) (bool, error) //perm:read
+
+ // ChainStatObj returns statistics about the graph referenced by 'obj'.
+ // If 'base' is also specified, then the returned stat will be a diff
+ // between the two objects.
+ ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (api.ObjStat, error) //perm:read
+
+ // ChainSetHead forcefully sets current chain head. Use with caution.
+ ChainSetHead(context.Context, types.TipSetKey) error //perm:admin
+
+ // ChainGetGenesis returns the genesis tipset.
+ ChainGetGenesis(context.Context) (*types.TipSet, error) //perm:read
+
+ // ChainTipSetWeight computes weight for the specified tipset.
+ ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) //perm:read
+ ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) //perm:read
+
+ // ChainGetMessage reads a message referenced by the specified CID from the
+ // chain blockstore.
+ ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) //perm:read
+
+ // ChainGetPath returns a set of revert/apply operations needed to get from
+ // one tipset to another, for example:
+ //```
+ // to
+ // ^
+ // from tAA
+ // ^ ^
+ // tBA tAB
+ // ^---*--^
+ // ^
+ // tRR
+ //```
+ // Would return `[revert(tBA), apply(tAB), apply(tAA)]`
+ ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) //perm:read
+
+ // ChainExport returns a stream of bytes with CAR dump of chain data.
+ // The exported chain data includes the header chain from the given tipset
+ // back to genesis, the entire genesis state, and the most recent 'nroots'
+ // state trees.
+ // If oldmsgskip is set, messages from before the requested roots are also not included.
+ ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read
+
+ // MethodGroup: Beacon
+ // The Beacon method group contains methods for interacting with the random beacon (DRAND)
+
+ // BeaconGetEntry returns the beacon entry for the given filecoin epoch. If
+ // the entry has not yet been produced, the call will block until the entry
+ // becomes available
+ BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) //perm:read
+
+ // GasEstimateFeeCap estimates gas fee cap
+ GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) //perm:read
+
+ // GasEstimateGasLimit estimates gas used by the message and returns it.
+ // It fails if message fails to execute.
+ GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error) //perm:read
+
+ // GasEstimateGasPremium estimates what gas price should be used for a
+ // message to have high likelihood of inclusion in `nblocksincl` epochs.
+
+ GasEstimateGasPremium(_ context.Context, nblocksincl uint64,
+ sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) //perm:read
+
+ // GasEstimateMessageGas estimates gas values for unset message gas fields
+ GasEstimateMessageGas(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) //perm:read
+
+ // MethodGroup: Sync
+ // The Sync method group contains methods for interacting with and
+ // observing the lotus sync service.
+
+ // SyncState returns the current status of the lotus sync system.
+ SyncState(context.Context) (*api.SyncState, error) //perm:read
+
+ // SyncSubmitBlock can be used to submit a newly created block to the.
+ // network through this node
+ SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error //perm:write
+
+ // SyncIncomingBlocks returns a channel streaming incoming, potentially not
+ // yet synced block headers.
+ SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) //perm:read
+
+ // SyncCheckpoint marks a blocks as checkpointed, meaning that it won't ever fork away from it.
+ SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error //perm:admin
+
+ // SyncMarkBad marks a blocks as bad, meaning that it won't ever by synced.
+ // Use with extreme caution.
+ SyncMarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin
+
+ // SyncUnmarkBad unmarks a blocks as bad, making it possible to be validated and synced again.
+ SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin
+
+ // SyncUnmarkAllBad purges bad block cache, making it possible to sync to chains previously marked as bad
+ SyncUnmarkAllBad(ctx context.Context) error //perm:admin
+
+ // SyncCheckBad checks if a block was marked as bad, and if it was, returns
+ // the reason.
+ SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) //perm:read
+
+ // SyncValidateTipset indicates whether the provided tipset is valid or not
+ SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) //perm:read
+
+ // MethodGroup: Mpool
+ // The Mpool methods are for interacting with the message pool. The message pool
+ // manages all incoming and outgoing 'messages' going over the network.
+
+ // MpoolPending returns pending mempool messages.
+ MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) //perm:read
+
+ // MpoolSelect returns a list of pending messages for inclusion in the next block
+ MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) //perm:read
+
+ // MpoolPush pushes a signed message to mempool.
+ MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write
+
+ // MpoolPushUntrusted pushes a signed message to mempool from untrusted sources.
+ MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write
+
+ // MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
+ // to mempool.
+ // maxFee is only used when GasFeeCap/GasPremium fields aren't specified
+ //
+ // When maxFee is set to 0, MpoolPushMessage will guess appropriate fee
+ // based on current chain conditions
+ MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) //perm:sign
+
+ // MpoolBatchPush batch pushes a signed message to mempool.
+ MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write
+
+ // MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources.
+ MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write
+
+ // MpoolBatchPushMessage batch pushes an unsigned message to mempool.
+ MpoolBatchPushMessage(context.Context, []*types.Message, *api.MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign
+
+ // MpoolGetNonce gets next nonce for the specified sender.
+ // Note that this method may not be atomic. Use MpoolPushMessage instead.
+ MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read
+ MpoolSub(context.Context) (<-chan api.MpoolUpdate, error) //perm:read
+
+ // MpoolClear clears pending messages from the mpool
+ MpoolClear(context.Context, bool) error //perm:write
+
+ // MpoolGetConfig returns (a copy of) the current mpool config
+ MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read
+ // MpoolSetConfig sets the mpool config to (a copy of) the supplied config
+ MpoolSetConfig(context.Context, *types.MpoolConfig) error //perm:admin
+
+ // MethodGroup: Miner
+
+ MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) //perm:read
+ MinerCreateBlock(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) //perm:write
+
+ // // UX ?
+
+ // MethodGroup: Wallet
+
+ // WalletNew creates a new address in the wallet with the given sigType.
+ // Available key types: bls, secp256k1, secp256k1-ledger
+ // Support for numerical types: 1 - secp256k1, 2 - BLS is deprecated
+ WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:write
+ // WalletHas indicates whether the given address is in the wallet.
+ WalletHas(context.Context, address.Address) (bool, error) //perm:write
+ // WalletList lists all the addresses in the wallet.
+ WalletList(context.Context) ([]address.Address, error) //perm:write
+ // WalletBalance returns the balance of the given address at the current head of the chain.
+ WalletBalance(context.Context, address.Address) (types.BigInt, error) //perm:read
+ // WalletSign signs the given bytes using the given address.
+ WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) //perm:sign
+ // WalletSignMessage signs the given message using the given address.
+ WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) //perm:sign
+ // WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid.
+ // The address does not have to be in the wallet.
+ WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
+ // WalletDefaultAddress returns the address marked as default in the wallet.
+ WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
+ // WalletSetDefault marks the given address as the default one.
+ WalletSetDefault(context.Context, address.Address) error //perm:write
+ // WalletExport returns the private key of an address in the wallet.
+ WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
+ // WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet.
+ WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin
+ // WalletDelete deletes an address from the wallet.
+ WalletDelete(context.Context, address.Address) error //perm:admin
+ // WalletValidateAddress validates whether a given string can be decoded as a well-formed address
+ WalletValidateAddress(context.Context, string) (address.Address, error) //perm:read
+
+ // Other
+
+ // MethodGroup: Client
+ // The Client methods all have to do with interacting with the storage and
+ // retrieval markets as a client
+
+ // ClientImport imports file under the specified path into filestore.
+ ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) //perm:admin
+ // ClientRemoveImport removes file import
+ ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin
+ // ClientStartDeal proposes a deal with a miner.
+ ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:admin
+ // ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
+ ClientStatelessDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:write
+ // ClientGetDealInfo returns the latest information about a given deal.
+ ClientGetDealInfo(context.Context, cid.Cid) (*api.DealInfo, error) //perm:read
+ // ClientListDeals returns information about the deals made by the local client.
+ ClientListDeals(ctx context.Context) ([]api.DealInfo, error) //perm:write
+ // ClientGetDealUpdates returns the status of updated deals
+ ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) //perm:write
+ // ClientGetDealStatus returns status given a code
+ ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read
+ // ClientHasLocal indicates whether a certain CID is locally stored.
+ ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write
+ // ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
+ ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) //perm:read
+ // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
+ ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) //perm:read
+ // ClientRetrieve initiates the retrieval of a file, as specified in the order.
+ ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error //perm:admin
+ // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
+ // of status updates.
+ ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
+ // ClientQueryAsk returns a signed StorageAsk from the specified miner.
+ // ClientListRetrievals returns information about retrievals made by the local client
+ ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) //perm:write
+ // ClientGetRetrievalUpdates returns status of updated retrieval deals
+ ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) //perm:write
+ ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read
+ // ClientDealPieceCID calculates the CommP and data size of the specified CID
+ ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) //perm:read
+ // ClientCalcCommP calculates the CommP for a specified file
+ ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) //perm:write
+ // ClientGenCar generates a CAR file for the specified file.
+ ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error //perm:write
+ // ClientDealSize calculates real deal data size
+ ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, error) //perm:read
+ // ClientListDataTransfers returns the status of all ongoing transfers of data
+ ClientListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) //perm:write
+ ClientDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) //perm:write
+ // ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
+ ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
+ // ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
+ ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
+ // ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
+ // which are stuck due to insufficient funds
+ ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write
+
+ // ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID
+ ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write
+
+ // ClientUnimport removes references to the specified file from filestore
+ //ClientUnimport(path string)
+
+ // ClientListImports lists imported files and their root CIDs
+ ClientListImports(ctx context.Context) ([]api.Import, error) //perm:write
+
+ //ClientListAsks() []Ask
+
+ // MethodGroup: State
+ // The State methods are used to query, inspect, and interact with chain state.
+ // Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
+ // A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used.
+
+ // StateCall runs the given message and returns its result without any persisted changes.
+ //
+ // StateCall applies the message to the tipset's parent state. The
+ // message is not applied on-top-of the messages in the passed-in
+ // tipset.
+ StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) //perm:read
+ // StateReplay replays a given message, assuming it was included in a block in the specified tipset.
+ //
+ // If a tipset key is provided, and a replacing message is found on chain,
+ // the method will return an error saying that the message wasn't found
+ //
+ // If no tipset key is provided, the appropriate tipset is looked up, and if
+ // the message was gas-repriced, the on-chain message will be replayed - in
+ // that case the returned InvocResult.MsgCid will not match the Cid param
+ //
+ // If the caller wants to ensure that exactly the requested message was executed,
+ // they MUST check that InvocResult.MsgCid is equal to the provided Cid.
+ // Without this check both the requested and original message may appear as
+ // successfully executed on-chain, which may look like a double-spend.
+ //
+ // A replacing message is a message with a different CID, any of Gas values, and
+ // different signature, but with all other parameters matching (source/destination,
+ // nonce, params, etc.)
+ StateReplay(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) //perm:read
+ // StateGetActor returns the indicated actor's nonce and balance.
+ StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) //perm:read
+ // StateReadState returns the indicated actor's state.
+ StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) //perm:read
+ // StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height.
+ StateListMessages(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) //perm:read
+ // StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number.
+ StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) //perm:read
+
+ // StateNetworkName returns the name of the network the node is synced to
+ StateNetworkName(context.Context) (dtypes.NetworkName, error) //perm:read
+ // StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included.
+ StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
+ // StateMinerActiveSectors returns info about sectors that a given miner is actively proving.
+ StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
+ // StateMinerProvingDeadline calculates the deadline at some epoch for a proving period
+ // and returns the deadline-related calculations.
+ StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) //perm:read
+ // StateMinerPower returns the power of the indicated miner
+ StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) //perm:read
+ // StateMinerInfo returns info about the indicated miner
+ StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) //perm:read
+ // StateMinerDeadlines returns all the proving deadlines for the given miner
+ StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) //perm:read
+ // StateMinerPartitions returns all partitions in the specified deadline
+ StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) //perm:read
+ // StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
+ StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
+ // StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
+ StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*api.Fault, error) //perm:read
+ // StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner
+ StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
+ // StateMinerPreCommitDepositForPower returns the precommit deposit for the specified miner's sector
+ StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
+ // StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector
+ StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
+ // StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent
+ StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
+ // StateMinerSectorAllocated checks if a sector is allocated
+ StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) //perm:read
+ // StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector
+ StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) //perm:read
+ // StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found
+ // NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate
+ // expiration epoch
+ StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) //perm:read
+ // StateSectorExpiration returns epoch at which given sector will expire
+ StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) //perm:read
+ // StateSectorPartition finds deadline/partition with the specified sector
+ StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) //perm:read
+ // StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
+ //
+ // NOTE: If a replacing message is found on chain, this method will return
+ // a MsgLookup for the replacing message - the MsgLookup.Message will be a different
+ // CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
+ // result of the execution of the replacing message.
+ //
+ // If the caller wants to ensure that exactly the requested message was executed,
+ // they MUST check that MsgLookup.Message is equal to the provided 'cid'.
+ // Without this check both the requested and original message may appear as
+ // successfully executed on-chain, which may look like a double-spend.
+ //
+ // A replacing message is a message with a different CID, any of Gas values, and
+ // different signature, but with all other parameters matching (source/destination,
+ // nonce, params, etc.)
+ StateSearchMsg(context.Context, cid.Cid) (*api.MsgLookup, error) //perm:read
+ // StateSearchMsgLimited looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed
+ //
+ // NOTE: If a replacing message is found on chain, this method will return
+ // a MsgLookup for the replacing message - the MsgLookup.Message will be a different
+ // CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
+ // result of the execution of the replacing message.
+ //
+ // If the caller wants to ensure that exactly the requested message was executed,
+ // they MUST check that MsgLookup.Message is equal to the provided 'cid'.
+ // Without this check both the requested and original message may appear as
+ // successfully executed on-chain, which may look like a double-spend.
+ //
+ // A replacing message is a message with a different CID, any of Gas values, and
+ // different signature, but with all other parameters matching (source/destination,
+ // nonce, params, etc.)
+ StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*api.MsgLookup, error) //perm:read
+ // StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
+ // message arrives on chain, and gets to the indicated confidence depth.
+ //
+ // NOTE: If a replacing message is found on chain, this method will return
+ // a MsgLookup for the replacing message - the MsgLookup.Message will be a different
+ // CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
+ // result of the execution of the replacing message.
+ //
+ // If the caller wants to ensure that exactly the requested message was executed,
+ // they MUST check that MsgLookup.Message is equal to the provided 'cid'.
+ // Without this check both the requested and original message may appear as
+ // successfully executed on-chain, which may look like a double-spend.
+ //
+ // A replacing message is a message with a different CID, any of Gas values, and
+ // different signature, but with all other parameters matching (source/destination,
+ // nonce, params, etc.)
+ StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) //perm:read
+ // StateWaitMsgLimited looks back up to limit epochs in the chain for a message.
+ // If not found, it blocks until the message arrives on chain, and gets to the
+ // indicated confidence depth.
+ //
+ // NOTE: If a replacing message is found on chain, this method will return
+ // a MsgLookup for the replacing message - the MsgLookup.Message will be a different
+ // CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
+ // result of the execution of the replacing message.
+ //
+ // If the caller wants to ensure that exactly the requested message was executed,
+ // they MUST check that MsgLookup.Message is equal to the provided 'cid'.
+ // Without this check both the requested and original message may appear as
+ // successfully executed on-chain, which may look like a double-spend.
+ //
+ // A replacing message is a message with a different CID, any of Gas values, and
+ // different signature, but with all other parameters matching (source/destination,
+ // nonce, params, etc.)
+ StateWaitMsgLimited(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch) (*api.MsgLookup, error) //perm:read
+ // StateListMiners returns the addresses of every miner that has claimed power in the Power Actor
+ StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
+ // StateListActors returns the addresses of every actor in the state
+ StateListActors(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
+ // StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market
+ StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) //perm:read
+ // StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market
+ StateMarketParticipants(context.Context, types.TipSetKey) (map[string]api.MarketBalance, error) //perm:read
+ // StateMarketDeals returns information about every deal in the Storage Market
+ StateMarketDeals(context.Context, types.TipSetKey) (map[string]api.MarketDeal, error) //perm:read
+ // StateMarketStorageDeal returns information about the indicated deal
+ StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) //perm:read
+ // StateLookupID retrieves the ID address of the given address
+ StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
+ // StateAccountKey returns the public key address of the given ID address
+ StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
+ // StateChangedActors returns all the actors whose states change between the two given state CIDs
+ // TODO: Should this take tipset keys instead?
+ StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) //perm:read
+ // StateGetReceipt returns the message receipt for the given message or for a
+ // matching gas-repriced replacing message
+ //
+ // NOTE: If the requested message was replaced, this method will return the receipt
+ // for the replacing message - if the caller needs the receipt for exactly the
+ // requested message, use StateSearchMsg().Receipt, and check that MsgLookup.Message
+ // is matching the requested CID
+ //
+ // DEPRECATED: Use StateSearchMsg, this method won't be supported in v1 API
+ StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) //perm:read
+ // StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set
+ StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) //perm:read
+ // StateCompute is a flexible command that applies the given messages on the given tipset.
+ // The messages are run as though the VM were at the provided height.
+ //
+ // When called, StateCompute will:
+ // - Load the provided tipset, or use the current chain head if not provided
+ // - Compute the tipset state of the provided tipset on top of the parent state
+ // - (note that this step runs before vmheight is applied to the execution)
+ // - Execute state upgrade if any were scheduled at the epoch, or in null
+ // blocks preceding the tipset
+ // - Call the cron actor on null blocks preceding the tipset
+ // - For each block in the tipset
+ // - Apply messages in blocks in the specified
+ // - Award block reward by calling the reward actor
+ // - Call the cron actor for the current epoch
+ // - If the specified vmheight is higher than the current epoch, apply any
+ // needed state upgrades to the state
+ // - Apply the specified messages to the state
+ //
+ // The vmheight parameter sets VM execution epoch, and can be used to simulate
+ // message execution in different network versions. If the specified vmheight
+ // epoch is higher than the epoch of the specified tipset, any state upgrades
+ // until the vmheight will be executed on the state before applying messages
+ // specified by the user.
+ //
+ // Note that the initial tipset state computation is not affected by the
+ // vmheight parameter - only the messages in the `apply` set are
+ //
+ // If the caller wants to simply compute the state, vmheight should be set to
+ // the epoch of the specified tipset.
+ //
+ // Messages in the `apply` parameter must have the correct nonces, and gas
+ // values set.
+ StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) //perm:read
+ // StateVerifierStatus returns the data cap for the given address.
+ // Returns nil if there is no entry in the data cap table for the
+ // address.
+ StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
+ // StateVerifiedClientStatus returns the data cap for the given address.
+ // Returns nil if there is no entry in the data cap table for the
+ // address.
+ StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
+ // StateVerifiedRegistryRootKey returns the address of the Verified Registry's root key
+ StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) //perm:read
+ // StateDealProviderCollateralBounds returns the min and max collateral a storage provider
+ // can issue. It takes the deal size and verified status as parameters.
+ StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (api.DealCollateralBounds, error) //perm:read
+
+ // StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset.
+ // This is not used anywhere in the protocol itself, and is only for external consumption.
+ StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) //perm:read
+ // StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset.
+ // This is the value reported by the runtime interface to actors code.
+ StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (api.CirculatingSupply, error) //perm:read
+ // StateNetworkVersion returns the network version at the given tipset
+ StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) //perm:read
+
+ // MethodGroup: Msig
+ // The Msig methods are used to interact with multisig wallets on the
+ // filecoin network
+
+ // MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent
+ MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
+ // MsigGetVestingSchedule returns the vesting details of a given multisig.
+ MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (api.MsigVesting, error) //perm:read
+ // MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
+ // It takes the following params: <multisig address>, <start epoch>, <end epoch>
+ MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) //perm:read
+
+ //MsigGetPending returns pending transactions for the given multisig
+ //wallet. Once pending transactions are fully approved, they will no longer
+ //appear here.
+ MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error) //perm:read
+
+ // MsigCreate creates a multisig wallet
+ // It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>
+ //<initial balance>, <sender address of the create msg>, <gas price>
+ MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) //perm:sign
+ // MsigPropose proposes a multisig message
+ // It takes the following params: <multisig address>, <recipient address>, <value to transfer>,
+ // <sender address of the propose msg>, <method to call in the proposed message>, <params to include in the proposed message>
+ MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
+
+ // MsigApprove approves a previously-proposed multisig message by transaction ID
+ // It takes the following params: <multisig address>, <proposed transaction ID>
+ MsigApprove(context.Context, address.Address, uint64, address.Address) (cid.Cid, error) //perm:sign
+
+ // MsigApproveTxnHash approves a previously-proposed multisig message, specified
+ // using both transaction ID and a hash of the parameters used in the
+ // proposal. This method of approval can be used to ensure you only approve
+ // exactly the transaction you think you are.
+ // It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
+ // <sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message>
+ MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
+
+ // MsigCancel cancels a previously-proposed multisig message
+ // It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>,
+ // <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
+ MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
+ // MsigAddPropose proposes adding a signer in the multisig
+ // It takes the following params: <multisig address>, <sender address of the propose msg>,
+ // <new signer>, <whether the number of required signers should be increased>
+ MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign
+ // MsigAddApprove approves a previously proposed AddSigner message
+ // It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
+ // <proposer address>, <new signer>, <whether the number of required signers should be increased>
+ MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign
+ // MsigAddCancel cancels a previously proposed AddSigner message
+ // It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
+ // <new signer>, <whether the number of required signers should be increased>
+ MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) //perm:sign
+ // MsigSwapPropose proposes swapping 2 signers in the multisig
+ // It takes the following params: <multisig address>, <sender address of the propose msg>,
+ // <old signer>, <new signer>
+ MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign
+ // MsigSwapApprove approves a previously proposed SwapSigner
+ // It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
+ // <proposer address>, <old signer>, <new signer>
+ MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign
+ // MsigSwapCancel cancels a previously proposed SwapSigner message
+ // It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
+ // <old signer>, <new signer>
+ MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) //perm:sign
+
+ // MsigRemoveSigner proposes the removal of a signer from the multisig.
+ // It accepts the multisig to make the change on, the proposer address to
+ // send the message from, the address to be removed, and a boolean
+ // indicating whether or not the signing threshold should be lowered by one
+ // along with the address removal.
+ MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) //perm:sign
+
+ // MarketAddBalance adds funds to the market actor
+ MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
+ // MarketGetReserved gets the amount of funds that are currently reserved for the address
+ MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) //perm:sign
+ // MarketReserveFunds reserves funds for a deal
+ MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
+ // MarketReleaseFunds releases funds reserved by MarketReserveFunds
+ MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error //perm:sign
+ // MarketWithdraw withdraws unlocked funds from the market actor
+ MarketWithdraw(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
+
+ // MethodGroup: Paych
+ // The Paych methods are for interacting with and managing payment channels
+
+ PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) //perm:sign
+ PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error) //perm:sign
+ PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) //perm:sign
+ PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) //perm:sign
+ PaychList(context.Context) ([]address.Address, error) //perm:read
+ PaychStatus(context.Context, address.Address) (*api.PaychStatus, error) //perm:read
+ PaychSettle(context.Context, address.Address) (cid.Cid, error) //perm:sign
+ PaychCollect(context.Context, address.Address) (cid.Cid, error) //perm:sign
+ PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) //perm:sign
+ PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) //perm:sign
+ PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error //perm:read
+ PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) //perm:read
+ PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*api.VoucherCreateResult, error) //perm:sign
+ PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) //perm:write
+ PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) //perm:write
+ PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) //perm:sign
+
+ // CreateBackup creates node backup under the specified file name. The
+ // method requires that the lotus daemon is running with the
+ // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
+ // the path specified when calling CreateBackup is within the base path
+ CreateBackup(ctx context.Context, fpath string) error //perm:admin
+}
diff --git a/api/v0api/gateway.go b/api/v0api/gateway.go
new file mode 100644
index 00000000000..18a5ec7d6e6
--- /dev/null
+++ b/api/v0api/gateway.go
@@ -0,0 +1,69 @@
+package v0api
+
+import (
+ "context"
+
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/filecoin-project/go-state-types/network"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+// MODIFYING THE API INTERFACE
+//
+// NOTE: This is the V0 (Stable) API - when adding methods to this interface,
+// you'll need to make sure they are also present on the V1 (Unstable) API
+//
+// This API is implemented in `v1_wrapper.go` as a compatibility layer backed
+// by the V1 api
+//
+// When adding / changing methods in this file:
+// * Do the change here
+// * Adjust implementation in `node/impl/`
+// * Run `make gen` - this will:
+// * Generate proxy structs
+// * Generate mocks
+// * Generate markdown docs
+// * Generate openrpc blobs
+
+type Gateway interface {
+ ChainHasObj(context.Context, cid.Cid) (bool, error)
+ ChainHead(ctx context.Context) (*types.TipSet, error)
+ ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error)
+ ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
+ ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error)
+ ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
+ ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
+ ChainReadObj(context.Context, cid.Cid) ([]byte, error)
+ GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
+ MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
+ MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
+ MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
+ MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error)
+ StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
+ StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
+ StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
+ StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
+ StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
+ StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
+ StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
+ StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error)
+ StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error)
+ StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
+ StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
+ StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
+ StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
+ StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
+ StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
+ StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error)
+ WalletBalance(context.Context, address.Address) (types.BigInt, error)
+ Version(context.Context) (api.APIVersion, error)
+}
+
+var _ Gateway = *new(FullNode)
diff --git a/api/v0api/latest.go b/api/v0api/latest.go
new file mode 100644
index 00000000000..d423f57bc86
--- /dev/null
+++ b/api/v0api/latest.go
@@ -0,0 +1,32 @@
+package v0api
+
+import (
+ "github.com/filecoin-project/lotus/api"
+)
+
+type Common = api.Common
+type Net = api.Net
+type CommonNet = api.CommonNet
+
+type CommonStruct = api.CommonStruct
+type CommonStub = api.CommonStub
+type NetStruct = api.NetStruct
+type NetStub = api.NetStub
+type CommonNetStruct = api.CommonNetStruct
+type CommonNetStub = api.CommonNetStub
+
+type StorageMiner = api.StorageMiner
+type StorageMinerStruct = api.StorageMinerStruct
+
+type Worker = api.Worker
+type WorkerStruct = api.WorkerStruct
+
+type Wallet = api.Wallet
+
+func PermissionedStorMinerAPI(a StorageMiner) StorageMiner {
+ return api.PermissionedStorMinerAPI(a)
+}
+
+func PermissionedWorkerAPI(a Worker) Worker {
+ return api.PermissionedWorkerAPI(a)
+}
diff --git a/api/v0api/permissioned.go b/api/v0api/permissioned.go
new file mode 100644
index 00000000000..ad64bc29ede
--- /dev/null
+++ b/api/v0api/permissioned.go
@@ -0,0 +1,13 @@
+package v0api
+
+import (
+ "github.com/filecoin-project/go-jsonrpc/auth"
+ "github.com/filecoin-project/lotus/api"
+)
+
+func PermissionedFullAPI(a FullNode) FullNode {
+ var out FullNodeStruct
+ auth.PermissionedProxy(api.AllPermissions, api.DefaultPerms, a, &out.Internal)
+ auth.PermissionedProxy(api.AllPermissions, api.DefaultPerms, a, &out.CommonStruct.Internal)
+ return &out
+}
diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go
new file mode 100644
index 00000000000..4cb96b53edf
--- /dev/null
+++ b/api/v0api/proxy_gen.go
@@ -0,0 +1,2132 @@
+// Code generated by github.com/filecoin-project/lotus/gen/api. DO NOT EDIT.
+
+package v0api
+
+import (
+ "context"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ datatransfer "github.com/filecoin-project/go-data-transfer"
+ "github.com/filecoin-project/go-fil-markets/retrievalmarket"
+ "github.com/filecoin-project/go-fil-markets/storagemarket"
+ "github.com/filecoin-project/go-multistore"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/filecoin-project/lotus/api"
+ apitypes "github.com/filecoin-project/lotus/api/types"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
+ "github.com/filecoin-project/lotus/chain/types"
+ marketevents "github.com/filecoin-project/lotus/markets/loggers"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+ "github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p-core/peer"
+ "golang.org/x/xerrors"
+)
+
+type FullNodeStruct struct {
+ CommonStruct
+
+ NetStruct
+
+ Internal struct {
+ BeaconGetEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
+
+ ChainDeleteObj func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`
+
+ ChainExport func(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) `perm:"read"`
+
+ ChainGetBlock func(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) `perm:"read"`
+
+ ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) `perm:"read"`
+
+ ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) `perm:"read"`
+
+ ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `perm:"read"`
+
+ ChainGetMessagesInTipset func(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) `perm:"read"`
+
+ ChainGetNode func(p0 context.Context, p1 string) (*api.IpldObject, error) `perm:"read"`
+
+ ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]api.Message, error) `perm:"read"`
+
+ ChainGetParentReceipts func(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"`
+
+ ChainGetPath func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"`
+
+ ChainGetRandomnessFromBeacon func(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) `perm:"read"`
+
+ ChainGetRandomnessFromTickets func(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) `perm:"read"`
+
+ ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) `perm:"read"`
+
+ ChainGetTipSetByHeight func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) `perm:"read"`
+
+ ChainHasObj func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"read"`
+
+ ChainHead func(p0 context.Context) (*types.TipSet, error) `perm:"read"`
+
+ ChainNotify func(p0 context.Context) (<-chan []*api.HeadChange, error) `perm:"read"`
+
+ ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) `perm:"read"`
+
+ ChainSetHead func(p0 context.Context, p1 types.TipSetKey) error `perm:"admin"`
+
+ ChainStatObj func(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (api.ObjStat, error) `perm:"read"`
+
+ ChainTipSetWeight func(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ ClientCalcCommP func(p0 context.Context, p1 string) (*api.CommPRet, error) `perm:"write"`
+
+ ClientCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
+
+ ClientCancelRetrievalDeal func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"write"`
+
+ ClientDataTransferUpdates func(p0 context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`
+
+ ClientDealPieceCID func(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) `perm:"read"`
+
+ ClientDealSize func(p0 context.Context, p1 cid.Cid) (api.DataSize, error) `perm:"read"`
+
+ ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) `perm:"read"`
+
+ ClientGenCar func(p0 context.Context, p1 api.FileRef, p2 string) error `perm:"write"`
+
+ ClientGetDealInfo func(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) `perm:"read"`
+
+ ClientGetDealStatus func(p0 context.Context, p1 uint64) (string, error) `perm:"read"`
+
+ ClientGetDealUpdates func(p0 context.Context) (<-chan api.DealInfo, error) `perm:"write"`
+
+ ClientGetRetrievalUpdates func(p0 context.Context) (<-chan api.RetrievalInfo, error) `perm:"write"`
+
+ ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"`
+
+ ClientImport func(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) `perm:"admin"`
+
+ ClientListDataTransfers func(p0 context.Context) ([]api.DataTransferChannel, error) `perm:"write"`
+
+ ClientListDeals func(p0 context.Context) ([]api.DealInfo, error) `perm:"write"`
+
+ ClientListImports func(p0 context.Context) ([]api.Import, error) `perm:"write"`
+
+ ClientListRetrievals func(p0 context.Context) ([]api.RetrievalInfo, error) `perm:"write"`
+
+ ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) `perm:"read"`
+
+ ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"`
+
+ ClientRemoveImport func(p0 context.Context, p1 multistore.StoreID) error `perm:"admin"`
+
+ ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
+
+ ClientRetrieve func(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error `perm:"admin"`
+
+ ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"`
+
+ ClientRetrieveWithEvents func(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
+
+ ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
+
+ ClientStatelessDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"write"`
+
+ CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`
+
+ GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ GasEstimateGasLimit func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) `perm:"read"`
+
+ GasEstimateGasPremium func(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `perm:"read"`
+
+ MarketAddBalance func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"`
+
+ MarketGetReserved func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"sign"`
+
+ MarketReleaseFunds func(p0 context.Context, p1 address.Address, p2 types.BigInt) error `perm:"sign"`
+
+ MarketReserveFunds func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"`
+
+ MarketWithdraw func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"`
+
+ MinerCreateBlock func(p0 context.Context, p1 *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"`
+
+ MinerGetBaseInfo func(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"`
+
+ MpoolBatchPush func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"`
+
+ MpoolBatchPushMessage func(p0 context.Context, p1 []*types.Message, p2 *api.MessageSendSpec) ([]*types.SignedMessage, error) `perm:"sign"`
+
+ MpoolBatchPushUntrusted func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"`
+
+ MpoolClear func(p0 context.Context, p1 bool) error `perm:"write"`
+
+ MpoolGetConfig func(p0 context.Context) (*types.MpoolConfig, error) `perm:"read"`
+
+ MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"read"`
+
+ MpoolPending func(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"`
+
+ MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `perm:"write"`
+
+ MpoolPushMessage func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec) (*types.SignedMessage, error) `perm:"sign"`
+
+ MpoolPushUntrusted func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `perm:"write"`
+
+ MpoolSelect func(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) `perm:"read"`
+
+ MpoolSetConfig func(p0 context.Context, p1 *types.MpoolConfig) error `perm:"admin"`
+
+ MpoolSub func(p0 context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"`
+
+ MsigAddApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) `perm:"sign"`
+
+ MsigAddCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) `perm:"sign"`
+
+ MsigAddPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) `perm:"sign"`
+
+ MsigApprove func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) `perm:"sign"`
+
+ MsigApproveTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) `perm:"sign"`
+
+ MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) `perm:"sign"`
+
+ MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) `perm:"sign"`
+
+ MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ MsigGetPending func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) `perm:"read"`
+
+ MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ MsigGetVestingSchedule func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MsigVesting, error) `perm:"read"`
+
+ MsigPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) `perm:"sign"`
+
+ MsigRemoveSigner func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) `perm:"sign"`
+
+ MsigSwapApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) `perm:"sign"`
+
+ MsigSwapCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) `perm:"sign"`
+
+ MsigSwapPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) `perm:"sign"`
+
+ PaychAllocateLane func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"sign"`
+
+ PaychAvailableFunds func(p0 context.Context, p1 address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
+
+ PaychAvailableFundsByFromTo func(p0 context.Context, p1 address.Address, p2 address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
+
+ PaychCollect func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"`
+
+ PaychGet func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*api.ChannelInfo, error) `perm:"sign"`
+
+ PaychGetWaitReady func(p0 context.Context, p1 cid.Cid) (address.Address, error) `perm:"sign"`
+
+ PaychList func(p0 context.Context) ([]address.Address, error) `perm:"read"`
+
+ PaychNewPayment func(p0 context.Context, p1 address.Address, p2 address.Address, p3 []api.VoucherSpec) (*api.PaymentInfo, error) `perm:"sign"`
+
+ PaychSettle func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"`
+
+ PaychStatus func(p0 context.Context, p1 address.Address) (*api.PaychStatus, error) `perm:"read"`
+
+ PaychVoucherAdd func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) `perm:"write"`
+
+ PaychVoucherCheckSpendable func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) `perm:"read"`
+
+ PaychVoucherCheckValid func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error `perm:"read"`
+
+ PaychVoucherCreate func(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*api.VoucherCreateResult, error) `perm:"sign"`
+
+ PaychVoucherList func(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) `perm:"write"`
+
+ PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"`
+
+ StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
+
+ StateAllMinerFaults func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*api.Fault, error) `perm:"read"`
+
+ StateCall func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) `perm:"read"`
+
+ StateChangedActors func(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) `perm:"read"`
+
+ StateCirculatingSupply func(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) `perm:"read"`
+
+ StateCompute func(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*api.ComputeStateOutput, error) `perm:"read"`
+
+ StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) `perm:"read"`
+
+ StateDecodeParams func(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) `perm:"read"`
+
+ StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"`
+
+ StateGetReceipt func(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) `perm:"read"`
+
+ StateListActors func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `perm:"read"`
+
+ StateListMessages func(p0 context.Context, p1 *api.MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"`
+
+ StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `perm:"read"`
+
+ StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
+
+ StateMarketBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) `perm:"read"`
+
+ StateMarketDeals func(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketDeal, error) `perm:"read"`
+
+ StateMarketParticipants func(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketBalance, error) `perm:"read"`
+
+ StateMarketStorageDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) `perm:"read"`
+
+ StateMinerActiveSectors func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"`
+
+ StateMinerAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ StateMinerDeadlines func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]api.Deadline, error) `perm:"read"`
+
+ StateMinerFaults func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) `perm:"read"`
+
+ StateMinerInfo func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) `perm:"read"`
+
+ StateMinerInitialPledgeCollateral func(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ StateMinerPartitions func(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]api.Partition, error) `perm:"read"`
+
+ StateMinerPower func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) `perm:"read"`
+
+ StateMinerPreCommitDepositForPower func(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
+
+ StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) `perm:"read"`
+
+ StateMinerRecoveries func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) `perm:"read"`
+
+ StateMinerSectorAllocated func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) `perm:"read"`
+
+ StateMinerSectorCount func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) `perm:"read"`
+
+ StateMinerSectors func(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"`
+
+ StateNetworkName func(p0 context.Context) (dtypes.NetworkName, error) `perm:"read"`
+
+ StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) `perm:"read"`
+
+ StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.ActorState, error) `perm:"read"`
+
+ StateReplay func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) `perm:"read"`
+
+ StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) `perm:"read"`
+
+ StateSearchMsgLimited func(p0 context.Context, p1 cid.Cid, p2 abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"`
+
+ StateSectorExpiration func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) `perm:"read"`
+
+ StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"`
+
+ StateSectorPartition func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) `perm:"read"`
+
+ StateSectorPreCommitInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"`
+
+ StateVMCirculatingSupplyInternal func(p0 context.Context, p1 types.TipSetKey) (api.CirculatingSupply, error) `perm:"read"`
+
+ StateVerifiedClientStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) `perm:"read"`
+
+ StateVerifiedRegistryRootKey func(p0 context.Context, p1 types.TipSetKey) (address.Address, error) `perm:"read"`
+
+ StateVerifierStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) `perm:"read"`
+
+ StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) `perm:"read"`
+
+ StateWaitMsgLimited func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"`
+
+ SyncCheckBad func(p0 context.Context, p1 cid.Cid) (string, error) `perm:"read"`
+
+ SyncCheckpoint func(p0 context.Context, p1 types.TipSetKey) error `perm:"admin"`
+
+ SyncIncomingBlocks func(p0 context.Context) (<-chan *types.BlockHeader, error) `perm:"read"`
+
+ SyncMarkBad func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`
+
+ SyncState func(p0 context.Context) (*api.SyncState, error) `perm:"read"`
+
+ SyncSubmitBlock func(p0 context.Context, p1 *types.BlockMsg) error `perm:"write"`
+
+ SyncUnmarkAllBad func(p0 context.Context) error `perm:"admin"`
+
+ SyncUnmarkBad func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`
+
+ SyncValidateTipset func(p0 context.Context, p1 types.TipSetKey) (bool, error) `perm:"read"`
+
+ WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"read"`
+
+ WalletDefaultAddress func(p0 context.Context) (address.Address, error) `perm:"write"`
+
+ WalletDelete func(p0 context.Context, p1 address.Address) error `perm:"admin"`
+
+ WalletExport func(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) `perm:"admin"`
+
+ WalletHas func(p0 context.Context, p1 address.Address) (bool, error) `perm:"write"`
+
+ WalletImport func(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) `perm:"admin"`
+
+ WalletList func(p0 context.Context) ([]address.Address, error) `perm:"write"`
+
+ WalletNew func(p0 context.Context, p1 types.KeyType) (address.Address, error) `perm:"write"`
+
+ WalletSetDefault func(p0 context.Context, p1 address.Address) error `perm:"write"`
+
+ WalletSign func(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) `perm:"sign"`
+
+ WalletSignMessage func(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) `perm:"sign"`
+
+ WalletValidateAddress func(p0 context.Context, p1 string) (address.Address, error) `perm:"read"`
+
+ WalletVerify func(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) `perm:"read"`
+ }
+}
+
+type FullNodeStub struct {
+ CommonStub
+
+ NetStub
+}
+
+type GatewayStruct struct {
+ Internal struct {
+ ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) ``
+
+ ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) ``
+
+ ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) ``
+
+ ChainGetTipSetByHeight func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) ``
+
+ ChainHasObj func(p0 context.Context, p1 cid.Cid) (bool, error) ``
+
+ ChainHead func(p0 context.Context) (*types.TipSet, error) ``
+
+ ChainNotify func(p0 context.Context) (<-chan []*api.HeadChange, error) ``
+
+ ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) ``
+
+ GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``
+
+ MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) ``
+
+ MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) ``
+
+ MsigGetPending func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) ``
+
+ MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) ``
+
+ StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``
+
+ StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) ``
+
+ StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``
+
+ StateGetReceipt func(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) ``
+
+ StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) ``
+
+ StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``
+
+ StateMarketBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) ``
+
+ StateMarketStorageDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) ``
+
+ StateMinerInfo func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) ``
+
+ StateMinerPower func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) ``
+
+ StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) ``
+
+ StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (network.Version, error) ``
+
+ StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) ``
+
+ StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) ``
+
+ StateVerifiedClientStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) ``
+
+ StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) ``
+
+ Version func(p0 context.Context) (api.APIVersion, error) ``
+
+ WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) ``
+ }
+}
+
+type GatewayStub struct {
+}
+
+func (s *FullNodeStruct) BeaconGetEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) {
+ return s.Internal.BeaconGetEntry(p0, p1)
+}
+
+func (s *FullNodeStub) BeaconGetEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainDeleteObj(p0 context.Context, p1 cid.Cid) error {
+ return s.Internal.ChainDeleteObj(p0, p1)
+}
+
+func (s *FullNodeStub) ChainDeleteObj(p0 context.Context, p1 cid.Cid) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) {
+ return s.Internal.ChainExport(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) {
+ return s.Internal.ChainGetBlock(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) {
+ return s.Internal.ChainGetBlockMessages(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) {
+ return s.Internal.ChainGetGenesis(p0)
+}
+
+func (s *FullNodeStub) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) {
+ return s.Internal.ChainGetMessage(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) {
+ return s.Internal.ChainGetMessagesInTipset(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) {
+ return *new([]api.Message), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetNode(p0 context.Context, p1 string) (*api.IpldObject, error) {
+ return s.Internal.ChainGetNode(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetNode(p0 context.Context, p1 string) (*api.IpldObject, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]api.Message, error) {
+ return s.Internal.ChainGetParentMessages(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]api.Message, error) {
+ return *new([]api.Message), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) {
+ return s.Internal.ChainGetParentReceipts(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) {
+ return *new([]*types.MessageReceipt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*api.HeadChange, error) {
+ return s.Internal.ChainGetPath(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*api.HeadChange, error) {
+ return *new([]*api.HeadChange), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetRandomnessFromBeacon(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) {
+ return s.Internal.ChainGetRandomnessFromBeacon(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) ChainGetRandomnessFromBeacon(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) {
+ return *new(abi.Randomness), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) {
+ return s.Internal.ChainGetRandomnessFromTickets(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) {
+ return *new(abi.Randomness), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) {
+ return s.Internal.ChainGetTipSet(p0, p1)
+}
+
+func (s *FullNodeStub) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) {
+ return s.Internal.ChainGetTipSetByHeight(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) {
+ return s.Internal.ChainHasObj(p0, p1)
+}
+
+func (s *FullNodeStub) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainHead(p0 context.Context) (*types.TipSet, error) {
+ return s.Internal.ChainHead(p0)
+}
+
+func (s *FullNodeStub) ChainHead(p0 context.Context) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainNotify(p0 context.Context) (<-chan []*api.HeadChange, error) {
+ return s.Internal.ChainNotify(p0)
+}
+
+func (s *FullNodeStub) ChainNotify(p0 context.Context) (<-chan []*api.HeadChange, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) {
+ return s.Internal.ChainReadObj(p0, p1)
+}
+
+func (s *FullNodeStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) {
+ return *new([]byte), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainSetHead(p0 context.Context, p1 types.TipSetKey) error {
+ return s.Internal.ChainSetHead(p0, p1)
+}
+
+func (s *FullNodeStub) ChainSetHead(p0 context.Context, p1 types.TipSetKey) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainStatObj(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (api.ObjStat, error) {
+ return s.Internal.ChainStatObj(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ChainStatObj(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (api.ObjStat, error) {
+ return *new(api.ObjStat), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.ChainTipSetWeight(p0, p1)
+}
+
+func (s *FullNodeStub) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientCalcCommP(p0 context.Context, p1 string) (*api.CommPRet, error) {
+ return s.Internal.ClientCalcCommP(p0, p1)
+}
+
+func (s *FullNodeStub) ClientCalcCommP(p0 context.Context, p1 string) (*api.CommPRet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+ return s.Internal.ClientCancelDataTransfer(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error {
+ return s.Internal.ClientCancelRetrievalDeal(p0, p1)
+}
+
+func (s *FullNodeStub) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientDataTransferUpdates(p0 context.Context) (<-chan api.DataTransferChannel, error) {
+ return s.Internal.ClientDataTransferUpdates(p0)
+}
+
+func (s *FullNodeStub) ClientDataTransferUpdates(p0 context.Context) (<-chan api.DataTransferChannel, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) {
+ return s.Internal.ClientDealPieceCID(p0, p1)
+}
+
+func (s *FullNodeStub) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) {
+ return *new(api.DataCIDSize), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientDealSize(p0 context.Context, p1 cid.Cid) (api.DataSize, error) {
+ return s.Internal.ClientDealSize(p0, p1)
+}
+
+func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (api.DataSize, error) {
+ return *new(api.DataSize), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) {
+ return s.Internal.ClientFindData(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) {
+ return *new([]api.QueryOffer), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientGenCar(p0 context.Context, p1 api.FileRef, p2 string) error {
+ return s.Internal.ClientGenCar(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientGenCar(p0 context.Context, p1 api.FileRef, p2 string) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) {
+ return s.Internal.ClientGetDealInfo(p0, p1)
+}
+
+func (s *FullNodeStub) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) {
+ return s.Internal.ClientGetDealStatus(p0, p1)
+}
+
+func (s *FullNodeStub) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) {
+ return "", xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientGetDealUpdates(p0 context.Context) (<-chan api.DealInfo, error) {
+ return s.Internal.ClientGetDealUpdates(p0)
+}
+
+func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan api.DealInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) {
+ return s.Internal.ClientGetRetrievalUpdates(p0)
+}
+
+func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {
+ return s.Internal.ClientHasLocal(p0, p1)
+}
+
+func (s *FullNodeStub) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientImport(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) {
+ return s.Internal.ClientImport(p0, p1)
+}
+
+func (s *FullNodeStub) ClientImport(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientListDataTransfers(p0 context.Context) ([]api.DataTransferChannel, error) {
+ return s.Internal.ClientListDataTransfers(p0)
+}
+
+func (s *FullNodeStub) ClientListDataTransfers(p0 context.Context) ([]api.DataTransferChannel, error) {
+ return *new([]api.DataTransferChannel), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientListDeals(p0 context.Context) ([]api.DealInfo, error) {
+ return s.Internal.ClientListDeals(p0)
+}
+
+func (s *FullNodeStub) ClientListDeals(p0 context.Context) ([]api.DealInfo, error) {
+ return *new([]api.DealInfo), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientListImports(p0 context.Context) ([]api.Import, error) {
+ return s.Internal.ClientListImports(p0)
+}
+
+func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]api.Import, error) {
+ return *new([]api.Import), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) {
+ return s.Internal.ClientListRetrievals(p0)
+}
+
+func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) {
+ return *new([]api.RetrievalInfo), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) {
+ return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) {
+ return *new(api.QueryOffer), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) {
+ return s.Internal.ClientQueryAsk(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error {
+ return s.Internal.ClientRemoveImport(p0, p1)
+}
+
+func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+ return s.Internal.ClientRestartDataTransfer(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error {
+ return s.Internal.ClientRetrieve(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error {
+ return s.Internal.ClientRetrieveTryRestartInsufficientFunds(p0, p1)
+}
+
+func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
+ return s.Internal.ClientRetrieveWithEvents(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) {
+ return s.Internal.ClientStartDeal(p0, p1)
+}
+
+func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) {
+ return s.Internal.ClientStatelessDeal(p0, p1)
+}
+
+func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error {
+ return s.Internal.CreateBackup(p0, p1)
+}
+
+func (s *FullNodeStub) CreateBackup(p0 context.Context, p1 string) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) GasEstimateFeeCap(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.GasEstimateFeeCap(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) GasEstimateFeeCap(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) GasEstimateGasLimit(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) {
+ return s.Internal.GasEstimateGasLimit(p0, p1, p2)
+}
+
+func (s *FullNodeStub) GasEstimateGasLimit(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) {
+ return 0, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.GasEstimateGasPremium(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) {
+ return s.Internal.GasEstimateMessageGas(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) {
+ return s.Internal.MarketAddBalance(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MarketGetReserved(p0 context.Context, p1 address.Address) (types.BigInt, error) {
+ return s.Internal.MarketGetReserved(p0, p1)
+}
+
+func (s *FullNodeStub) MarketGetReserved(p0 context.Context, p1 address.Address) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MarketReleaseFunds(p0 context.Context, p1 address.Address, p2 types.BigInt) error {
+ return s.Internal.MarketReleaseFunds(p0, p1, p2)
+}
+
+func (s *FullNodeStub) MarketReleaseFunds(p0 context.Context, p1 address.Address, p2 types.BigInt) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MarketReserveFunds(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) {
+ return s.Internal.MarketReserveFunds(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) MarketReserveFunds(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MarketWithdraw(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) {
+ return s.Internal.MarketWithdraw(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) MarketWithdraw(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MinerCreateBlock(p0 context.Context, p1 *api.BlockTemplate) (*types.BlockMsg, error) {
+ return s.Internal.MinerCreateBlock(p0, p1)
+}
+
+func (s *FullNodeStub) MinerCreateBlock(p0 context.Context, p1 *api.BlockTemplate) (*types.BlockMsg, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) {
+ return s.Internal.MinerGetBaseInfo(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolBatchPush(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) {
+ return s.Internal.MpoolBatchPush(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolBatchPush(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) {
+ return *new([]cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolBatchPushMessage(p0 context.Context, p1 []*types.Message, p2 *api.MessageSendSpec) ([]*types.SignedMessage, error) {
+ return s.Internal.MpoolBatchPushMessage(p0, p1, p2)
+}
+
+func (s *FullNodeStub) MpoolBatchPushMessage(p0 context.Context, p1 []*types.Message, p2 *api.MessageSendSpec) ([]*types.SignedMessage, error) {
+ return *new([]*types.SignedMessage), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) {
+ return s.Internal.MpoolBatchPushUntrusted(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) {
+ return *new([]cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolClear(p0 context.Context, p1 bool) error {
+ return s.Internal.MpoolClear(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolClear(p0 context.Context, p1 bool) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolGetConfig(p0 context.Context) (*types.MpoolConfig, error) {
+ return s.Internal.MpoolGetConfig(p0)
+}
+
+func (s *FullNodeStub) MpoolGetConfig(p0 context.Context) (*types.MpoolConfig, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
+ return s.Internal.MpoolGetNonce(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
+ return 0, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) {
+ return s.Internal.MpoolPending(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) {
+ return *new([]*types.SignedMessage), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
+ return s.Internal.MpoolPush(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolPushMessage(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec) (*types.SignedMessage, error) {
+ return s.Internal.MpoolPushMessage(p0, p1, p2)
+}
+
+func (s *FullNodeStub) MpoolPushMessage(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec) (*types.SignedMessage, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolPushUntrusted(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
+ return s.Internal.MpoolPushUntrusted(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolPushUntrusted(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolSelect(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) {
+ return s.Internal.MpoolSelect(p0, p1, p2)
+}
+
+func (s *FullNodeStub) MpoolSelect(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) {
+ return *new([]*types.SignedMessage), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolSetConfig(p0 context.Context, p1 *types.MpoolConfig) error {
+ return s.Internal.MpoolSetConfig(p0, p1)
+}
+
+func (s *FullNodeStub) MpoolSetConfig(p0 context.Context, p1 *types.MpoolConfig) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MpoolSub(p0 context.Context) (<-chan api.MpoolUpdate, error) {
+ return s.Internal.MpoolSub(p0)
+}
+
+func (s *FullNodeStub) MpoolSub(p0 context.Context) (<-chan api.MpoolUpdate, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) {
+ return s.Internal.MsigAddApprove(p0, p1, p2, p3, p4, p5, p6)
+}
+
+func (s *FullNodeStub) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) {
+ return s.Internal.MsigAddCancel(p0, p1, p2, p3, p4, p5)
+}
+
+func (s *FullNodeStub) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) {
+ return s.Internal.MsigAddPropose(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) {
+ return s.Internal.MsigApprove(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) {
+ return s.Internal.MsigApproveTxnHash(p0, p1, p2, p3, p4, p5, p6, p7, p8)
+}
+
+func (s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) {
+ return s.Internal.MsigCancel(p0, p1, p2, p3, p4, p5, p6, p7)
+}
+
+func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) {
+ return s.Internal.MsigCreate(p0, p1, p2, p3, p4, p5, p6)
+}
+
+func (s *FullNodeStub) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.MsigGetAvailableBalance(p0, p1, p2)
+}
+
+func (s *FullNodeStub) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) {
+ return s.Internal.MsigGetPending(p0, p1, p2)
+}
+
+func (s *FullNodeStub) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) {
+ return *new([]*api.MsigTransaction), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.MsigGetVested(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MsigVesting, error) {
+ return s.Internal.MsigGetVestingSchedule(p0, p1, p2)
+}
+
+func (s *FullNodeStub) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MsigVesting, error) {
+ return *new(api.MsigVesting), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) {
+ return s.Internal.MsigPropose(p0, p1, p2, p3, p4, p5, p6)
+}
+
+func (s *FullNodeStub) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) {
+ return s.Internal.MsigRemoveSigner(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) {
+ return s.Internal.MsigSwapApprove(p0, p1, p2, p3, p4, p5, p6)
+}
+
+func (s *FullNodeStub) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) {
+ return s.Internal.MsigSwapCancel(p0, p1, p2, p3, p4, p5)
+}
+
+func (s *FullNodeStub) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) {
+ return s.Internal.MsigSwapPropose(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) {
+ return s.Internal.PaychAllocateLane(p0, p1)
+}
+
+func (s *FullNodeStub) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) {
+ return 0, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychAvailableFunds(p0 context.Context, p1 address.Address) (*api.ChannelAvailableFunds, error) {
+ return s.Internal.PaychAvailableFunds(p0, p1)
+}
+
+func (s *FullNodeStub) PaychAvailableFunds(p0 context.Context, p1 address.Address) (*api.ChannelAvailableFunds, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychAvailableFundsByFromTo(p0 context.Context, p1 address.Address, p2 address.Address) (*api.ChannelAvailableFunds, error) {
+ return s.Internal.PaychAvailableFundsByFromTo(p0, p1, p2)
+}
+
+func (s *FullNodeStub) PaychAvailableFundsByFromTo(p0 context.Context, p1 address.Address, p2 address.Address) (*api.ChannelAvailableFunds, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychCollect(p0 context.Context, p1 address.Address) (cid.Cid, error) {
+ return s.Internal.PaychCollect(p0, p1)
+}
+
+func (s *FullNodeStub) PaychCollect(p0 context.Context, p1 address.Address) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*api.ChannelInfo, error) {
+ return s.Internal.PaychGet(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*api.ChannelInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychGetWaitReady(p0 context.Context, p1 cid.Cid) (address.Address, error) {
+ return s.Internal.PaychGetWaitReady(p0, p1)
+}
+
+func (s *FullNodeStub) PaychGetWaitReady(p0 context.Context, p1 cid.Cid) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychList(p0 context.Context) ([]address.Address, error) {
+ return s.Internal.PaychList(p0)
+}
+
+func (s *FullNodeStub) PaychList(p0 context.Context) ([]address.Address, error) {
+ return *new([]address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychNewPayment(p0 context.Context, p1 address.Address, p2 address.Address, p3 []api.VoucherSpec) (*api.PaymentInfo, error) {
+ return s.Internal.PaychNewPayment(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) PaychNewPayment(p0 context.Context, p1 address.Address, p2 address.Address, p3 []api.VoucherSpec) (*api.PaymentInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychSettle(p0 context.Context, p1 address.Address) (cid.Cid, error) {
+ return s.Internal.PaychSettle(p0, p1)
+}
+
+func (s *FullNodeStub) PaychSettle(p0 context.Context, p1 address.Address) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychStatus(p0 context.Context, p1 address.Address) (*api.PaychStatus, error) {
+ return s.Internal.PaychStatus(p0, p1)
+}
+
+func (s *FullNodeStub) PaychStatus(p0 context.Context, p1 address.Address) (*api.PaychStatus, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychVoucherAdd(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) {
+ return s.Internal.PaychVoucherAdd(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) PaychVoucherAdd(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychVoucherCheckSpendable(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) {
+ return s.Internal.PaychVoucherCheckSpendable(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) PaychVoucherCheckSpendable(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychVoucherCheckValid(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error {
+ return s.Internal.PaychVoucherCheckValid(p0, p1, p2)
+}
+
+func (s *FullNodeStub) PaychVoucherCheckValid(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychVoucherCreate(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*api.VoucherCreateResult, error) {
+ return s.Internal.PaychVoucherCreate(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) PaychVoucherCreate(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*api.VoucherCreateResult, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) {
+ return s.Internal.PaychVoucherList(p0, p1)
+}
+
+func (s *FullNodeStub) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) {
+ return *new([]*paych.SignedVoucher), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) PaychVoucherSubmit(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) {
+ return s.Internal.PaychVoucherSubmit(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) PaychVoucherSubmit(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return s.Internal.StateAccountKey(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateAllMinerFaults(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*api.Fault, error) {
+ return s.Internal.StateAllMinerFaults(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateAllMinerFaults(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*api.Fault, error) {
+ return *new([]*api.Fault), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) {
+ return s.Internal.StateCall(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateChangedActors(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) {
+ return s.Internal.StateChangedActors(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateChangedActors(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) {
+ return *new(map[string]types.Actor), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) {
+ return s.Internal.StateCirculatingSupply(p0, p1)
+}
+
+func (s *FullNodeStub) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) {
+ return *new(abi.TokenAmount), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateCompute(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*api.ComputeStateOutput, error) {
+ return s.Internal.StateCompute(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateCompute(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*api.ComputeStateOutput, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) {
+ return s.Internal.StateDealProviderCollateralBounds(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) {
+ return *new(api.DealCollateralBounds), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) {
+ return s.Internal.StateDecodeParams(p0, p1, p2, p3, p4)
+}
+
+func (s *FullNodeStub) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) {
+ return s.Internal.StateGetActor(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) {
+ return s.Internal.StateGetReceipt(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateListActors(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
+ return s.Internal.StateListActors(p0, p1)
+}
+
+func (s *FullNodeStub) StateListActors(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
+ return *new([]address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateListMessages(p0 context.Context, p1 *api.MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) {
+ return s.Internal.StateListMessages(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateListMessages(p0 context.Context, p1 *api.MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) {
+ return *new([]cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
+ return s.Internal.StateListMiners(p0, p1)
+}
+
+func (s *FullNodeStub) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
+ return *new([]address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return s.Internal.StateLookupID(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) {
+ return s.Internal.StateMarketBalance(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) {
+ return *new(api.MarketBalance), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMarketDeals(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketDeal, error) {
+ return s.Internal.StateMarketDeals(p0, p1)
+}
+
+func (s *FullNodeStub) StateMarketDeals(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketDeal, error) {
+ return *new(map[string]api.MarketDeal), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMarketParticipants(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketBalance, error) {
+ return s.Internal.StateMarketParticipants(p0, p1)
+}
+
+func (s *FullNodeStub) StateMarketParticipants(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketBalance, error) {
+ return *new(map[string]api.MarketBalance), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) {
+ return s.Internal.StateMarketStorageDeal(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerActiveSectors(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
+ return s.Internal.StateMinerActiveSectors(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerActiveSectors(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
+ return *new([]*miner.SectorOnChainInfo), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.StateMinerAvailableBalance(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]api.Deadline, error) {
+ return s.Internal.StateMinerDeadlines(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]api.Deadline, error) {
+ return *new([]api.Deadline), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerFaults(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) {
+ return s.Internal.StateMinerFaults(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerFaults(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) {
+ return *new(bitfield.BitField), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) {
+ return s.Internal.StateMinerInfo(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) {
+ return *new(miner.MinerInfo), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerInitialPledgeCollateral(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.StateMinerInitialPledgeCollateral(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateMinerInitialPledgeCollateral(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerPartitions(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]api.Partition, error) {
+ return s.Internal.StateMinerPartitions(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateMinerPartitions(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]api.Partition, error) {
+ return *new([]api.Partition), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) {
+ return s.Internal.StateMinerPower(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerPreCommitDepositForPower(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.StateMinerPreCommitDepositForPower(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateMinerPreCommitDepositForPower(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) {
+ return s.Internal.StateMinerProvingDeadline(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerRecoveries(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) {
+ return s.Internal.StateMinerRecoveries(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerRecoveries(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) {
+ return *new(bitfield.BitField), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerSectorAllocated(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) {
+ return s.Internal.StateMinerSectorAllocated(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateMinerSectorAllocated(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) {
+ return s.Internal.StateMinerSectorCount(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) {
+ return *new(api.MinerSectors), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateMinerSectors(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
+ return s.Internal.StateMinerSectors(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateMinerSectors(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
+ return *new([]*miner.SectorOnChainInfo), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
+ return s.Internal.StateNetworkName(p0)
+}
+
+func (s *FullNodeStub) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
+ return *new(dtypes.NetworkName), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) {
+ return s.Internal.StateNetworkVersion(p0, p1)
+}
+
+func (s *FullNodeStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) {
+ return *new(apitypes.NetworkVersion), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.ActorState, error) {
+ return s.Internal.StateReadState(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.ActorState, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) {
+ return s.Internal.StateReplay(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) {
+ return s.Internal.StateSearchMsg(p0, p1)
+}
+
+func (s *FullNodeStub) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateSearchMsgLimited(p0 context.Context, p1 cid.Cid, p2 abi.ChainEpoch) (*api.MsgLookup, error) {
+ return s.Internal.StateSearchMsgLimited(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateSearchMsgLimited(p0 context.Context, p1 cid.Cid, p2 abi.ChainEpoch) (*api.MsgLookup, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) {
+ return s.Internal.StateSectorExpiration(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) {
+ return s.Internal.StateSectorGetInfo(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) {
+ return s.Internal.StateSectorPartition(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateSectorPreCommitInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
+ return s.Internal.StateSectorPreCommitInfo(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateSectorPreCommitInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
+ return *new(miner.SectorPreCommitOnChainInfo), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateVMCirculatingSupplyInternal(p0 context.Context, p1 types.TipSetKey) (api.CirculatingSupply, error) {
+ return s.Internal.StateVMCirculatingSupplyInternal(p0, p1)
+}
+
+func (s *FullNodeStub) StateVMCirculatingSupplyInternal(p0 context.Context, p1 types.TipSetKey) (api.CirculatingSupply, error) {
+ return *new(api.CirculatingSupply), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
+ return s.Internal.StateVerifiedClientStatus(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateVerifiedRegistryRootKey(p0 context.Context, p1 types.TipSetKey) (address.Address, error) {
+ return s.Internal.StateVerifiedRegistryRootKey(p0, p1)
+}
+
+func (s *FullNodeStub) StateVerifiedRegistryRootKey(p0 context.Context, p1 types.TipSetKey) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
+ return s.Internal.StateVerifierStatus(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) {
+ return s.Internal.StateWaitMsg(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) StateWaitMsgLimited(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch) (*api.MsgLookup, error) {
+ return s.Internal.StateWaitMsgLimited(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) StateWaitMsgLimited(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch) (*api.MsgLookup, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) {
+ return s.Internal.SyncCheckBad(p0, p1)
+}
+
+func (s *FullNodeStub) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) {
+ return "", xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncCheckpoint(p0 context.Context, p1 types.TipSetKey) error {
+ return s.Internal.SyncCheckpoint(p0, p1)
+}
+
+func (s *FullNodeStub) SyncCheckpoint(p0 context.Context, p1 types.TipSetKey) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncIncomingBlocks(p0 context.Context) (<-chan *types.BlockHeader, error) {
+ return s.Internal.SyncIncomingBlocks(p0)
+}
+
+func (s *FullNodeStub) SyncIncomingBlocks(p0 context.Context) (<-chan *types.BlockHeader, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncMarkBad(p0 context.Context, p1 cid.Cid) error {
+ return s.Internal.SyncMarkBad(p0, p1)
+}
+
+func (s *FullNodeStub) SyncMarkBad(p0 context.Context, p1 cid.Cid) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncState(p0 context.Context) (*api.SyncState, error) {
+ return s.Internal.SyncState(p0)
+}
+
+func (s *FullNodeStub) SyncState(p0 context.Context) (*api.SyncState, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncSubmitBlock(p0 context.Context, p1 *types.BlockMsg) error {
+ return s.Internal.SyncSubmitBlock(p0, p1)
+}
+
+func (s *FullNodeStub) SyncSubmitBlock(p0 context.Context, p1 *types.BlockMsg) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncUnmarkAllBad(p0 context.Context) error {
+ return s.Internal.SyncUnmarkAllBad(p0)
+}
+
+func (s *FullNodeStub) SyncUnmarkAllBad(p0 context.Context) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncUnmarkBad(p0 context.Context, p1 cid.Cid) error {
+ return s.Internal.SyncUnmarkBad(p0, p1)
+}
+
+func (s *FullNodeStub) SyncUnmarkBad(p0 context.Context, p1 cid.Cid) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) SyncValidateTipset(p0 context.Context, p1 types.TipSetKey) (bool, error) {
+ return s.Internal.SyncValidateTipset(p0, p1)
+}
+
+func (s *FullNodeStub) SyncValidateTipset(p0 context.Context, p1 types.TipSetKey) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) {
+ return s.Internal.WalletBalance(p0, p1)
+}
+
+func (s *FullNodeStub) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletDefaultAddress(p0 context.Context) (address.Address, error) {
+ return s.Internal.WalletDefaultAddress(p0)
+}
+
+func (s *FullNodeStub) WalletDefaultAddress(p0 context.Context) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletDelete(p0 context.Context, p1 address.Address) error {
+ return s.Internal.WalletDelete(p0, p1)
+}
+
+func (s *FullNodeStub) WalletDelete(p0 context.Context, p1 address.Address) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) {
+ return s.Internal.WalletExport(p0, p1)
+}
+
+func (s *FullNodeStub) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletHas(p0 context.Context, p1 address.Address) (bool, error) {
+ return s.Internal.WalletHas(p0, p1)
+}
+
+func (s *FullNodeStub) WalletHas(p0 context.Context, p1 address.Address) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) {
+ return s.Internal.WalletImport(p0, p1)
+}
+
+func (s *FullNodeStub) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletList(p0 context.Context) ([]address.Address, error) {
+ return s.Internal.WalletList(p0)
+}
+
+func (s *FullNodeStub) WalletList(p0 context.Context) ([]address.Address, error) {
+ return *new([]address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) {
+ return s.Internal.WalletNew(p0, p1)
+}
+
+func (s *FullNodeStub) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletSetDefault(p0 context.Context, p1 address.Address) error {
+ return s.Internal.WalletSetDefault(p0, p1)
+}
+
+func (s *FullNodeStub) WalletSetDefault(p0 context.Context, p1 address.Address) error {
+ return xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletSign(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) {
+ return s.Internal.WalletSign(p0, p1, p2)
+}
+
+func (s *FullNodeStub) WalletSign(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletSignMessage(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) {
+ return s.Internal.WalletSignMessage(p0, p1, p2)
+}
+
+func (s *FullNodeStub) WalletSignMessage(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletValidateAddress(p0 context.Context, p1 string) (address.Address, error) {
+ return s.Internal.WalletValidateAddress(p0, p1)
+}
+
+func (s *FullNodeStub) WalletValidateAddress(p0 context.Context, p1 string) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *FullNodeStruct) WalletVerify(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) {
+ return s.Internal.WalletVerify(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) WalletVerify(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) {
+ return s.Internal.ChainGetBlockMessages(p0, p1)
+}
+
+func (s *GatewayStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) {
+ return s.Internal.ChainGetMessage(p0, p1)
+}
+
+func (s *GatewayStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) {
+ return s.Internal.ChainGetTipSet(p0, p1)
+}
+
+func (s *GatewayStub) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) {
+ return s.Internal.ChainGetTipSetByHeight(p0, p1, p2)
+}
+
+func (s *GatewayStub) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) {
+ return s.Internal.ChainHasObj(p0, p1)
+}
+
+func (s *GatewayStub) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) {
+ return false, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainHead(p0 context.Context) (*types.TipSet, error) {
+ return s.Internal.ChainHead(p0)
+}
+
+func (s *GatewayStub) ChainHead(p0 context.Context) (*types.TipSet, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainNotify(p0 context.Context) (<-chan []*api.HeadChange, error) {
+ return s.Internal.ChainNotify(p0)
+}
+
+func (s *GatewayStub) ChainNotify(p0 context.Context) (<-chan []*api.HeadChange, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) {
+ return s.Internal.ChainReadObj(p0, p1)
+}
+
+func (s *GatewayStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) {
+ return *new([]byte), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) {
+ return s.Internal.GasEstimateMessageGas(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
+ return s.Internal.MpoolPush(p0, p1)
+}
+
+func (s *GatewayStub) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
+ return *new(cid.Cid), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.MsigGetAvailableBalance(p0, p1, p2)
+}
+
+func (s *GatewayStub) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) {
+ return s.Internal.MsigGetPending(p0, p1, p2)
+}
+
+func (s *GatewayStub) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) {
+ return *new([]*api.MsigTransaction), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) {
+ return s.Internal.MsigGetVested(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return s.Internal.StateAccountKey(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) {
+ return s.Internal.StateDealProviderCollateralBounds(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) {
+ return *new(api.DealCollateralBounds), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) {
+ return s.Internal.StateGetActor(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) {
+ return s.Internal.StateGetReceipt(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
+ return s.Internal.StateListMiners(p0, p1)
+}
+
+func (s *GatewayStub) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
+ return *new([]address.Address), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return s.Internal.StateLookupID(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
+ return *new(address.Address), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) {
+ return s.Internal.StateMarketBalance(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) {
+ return *new(api.MarketBalance), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) {
+ return s.Internal.StateMarketStorageDeal(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) {
+ return s.Internal.StateMinerInfo(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) {
+ return *new(miner.MinerInfo), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) {
+ return s.Internal.StateMinerPower(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) {
+ return s.Internal.StateMinerProvingDeadline(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) {
+ return s.Internal.StateNetworkVersion(p0, p1)
+}
+
+func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) {
+ return *new(network.Version), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) {
+ return s.Internal.StateSearchMsg(p0, p1)
+}
+
+func (s *GatewayStub) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) {
+ return s.Internal.StateSectorGetInfo(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
+ return s.Internal.StateVerifiedClientStatus(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) {
+ return s.Internal.StateWaitMsg(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) {
+ return nil, xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) Version(p0 context.Context) (api.APIVersion, error) {
+ return s.Internal.Version(p0)
+}
+
+func (s *GatewayStub) Version(p0 context.Context) (api.APIVersion, error) {
+ return *new(api.APIVersion), xerrors.New("method not supported")
+}
+
+func (s *GatewayStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) {
+ return s.Internal.WalletBalance(p0, p1)
+}
+
+func (s *GatewayStub) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) {
+ return *new(types.BigInt), xerrors.New("method not supported")
+}
+
+var _ FullNode = new(FullNodeStruct)
+var _ Gateway = new(GatewayStruct)
diff --git a/api/v0api/v0mocks/mock_full.go b/api/v0api/v0mocks/mock_full.go
new file mode 100644
index 00000000000..6a4ef690ed1
--- /dev/null
+++ b/api/v0api/v0mocks/mock_full.go
@@ -0,0 +1,3079 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/filecoin-project/lotus/api/v0api (interfaces: FullNode)
+
+// Package v0mocks is a generated GoMock package.
+package v0mocks
+
+import (
+ context "context"
+ reflect "reflect"
+
+ address "github.com/filecoin-project/go-address"
+ bitfield "github.com/filecoin-project/go-bitfield"
+ datatransfer "github.com/filecoin-project/go-data-transfer"
+ retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
+ storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket"
+ auth "github.com/filecoin-project/go-jsonrpc/auth"
+ multistore "github.com/filecoin-project/go-multistore"
+ abi "github.com/filecoin-project/go-state-types/abi"
+ big "github.com/filecoin-project/go-state-types/big"
+ crypto "github.com/filecoin-project/go-state-types/crypto"
+ dline "github.com/filecoin-project/go-state-types/dline"
+ network "github.com/filecoin-project/go-state-types/network"
+ api "github.com/filecoin-project/lotus/api"
+ apitypes "github.com/filecoin-project/lotus/api/types"
+ miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ types "github.com/filecoin-project/lotus/chain/types"
+ marketevents "github.com/filecoin-project/lotus/markets/loggers"
+ dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
+ gomock "github.com/golang/mock/gomock"
+ uuid "github.com/google/uuid"
+ cid "github.com/ipfs/go-cid"
+ metrics "github.com/libp2p/go-libp2p-core/metrics"
+ network0 "github.com/libp2p/go-libp2p-core/network"
+ peer "github.com/libp2p/go-libp2p-core/peer"
+ protocol "github.com/libp2p/go-libp2p-core/protocol"
+)
+
+// MockFullNode is a mock of FullNode interface.
+type MockFullNode struct {
+ ctrl *gomock.Controller
+ recorder *MockFullNodeMockRecorder
+}
+
+// MockFullNodeMockRecorder is the mock recorder for MockFullNode.
+type MockFullNodeMockRecorder struct {
+ mock *MockFullNode
+}
+
+// NewMockFullNode creates a new mock instance.
+func NewMockFullNode(ctrl *gomock.Controller) *MockFullNode {
+ mock := &MockFullNode{ctrl: ctrl}
+ mock.recorder = &MockFullNodeMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockFullNode) EXPECT() *MockFullNodeMockRecorder {
+ return m.recorder
+}
+
+// AuthNew mocks base method.
+func (m *MockFullNode) AuthNew(arg0 context.Context, arg1 []auth.Permission) ([]byte, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AuthNew", arg0, arg1)
+ ret0, _ := ret[0].([]byte)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AuthNew indicates an expected call of AuthNew.
+func (mr *MockFullNodeMockRecorder) AuthNew(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthNew", reflect.TypeOf((*MockFullNode)(nil).AuthNew), arg0, arg1)
+}
+
+// AuthVerify mocks base method.
+func (m *MockFullNode) AuthVerify(arg0 context.Context, arg1 string) ([]auth.Permission, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AuthVerify", arg0, arg1)
+ ret0, _ := ret[0].([]auth.Permission)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AuthVerify indicates an expected call of AuthVerify.
+func (mr *MockFullNodeMockRecorder) AuthVerify(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthVerify", reflect.TypeOf((*MockFullNode)(nil).AuthVerify), arg0, arg1)
+}
+
+// BeaconGetEntry mocks base method.
+func (m *MockFullNode) BeaconGetEntry(arg0 context.Context, arg1 abi.ChainEpoch) (*types.BeaconEntry, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BeaconGetEntry", arg0, arg1)
+ ret0, _ := ret[0].(*types.BeaconEntry)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BeaconGetEntry indicates an expected call of BeaconGetEntry.
+func (mr *MockFullNodeMockRecorder) BeaconGetEntry(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeaconGetEntry", reflect.TypeOf((*MockFullNode)(nil).BeaconGetEntry), arg0, arg1)
+}
+
+// ChainDeleteObj mocks base method.
+func (m *MockFullNode) ChainDeleteObj(arg0 context.Context, arg1 cid.Cid) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainDeleteObj", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ChainDeleteObj indicates an expected call of ChainDeleteObj.
+func (mr *MockFullNodeMockRecorder) ChainDeleteObj(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainDeleteObj", reflect.TypeOf((*MockFullNode)(nil).ChainDeleteObj), arg0, arg1)
+}
+
+// ChainExport mocks base method.
+func (m *MockFullNode) ChainExport(arg0 context.Context, arg1 abi.ChainEpoch, arg2 bool, arg3 types.TipSetKey) (<-chan []byte, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainExport", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(<-chan []byte)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainExport indicates an expected call of ChainExport.
+func (mr *MockFullNodeMockRecorder) ChainExport(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExport", reflect.TypeOf((*MockFullNode)(nil).ChainExport), arg0, arg1, arg2, arg3)
+}
+
+// ChainGetBlock mocks base method.
+func (m *MockFullNode) ChainGetBlock(arg0 context.Context, arg1 cid.Cid) (*types.BlockHeader, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetBlock", arg0, arg1)
+ ret0, _ := ret[0].(*types.BlockHeader)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetBlock indicates an expected call of ChainGetBlock.
+func (mr *MockFullNodeMockRecorder) ChainGetBlock(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlock", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlock), arg0, arg1)
+}
+
+// ChainGetBlockMessages mocks base method.
+func (m *MockFullNode) ChainGetBlockMessages(arg0 context.Context, arg1 cid.Cid) (*api.BlockMessages, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetBlockMessages", arg0, arg1)
+ ret0, _ := ret[0].(*api.BlockMessages)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetBlockMessages indicates an expected call of ChainGetBlockMessages.
+func (mr *MockFullNodeMockRecorder) ChainGetBlockMessages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlockMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlockMessages), arg0, arg1)
+}
+
+// ChainGetGenesis mocks base method.
+func (m *MockFullNode) ChainGetGenesis(arg0 context.Context) (*types.TipSet, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetGenesis", arg0)
+ ret0, _ := ret[0].(*types.TipSet)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetGenesis indicates an expected call of ChainGetGenesis.
+func (mr *MockFullNodeMockRecorder) ChainGetGenesis(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetGenesis", reflect.TypeOf((*MockFullNode)(nil).ChainGetGenesis), arg0)
+}
+
+// ChainGetMessage mocks base method.
+func (m *MockFullNode) ChainGetMessage(arg0 context.Context, arg1 cid.Cid) (*types.Message, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetMessage", arg0, arg1)
+ ret0, _ := ret[0].(*types.Message)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetMessage indicates an expected call of ChainGetMessage.
+func (mr *MockFullNodeMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1)
+}
+
+// ChainGetMessagesInTipset mocks base method.
+func (m *MockFullNode) ChainGetMessagesInTipset(arg0 context.Context, arg1 types.TipSetKey) ([]api.Message, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetMessagesInTipset", arg0, arg1)
+ ret0, _ := ret[0].([]api.Message)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetMessagesInTipset indicates an expected call of ChainGetMessagesInTipset.
+func (mr *MockFullNodeMockRecorder) ChainGetMessagesInTipset(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessagesInTipset", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessagesInTipset), arg0, arg1)
+}
+
+// ChainGetNode mocks base method.
+func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.IpldObject, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetNode", arg0, arg1)
+ ret0, _ := ret[0].(*api.IpldObject)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetNode indicates an expected call of ChainGetNode.
+func (mr *MockFullNodeMockRecorder) ChainGetNode(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetNode", reflect.TypeOf((*MockFullNode)(nil).ChainGetNode), arg0, arg1)
+}
+
+// ChainGetParentMessages mocks base method.
+func (m *MockFullNode) ChainGetParentMessages(arg0 context.Context, arg1 cid.Cid) ([]api.Message, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetParentMessages", arg0, arg1)
+ ret0, _ := ret[0].([]api.Message)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetParentMessages indicates an expected call of ChainGetParentMessages.
+func (mr *MockFullNodeMockRecorder) ChainGetParentMessages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentMessages), arg0, arg1)
+}
+
+// ChainGetParentReceipts mocks base method.
+func (m *MockFullNode) ChainGetParentReceipts(arg0 context.Context, arg1 cid.Cid) ([]*types.MessageReceipt, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetParentReceipts", arg0, arg1)
+ ret0, _ := ret[0].([]*types.MessageReceipt)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetParentReceipts indicates an expected call of ChainGetParentReceipts.
+func (mr *MockFullNodeMockRecorder) ChainGetParentReceipts(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentReceipts", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentReceipts), arg0, arg1)
+}
+
+// ChainGetPath mocks base method.
+func (m *MockFullNode) ChainGetPath(arg0 context.Context, arg1, arg2 types.TipSetKey) ([]*api.HeadChange, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetPath", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*api.HeadChange)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetPath indicates an expected call of ChainGetPath.
+func (mr *MockFullNodeMockRecorder) ChainGetPath(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetPath", reflect.TypeOf((*MockFullNode)(nil).ChainGetPath), arg0, arg1, arg2)
+}
+
+// ChainGetRandomnessFromBeacon mocks base method.
+func (m *MockFullNode) ChainGetRandomnessFromBeacon(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetRandomnessFromBeacon", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(abi.Randomness)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetRandomnessFromBeacon indicates an expected call of ChainGetRandomnessFromBeacon.
+func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromBeacon(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromBeacon", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromBeacon), arg0, arg1, arg2, arg3, arg4)
+}
+
+// ChainGetRandomnessFromTickets mocks base method.
+func (m *MockFullNode) ChainGetRandomnessFromTickets(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetRandomnessFromTickets", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(abi.Randomness)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetRandomnessFromTickets indicates an expected call of ChainGetRandomnessFromTickets.
+func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromTickets(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromTickets", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromTickets), arg0, arg1, arg2, arg3, arg4)
+}
+
+// ChainGetTipSet mocks base method.
+func (m *MockFullNode) ChainGetTipSet(arg0 context.Context, arg1 types.TipSetKey) (*types.TipSet, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetTipSet", arg0, arg1)
+ ret0, _ := ret[0].(*types.TipSet)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetTipSet indicates an expected call of ChainGetTipSet.
+func (mr *MockFullNodeMockRecorder) ChainGetTipSet(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSet", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSet), arg0, arg1)
+}
+
+// ChainGetTipSetByHeight mocks base method.
+func (m *MockFullNode) ChainGetTipSetByHeight(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) (*types.TipSet, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainGetTipSetByHeight", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*types.TipSet)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainGetTipSetByHeight indicates an expected call of ChainGetTipSetByHeight.
+func (mr *MockFullNodeMockRecorder) ChainGetTipSetByHeight(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSetByHeight", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSetByHeight), arg0, arg1, arg2)
+}
+
+// ChainHasObj mocks base method.
+func (m *MockFullNode) ChainHasObj(arg0 context.Context, arg1 cid.Cid) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainHasObj", arg0, arg1)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainHasObj indicates an expected call of ChainHasObj.
+func (mr *MockFullNodeMockRecorder) ChainHasObj(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHasObj", reflect.TypeOf((*MockFullNode)(nil).ChainHasObj), arg0, arg1)
+}
+
+// ChainHead mocks base method.
+func (m *MockFullNode) ChainHead(arg0 context.Context) (*types.TipSet, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainHead", arg0)
+ ret0, _ := ret[0].(*types.TipSet)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainHead indicates an expected call of ChainHead.
+func (mr *MockFullNodeMockRecorder) ChainHead(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockFullNode)(nil).ChainHead), arg0)
+}
+
+// ChainNotify mocks base method.
+func (m *MockFullNode) ChainNotify(arg0 context.Context) (<-chan []*api.HeadChange, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainNotify", arg0)
+ ret0, _ := ret[0].(<-chan []*api.HeadChange)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainNotify indicates an expected call of ChainNotify.
+func (mr *MockFullNodeMockRecorder) ChainNotify(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainNotify", reflect.TypeOf((*MockFullNode)(nil).ChainNotify), arg0)
+}
+
+// ChainReadObj mocks base method.
+func (m *MockFullNode) ChainReadObj(arg0 context.Context, arg1 cid.Cid) ([]byte, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainReadObj", arg0, arg1)
+ ret0, _ := ret[0].([]byte)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainReadObj indicates an expected call of ChainReadObj.
+func (mr *MockFullNodeMockRecorder) ChainReadObj(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainReadObj", reflect.TypeOf((*MockFullNode)(nil).ChainReadObj), arg0, arg1)
+}
+
+// ChainSetHead mocks base method.
+func (m *MockFullNode) ChainSetHead(arg0 context.Context, arg1 types.TipSetKey) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainSetHead", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ChainSetHead indicates an expected call of ChainSetHead.
+func (mr *MockFullNodeMockRecorder) ChainSetHead(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainSetHead", reflect.TypeOf((*MockFullNode)(nil).ChainSetHead), arg0, arg1)
+}
+
+// ChainStatObj mocks base method.
+func (m *MockFullNode) ChainStatObj(arg0 context.Context, arg1, arg2 cid.Cid) (api.ObjStat, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainStatObj", arg0, arg1, arg2)
+ ret0, _ := ret[0].(api.ObjStat)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainStatObj indicates an expected call of ChainStatObj.
+func (mr *MockFullNodeMockRecorder) ChainStatObj(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainStatObj", reflect.TypeOf((*MockFullNode)(nil).ChainStatObj), arg0, arg1, arg2)
+}
+
+// ChainTipSetWeight mocks base method.
+func (m *MockFullNode) ChainTipSetWeight(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ChainTipSetWeight", arg0, arg1)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ChainTipSetWeight indicates an expected call of ChainTipSetWeight.
+func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1)
+}
+
+// ClientCalcCommP mocks base method.
+func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api.CommPRet, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientCalcCommP", arg0, arg1)
+ ret0, _ := ret[0].(*api.CommPRet)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientCalcCommP indicates an expected call of ClientCalcCommP.
+func (mr *MockFullNodeMockRecorder) ClientCalcCommP(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCalcCommP", reflect.TypeOf((*MockFullNode)(nil).ClientCalcCommP), arg0, arg1)
+}
+
+// ClientCancelDataTransfer mocks base method.
+func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientCancelDataTransfer", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer.
+func (mr *MockFullNodeMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3)
+}
+
+// ClientCancelRetrievalDeal mocks base method.
+func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retrievalmarket.DealID) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientCancelRetrievalDeal", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal.
+func (mr *MockFullNodeMockRecorder) ClientCancelRetrievalDeal(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelRetrievalDeal", reflect.TypeOf((*MockFullNode)(nil).ClientCancelRetrievalDeal), arg0, arg1)
+}
+
+// ClientDataTransferUpdates mocks base method.
+func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan api.DataTransferChannel, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientDataTransferUpdates", arg0)
+ ret0, _ := ret[0].(<-chan api.DataTransferChannel)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates.
+func (mr *MockFullNodeMockRecorder) ClientDataTransferUpdates(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDataTransferUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientDataTransferUpdates), arg0)
+}
+
+// ClientDealPieceCID mocks base method.
+func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (api.DataCIDSize, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientDealPieceCID", arg0, arg1)
+ ret0, _ := ret[0].(api.DataCIDSize)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientDealPieceCID indicates an expected call of ClientDealPieceCID.
+func (mr *MockFullNodeMockRecorder) ClientDealPieceCID(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealPieceCID", reflect.TypeOf((*MockFullNode)(nil).ClientDealPieceCID), arg0, arg1)
+}
+
+// ClientDealSize mocks base method.
+func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.DataSize, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientDealSize", arg0, arg1)
+ ret0, _ := ret[0].(api.DataSize)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientDealSize indicates an expected call of ClientDealSize.
+func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1)
+}
+
+// ClientFindData mocks base method.
+func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientFindData", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]api.QueryOffer)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientFindData indicates an expected call of ClientFindData.
+func (mr *MockFullNodeMockRecorder) ClientFindData(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientFindData", reflect.TypeOf((*MockFullNode)(nil).ClientFindData), arg0, arg1, arg2)
+}
+
+// ClientGenCar mocks base method.
+func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientGenCar", arg0, arg1, arg2)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ClientGenCar indicates an expected call of ClientGenCar.
+func (mr *MockFullNodeMockRecorder) ClientGenCar(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGenCar", reflect.TypeOf((*MockFullNode)(nil).ClientGenCar), arg0, arg1, arg2)
+}
+
+// ClientGetDealInfo mocks base method.
+func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*api.DealInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientGetDealInfo", arg0, arg1)
+ ret0, _ := ret[0].(*api.DealInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientGetDealInfo indicates an expected call of ClientGetDealInfo.
+func (mr *MockFullNodeMockRecorder) ClientGetDealInfo(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealInfo", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealInfo), arg0, arg1)
+}
+
+// ClientGetDealStatus mocks base method.
+func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientGetDealStatus", arg0, arg1)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientGetDealStatus indicates an expected call of ClientGetDealStatus.
+func (mr *MockFullNodeMockRecorder) ClientGetDealStatus(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealStatus", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealStatus), arg0, arg1)
+}
+
+// ClientGetDealUpdates mocks base method.
+func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.DealInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientGetDealUpdates", arg0)
+ ret0, _ := ret[0].(<-chan api.DealInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates.
+func (mr *MockFullNodeMockRecorder) ClientGetDealUpdates(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealUpdates), arg0)
+}
+
+// ClientGetRetrievalUpdates mocks base method.
+func (m *MockFullNode) ClientGetRetrievalUpdates(arg0 context.Context) (<-chan api.RetrievalInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientGetRetrievalUpdates", arg0)
+ ret0, _ := ret[0].(<-chan api.RetrievalInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientGetRetrievalUpdates indicates an expected call of ClientGetRetrievalUpdates.
+func (mr *MockFullNodeMockRecorder) ClientGetRetrievalUpdates(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetRetrievalUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetRetrievalUpdates), arg0)
+}
+
+// ClientHasLocal mocks base method.
+func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientHasLocal", arg0, arg1)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientHasLocal indicates an expected call of ClientHasLocal.
+func (mr *MockFullNodeMockRecorder) ClientHasLocal(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientHasLocal", reflect.TypeOf((*MockFullNode)(nil).ClientHasLocal), arg0, arg1)
+}
+
+// ClientImport mocks base method.
+func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*api.ImportRes, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientImport", arg0, arg1)
+ ret0, _ := ret[0].(*api.ImportRes)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientImport indicates an expected call of ClientImport.
+func (mr *MockFullNodeMockRecorder) ClientImport(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientImport", reflect.TypeOf((*MockFullNode)(nil).ClientImport), arg0, arg1)
+}
+
+// ClientListDataTransfers mocks base method.
+func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.DataTransferChannel, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientListDataTransfers", arg0)
+ ret0, _ := ret[0].([]api.DataTransferChannel)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientListDataTransfers indicates an expected call of ClientListDataTransfers.
+func (mr *MockFullNodeMockRecorder) ClientListDataTransfers(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDataTransfers", reflect.TypeOf((*MockFullNode)(nil).ClientListDataTransfers), arg0)
+}
+
+// ClientListDeals mocks base method.
+func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientListDeals", arg0)
+ ret0, _ := ret[0].([]api.DealInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientListDeals indicates an expected call of ClientListDeals.
+func (mr *MockFullNodeMockRecorder) ClientListDeals(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDeals", reflect.TypeOf((*MockFullNode)(nil).ClientListDeals), arg0)
+}
+
+// ClientListImports mocks base method.
+func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientListImports", arg0)
+ ret0, _ := ret[0].([]api.Import)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientListImports indicates an expected call of ClientListImports.
+func (mr *MockFullNodeMockRecorder) ClientListImports(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListImports", reflect.TypeOf((*MockFullNode)(nil).ClientListImports), arg0)
+}
+
+// ClientListRetrievals mocks base method.
+func (m *MockFullNode) ClientListRetrievals(arg0 context.Context) ([]api.RetrievalInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientListRetrievals", arg0)
+ ret0, _ := ret[0].([]api.RetrievalInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientListRetrievals indicates an expected call of ClientListRetrievals.
+func (mr *MockFullNodeMockRecorder) ClientListRetrievals(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListRetrievals", reflect.TypeOf((*MockFullNode)(nil).ClientListRetrievals), arg0)
+}
+
+// ClientMinerQueryOffer mocks base method.
+func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 *cid.Cid) (api.QueryOffer, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientMinerQueryOffer", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(api.QueryOffer)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer.
+func (mr *MockFullNodeMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientMinerQueryOffer", reflect.TypeOf((*MockFullNode)(nil).ClientMinerQueryOffer), arg0, arg1, arg2, arg3)
+}
+
+// ClientQueryAsk mocks base method.
+func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*storagemarket.StorageAsk, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*storagemarket.StorageAsk)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientQueryAsk indicates an expected call of ClientQueryAsk.
+func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientQueryAsk", reflect.TypeOf((*MockFullNode)(nil).ClientQueryAsk), arg0, arg1, arg2)
+}
+
+// ClientRemoveImport mocks base method.
+func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 multistore.StoreID) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ClientRemoveImport indicates an expected call of ClientRemoveImport.
+func (mr *MockFullNodeMockRecorder) ClientRemoveImport(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRemoveImport", reflect.TypeOf((*MockFullNode)(nil).ClientRemoveImport), arg0, arg1)
+}
+
+// ClientRestartDataTransfer mocks base method.
+func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientRestartDataTransfer", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer.
+func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRestartDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientRestartDataTransfer), arg0, arg1, arg2, arg3)
+}
+
+// ClientRetrieve mocks base method.
+func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ClientRetrieve indicates an expected call of ClientRetrieve.
+func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2)
+}
+
+// ClientRetrieveTryRestartInsufficientFunds mocks base method.
+func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Context, arg1 address.Address) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientRetrieveTryRestartInsufficientFunds", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds.
+func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1)
+}
+
+// ClientRetrieveWithEvents mocks base method.
+func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2)
+ ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents.
+func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2)
+}
+
+// ClientStartDeal mocks base method.
+func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientStartDeal", arg0, arg1)
+ ret0, _ := ret[0].(*cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientStartDeal indicates an expected call of ClientStartDeal.
+func (mr *MockFullNodeMockRecorder) ClientStartDeal(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStartDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStartDeal), arg0, arg1)
+}
+
+// ClientStatelessDeal mocks base method.
+func (m *MockFullNode) ClientStatelessDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClientStatelessDeal", arg0, arg1)
+ ret0, _ := ret[0].(*cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ClientStatelessDeal indicates an expected call of ClientStatelessDeal.
+func (mr *MockFullNodeMockRecorder) ClientStatelessDeal(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStatelessDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStatelessDeal), arg0, arg1)
+}
+
+// Closing mocks base method.
+func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Closing", arg0)
+ ret0, _ := ret[0].(<-chan struct{})
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Closing indicates an expected call of Closing.
+func (mr *MockFullNodeMockRecorder) Closing(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Closing", reflect.TypeOf((*MockFullNode)(nil).Closing), arg0)
+}
+
+// CreateBackup mocks base method.
+func (m *MockFullNode) CreateBackup(arg0 context.Context, arg1 string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateBackup", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// CreateBackup indicates an expected call of CreateBackup.
+func (mr *MockFullNodeMockRecorder) CreateBackup(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBackup", reflect.TypeOf((*MockFullNode)(nil).CreateBackup), arg0, arg1)
+}
+
+// Discover mocks base method.
+func (m *MockFullNode) Discover(arg0 context.Context) (apitypes.OpenRPCDocument, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Discover", arg0)
+ ret0, _ := ret[0].(apitypes.OpenRPCDocument)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Discover indicates an expected call of Discover.
+func (mr *MockFullNodeMockRecorder) Discover(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discover", reflect.TypeOf((*MockFullNode)(nil).Discover), arg0)
+}
+
+// GasEstimateFeeCap mocks base method.
+func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *types.Message, arg2 int64, arg3 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GasEstimateFeeCap", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GasEstimateFeeCap indicates an expected call of GasEstimateFeeCap.
+func (mr *MockFullNodeMockRecorder) GasEstimateFeeCap(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateFeeCap", reflect.TypeOf((*MockFullNode)(nil).GasEstimateFeeCap), arg0, arg1, arg2, arg3)
+}
+
+// GasEstimateGasLimit mocks base method.
+func (m *MockFullNode) GasEstimateGasLimit(arg0 context.Context, arg1 *types.Message, arg2 types.TipSetKey) (int64, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GasEstimateGasLimit", arg0, arg1, arg2)
+ ret0, _ := ret[0].(int64)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GasEstimateGasLimit indicates an expected call of GasEstimateGasLimit.
+func (mr *MockFullNodeMockRecorder) GasEstimateGasLimit(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasLimit", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasLimit), arg0, arg1, arg2)
+}
+
+// GasEstimateGasPremium mocks base method.
+func (m *MockFullNode) GasEstimateGasPremium(arg0 context.Context, arg1 uint64, arg2 address.Address, arg3 int64, arg4 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GasEstimateGasPremium", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GasEstimateGasPremium indicates an expected call of GasEstimateGasPremium.
+func (mr *MockFullNodeMockRecorder) GasEstimateGasPremium(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasPremium", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasPremium), arg0, arg1, arg2, arg3, arg4)
+}
+
+// GasEstimateMessageGas mocks base method.
+func (m *MockFullNode) GasEstimateMessageGas(arg0 context.Context, arg1 *types.Message, arg2 *api.MessageSendSpec, arg3 types.TipSetKey) (*types.Message, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GasEstimateMessageGas", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*types.Message)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GasEstimateMessageGas indicates an expected call of GasEstimateMessageGas.
+func (mr *MockFullNodeMockRecorder) GasEstimateMessageGas(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3)
+}
+
+// ID mocks base method.
+func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ID", arg0)
+ ret0, _ := ret[0].(peer.ID)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ID indicates an expected call of ID.
+func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockFullNode)(nil).ID), arg0)
+}
+
+// LogList mocks base method.
+func (m *MockFullNode) LogList(arg0 context.Context) ([]string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "LogList", arg0)
+ ret0, _ := ret[0].([]string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// LogList indicates an expected call of LogList.
+func (mr *MockFullNodeMockRecorder) LogList(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogList", reflect.TypeOf((*MockFullNode)(nil).LogList), arg0)
+}
+
+// LogSetLevel mocks base method.
+func (m *MockFullNode) LogSetLevel(arg0 context.Context, arg1, arg2 string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "LogSetLevel", arg0, arg1, arg2)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// LogSetLevel indicates an expected call of LogSetLevel.
+func (mr *MockFullNodeMockRecorder) LogSetLevel(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogSetLevel", reflect.TypeOf((*MockFullNode)(nil).LogSetLevel), arg0, arg1, arg2)
+}
+
+// MarketAddBalance mocks base method.
+func (m *MockFullNode) MarketAddBalance(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MarketAddBalance", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MarketAddBalance indicates an expected call of MarketAddBalance.
+func (mr *MockFullNodeMockRecorder) MarketAddBalance(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketAddBalance", reflect.TypeOf((*MockFullNode)(nil).MarketAddBalance), arg0, arg1, arg2, arg3)
+}
+
+// MarketGetReserved mocks base method.
+func (m *MockFullNode) MarketGetReserved(arg0 context.Context, arg1 address.Address) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MarketGetReserved", arg0, arg1)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MarketGetReserved indicates an expected call of MarketGetReserved.
+func (mr *MockFullNodeMockRecorder) MarketGetReserved(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketGetReserved", reflect.TypeOf((*MockFullNode)(nil).MarketGetReserved), arg0, arg1)
+}
+
+// MarketReleaseFunds mocks base method.
+func (m *MockFullNode) MarketReleaseFunds(arg0 context.Context, arg1 address.Address, arg2 big.Int) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MarketReleaseFunds", arg0, arg1, arg2)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// MarketReleaseFunds indicates an expected call of MarketReleaseFunds.
+func (mr *MockFullNodeMockRecorder) MarketReleaseFunds(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReleaseFunds", reflect.TypeOf((*MockFullNode)(nil).MarketReleaseFunds), arg0, arg1, arg2)
+}
+
+// MarketReserveFunds mocks base method.
+func (m *MockFullNode) MarketReserveFunds(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MarketReserveFunds", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MarketReserveFunds indicates an expected call of MarketReserveFunds.
+func (mr *MockFullNodeMockRecorder) MarketReserveFunds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReserveFunds", reflect.TypeOf((*MockFullNode)(nil).MarketReserveFunds), arg0, arg1, arg2, arg3)
+}
+
+// MarketWithdraw mocks base method.
+func (m *MockFullNode) MarketWithdraw(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MarketWithdraw", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MarketWithdraw indicates an expected call of MarketWithdraw.
+func (mr *MockFullNodeMockRecorder) MarketWithdraw(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketWithdraw", reflect.TypeOf((*MockFullNode)(nil).MarketWithdraw), arg0, arg1, arg2, arg3)
+}
+
+// MinerCreateBlock mocks base method.
+func (m *MockFullNode) MinerCreateBlock(arg0 context.Context, arg1 *api.BlockTemplate) (*types.BlockMsg, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MinerCreateBlock", arg0, arg1)
+ ret0, _ := ret[0].(*types.BlockMsg)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MinerCreateBlock indicates an expected call of MinerCreateBlock.
+func (mr *MockFullNodeMockRecorder) MinerCreateBlock(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerCreateBlock", reflect.TypeOf((*MockFullNode)(nil).MinerCreateBlock), arg0, arg1)
+}
+
+// MinerGetBaseInfo mocks base method.
+func (m *MockFullNode) MinerGetBaseInfo(arg0 context.Context, arg1 address.Address, arg2 abi.ChainEpoch, arg3 types.TipSetKey) (*api.MiningBaseInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MinerGetBaseInfo", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*api.MiningBaseInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MinerGetBaseInfo indicates an expected call of MinerGetBaseInfo.
+func (mr *MockFullNodeMockRecorder) MinerGetBaseInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerGetBaseInfo", reflect.TypeOf((*MockFullNode)(nil).MinerGetBaseInfo), arg0, arg1, arg2, arg3)
+}
+
+// MpoolBatchPush mocks base method.
+func (m *MockFullNode) MpoolBatchPush(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolBatchPush", arg0, arg1)
+ ret0, _ := ret[0].([]cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolBatchPush indicates an expected call of MpoolBatchPush.
+func (mr *MockFullNodeMockRecorder) MpoolBatchPush(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPush", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPush), arg0, arg1)
+}
+
+// MpoolBatchPushMessage mocks base method.
+func (m *MockFullNode) MpoolBatchPushMessage(arg0 context.Context, arg1 []*types.Message, arg2 *api.MessageSendSpec) ([]*types.SignedMessage, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolBatchPushMessage", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*types.SignedMessage)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolBatchPushMessage indicates an expected call of MpoolBatchPushMessage.
+func (mr *MockFullNodeMockRecorder) MpoolBatchPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushMessage), arg0, arg1, arg2)
+}
+
+// MpoolBatchPushUntrusted mocks base method.
+func (m *MockFullNode) MpoolBatchPushUntrusted(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolBatchPushUntrusted", arg0, arg1)
+ ret0, _ := ret[0].([]cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolBatchPushUntrusted indicates an expected call of MpoolBatchPushUntrusted.
+func (mr *MockFullNodeMockRecorder) MpoolBatchPushUntrusted(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushUntrusted), arg0, arg1)
+}
+
+// MpoolClear mocks base method.
+func (m *MockFullNode) MpoolClear(arg0 context.Context, arg1 bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolClear", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// MpoolClear indicates an expected call of MpoolClear.
+func (mr *MockFullNodeMockRecorder) MpoolClear(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolClear", reflect.TypeOf((*MockFullNode)(nil).MpoolClear), arg0, arg1)
+}
+
+// MpoolGetConfig mocks base method.
+func (m *MockFullNode) MpoolGetConfig(arg0 context.Context) (*types.MpoolConfig, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolGetConfig", arg0)
+ ret0, _ := ret[0].(*types.MpoolConfig)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolGetConfig indicates an expected call of MpoolGetConfig.
+func (mr *MockFullNodeMockRecorder) MpoolGetConfig(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolGetConfig), arg0)
+}
+
+// MpoolGetNonce mocks base method.
+func (m *MockFullNode) MpoolGetNonce(arg0 context.Context, arg1 address.Address) (uint64, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolGetNonce", arg0, arg1)
+ ret0, _ := ret[0].(uint64)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolGetNonce indicates an expected call of MpoolGetNonce.
+func (mr *MockFullNodeMockRecorder) MpoolGetNonce(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetNonce", reflect.TypeOf((*MockFullNode)(nil).MpoolGetNonce), arg0, arg1)
+}
+
+// MpoolPending mocks base method.
+func (m *MockFullNode) MpoolPending(arg0 context.Context, arg1 types.TipSetKey) ([]*types.SignedMessage, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolPending", arg0, arg1)
+ ret0, _ := ret[0].([]*types.SignedMessage)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolPending indicates an expected call of MpoolPending.
+func (mr *MockFullNodeMockRecorder) MpoolPending(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPending", reflect.TypeOf((*MockFullNode)(nil).MpoolPending), arg0, arg1)
+}
+
+// MpoolPush mocks base method.
+func (m *MockFullNode) MpoolPush(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolPush", arg0, arg1)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolPush indicates an expected call of MpoolPush.
+func (mr *MockFullNodeMockRecorder) MpoolPush(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPush", reflect.TypeOf((*MockFullNode)(nil).MpoolPush), arg0, arg1)
+}
+
+// MpoolPushMessage mocks base method.
+func (m *MockFullNode) MpoolPushMessage(arg0 context.Context, arg1 *types.Message, arg2 *api.MessageSendSpec) (*types.SignedMessage, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolPushMessage", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*types.SignedMessage)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolPushMessage indicates an expected call of MpoolPushMessage.
+func (mr *MockFullNodeMockRecorder) MpoolPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolPushMessage), arg0, arg1, arg2)
+}
+
+// MpoolPushUntrusted mocks base method.
+func (m *MockFullNode) MpoolPushUntrusted(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolPushUntrusted", arg0, arg1)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolPushUntrusted indicates an expected call of MpoolPushUntrusted.
+func (mr *MockFullNodeMockRecorder) MpoolPushUntrusted(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolPushUntrusted), arg0, arg1)
+}
+
+// MpoolSelect mocks base method.
+func (m *MockFullNode) MpoolSelect(arg0 context.Context, arg1 types.TipSetKey, arg2 float64) ([]*types.SignedMessage, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolSelect", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*types.SignedMessage)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolSelect indicates an expected call of MpoolSelect.
+func (mr *MockFullNodeMockRecorder) MpoolSelect(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSelect", reflect.TypeOf((*MockFullNode)(nil).MpoolSelect), arg0, arg1, arg2)
+}
+
+// MpoolSetConfig mocks base method.
+func (m *MockFullNode) MpoolSetConfig(arg0 context.Context, arg1 *types.MpoolConfig) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolSetConfig", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// MpoolSetConfig indicates an expected call of MpoolSetConfig.
+func (mr *MockFullNodeMockRecorder) MpoolSetConfig(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolSetConfig), arg0, arg1)
+}
+
+// MpoolSub mocks base method.
+func (m *MockFullNode) MpoolSub(arg0 context.Context) (<-chan api.MpoolUpdate, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolSub", arg0)
+ ret0, _ := ret[0].(<-chan api.MpoolUpdate)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolSub indicates an expected call of MpoolSub.
+func (mr *MockFullNodeMockRecorder) MpoolSub(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSub", reflect.TypeOf((*MockFullNode)(nil).MpoolSub), arg0)
+}
+
+// MsigAddApprove mocks base method.
+func (m *MockFullNode) MsigAddApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address, arg6 bool) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigAddApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigAddApprove indicates an expected call of MsigAddApprove.
+func (mr *MockFullNodeMockRecorder) MsigAddApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddApprove", reflect.TypeOf((*MockFullNode)(nil).MsigAddApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+}
+
+// MsigAddCancel mocks base method.
+func (m *MockFullNode) MsigAddCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4 address.Address, arg5 bool) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigAddCancel", arg0, arg1, arg2, arg3, arg4, arg5)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigAddCancel indicates an expected call of MsigAddCancel.
+func (mr *MockFullNodeMockRecorder) MsigAddCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddCancel", reflect.TypeOf((*MockFullNode)(nil).MsigAddCancel), arg0, arg1, arg2, arg3, arg4, arg5)
+}
+
+// MsigAddPropose mocks base method.
+func (m *MockFullNode) MsigAddPropose(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigAddPropose", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigAddPropose indicates an expected call of MsigAddPropose.
+func (mr *MockFullNodeMockRecorder) MsigAddPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddPropose", reflect.TypeOf((*MockFullNode)(nil).MsigAddPropose), arg0, arg1, arg2, arg3, arg4)
+}
+
+// MsigApprove mocks base method.
+func (m *MockFullNode) MsigApprove(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigApprove", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigApprove indicates an expected call of MsigApprove.
+func (mr *MockFullNodeMockRecorder) MsigApprove(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApprove", reflect.TypeOf((*MockFullNode)(nil).MsigApprove), arg0, arg1, arg2, arg3)
+}
+
+// MsigApproveTxnHash mocks base method.
+func (m *MockFullNode) MsigApproveTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3, arg4 address.Address, arg5 big.Int, arg6 address.Address, arg7 uint64, arg8 []byte) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigApproveTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigApproveTxnHash indicates an expected call of MsigApproveTxnHash.
+func (mr *MockFullNodeMockRecorder) MsigApproveTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApproveTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigApproveTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
+}
+
+// MsigCancel mocks base method.
+func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigCancel indicates an expected call of MsigCancel.
+func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
+}
+
+// MsigCreate mocks base method.
+func (m *MockFullNode) MsigCreate(arg0 context.Context, arg1 uint64, arg2 []address.Address, arg3 abi.ChainEpoch, arg4 big.Int, arg5 address.Address, arg6 big.Int) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigCreate", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigCreate indicates an expected call of MsigCreate.
+func (mr *MockFullNodeMockRecorder) MsigCreate(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCreate", reflect.TypeOf((*MockFullNode)(nil).MsigCreate), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+}
+
+// MsigGetAvailableBalance mocks base method.
+func (m *MockFullNode) MsigGetAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigGetAvailableBalance", arg0, arg1, arg2)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigGetAvailableBalance indicates an expected call of MsigGetAvailableBalance.
+func (mr *MockFullNodeMockRecorder) MsigGetAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).MsigGetAvailableBalance), arg0, arg1, arg2)
+}
+
+// MsigGetPending mocks base method.
+func (m *MockFullNode) MsigGetPending(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*api.MsigTransaction, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigGetPending", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*api.MsigTransaction)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigGetPending indicates an expected call of MsigGetPending.
+func (mr *MockFullNodeMockRecorder) MsigGetPending(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetPending", reflect.TypeOf((*MockFullNode)(nil).MsigGetPending), arg0, arg1, arg2)
+}
+
+// MsigGetVested mocks base method.
+func (m *MockFullNode) MsigGetVested(arg0 context.Context, arg1 address.Address, arg2, arg3 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigGetVested", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigGetVested indicates an expected call of MsigGetVested.
+func (mr *MockFullNodeMockRecorder) MsigGetVested(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVested", reflect.TypeOf((*MockFullNode)(nil).MsigGetVested), arg0, arg1, arg2, arg3)
+}
+
+// MsigGetVestingSchedule mocks base method.
+func (m *MockFullNode) MsigGetVestingSchedule(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MsigVesting, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigGetVestingSchedule", arg0, arg1, arg2)
+ ret0, _ := ret[0].(api.MsigVesting)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigGetVestingSchedule indicates an expected call of MsigGetVestingSchedule.
+func (mr *MockFullNodeMockRecorder) MsigGetVestingSchedule(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVestingSchedule", reflect.TypeOf((*MockFullNode)(nil).MsigGetVestingSchedule), arg0, arg1, arg2)
+}
+
+// MsigPropose mocks base method.
+func (m *MockFullNode) MsigPropose(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int, arg4 address.Address, arg5 uint64, arg6 []byte) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigPropose", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigPropose indicates an expected call of MsigPropose.
+func (mr *MockFullNodeMockRecorder) MsigPropose(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigPropose", reflect.TypeOf((*MockFullNode)(nil).MsigPropose), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+}
+
+// MsigRemoveSigner mocks base method.
+func (m *MockFullNode) MsigRemoveSigner(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigRemoveSigner", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigRemoveSigner indicates an expected call of MsigRemoveSigner.
+func (mr *MockFullNodeMockRecorder) MsigRemoveSigner(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigRemoveSigner", reflect.TypeOf((*MockFullNode)(nil).MsigRemoveSigner), arg0, arg1, arg2, arg3, arg4)
+}
+
+// MsigSwapApprove mocks base method.
+func (m *MockFullNode) MsigSwapApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5, arg6 address.Address) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigSwapApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigSwapApprove indicates an expected call of MsigSwapApprove.
+func (mr *MockFullNodeMockRecorder) MsigSwapApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapApprove", reflect.TypeOf((*MockFullNode)(nil).MsigSwapApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+}
+
+// MsigSwapCancel mocks base method.
+func (m *MockFullNode) MsigSwapCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigSwapCancel", arg0, arg1, arg2, arg3, arg4, arg5)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigSwapCancel indicates an expected call of MsigSwapCancel.
+func (mr *MockFullNodeMockRecorder) MsigSwapCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapCancel", reflect.TypeOf((*MockFullNode)(nil).MsigSwapCancel), arg0, arg1, arg2, arg3, arg4, arg5)
+}
+
+// MsigSwapPropose mocks base method.
+func (m *MockFullNode) MsigSwapPropose(arg0 context.Context, arg1, arg2, arg3, arg4 address.Address) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MsigSwapPropose", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MsigSwapPropose indicates an expected call of MsigSwapPropose.
+func (mr *MockFullNodeMockRecorder) MsigSwapPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapPropose", reflect.TypeOf((*MockFullNode)(nil).MsigSwapPropose), arg0, arg1, arg2, arg3, arg4)
+}
+
+// NetAddrsListen mocks base method.
+func (m *MockFullNode) NetAddrsListen(arg0 context.Context) (peer.AddrInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetAddrsListen", arg0)
+ ret0, _ := ret[0].(peer.AddrInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetAddrsListen indicates an expected call of NetAddrsListen.
+func (mr *MockFullNodeMockRecorder) NetAddrsListen(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAddrsListen", reflect.TypeOf((*MockFullNode)(nil).NetAddrsListen), arg0)
+}
+
+// NetAgentVersion mocks base method.
+func (m *MockFullNode) NetAgentVersion(arg0 context.Context, arg1 peer.ID) (string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetAgentVersion", arg0, arg1)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetAgentVersion indicates an expected call of NetAgentVersion.
+func (mr *MockFullNodeMockRecorder) NetAgentVersion(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAgentVersion", reflect.TypeOf((*MockFullNode)(nil).NetAgentVersion), arg0, arg1)
+}
+
+// NetAutoNatStatus mocks base method.
+func (m *MockFullNode) NetAutoNatStatus(arg0 context.Context) (api.NatInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetAutoNatStatus", arg0)
+ ret0, _ := ret[0].(api.NatInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetAutoNatStatus indicates an expected call of NetAutoNatStatus.
+func (mr *MockFullNodeMockRecorder) NetAutoNatStatus(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAutoNatStatus", reflect.TypeOf((*MockFullNode)(nil).NetAutoNatStatus), arg0)
+}
+
+// NetBandwidthStats mocks base method.
+func (m *MockFullNode) NetBandwidthStats(arg0 context.Context) (metrics.Stats, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetBandwidthStats", arg0)
+ ret0, _ := ret[0].(metrics.Stats)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetBandwidthStats indicates an expected call of NetBandwidthStats.
+func (mr *MockFullNodeMockRecorder) NetBandwidthStats(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStats", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStats), arg0)
+}
+
+// NetBandwidthStatsByPeer mocks base method.
+func (m *MockFullNode) NetBandwidthStatsByPeer(arg0 context.Context) (map[string]metrics.Stats, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetBandwidthStatsByPeer", arg0)
+ ret0, _ := ret[0].(map[string]metrics.Stats)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetBandwidthStatsByPeer indicates an expected call of NetBandwidthStatsByPeer.
+func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByPeer(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByPeer", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByPeer), arg0)
+}
+
+// NetBandwidthStatsByProtocol mocks base method.
+func (m *MockFullNode) NetBandwidthStatsByProtocol(arg0 context.Context) (map[protocol.ID]metrics.Stats, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetBandwidthStatsByProtocol", arg0)
+ ret0, _ := ret[0].(map[protocol.ID]metrics.Stats)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetBandwidthStatsByProtocol indicates an expected call of NetBandwidthStatsByProtocol.
+func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByProtocol(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByProtocol", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByProtocol), arg0)
+}
+
+// NetBlockAdd mocks base method.
+func (m *MockFullNode) NetBlockAdd(arg0 context.Context, arg1 api.NetBlockList) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetBlockAdd", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// NetBlockAdd indicates an expected call of NetBlockAdd.
+func (mr *MockFullNodeMockRecorder) NetBlockAdd(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockAdd", reflect.TypeOf((*MockFullNode)(nil).NetBlockAdd), arg0, arg1)
+}
+
+// NetBlockList mocks base method.
+func (m *MockFullNode) NetBlockList(arg0 context.Context) (api.NetBlockList, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetBlockList", arg0)
+ ret0, _ := ret[0].(api.NetBlockList)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetBlockList indicates an expected call of NetBlockList.
+func (mr *MockFullNodeMockRecorder) NetBlockList(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockList", reflect.TypeOf((*MockFullNode)(nil).NetBlockList), arg0)
+}
+
+// NetBlockRemove mocks base method.
+func (m *MockFullNode) NetBlockRemove(arg0 context.Context, arg1 api.NetBlockList) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetBlockRemove", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// NetBlockRemove indicates an expected call of NetBlockRemove.
+func (mr *MockFullNodeMockRecorder) NetBlockRemove(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockRemove", reflect.TypeOf((*MockFullNode)(nil).NetBlockRemove), arg0, arg1)
+}
+
+// NetConnect mocks base method.
+func (m *MockFullNode) NetConnect(arg0 context.Context, arg1 peer.AddrInfo) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetConnect", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// NetConnect indicates an expected call of NetConnect.
+func (mr *MockFullNodeMockRecorder) NetConnect(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnect", reflect.TypeOf((*MockFullNode)(nil).NetConnect), arg0, arg1)
+}
+
+// NetConnectedness mocks base method.
+func (m *MockFullNode) NetConnectedness(arg0 context.Context, arg1 peer.ID) (network0.Connectedness, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetConnectedness", arg0, arg1)
+ ret0, _ := ret[0].(network0.Connectedness)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetConnectedness indicates an expected call of NetConnectedness.
+func (mr *MockFullNodeMockRecorder) NetConnectedness(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnectedness", reflect.TypeOf((*MockFullNode)(nil).NetConnectedness), arg0, arg1)
+}
+
+// NetDisconnect mocks base method.
+func (m *MockFullNode) NetDisconnect(arg0 context.Context, arg1 peer.ID) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetDisconnect", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// NetDisconnect indicates an expected call of NetDisconnect.
+func (mr *MockFullNodeMockRecorder) NetDisconnect(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetDisconnect", reflect.TypeOf((*MockFullNode)(nil).NetDisconnect), arg0, arg1)
+}
+
+// NetFindPeer mocks base method.
+func (m *MockFullNode) NetFindPeer(arg0 context.Context, arg1 peer.ID) (peer.AddrInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetFindPeer", arg0, arg1)
+ ret0, _ := ret[0].(peer.AddrInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetFindPeer indicates an expected call of NetFindPeer.
+func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1)
+}
+
+// NetPeerInfo mocks base method.
+func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetPeerInfo", arg0, arg1)
+ ret0, _ := ret[0].(*api.ExtendedPeerInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetPeerInfo indicates an expected call of NetPeerInfo.
+func (mr *MockFullNodeMockRecorder) NetPeerInfo(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeerInfo", reflect.TypeOf((*MockFullNode)(nil).NetPeerInfo), arg0, arg1)
+}
+
+// NetPeers mocks base method.
+func (m *MockFullNode) NetPeers(arg0 context.Context) ([]peer.AddrInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetPeers", arg0)
+ ret0, _ := ret[0].([]peer.AddrInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetPeers indicates an expected call of NetPeers.
+func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0)
+}
+
+// NetPubsubScores mocks base method.
+func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NetPubsubScores", arg0)
+ ret0, _ := ret[0].([]api.PubsubScore)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// NetPubsubScores indicates an expected call of NetPubsubScores.
+func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0)
+}
+
+// PaychAllocateLane mocks base method.
+func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Address) (uint64, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychAllocateLane", arg0, arg1)
+ ret0, _ := ret[0].(uint64)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychAllocateLane indicates an expected call of PaychAllocateLane.
+func (mr *MockFullNodeMockRecorder) PaychAllocateLane(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAllocateLane", reflect.TypeOf((*MockFullNode)(nil).PaychAllocateLane), arg0, arg1)
+}
+
+// PaychAvailableFunds mocks base method.
+func (m *MockFullNode) PaychAvailableFunds(arg0 context.Context, arg1 address.Address) (*api.ChannelAvailableFunds, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychAvailableFunds", arg0, arg1)
+ ret0, _ := ret[0].(*api.ChannelAvailableFunds)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychAvailableFunds indicates an expected call of PaychAvailableFunds.
+func (mr *MockFullNodeMockRecorder) PaychAvailableFunds(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFunds", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFunds), arg0, arg1)
+}
+
+// PaychAvailableFundsByFromTo mocks base method.
+func (m *MockFullNode) PaychAvailableFundsByFromTo(arg0 context.Context, arg1, arg2 address.Address) (*api.ChannelAvailableFunds, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychAvailableFundsByFromTo", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*api.ChannelAvailableFunds)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychAvailableFundsByFromTo indicates an expected call of PaychAvailableFundsByFromTo.
+func (mr *MockFullNodeMockRecorder) PaychAvailableFundsByFromTo(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFundsByFromTo", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFundsByFromTo), arg0, arg1, arg2)
+}
+
+// PaychCollect mocks base method.
+func (m *MockFullNode) PaychCollect(arg0 context.Context, arg1 address.Address) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychCollect", arg0, arg1)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychCollect indicates an expected call of PaychCollect.
+func (mr *MockFullNodeMockRecorder) PaychCollect(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychCollect", reflect.TypeOf((*MockFullNode)(nil).PaychCollect), arg0, arg1)
+}
+
+// PaychGet mocks base method.
+func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (*api.ChannelInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychGet", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*api.ChannelInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychGet indicates an expected call of PaychGet.
+func (mr *MockFullNodeMockRecorder) PaychGet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGet", reflect.TypeOf((*MockFullNode)(nil).PaychGet), arg0, arg1, arg2, arg3)
+}
+
+// PaychGetWaitReady mocks base method.
+func (m *MockFullNode) PaychGetWaitReady(arg0 context.Context, arg1 cid.Cid) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychGetWaitReady", arg0, arg1)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychGetWaitReady indicates an expected call of PaychGetWaitReady.
+func (mr *MockFullNodeMockRecorder) PaychGetWaitReady(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGetWaitReady", reflect.TypeOf((*MockFullNode)(nil).PaychGetWaitReady), arg0, arg1)
+}
+
+// PaychList mocks base method.
+func (m *MockFullNode) PaychList(arg0 context.Context) ([]address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychList", arg0)
+ ret0, _ := ret[0].([]address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychList indicates an expected call of PaychList.
+func (mr *MockFullNodeMockRecorder) PaychList(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychList", reflect.TypeOf((*MockFullNode)(nil).PaychList), arg0)
+}
+
+// PaychNewPayment mocks base method.
+func (m *MockFullNode) PaychNewPayment(arg0 context.Context, arg1, arg2 address.Address, arg3 []api.VoucherSpec) (*api.PaymentInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychNewPayment", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*api.PaymentInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychNewPayment indicates an expected call of PaychNewPayment.
+func (mr *MockFullNodeMockRecorder) PaychNewPayment(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychNewPayment", reflect.TypeOf((*MockFullNode)(nil).PaychNewPayment), arg0, arg1, arg2, arg3)
+}
+
+// PaychSettle mocks base method.
+func (m *MockFullNode) PaychSettle(arg0 context.Context, arg1 address.Address) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychSettle", arg0, arg1)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychSettle indicates an expected call of PaychSettle.
+func (mr *MockFullNodeMockRecorder) PaychSettle(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychSettle", reflect.TypeOf((*MockFullNode)(nil).PaychSettle), arg0, arg1)
+}
+
+// PaychStatus mocks base method.
+func (m *MockFullNode) PaychStatus(arg0 context.Context, arg1 address.Address) (*api.PaychStatus, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychStatus", arg0, arg1)
+ ret0, _ := ret[0].(*api.PaychStatus)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychStatus indicates an expected call of PaychStatus.
+func (mr *MockFullNodeMockRecorder) PaychStatus(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychStatus", reflect.TypeOf((*MockFullNode)(nil).PaychStatus), arg0, arg1)
+}
+
+// PaychVoucherAdd mocks base method.
+func (m *MockFullNode) PaychVoucherAdd(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3 []byte, arg4 big.Int) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychVoucherAdd", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychVoucherAdd indicates an expected call of PaychVoucherAdd.
+func (mr *MockFullNodeMockRecorder) PaychVoucherAdd(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherAdd", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherAdd), arg0, arg1, arg2, arg3, arg4)
+}
+
+// PaychVoucherCheckSpendable mocks base method.
+func (m *MockFullNode) PaychVoucherCheckSpendable(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychVoucherCheckSpendable", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychVoucherCheckSpendable indicates an expected call of PaychVoucherCheckSpendable.
+func (mr *MockFullNodeMockRecorder) PaychVoucherCheckSpendable(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckSpendable", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckSpendable), arg0, arg1, arg2, arg3, arg4)
+}
+
+// PaychVoucherCheckValid mocks base method.
+func (m *MockFullNode) PaychVoucherCheckValid(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychVoucherCheckValid", arg0, arg1, arg2)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// PaychVoucherCheckValid indicates an expected call of PaychVoucherCheckValid.
+func (mr *MockFullNodeMockRecorder) PaychVoucherCheckValid(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckValid", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckValid), arg0, arg1, arg2)
+}
+
+// PaychVoucherCreate mocks base method.
+func (m *MockFullNode) PaychVoucherCreate(arg0 context.Context, arg1 address.Address, arg2 big.Int, arg3 uint64) (*api.VoucherCreateResult, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychVoucherCreate", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*api.VoucherCreateResult)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychVoucherCreate indicates an expected call of PaychVoucherCreate.
+func (mr *MockFullNodeMockRecorder) PaychVoucherCreate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCreate", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCreate), arg0, arg1, arg2, arg3)
+}
+
+// PaychVoucherList mocks base method.
+func (m *MockFullNode) PaychVoucherList(arg0 context.Context, arg1 address.Address) ([]*paych.SignedVoucher, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychVoucherList", arg0, arg1)
+ ret0, _ := ret[0].([]*paych.SignedVoucher)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychVoucherList indicates an expected call of PaychVoucherList.
+func (mr *MockFullNodeMockRecorder) PaychVoucherList(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherList", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherList), arg0, arg1)
+}
+
+// PaychVoucherSubmit mocks base method.
+func (m *MockFullNode) PaychVoucherSubmit(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PaychVoucherSubmit", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// PaychVoucherSubmit indicates an expected call of PaychVoucherSubmit.
+func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4)
+}
+
+// Session mocks base method.
+func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Session", arg0)
+ ret0, _ := ret[0].(uuid.UUID)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Session indicates an expected call of Session.
+func (mr *MockFullNodeMockRecorder) Session(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Session", reflect.TypeOf((*MockFullNode)(nil).Session), arg0)
+}
+
+// Shutdown mocks base method.
+func (m *MockFullNode) Shutdown(arg0 context.Context) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Shutdown", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Shutdown indicates an expected call of Shutdown.
+func (mr *MockFullNodeMockRecorder) Shutdown(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockFullNode)(nil).Shutdown), arg0)
+}
+
+// StateAccountKey mocks base method.
+func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateAccountKey", arg0, arg1, arg2)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateAccountKey indicates an expected call of StateAccountKey.
+func (mr *MockFullNodeMockRecorder) StateAccountKey(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", reflect.TypeOf((*MockFullNode)(nil).StateAccountKey), arg0, arg1, arg2)
+}
+
+// StateAllMinerFaults mocks base method.
+func (m *MockFullNode) StateAllMinerFaults(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) ([]*api.Fault, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateAllMinerFaults", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*api.Fault)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateAllMinerFaults indicates an expected call of StateAllMinerFaults.
+func (mr *MockFullNodeMockRecorder) StateAllMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAllMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateAllMinerFaults), arg0, arg1, arg2)
+}
+
+// StateCall mocks base method.
+func (m *MockFullNode) StateCall(arg0 context.Context, arg1 *types.Message, arg2 types.TipSetKey) (*api.InvocResult, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateCall", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*api.InvocResult)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateCall indicates an expected call of StateCall.
+func (mr *MockFullNodeMockRecorder) StateCall(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCall", reflect.TypeOf((*MockFullNode)(nil).StateCall), arg0, arg1, arg2)
+}
+
+// StateChangedActors mocks base method.
+func (m *MockFullNode) StateChangedActors(arg0 context.Context, arg1, arg2 cid.Cid) (map[string]types.Actor, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateChangedActors", arg0, arg1, arg2)
+ ret0, _ := ret[0].(map[string]types.Actor)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateChangedActors indicates an expected call of StateChangedActors.
+func (mr *MockFullNodeMockRecorder) StateChangedActors(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateChangedActors", reflect.TypeOf((*MockFullNode)(nil).StateChangedActors), arg0, arg1, arg2)
+}
+
+// StateCirculatingSupply mocks base method.
+func (m *MockFullNode) StateCirculatingSupply(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateCirculatingSupply", arg0, arg1)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateCirculatingSupply indicates an expected call of StateCirculatingSupply.
+func (mr *MockFullNodeMockRecorder) StateCirculatingSupply(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCirculatingSupply", reflect.TypeOf((*MockFullNode)(nil).StateCirculatingSupply), arg0, arg1)
+}
+
+// StateCompute mocks base method.
+func (m *MockFullNode) StateCompute(arg0 context.Context, arg1 abi.ChainEpoch, arg2 []*types.Message, arg3 types.TipSetKey) (*api.ComputeStateOutput, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateCompute", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*api.ComputeStateOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateCompute indicates an expected call of StateCompute.
+func (mr *MockFullNodeMockRecorder) StateCompute(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCompute", reflect.TypeOf((*MockFullNode)(nil).StateCompute), arg0, arg1, arg2, arg3)
+}
+
+// StateDealProviderCollateralBounds mocks base method.
+func (m *MockFullNode) StateDealProviderCollateralBounds(arg0 context.Context, arg1 abi.PaddedPieceSize, arg2 bool, arg3 types.TipSetKey) (api.DealCollateralBounds, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateDealProviderCollateralBounds", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(api.DealCollateralBounds)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateDealProviderCollateralBounds indicates an expected call of StateDealProviderCollateralBounds.
+func (mr *MockFullNodeMockRecorder) StateDealProviderCollateralBounds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDealProviderCollateralBounds", reflect.TypeOf((*MockFullNode)(nil).StateDealProviderCollateralBounds), arg0, arg1, arg2, arg3)
+}
+
+// StateDecodeParams mocks base method.
+func (m *MockFullNode) StateDecodeParams(arg0 context.Context, arg1 address.Address, arg2 abi.MethodNum, arg3 []byte, arg4 types.TipSetKey) (interface{}, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateDecodeParams", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(interface{})
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateDecodeParams indicates an expected call of StateDecodeParams.
+func (mr *MockFullNodeMockRecorder) StateDecodeParams(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDecodeParams", reflect.TypeOf((*MockFullNode)(nil).StateDecodeParams), arg0, arg1, arg2, arg3, arg4)
+}
+
+// StateGetActor mocks base method.
+func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.Actor, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateGetActor", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*types.Actor)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateGetActor indicates an expected call of StateGetActor.
+func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2)
+}
+
+// StateGetReceipt mocks base method.
+func (m *MockFullNode) StateGetReceipt(arg0 context.Context, arg1 cid.Cid, arg2 types.TipSetKey) (*types.MessageReceipt, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateGetReceipt", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*types.MessageReceipt)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateGetReceipt indicates an expected call of StateGetReceipt.
+func (mr *MockFullNodeMockRecorder) StateGetReceipt(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetReceipt", reflect.TypeOf((*MockFullNode)(nil).StateGetReceipt), arg0, arg1, arg2)
+}
+
+// StateListActors mocks base method.
+func (m *MockFullNode) StateListActors(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateListActors", arg0, arg1)
+ ret0, _ := ret[0].([]address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateListActors indicates an expected call of StateListActors.
+func (mr *MockFullNodeMockRecorder) StateListActors(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListActors", reflect.TypeOf((*MockFullNode)(nil).StateListActors), arg0, arg1)
+}
+
+// StateListMessages mocks base method.
+func (m *MockFullNode) StateListMessages(arg0 context.Context, arg1 *api.MessageMatch, arg2 types.TipSetKey, arg3 abi.ChainEpoch) ([]cid.Cid, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateListMessages", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].([]cid.Cid)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateListMessages indicates an expected call of StateListMessages.
+func (mr *MockFullNodeMockRecorder) StateListMessages(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMessages", reflect.TypeOf((*MockFullNode)(nil).StateListMessages), arg0, arg1, arg2, arg3)
+}
+
+// StateListMiners mocks base method.
+func (m *MockFullNode) StateListMiners(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateListMiners", arg0, arg1)
+ ret0, _ := ret[0].([]address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateListMiners indicates an expected call of StateListMiners.
+func (mr *MockFullNodeMockRecorder) StateListMiners(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMiners", reflect.TypeOf((*MockFullNode)(nil).StateListMiners), arg0, arg1)
+}
+
+// StateLookupID mocks base method.
+func (m *MockFullNode) StateLookupID(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateLookupID", arg0, arg1, arg2)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateLookupID indicates an expected call of StateLookupID.
+func (mr *MockFullNodeMockRecorder) StateLookupID(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateLookupID", reflect.TypeOf((*MockFullNode)(nil).StateLookupID), arg0, arg1, arg2)
+}
+
+// StateMarketBalance mocks base method.
+func (m *MockFullNode) StateMarketBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MarketBalance, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMarketBalance", arg0, arg1, arg2)
+ ret0, _ := ret[0].(api.MarketBalance)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMarketBalance indicates an expected call of StateMarketBalance.
+func (mr *MockFullNodeMockRecorder) StateMarketBalance(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketBalance", reflect.TypeOf((*MockFullNode)(nil).StateMarketBalance), arg0, arg1, arg2)
+}
+
+// StateMarketDeals mocks base method.
+func (m *MockFullNode) StateMarketDeals(arg0 context.Context, arg1 types.TipSetKey) (map[string]api.MarketDeal, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMarketDeals", arg0, arg1)
+ ret0, _ := ret[0].(map[string]api.MarketDeal)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMarketDeals indicates an expected call of StateMarketDeals.
+func (mr *MockFullNodeMockRecorder) StateMarketDeals(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketDeals", reflect.TypeOf((*MockFullNode)(nil).StateMarketDeals), arg0, arg1)
+}
+
+// StateMarketParticipants mocks base method.
+func (m *MockFullNode) StateMarketParticipants(arg0 context.Context, arg1 types.TipSetKey) (map[string]api.MarketBalance, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMarketParticipants", arg0, arg1)
+ ret0, _ := ret[0].(map[string]api.MarketBalance)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMarketParticipants indicates an expected call of StateMarketParticipants.
+func (mr *MockFullNodeMockRecorder) StateMarketParticipants(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketParticipants", reflect.TypeOf((*MockFullNode)(nil).StateMarketParticipants), arg0, arg1)
+}
+
+// StateMarketStorageDeal mocks base method.
+func (m *MockFullNode) StateMarketStorageDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*api.MarketDeal, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMarketStorageDeal", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*api.MarketDeal)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMarketStorageDeal indicates an expected call of StateMarketStorageDeal.
+func (mr *MockFullNodeMockRecorder) StateMarketStorageDeal(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketStorageDeal", reflect.TypeOf((*MockFullNode)(nil).StateMarketStorageDeal), arg0, arg1, arg2)
+}
+
+// StateMinerActiveSectors mocks base method.
+func (m *MockFullNode) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerActiveSectors", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*miner.SectorOnChainInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors.
+func (mr *MockFullNodeMockRecorder) StateMinerActiveSectors(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerActiveSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerActiveSectors), arg0, arg1, arg2)
+}
+
+// StateMinerAvailableBalance mocks base method.
+func (m *MockFullNode) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance.
+func (mr *MockFullNodeMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).StateMinerAvailableBalance), arg0, arg1, arg2)
+}
+
+// StateMinerDeadlines mocks base method.
+func (m *MockFullNode) StateMinerDeadlines(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]api.Deadline, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerDeadlines", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]api.Deadline)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerDeadlines indicates an expected call of StateMinerDeadlines.
+func (mr *MockFullNodeMockRecorder) StateMinerDeadlines(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerDeadlines", reflect.TypeOf((*MockFullNode)(nil).StateMinerDeadlines), arg0, arg1, arg2)
+}
+
+// StateMinerFaults mocks base method.
+func (m *MockFullNode) StateMinerFaults(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerFaults", arg0, arg1, arg2)
+ ret0, _ := ret[0].(bitfield.BitField)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerFaults indicates an expected call of StateMinerFaults.
+func (mr *MockFullNodeMockRecorder) StateMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateMinerFaults), arg0, arg1, arg2)
+}
+
+// StateMinerInfo mocks base method.
+func (m *MockFullNode) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (miner.MinerInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2)
+ ret0, _ := ret[0].(miner.MinerInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerInfo indicates an expected call of StateMinerInfo.
+func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockFullNode)(nil).StateMinerInfo), arg0, arg1, arg2)
+}
+
+// StateMinerInitialPledgeCollateral mocks base method.
+func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral.
+func (mr *MockFullNodeMockRecorder) StateMinerInitialPledgeCollateral(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInitialPledgeCollateral", reflect.TypeOf((*MockFullNode)(nil).StateMinerInitialPledgeCollateral), arg0, arg1, arg2, arg3)
+}
+
+// StateMinerPartitions mocks base method.
+func (m *MockFullNode) StateMinerPartitions(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 types.TipSetKey) ([]api.Partition, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerPartitions", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].([]api.Partition)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerPartitions indicates an expected call of StateMinerPartitions.
+func (mr *MockFullNodeMockRecorder) StateMinerPartitions(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPartitions", reflect.TypeOf((*MockFullNode)(nil).StateMinerPartitions), arg0, arg1, arg2, arg3)
+}
+
+// StateMinerPower mocks base method.
+func (m *MockFullNode) StateMinerPower(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*api.MinerPower, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerPower", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*api.MinerPower)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerPower indicates an expected call of StateMinerPower.
+func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPower), arg0, arg1, arg2)
+}
+
+// StateMinerPreCommitDepositForPower mocks base method.
+func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerPreCommitDepositForPower indicates an expected call of StateMinerPreCommitDepositForPower.
+func (mr *MockFullNodeMockRecorder) StateMinerPreCommitDepositForPower(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPreCommitDepositForPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPreCommitDepositForPower), arg0, arg1, arg2, arg3)
+}
+
+// StateMinerProvingDeadline mocks base method.
+func (m *MockFullNode) StateMinerProvingDeadline(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*dline.Info, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerProvingDeadline", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*dline.Info)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerProvingDeadline indicates an expected call of StateMinerProvingDeadline.
+func (mr *MockFullNodeMockRecorder) StateMinerProvingDeadline(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerProvingDeadline", reflect.TypeOf((*MockFullNode)(nil).StateMinerProvingDeadline), arg0, arg1, arg2)
+}
+
+// StateMinerRecoveries mocks base method.
+func (m *MockFullNode) StateMinerRecoveries(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerRecoveries", arg0, arg1, arg2)
+ ret0, _ := ret[0].(bitfield.BitField)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerRecoveries indicates an expected call of StateMinerRecoveries.
+func (mr *MockFullNodeMockRecorder) StateMinerRecoveries(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerRecoveries", reflect.TypeOf((*MockFullNode)(nil).StateMinerRecoveries), arg0, arg1, arg2)
+}
+
+// StateMinerSectorAllocated mocks base method.
+func (m *MockFullNode) StateMinerSectorAllocated(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerSectorAllocated", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerSectorAllocated indicates an expected call of StateMinerSectorAllocated.
+func (mr *MockFullNodeMockRecorder) StateMinerSectorAllocated(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorAllocated", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorAllocated), arg0, arg1, arg2, arg3)
+}
+
+// StateMinerSectorCount mocks base method.
+func (m *MockFullNode) StateMinerSectorCount(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MinerSectors, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerSectorCount", arg0, arg1, arg2)
+ ret0, _ := ret[0].(api.MinerSectors)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerSectorCount indicates an expected call of StateMinerSectorCount.
+func (mr *MockFullNodeMockRecorder) StateMinerSectorCount(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorCount", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorCount), arg0, arg1, arg2)
+}
+
+// StateMinerSectors mocks base method.
+func (m *MockFullNode) StateMinerSectors(arg0 context.Context, arg1 address.Address, arg2 *bitfield.BitField, arg3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateMinerSectors", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].([]*miner.SectorOnChainInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateMinerSectors indicates an expected call of StateMinerSectors.
+func (mr *MockFullNodeMockRecorder) StateMinerSectors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectors), arg0, arg1, arg2, arg3)
+}
+
+// StateNetworkName mocks base method.
+func (m *MockFullNode) StateNetworkName(arg0 context.Context) (dtypes.NetworkName, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateNetworkName", arg0)
+ ret0, _ := ret[0].(dtypes.NetworkName)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateNetworkName indicates an expected call of StateNetworkName.
+func (mr *MockFullNodeMockRecorder) StateNetworkName(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkName", reflect.TypeOf((*MockFullNode)(nil).StateNetworkName), arg0)
+}
+
+// StateNetworkVersion mocks base method.
+func (m *MockFullNode) StateNetworkVersion(arg0 context.Context, arg1 types.TipSetKey) (network.Version, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateNetworkVersion", arg0, arg1)
+ ret0, _ := ret[0].(network.Version)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateNetworkVersion indicates an expected call of StateNetworkVersion.
+func (mr *MockFullNodeMockRecorder) StateNetworkVersion(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkVersion", reflect.TypeOf((*MockFullNode)(nil).StateNetworkVersion), arg0, arg1)
+}
+
+// StateReadState mocks base method.
+func (m *MockFullNode) StateReadState(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*api.ActorState, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateReadState", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*api.ActorState)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateReadState indicates an expected call of StateReadState.
+func (mr *MockFullNodeMockRecorder) StateReadState(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReadState", reflect.TypeOf((*MockFullNode)(nil).StateReadState), arg0, arg1, arg2)
+}
+
+// StateReplay mocks base method.
+func (m *MockFullNode) StateReplay(arg0 context.Context, arg1 types.TipSetKey, arg2 cid.Cid) (*api.InvocResult, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateReplay", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*api.InvocResult)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateReplay indicates an expected call of StateReplay.
+func (mr *MockFullNodeMockRecorder) StateReplay(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReplay", reflect.TypeOf((*MockFullNode)(nil).StateReplay), arg0, arg1, arg2)
+}
+
+// StateSearchMsg mocks base method.
+func (m *MockFullNode) StateSearchMsg(arg0 context.Context, arg1 cid.Cid) (*api.MsgLookup, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateSearchMsg", arg0, arg1)
+ ret0, _ := ret[0].(*api.MsgLookup)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateSearchMsg indicates an expected call of StateSearchMsg.
+func (mr *MockFullNodeMockRecorder) StateSearchMsg(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsg", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsg), arg0, arg1)
+}
+
+// StateSearchMsgLimited mocks base method.
+func (m *MockFullNode) StateSearchMsgLimited(arg0 context.Context, arg1 cid.Cid, arg2 abi.ChainEpoch) (*api.MsgLookup, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateSearchMsgLimited", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*api.MsgLookup)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateSearchMsgLimited indicates an expected call of StateSearchMsgLimited.
+func (mr *MockFullNodeMockRecorder) StateSearchMsgLimited(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsgLimited", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsgLimited), arg0, arg1, arg2)
+}
+
+// StateSectorExpiration mocks base method.
+func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorExpiration, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*miner.SectorExpiration)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateSectorExpiration indicates an expected call of StateSectorExpiration.
+func (mr *MockFullNodeMockRecorder) StateSectorExpiration(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorExpiration", reflect.TypeOf((*MockFullNode)(nil).StateSectorExpiration), arg0, arg1, arg2, arg3)
+}
+
+// StateSectorGetInfo mocks base method.
+func (m *MockFullNode) StateSectorGetInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorOnChainInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateSectorGetInfo", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*miner.SectorOnChainInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateSectorGetInfo indicates an expected call of StateSectorGetInfo.
+func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorGetInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorGetInfo), arg0, arg1, arg2, arg3)
+}
+
+// StateSectorPartition mocks base method.
+func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorLocation, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*miner.SectorLocation)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateSectorPartition indicates an expected call of StateSectorPartition.
+func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPartition", reflect.TypeOf((*MockFullNode)(nil).StateSectorPartition), arg0, arg1, arg2, arg3)
+}
+
+// StateSectorPreCommitInfo mocks base method.
+func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(miner.SectorPreCommitOnChainInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo.
+func (mr *MockFullNodeMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3)
+}
+
+// StateVMCirculatingSupplyInternal mocks base method.
+func (m *MockFullNode) StateVMCirculatingSupplyInternal(arg0 context.Context, arg1 types.TipSetKey) (api.CirculatingSupply, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateVMCirculatingSupplyInternal", arg0, arg1)
+ ret0, _ := ret[0].(api.CirculatingSupply)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateVMCirculatingSupplyInternal indicates an expected call of StateVMCirculatingSupplyInternal.
+func (mr *MockFullNodeMockRecorder) StateVMCirculatingSupplyInternal(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVMCirculatingSupplyInternal", reflect.TypeOf((*MockFullNode)(nil).StateVMCirculatingSupplyInternal), arg0, arg1)
+}
+
+// StateVerifiedClientStatus mocks base method.
+func (m *MockFullNode) StateVerifiedClientStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateVerifiedClientStatus", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateVerifiedClientStatus indicates an expected call of StateVerifiedClientStatus.
+func (mr *MockFullNodeMockRecorder) StateVerifiedClientStatus(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedClientStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedClientStatus), arg0, arg1, arg2)
+}
+
+// StateVerifiedRegistryRootKey mocks base method.
+func (m *MockFullNode) StateVerifiedRegistryRootKey(arg0 context.Context, arg1 types.TipSetKey) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateVerifiedRegistryRootKey", arg0, arg1)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateVerifiedRegistryRootKey indicates an expected call of StateVerifiedRegistryRootKey.
+func (mr *MockFullNodeMockRecorder) StateVerifiedRegistryRootKey(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedRegistryRootKey", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedRegistryRootKey), arg0, arg1)
+}
+
+// StateVerifierStatus mocks base method.
+func (m *MockFullNode) StateVerifierStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateVerifierStatus", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateVerifierStatus indicates an expected call of StateVerifierStatus.
+func (mr *MockFullNodeMockRecorder) StateVerifierStatus(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifierStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifierStatus), arg0, arg1, arg2)
+}
+
+// StateWaitMsg mocks base method.
+func (m *MockFullNode) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uint64) (*api.MsgLookup, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateWaitMsg", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*api.MsgLookup)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateWaitMsg indicates an expected call of StateWaitMsg.
+func (mr *MockFullNodeMockRecorder) StateWaitMsg(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2)
+}
+
+// StateWaitMsgLimited mocks base method.
+func (m *MockFullNode) StateWaitMsgLimited(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch) (*api.MsgLookup, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateWaitMsgLimited", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(*api.MsgLookup)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateWaitMsgLimited indicates an expected call of StateWaitMsgLimited.
+func (mr *MockFullNodeMockRecorder) StateWaitMsgLimited(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsgLimited", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsgLimited), arg0, arg1, arg2, arg3)
+}
+
+// SyncCheckBad mocks base method.
+func (m *MockFullNode) SyncCheckBad(arg0 context.Context, arg1 cid.Cid) (string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncCheckBad", arg0, arg1)
+ ret0, _ := ret[0].(string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SyncCheckBad indicates an expected call of SyncCheckBad.
+func (mr *MockFullNodeMockRecorder) SyncCheckBad(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCheckBad", reflect.TypeOf((*MockFullNode)(nil).SyncCheckBad), arg0, arg1)
+}
+
+// SyncCheckpoint mocks base method.
+func (m *MockFullNode) SyncCheckpoint(arg0 context.Context, arg1 types.TipSetKey) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncCheckpoint", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SyncCheckpoint indicates an expected call of SyncCheckpoint.
+func (mr *MockFullNodeMockRecorder) SyncCheckpoint(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCheckpoint", reflect.TypeOf((*MockFullNode)(nil).SyncCheckpoint), arg0, arg1)
+}
+
+// SyncIncomingBlocks mocks base method.
+func (m *MockFullNode) SyncIncomingBlocks(arg0 context.Context) (<-chan *types.BlockHeader, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncIncomingBlocks", arg0)
+ ret0, _ := ret[0].(<-chan *types.BlockHeader)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SyncIncomingBlocks indicates an expected call of SyncIncomingBlocks.
+func (mr *MockFullNodeMockRecorder) SyncIncomingBlocks(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncIncomingBlocks", reflect.TypeOf((*MockFullNode)(nil).SyncIncomingBlocks), arg0)
+}
+
+// SyncMarkBad mocks base method.
+func (m *MockFullNode) SyncMarkBad(arg0 context.Context, arg1 cid.Cid) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncMarkBad", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SyncMarkBad indicates an expected call of SyncMarkBad.
+func (mr *MockFullNodeMockRecorder) SyncMarkBad(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncMarkBad", reflect.TypeOf((*MockFullNode)(nil).SyncMarkBad), arg0, arg1)
+}
+
+// SyncState mocks base method.
+func (m *MockFullNode) SyncState(arg0 context.Context) (*api.SyncState, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncState", arg0)
+ ret0, _ := ret[0].(*api.SyncState)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SyncState indicates an expected call of SyncState.
+func (mr *MockFullNodeMockRecorder) SyncState(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncState", reflect.TypeOf((*MockFullNode)(nil).SyncState), arg0)
+}
+
+// SyncSubmitBlock mocks base method.
+func (m *MockFullNode) SyncSubmitBlock(arg0 context.Context, arg1 *types.BlockMsg) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncSubmitBlock", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SyncSubmitBlock indicates an expected call of SyncSubmitBlock.
+func (mr *MockFullNodeMockRecorder) SyncSubmitBlock(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncSubmitBlock", reflect.TypeOf((*MockFullNode)(nil).SyncSubmitBlock), arg0, arg1)
+}
+
+// SyncUnmarkAllBad mocks base method.
+func (m *MockFullNode) SyncUnmarkAllBad(arg0 context.Context) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncUnmarkAllBad", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SyncUnmarkAllBad indicates an expected call of SyncUnmarkAllBad.
+func (mr *MockFullNodeMockRecorder) SyncUnmarkAllBad(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUnmarkAllBad", reflect.TypeOf((*MockFullNode)(nil).SyncUnmarkAllBad), arg0)
+}
+
+// SyncUnmarkBad mocks base method.
+func (m *MockFullNode) SyncUnmarkBad(arg0 context.Context, arg1 cid.Cid) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncUnmarkBad", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SyncUnmarkBad indicates an expected call of SyncUnmarkBad.
+func (mr *MockFullNodeMockRecorder) SyncUnmarkBad(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUnmarkBad", reflect.TypeOf((*MockFullNode)(nil).SyncUnmarkBad), arg0, arg1)
+}
+
+// SyncValidateTipset mocks base method.
+func (m *MockFullNode) SyncValidateTipset(arg0 context.Context, arg1 types.TipSetKey) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SyncValidateTipset", arg0, arg1)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SyncValidateTipset indicates an expected call of SyncValidateTipset.
+func (mr *MockFullNodeMockRecorder) SyncValidateTipset(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncValidateTipset", reflect.TypeOf((*MockFullNode)(nil).SyncValidateTipset), arg0, arg1)
+}
+
+// Version mocks base method.
+func (m *MockFullNode) Version(arg0 context.Context) (api.APIVersion, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Version", arg0)
+ ret0, _ := ret[0].(api.APIVersion)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Version indicates an expected call of Version.
+func (mr *MockFullNodeMockRecorder) Version(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockFullNode)(nil).Version), arg0)
+}
+
+// WalletBalance mocks base method.
+func (m *MockFullNode) WalletBalance(arg0 context.Context, arg1 address.Address) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletBalance", arg0, arg1)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletBalance indicates an expected call of WalletBalance.
+func (mr *MockFullNodeMockRecorder) WalletBalance(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletBalance", reflect.TypeOf((*MockFullNode)(nil).WalletBalance), arg0, arg1)
+}
+
+// WalletDefaultAddress mocks base method.
+func (m *MockFullNode) WalletDefaultAddress(arg0 context.Context) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletDefaultAddress", arg0)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletDefaultAddress indicates an expected call of WalletDefaultAddress.
+func (mr *MockFullNodeMockRecorder) WalletDefaultAddress(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDefaultAddress", reflect.TypeOf((*MockFullNode)(nil).WalletDefaultAddress), arg0)
+}
+
+// WalletDelete mocks base method.
+func (m *MockFullNode) WalletDelete(arg0 context.Context, arg1 address.Address) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletDelete", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WalletDelete indicates an expected call of WalletDelete.
+func (mr *MockFullNodeMockRecorder) WalletDelete(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDelete", reflect.TypeOf((*MockFullNode)(nil).WalletDelete), arg0, arg1)
+}
+
+// WalletExport mocks base method.
+func (m *MockFullNode) WalletExport(arg0 context.Context, arg1 address.Address) (*types.KeyInfo, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletExport", arg0, arg1)
+ ret0, _ := ret[0].(*types.KeyInfo)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletExport indicates an expected call of WalletExport.
+func (mr *MockFullNodeMockRecorder) WalletExport(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletExport", reflect.TypeOf((*MockFullNode)(nil).WalletExport), arg0, arg1)
+}
+
+// WalletHas mocks base method.
+func (m *MockFullNode) WalletHas(arg0 context.Context, arg1 address.Address) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletHas", arg0, arg1)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletHas indicates an expected call of WalletHas.
+func (mr *MockFullNodeMockRecorder) WalletHas(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletHas", reflect.TypeOf((*MockFullNode)(nil).WalletHas), arg0, arg1)
+}
+
+// WalletImport mocks base method.
+func (m *MockFullNode) WalletImport(arg0 context.Context, arg1 *types.KeyInfo) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletImport", arg0, arg1)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletImport indicates an expected call of WalletImport.
+func (mr *MockFullNodeMockRecorder) WalletImport(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletImport", reflect.TypeOf((*MockFullNode)(nil).WalletImport), arg0, arg1)
+}
+
+// WalletList mocks base method.
+func (m *MockFullNode) WalletList(arg0 context.Context) ([]address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletList", arg0)
+ ret0, _ := ret[0].([]address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletList indicates an expected call of WalletList.
+func (mr *MockFullNodeMockRecorder) WalletList(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletList", reflect.TypeOf((*MockFullNode)(nil).WalletList), arg0)
+}
+
+// WalletNew mocks base method.
+func (m *MockFullNode) WalletNew(arg0 context.Context, arg1 types.KeyType) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletNew", arg0, arg1)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletNew indicates an expected call of WalletNew.
+func (mr *MockFullNodeMockRecorder) WalletNew(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletNew", reflect.TypeOf((*MockFullNode)(nil).WalletNew), arg0, arg1)
+}
+
+// WalletSetDefault mocks base method.
+func (m *MockFullNode) WalletSetDefault(arg0 context.Context, arg1 address.Address) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletSetDefault", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WalletSetDefault indicates an expected call of WalletSetDefault.
+func (mr *MockFullNodeMockRecorder) WalletSetDefault(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSetDefault", reflect.TypeOf((*MockFullNode)(nil).WalletSetDefault), arg0, arg1)
+}
+
+// WalletSign mocks base method.
+func (m *MockFullNode) WalletSign(arg0 context.Context, arg1 address.Address, arg2 []byte) (*crypto.Signature, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletSign", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*crypto.Signature)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletSign indicates an expected call of WalletSign.
+func (mr *MockFullNodeMockRecorder) WalletSign(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSign", reflect.TypeOf((*MockFullNode)(nil).WalletSign), arg0, arg1, arg2)
+}
+
+// WalletSignMessage mocks base method.
+func (m *MockFullNode) WalletSignMessage(arg0 context.Context, arg1 address.Address, arg2 *types.Message) (*types.SignedMessage, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletSignMessage", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*types.SignedMessage)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletSignMessage indicates an expected call of WalletSignMessage.
+func (mr *MockFullNodeMockRecorder) WalletSignMessage(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSignMessage", reflect.TypeOf((*MockFullNode)(nil).WalletSignMessage), arg0, arg1, arg2)
+}
+
+// WalletValidateAddress mocks base method.
+func (m *MockFullNode) WalletValidateAddress(arg0 context.Context, arg1 string) (address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletValidateAddress", arg0, arg1)
+ ret0, _ := ret[0].(address.Address)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletValidateAddress indicates an expected call of WalletValidateAddress.
+func (mr *MockFullNodeMockRecorder) WalletValidateAddress(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletValidateAddress", reflect.TypeOf((*MockFullNode)(nil).WalletValidateAddress), arg0, arg1)
+}
+
+// WalletVerify mocks base method.
+func (m *MockFullNode) WalletVerify(arg0 context.Context, arg1 address.Address, arg2 []byte, arg3 *crypto.Signature) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WalletVerify", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// WalletVerify indicates an expected call of WalletVerify.
+func (mr *MockFullNodeMockRecorder) WalletVerify(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletVerify", reflect.TypeOf((*MockFullNode)(nil).WalletVerify), arg0, arg1, arg2, arg3)
+}
diff --git a/api/v0api/v1_wrapper.go b/api/v0api/v1_wrapper.go
new file mode 100644
index 00000000000..ff4474fe57a
--- /dev/null
+++ b/api/v0api/v1_wrapper.go
@@ -0,0 +1,187 @@
+package v0api
+
+import (
+ "context"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/chain/types"
+ "golang.org/x/xerrors"
+
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-state-types/abi"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v1api"
+)
+
+type WrapperV1Full struct {
+ v1api.FullNode
+}
+
+func (w *WrapperV1Full) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) {
+ return w.FullNode.StateSearchMsg(ctx, types.EmptyTSK, msg, api.LookbackNoLimit, true)
+}
+
+func (w *WrapperV1Full) StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*api.MsgLookup, error) {
+ return w.FullNode.StateSearchMsg(ctx, types.EmptyTSK, msg, limit, true)
+}
+
+func (w *WrapperV1Full) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) {
+ return w.FullNode.StateWaitMsg(ctx, msg, confidence, api.LookbackNoLimit, true)
+}
+
+func (w *WrapperV1Full) StateWaitMsgLimited(ctx context.Context, msg cid.Cid, confidence uint64, limit abi.ChainEpoch) (*api.MsgLookup, error) {
+ return w.FullNode.StateWaitMsg(ctx, msg, confidence, limit, true)
+}
+
+func (w *WrapperV1Full) StateGetReceipt(ctx context.Context, msg cid.Cid, from types.TipSetKey) (*types.MessageReceipt, error) {
+ ml, err := w.FullNode.StateSearchMsg(ctx, from, msg, api.LookbackNoLimit, true)
+ if err != nil {
+ return nil, err
+ }
+
+ if ml == nil {
+ return nil, nil
+ }
+
+ return &ml.Receipt, nil
+}
+
+func (w *WrapperV1Full) Version(ctx context.Context) (api.APIVersion, error) {
+ ver, err := w.FullNode.Version(ctx)
+ if err != nil {
+ return api.APIVersion{}, err
+ }
+
+ ver.APIVersion = api.FullAPIVersion0
+
+ return ver, nil
+}
+
+func (w *WrapperV1Full) executePrototype(ctx context.Context, p *api.MessagePrototype) (cid.Cid, error) {
+ sm, err := w.FullNode.MpoolPushMessage(ctx, &p.Message, nil)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("pushing message: %w", err)
+ }
+
+ return sm.Cid(), nil
+}
+func (w *WrapperV1Full) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) {
+
+ p, err := w.FullNode.MsigCreate(ctx, req, addrs, duration, val, src, gp)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
+ }
+
+ return w.executePrototype(ctx, p)
+}
+
+func (w *WrapperV1Full) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
+
+ p, err := w.FullNode.MsigPropose(ctx, msig, to, amt, src, method, params)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
+ }
+
+ return w.executePrototype(ctx, p)
+}
+func (w *WrapperV1Full) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) {
+
+ p, err := w.FullNode.MsigApprove(ctx, msig, txID, src)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
+ }
+
+ return w.executePrototype(ctx, p)
+}
+
+func (w *WrapperV1Full) MsigApproveTxnHash(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
+ p, err := w.FullNode.MsigApproveTxnHash(ctx, msig, txID, proposer, to, amt, src, method, params)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
+ }
+
+ return w.executePrototype(ctx, p)
+}
+
+func (w *WrapperV1Full) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
+ p, err := w.FullNode.MsigCancel(ctx, msig, txID, to, amt, src, method, params)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
+ }
+
+ return w.executePrototype(ctx, p)
+}
+
+func (w *WrapperV1Full) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
+
+ p, err := w.FullNode.MsigAddPropose(ctx, msig, src, newAdd, inc)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
+ }
+
+ return w.executePrototype(ctx, p)
+}
+
+func (w *WrapperV1Full) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
+
+ p, err := w.FullNode.MsigAddApprove(ctx, msig, src, txID, proposer, newAdd, inc)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
+ }
+
+ return w.executePrototype(ctx, p)
+}
+
+func (w *WrapperV1Full) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) {
+
+ p, err := w.FullNode.MsigAddCancel(ctx, msig, src, txID, newAdd, inc)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
+ }
+
+ return w.executePrototype(ctx, p)
+}
+
+func (w *WrapperV1Full) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
+
+ p, err := w.FullNode.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
+ }
+
+ return w.executePrototype(ctx, p)
+}
+
+func (w *WrapperV1Full) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
+
+ p, err := w.FullNode.MsigSwapApprove(ctx, msig, src, txID, proposer, oldAdd, newAdd)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
+ }
+
+ return w.executePrototype(ctx, p)
+}
+
+func (w *WrapperV1Full) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
+
+ p, err := w.FullNode.MsigSwapCancel(ctx, msig, src, txID, oldAdd, newAdd)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
+ }
+
+ return w.executePrototype(ctx, p)
+}
+
+func (w *WrapperV1Full) MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) {
+
+ p, err := w.FullNode.MsigRemoveSigner(ctx, msig, proposer, toRemove, decrease)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
+ }
+
+ return w.executePrototype(ctx, p)
+}
+
+var _ FullNode = &WrapperV1Full{}
diff --git a/api/v1api/latest.go b/api/v1api/latest.go
new file mode 100644
index 00000000000..6f57d88262c
--- /dev/null
+++ b/api/v1api/latest.go
@@ -0,0 +1,12 @@
+package v1api
+
+import (
+ "github.com/filecoin-project/lotus/api"
+)
+
+type FullNode = api.FullNode
+type FullNodeStruct = api.FullNodeStruct
+
+func PermissionedFullAPI(a FullNode) FullNode {
+ return api.PermissionedFullAPI(a)
+}
diff --git a/api/version.go b/api/version.go
new file mode 100644
index 00000000000..687f5135a89
--- /dev/null
+++ b/api/version.go
@@ -0,0 +1,73 @@
+package api
+
+import (
+ "fmt"
+
+ xerrors "golang.org/x/xerrors"
+)
+
+type Version uint32
+
+func newVer(major, minor, patch uint8) Version {
+ return Version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch))
+}
+
+// Ints returns (major, minor, patch) versions
+func (ve Version) Ints() (uint32, uint32, uint32) {
+ v := uint32(ve)
+ return (v & majorOnlyMask) >> 16, (v & minorOnlyMask) >> 8, v & patchOnlyMask
+}
+
+func (ve Version) String() string {
+ vmj, vmi, vp := ve.Ints()
+ return fmt.Sprintf("%d.%d.%d", vmj, vmi, vp)
+}
+
+func (ve Version) EqMajorMinor(v2 Version) bool {
+ return ve&minorMask == v2&minorMask
+}
+
+type NodeType int
+
+const (
+ NodeUnknown NodeType = iota
+
+ NodeFull
+ NodeMiner
+ NodeWorker
+)
+
+var RunningNodeType NodeType
+
+func VersionForType(nodeType NodeType) (Version, error) {
+ switch nodeType {
+ case NodeFull:
+ return FullAPIVersion1, nil
+ case NodeMiner:
+ return MinerAPIVersion0, nil
+ case NodeWorker:
+ return WorkerAPIVersion0, nil
+ default:
+ return Version(0), xerrors.Errorf("unknown node type %d", nodeType)
+ }
+}
+
+// semver versions of the rpc api exposed
+var (
+ FullAPIVersion0 = newVer(1, 3, 0)
+ FullAPIVersion1 = newVer(2, 1, 0)
+
+ MinerAPIVersion0 = newVer(1, 2, 0)
+ WorkerAPIVersion0 = newVer(1, 1, 0)
+)
+
+//nolint:varcheck,deadcode
+const (
+ majorMask = 0xff0000
+ minorMask = 0xffff00
+ patchMask = 0xffffff
+
+ majorOnlyMask = 0xff0000
+ minorOnlyMask = 0x00ff00
+ patchOnlyMask = 0x0000ff
+)
diff --git a/api/wrap.go b/api/wrap.go
new file mode 100644
index 00000000000..b26489a42d7
--- /dev/null
+++ b/api/wrap.go
@@ -0,0 +1,53 @@
+package api
+
+import (
+ "reflect"
+)
+
+// Wrap adapts partial api impl to another version
+// proxyT is the proxy type used as input in wrapperT
+// Usage: Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), eventsApi).(EventAPI)
+func Wrap(proxyT, wrapperT, impl interface{}) interface{} {
+ proxy := reflect.New(reflect.TypeOf(proxyT).Elem())
+ proxyMethods := proxy.Elem().FieldByName("Internal")
+ ri := reflect.ValueOf(impl)
+
+ for i := 0; i < ri.NumMethod(); i++ {
+ mt := ri.Type().Method(i)
+ if proxyMethods.FieldByName(mt.Name).Kind() == reflect.Invalid {
+ continue
+ }
+
+ fn := ri.Method(i)
+ of := proxyMethods.FieldByName(mt.Name)
+
+ proxyMethods.FieldByName(mt.Name).Set(reflect.MakeFunc(of.Type(), func(args []reflect.Value) (results []reflect.Value) {
+ return fn.Call(args)
+ }))
+ }
+
+ for i := 0; i < proxy.Elem().NumField(); i++ {
+ if proxy.Elem().Type().Field(i).Name == "Internal" {
+ continue
+ }
+
+ subProxy := proxy.Elem().Field(i).FieldByName("Internal")
+ for i := 0; i < ri.NumMethod(); i++ {
+ mt := ri.Type().Method(i)
+ if subProxy.FieldByName(mt.Name).Kind() == reflect.Invalid {
+ continue
+ }
+
+ fn := ri.Method(i)
+ of := subProxy.FieldByName(mt.Name)
+
+ subProxy.FieldByName(mt.Name).Set(reflect.MakeFunc(of.Type(), func(args []reflect.Value) (results []reflect.Value) {
+ return fn.Call(args)
+ }))
+ }
+ }
+
+ wp := reflect.New(reflect.TypeOf(wrapperT).Elem())
+ wp.Elem().Field(0).Set(proxy)
+ return wp.Interface()
+}
diff --git a/blockstore/api.go b/blockstore/api.go
new file mode 100644
index 00000000000..6715b476677
--- /dev/null
+++ b/blockstore/api.go
@@ -0,0 +1,66 @@
+package blockstore
+
+import (
+ "context"
+
+ blocks "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+)
+
+type ChainIO interface {
+ ChainReadObj(context.Context, cid.Cid) ([]byte, error)
+ ChainHasObj(context.Context, cid.Cid) (bool, error)
+}
+
+type apiBlockstore struct {
+ api ChainIO
+}
+
+// This blockstore is adapted in the constructor.
+var _ BasicBlockstore = (*apiBlockstore)(nil)
+
+func NewAPIBlockstore(cio ChainIO) Blockstore {
+ bs := &apiBlockstore{api: cio}
+ return Adapt(bs) // return an adapted blockstore.
+}
+
+func (a *apiBlockstore) DeleteBlock(cid.Cid) error {
+ return xerrors.New("not supported")
+}
+
+func (a *apiBlockstore) Has(c cid.Cid) (bool, error) {
+ return a.api.ChainHasObj(context.TODO(), c)
+}
+
+func (a *apiBlockstore) Get(c cid.Cid) (blocks.Block, error) {
+ bb, err := a.api.ChainReadObj(context.TODO(), c)
+ if err != nil {
+ return nil, err
+ }
+ return blocks.NewBlockWithCid(bb, c)
+}
+
+func (a *apiBlockstore) GetSize(c cid.Cid) (int, error) {
+ bb, err := a.api.ChainReadObj(context.TODO(), c)
+ if err != nil {
+ return 0, err
+ }
+ return len(bb), nil
+}
+
+func (a *apiBlockstore) Put(blocks.Block) error {
+ return xerrors.New("not supported")
+}
+
+func (a *apiBlockstore) PutMany([]blocks.Block) error {
+ return xerrors.New("not supported")
+}
+
+func (a *apiBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+ return nil, xerrors.New("not supported")
+}
+
+func (a *apiBlockstore) HashOnRead(enabled bool) {
+ return
+}
diff --git a/blockstore/badger/blockstore.go b/blockstore/badger/blockstore.go
new file mode 100644
index 00000000000..82f0e3360c3
--- /dev/null
+++ b/blockstore/badger/blockstore.go
@@ -0,0 +1,581 @@
+package badgerbs
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "runtime"
+ "sync"
+
+ "github.com/dgraph-io/badger/v2"
+ "github.com/dgraph-io/badger/v2/options"
+ "github.com/multiformats/go-base32"
+ "go.uber.org/zap"
+
+ blocks "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-cid"
+ logger "github.com/ipfs/go-log/v2"
+ pool "github.com/libp2p/go-buffer-pool"
+
+ "github.com/filecoin-project/lotus/blockstore"
+)
+
+var (
+ // KeyPool is the buffer pool we use to compute storage keys.
+ KeyPool *pool.BufferPool = pool.GlobalPool
+)
+
+var (
+ // ErrBlockstoreClosed is returned from blockstore operations after
+ // the blockstore has been closed.
+ ErrBlockstoreClosed = fmt.Errorf("badger blockstore closed")
+
+ log = logger.Logger("badgerbs")
+)
+
+// aliases to mask badger dependencies.
+const (
+ // FileIO is equivalent to badger/options.FileIO.
+ FileIO = options.FileIO
+ // MemoryMap is equivalent to badger/options.MemoryMap.
+ MemoryMap = options.MemoryMap
+ // LoadToRAM is equivalent to badger/options.LoadToRAM.
+ LoadToRAM = options.LoadToRAM
+)
+
+// Options embeds the badger options themselves, and augments them with
+// blockstore-specific options.
+type Options struct {
+ badger.Options
+
+ // Prefix is an optional prefix to prepend to keys. Default: "".
+ Prefix string
+}
+
+func DefaultOptions(path string) Options {
+ return Options{
+ Options: badger.DefaultOptions(path),
+ Prefix: "",
+ }
+}
+
+// badgerLogger is a local wrapper for go-log to make the interface
+// compatible with badger.Logger (namely, aliasing Warnf to Warningf)
+type badgerLogger struct {
+ *zap.SugaredLogger // skips 1 caller to get useful line info, skipping over badger.Options.
+
+ skip2 *zap.SugaredLogger // skips 2 callers, just like above + this logger.
+}
+
+// Warningf is required by the badger logger APIs.
+func (b *badgerLogger) Warningf(format string, args ...interface{}) {
+ b.skip2.Warnf(format, args...)
+}
+
+const (
+ stateOpen = iota
+ stateClosing
+ stateClosed
+)
+
+// Blockstore is a badger-backed IPLD blockstore.
+type Blockstore struct {
+ stateLk sync.RWMutex
+ state int
+ viewers sync.WaitGroup
+
+ DB *badger.DB
+
+ prefixing bool
+ prefix []byte
+ prefixLen int
+}
+
+var _ blockstore.Blockstore = (*Blockstore)(nil)
+var _ blockstore.Viewer = (*Blockstore)(nil)
+var _ blockstore.BlockstoreIterator = (*Blockstore)(nil)
+var _ blockstore.BlockstoreGC = (*Blockstore)(nil)
+var _ io.Closer = (*Blockstore)(nil)
+
+// Open creates a new badger-backed blockstore, with the supplied options.
+func Open(opts Options) (*Blockstore, error) {
+ opts.Logger = &badgerLogger{
+ SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(),
+ skip2: log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(),
+ }
+
+ db, err := badger.Open(opts.Options)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open badger blockstore: %w", err)
+ }
+
+ bs := &Blockstore{DB: db}
+ if p := opts.Prefix; p != "" {
+ bs.prefixing = true
+ bs.prefix = []byte(p)
+ bs.prefixLen = len(bs.prefix)
+ }
+
+ return bs, nil
+}
+
+// Close closes the store. If the store has already been closed, this noops and
+// returns nil, even if the first closure resulted in an error.
+func (b *Blockstore) Close() error {
+ b.stateLk.Lock()
+ if b.state != stateOpen {
+ b.stateLk.Unlock()
+ return nil
+ }
+ b.state = stateClosing
+ b.stateLk.Unlock()
+
+ defer func() {
+ b.stateLk.Lock()
+ b.state = stateClosed
+ b.stateLk.Unlock()
+ }()
+
+ // wait for all accesses to complete
+ b.viewers.Wait()
+
+ return b.DB.Close()
+}
+
+func (b *Blockstore) access() error {
+ b.stateLk.RLock()
+ defer b.stateLk.RUnlock()
+
+ if b.state != stateOpen {
+ return ErrBlockstoreClosed
+ }
+
+ b.viewers.Add(1)
+ return nil
+}
+
+func (b *Blockstore) isOpen() bool {
+ b.stateLk.RLock()
+ defer b.stateLk.RUnlock()
+
+ return b.state == stateOpen
+}
+
+// CollectGarbage runs garbage collection on the value log
+func (b *Blockstore) CollectGarbage() error {
+ if err := b.access(); err != nil {
+ return err
+ }
+ defer b.viewers.Done()
+
+ // compact first to gather the necessary statistics for GC
+ nworkers := runtime.NumCPU() / 2
+ if nworkers < 2 {
+ nworkers = 2
+ }
+
+ err := b.DB.Flatten(nworkers)
+ if err != nil {
+ return err
+ }
+
+ for err == nil {
+ err = b.DB.RunValueLogGC(0.125)
+ }
+
+ if err == badger.ErrNoRewrite {
+ // not really an error in this case, it signals the end of GC
+ return nil
+ }
+
+ return err
+}
+
+// View implements blockstore.Viewer, which leverages zero-copy read-only
+// access to values.
+func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error {
+ if err := b.access(); err != nil {
+ return err
+ }
+ defer b.viewers.Done()
+
+ k, pooled := b.PooledStorageKey(cid)
+ if pooled {
+ defer KeyPool.Put(k)
+ }
+
+ return b.DB.View(func(txn *badger.Txn) error {
+ switch item, err := txn.Get(k); err {
+ case nil:
+ return item.Value(fn)
+ case badger.ErrKeyNotFound:
+ return blockstore.ErrNotFound
+ default:
+ return fmt.Errorf("failed to view block from badger blockstore: %w", err)
+ }
+ })
+}
+
+// Has implements Blockstore.Has.
+func (b *Blockstore) Has(cid cid.Cid) (bool, error) {
+ if err := b.access(); err != nil {
+ return false, err
+ }
+ defer b.viewers.Done()
+
+ k, pooled := b.PooledStorageKey(cid)
+ if pooled {
+ defer KeyPool.Put(k)
+ }
+
+ err := b.DB.View(func(txn *badger.Txn) error {
+ _, err := txn.Get(k)
+ return err
+ })
+
+ switch err {
+ case badger.ErrKeyNotFound:
+ return false, nil
+ case nil:
+ return true, nil
+ default:
+ return false, fmt.Errorf("failed to check if block exists in badger blockstore: %w", err)
+ }
+}
+
+// Get implements Blockstore.Get.
+func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) {
+ if !cid.Defined() {
+ return nil, blockstore.ErrNotFound
+ }
+
+ if err := b.access(); err != nil {
+ return nil, err
+ }
+ defer b.viewers.Done()
+
+ k, pooled := b.PooledStorageKey(cid)
+ if pooled {
+ defer KeyPool.Put(k)
+ }
+
+ var val []byte
+ err := b.DB.View(func(txn *badger.Txn) error {
+ switch item, err := txn.Get(k); err {
+ case nil:
+ val, err = item.ValueCopy(nil)
+ return err
+ case badger.ErrKeyNotFound:
+ return blockstore.ErrNotFound
+ default:
+ return fmt.Errorf("failed to get block from badger blockstore: %w", err)
+ }
+ })
+ if err != nil {
+ return nil, err
+ }
+ return blocks.NewBlockWithCid(val, cid)
+}
+
+// GetSize implements Blockstore.GetSize.
+func (b *Blockstore) GetSize(cid cid.Cid) (int, error) {
+ if err := b.access(); err != nil {
+ return 0, err
+ }
+ defer b.viewers.Done()
+
+ k, pooled := b.PooledStorageKey(cid)
+ if pooled {
+ defer KeyPool.Put(k)
+ }
+
+ var size int
+ err := b.DB.View(func(txn *badger.Txn) error {
+ switch item, err := txn.Get(k); err {
+ case nil:
+ size = int(item.ValueSize())
+ case badger.ErrKeyNotFound:
+ return blockstore.ErrNotFound
+ default:
+ return fmt.Errorf("failed to get block size from badger blockstore: %w", err)
+ }
+ return nil
+ })
+ if err != nil {
+ size = -1
+ }
+ return size, err
+}
+
+// Put implements Blockstore.Put.
+func (b *Blockstore) Put(block blocks.Block) error {
+ if err := b.access(); err != nil {
+ return err
+ }
+ defer b.viewers.Done()
+
+ k, pooled := b.PooledStorageKey(block.Cid())
+ if pooled {
+ defer KeyPool.Put(k)
+ }
+
+ err := b.DB.Update(func(txn *badger.Txn) error {
+ return txn.Set(k, block.RawData())
+ })
+ if err != nil {
+ err = fmt.Errorf("failed to put block in badger blockstore: %w", err)
+ }
+ return err
+}
+
+// PutMany implements Blockstore.PutMany.
+func (b *Blockstore) PutMany(blocks []blocks.Block) error {
+ if err := b.access(); err != nil {
+ return err
+ }
+ defer b.viewers.Done()
+
+ // toReturn tracks the byte slices to return to the pool, if we're using key
+ // prefixing. we can't return each slice to the pool after each Set, because
+ // badger holds on to the slice.
+ var toReturn [][]byte
+ if b.prefixing {
+ toReturn = make([][]byte, 0, len(blocks))
+ defer func() {
+ for _, b := range toReturn {
+ KeyPool.Put(b)
+ }
+ }()
+ }
+
+ batch := b.DB.NewWriteBatch()
+ defer batch.Cancel()
+
+ for _, block := range blocks {
+ k, pooled := b.PooledStorageKey(block.Cid())
+ if pooled {
+ toReturn = append(toReturn, k)
+ }
+ if err := batch.Set(k, block.RawData()); err != nil {
+ return err
+ }
+ }
+
+ err := batch.Flush()
+ if err != nil {
+ err = fmt.Errorf("failed to put blocks in badger blockstore: %w", err)
+ }
+ return err
+}
+
+// DeleteBlock implements Blockstore.DeleteBlock.
+func (b *Blockstore) DeleteBlock(cid cid.Cid) error {
+ if err := b.access(); err != nil {
+ return err
+ }
+ defer b.viewers.Done()
+
+ k, pooled := b.PooledStorageKey(cid)
+ if pooled {
+ defer KeyPool.Put(k)
+ }
+
+ return b.DB.Update(func(txn *badger.Txn) error {
+ return txn.Delete(k)
+ })
+}
+
+func (b *Blockstore) DeleteMany(cids []cid.Cid) error {
+ if err := b.access(); err != nil {
+ return err
+ }
+ defer b.viewers.Done()
+
+ // toReturn tracks the byte slices to return to the pool, if we're using key
+ // prefixing. we can't return each slice to the pool after each Set, because
+ // badger holds on to the slice.
+ var toReturn [][]byte
+ if b.prefixing {
+ toReturn = make([][]byte, 0, len(cids))
+ defer func() {
+ for _, b := range toReturn {
+ KeyPool.Put(b)
+ }
+ }()
+ }
+
+ batch := b.DB.NewWriteBatch()
+ defer batch.Cancel()
+
+ for _, cid := range cids {
+ k, pooled := b.PooledStorageKey(cid)
+ if pooled {
+ toReturn = append(toReturn, k)
+ }
+ if err := batch.Delete(k); err != nil {
+ return err
+ }
+ }
+
+ err := batch.Flush()
+ if err != nil {
+ err = fmt.Errorf("failed to delete blocks from badger blockstore: %w", err)
+ }
+ return err
+}
+
+// AllKeysChan implements Blockstore.AllKeysChan.
+func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+ if err := b.access(); err != nil {
+ return nil, err
+ }
+
+ txn := b.DB.NewTransaction(false)
+ opts := badger.IteratorOptions{PrefetchSize: 100}
+ if b.prefixing {
+ opts.Prefix = b.prefix
+ }
+ iter := txn.NewIterator(opts)
+
+ ch := make(chan cid.Cid)
+ go func() {
+ defer b.viewers.Done()
+ defer close(ch)
+ defer iter.Close()
+
+ // NewCidV1 makes a copy of the multihash buffer, so we can reuse it to
+ // contain allocs.
+ var buf []byte
+ for iter.Rewind(); iter.Valid(); iter.Next() {
+ if ctx.Err() != nil {
+ return // context has fired.
+ }
+ if !b.isOpen() {
+ // open iterators will run even after the database is closed...
+ return // closing, yield.
+ }
+ k := iter.Item().Key()
+ if b.prefixing {
+ k = k[b.prefixLen:]
+ }
+
+ if reqlen := base32.RawStdEncoding.DecodedLen(len(k)); len(buf) < reqlen {
+ buf = make([]byte, reqlen)
+ }
+ if n, err := base32.RawStdEncoding.Decode(buf, k); err == nil {
+ select {
+ case ch <- cid.NewCidV1(cid.Raw, buf[:n]):
+ case <-ctx.Done():
+ return
+ }
+ } else {
+ log.Warnf("failed to decode key %s in badger AllKeysChan; err: %s", k, err)
+ }
+ }
+ }()
+
+ return ch, nil
+}
+
+// Implementation of BlockstoreIterator interface
+func (b *Blockstore) ForEachKey(f func(cid.Cid) error) error {
+ if err := b.access(); err != nil {
+ return err
+ }
+ defer b.viewers.Done()
+
+ txn := b.DB.NewTransaction(false)
+ defer txn.Discard()
+
+ opts := badger.IteratorOptions{PrefetchSize: 100}
+ if b.prefixing {
+ opts.Prefix = b.prefix
+ }
+
+ iter := txn.NewIterator(opts)
+ defer iter.Close()
+
+ var buf []byte
+ for iter.Rewind(); iter.Valid(); iter.Next() {
+ if !b.isOpen() {
+ return ErrBlockstoreClosed
+ }
+
+ k := iter.Item().Key()
+ if b.prefixing {
+ k = k[b.prefixLen:]
+ }
+
+ klen := base32.RawStdEncoding.DecodedLen(len(k))
+ if klen > len(buf) {
+ buf = make([]byte, klen)
+ }
+
+ n, err := base32.RawStdEncoding.Decode(buf, k)
+ if err != nil {
+ return err
+ }
+
+ c := cid.NewCidV1(cid.Raw, buf[:n])
+
+ err = f(c)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// HashOnRead implements Blockstore.HashOnRead. It is not supported by this
+// blockstore.
+func (b *Blockstore) HashOnRead(_ bool) {
+ log.Warnf("called HashOnRead on badger blockstore; function not supported; ignoring")
+}
+
+// PooledStorageKey returns the storage key under which this CID is stored.
+//
+// The key is: prefix + base32_no_padding(cid.Hash)
+//
+// This method may return a pooled byte slice, which MUST be returned to the
+// KeyPool if pooled=true, or a leak will occur.
+func (b *Blockstore) PooledStorageKey(cid cid.Cid) (key []byte, pooled bool) {
+ h := cid.Hash()
+ size := base32.RawStdEncoding.EncodedLen(len(h))
+ if !b.prefixing { // optimize for branch prediction.
+ k := pool.Get(size)
+ base32.RawStdEncoding.Encode(k, h)
+ return k, true // slicing up to length unnecessary; the pool has already done this.
+ }
+
+ size += b.prefixLen
+ k := pool.Get(size)
+ copy(k, b.prefix)
+ base32.RawStdEncoding.Encode(k[b.prefixLen:], h)
+ return k, true // slicing up to length unnecessary; the pool has already done this.
+}
+
+// StorageKey acts like PooledStorageKey, but attempts to write the storage key
+// into the provided slice. If the slice capacity is insufficient, it allocates
+// a new byte slice with enough capacity to accommodate the result. This method
+// returns the resulting slice.
+func (b *Blockstore) StorageKey(dst []byte, cid cid.Cid) []byte {
+ h := cid.Hash()
+ reqsize := base32.RawStdEncoding.EncodedLen(len(h)) + b.prefixLen
+ if reqsize > cap(dst) {
+ // passed slice is smaller than required size; create new.
+ dst = make([]byte, reqsize)
+ } else if reqsize > len(dst) {
+ // passed slice has enough capacity, but its length is
+ // restricted, expand.
+ dst = dst[:cap(dst)]
+ }
+
+ if b.prefixing { // optimize for branch prediction.
+ copy(dst, b.prefix)
+ base32.RawStdEncoding.Encode(dst[b.prefixLen:], h)
+ } else {
+ base32.RawStdEncoding.Encode(dst, h)
+ }
+ return dst[:reqsize]
+}
diff --git a/blockstore/badger/blockstore_test.go b/blockstore/badger/blockstore_test.go
new file mode 100644
index 00000000000..3221458d28f
--- /dev/null
+++ b/blockstore/badger/blockstore_test.go
@@ -0,0 +1,91 @@
+package badgerbs
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+
+ blocks "github.com/ipfs/go-block-format"
+ "github.com/stretchr/testify/require"
+
+ "github.com/filecoin-project/lotus/blockstore"
+)
+
+func TestBadgerBlockstore(t *testing.T) {
+ (&Suite{
+ NewBlockstore: newBlockstore(DefaultOptions),
+ OpenBlockstore: openBlockstore(DefaultOptions),
+ }).RunTests(t, "non_prefixed")
+
+ prefixed := func(path string) Options {
+ opts := DefaultOptions(path)
+ opts.Prefix = "/prefixed/"
+ return opts
+ }
+
+ (&Suite{
+ NewBlockstore: newBlockstore(prefixed),
+ OpenBlockstore: openBlockstore(prefixed),
+ }).RunTests(t, "prefixed")
+}
+
+func TestStorageKey(t *testing.T) {
+ bs, _ := newBlockstore(DefaultOptions)(t)
+ bbs := bs.(*Blockstore)
+ defer bbs.Close() //nolint:errcheck
+
+ cid1 := blocks.NewBlock([]byte("some data")).Cid()
+ cid2 := blocks.NewBlock([]byte("more data")).Cid()
+ cid3 := blocks.NewBlock([]byte("a little more data")).Cid()
+ require.NotEqual(t, cid1, cid2) // sanity check
+ require.NotEqual(t, cid2, cid3) // sanity check
+
+ // nil slice; let StorageKey allocate for us.
+ k1 := bbs.StorageKey(nil, cid1)
+ require.Len(t, k1, 55)
+ require.True(t, cap(k1) == len(k1))
+
+ // k1's backing array is reused.
+ k2 := bbs.StorageKey(k1, cid2)
+ require.Len(t, k2, 55)
+ require.True(t, cap(k2) == len(k1))
+
+ // bring k2 to len=0, and verify that its backing array gets reused
+ // (i.e. k1 and k2 are overwritten)
+ k3 := bbs.StorageKey(k2[:0], cid3)
+ require.Len(t, k3, 55)
+ require.True(t, cap(k3) == len(k3))
+
+ // backing array of k1 and k2 has been modified, i.e. memory is shared.
+ require.Equal(t, k3, k1)
+ require.Equal(t, k3, k2)
+}
+
+func newBlockstore(optsSupplier func(path string) Options) func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) {
+ return func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) {
+ tb.Helper()
+
+ path, err := ioutil.TempDir("", "")
+ if err != nil {
+ tb.Fatal(err)
+ }
+
+ db, err := Open(optsSupplier(path))
+ if err != nil {
+ tb.Fatal(err)
+ }
+
+ tb.Cleanup(func() {
+ _ = os.RemoveAll(path)
+ })
+
+ return db, path
+ }
+}
+
+func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error) {
+ return func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error) {
+ tb.Helper()
+ return Open(optsSupplier(path))
+ }
+}
diff --git a/blockstore/badger/blockstore_test_suite.go b/blockstore/badger/blockstore_test_suite.go
new file mode 100644
index 00000000000..93be82ac87e
--- /dev/null
+++ b/blockstore/badger/blockstore_test_suite.go
@@ -0,0 +1,313 @@
+package badgerbs
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "testing"
+
+ blocks "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-cid"
+ u "github.com/ipfs/go-ipfs-util"
+
+ "github.com/filecoin-project/lotus/blockstore"
+
+ "github.com/stretchr/testify/require"
+)
+
+// TODO: move this to go-ipfs-blockstore.
+type Suite struct {
+ NewBlockstore func(tb testing.TB) (bs blockstore.BasicBlockstore, path string)
+ OpenBlockstore func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error)
+}
+
+func (s *Suite) RunTests(t *testing.T, prefix string) {
+ v := reflect.TypeOf(s)
+ f := func(t *testing.T) {
+ for i := 0; i < v.NumMethod(); i++ {
+ if m := v.Method(i); strings.HasPrefix(m.Name, "Test") {
+ f := m.Func.Interface().(func(*Suite, *testing.T))
+ t.Run(m.Name, func(t *testing.T) {
+ f(s, t)
+ })
+ }
+ }
+ }
+
+ if prefix == "" {
+ f(t)
+ } else {
+ t.Run(prefix, f)
+ }
+}
+
+func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) {
+ bs, _ := s.NewBlockstore(t)
+ if c, ok := bs.(io.Closer); ok {
+ defer func() { require.NoError(t, c.Close()) }()
+ }
+
+ c := cid.NewCidV0(u.Hash([]byte("stuff")))
+ bl, err := bs.Get(c)
+ require.Nil(t, bl)
+ require.Equal(t, blockstore.ErrNotFound, err)
+}
+
+func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) {
+ bs, _ := s.NewBlockstore(t)
+ if c, ok := bs.(io.Closer); ok {
+ defer func() { require.NoError(t, c.Close()) }()
+ }
+
+ _, err := bs.Get(cid.Undef)
+ require.Equal(t, blockstore.ErrNotFound, err)
+}
+
+func (s *Suite) TestPutThenGetBlock(t *testing.T) {
+ bs, _ := s.NewBlockstore(t)
+ if c, ok := bs.(io.Closer); ok {
+ defer func() { require.NoError(t, c.Close()) }()
+ }
+
+ orig := blocks.NewBlock([]byte("some data"))
+
+ err := bs.Put(orig)
+ require.NoError(t, err)
+
+ fetched, err := bs.Get(orig.Cid())
+ require.NoError(t, err)
+ require.Equal(t, orig.RawData(), fetched.RawData())
+}
+
+func (s *Suite) TestHas(t *testing.T) {
+ bs, _ := s.NewBlockstore(t)
+ if c, ok := bs.(io.Closer); ok {
+ defer func() { require.NoError(t, c.Close()) }()
+ }
+
+ orig := blocks.NewBlock([]byte("some data"))
+
+ err := bs.Put(orig)
+ require.NoError(t, err)
+
+ ok, err := bs.Has(orig.Cid())
+ require.NoError(t, err)
+ require.True(t, ok)
+
+ ok, err = bs.Has(blocks.NewBlock([]byte("another thing")).Cid())
+ require.NoError(t, err)
+ require.False(t, ok)
+}
+
+func (s *Suite) TestCidv0v1(t *testing.T) {
+ bs, _ := s.NewBlockstore(t)
+ if c, ok := bs.(io.Closer); ok {
+ defer func() { require.NoError(t, c.Close()) }()
+ }
+
+ orig := blocks.NewBlock([]byte("some data"))
+
+ err := bs.Put(orig)
+ require.NoError(t, err)
+
+ fetched, err := bs.Get(cid.NewCidV1(cid.DagProtobuf, orig.Cid().Hash()))
+ require.NoError(t, err)
+ require.Equal(t, orig.RawData(), fetched.RawData())
+}
+
+func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) {
+ bs, _ := s.NewBlockstore(t)
+ if c, ok := bs.(io.Closer); ok {
+ defer func() { require.NoError(t, c.Close()) }()
+ }
+
+ block := blocks.NewBlock([]byte("some data"))
+ missingBlock := blocks.NewBlock([]byte("missingBlock"))
+ emptyBlock := blocks.NewBlock([]byte{})
+
+ err := bs.Put(block)
+ require.NoError(t, err)
+
+ blockSize, err := bs.GetSize(block.Cid())
+ require.NoError(t, err)
+ require.Len(t, block.RawData(), blockSize)
+
+ err = bs.Put(emptyBlock)
+ require.NoError(t, err)
+
+ emptySize, err := bs.GetSize(emptyBlock.Cid())
+ require.NoError(t, err)
+ require.Zero(t, emptySize)
+
+ missingSize, err := bs.GetSize(missingBlock.Cid())
+ require.Equal(t, blockstore.ErrNotFound, err)
+ require.Equal(t, -1, missingSize)
+}
+
+func (s *Suite) TestAllKeysSimple(t *testing.T) {
+ bs, _ := s.NewBlockstore(t)
+ if c, ok := bs.(io.Closer); ok {
+ defer func() { require.NoError(t, c.Close()) }()
+ }
+
+ keys := insertBlocks(t, bs, 100)
+
+ ctx := context.Background()
+ ch, err := bs.AllKeysChan(ctx)
+ require.NoError(t, err)
+ actual := collect(ch)
+
+ require.ElementsMatch(t, keys, actual)
+}
+
+func (s *Suite) TestAllKeysRespectsContext(t *testing.T) {
+ bs, _ := s.NewBlockstore(t)
+ if c, ok := bs.(io.Closer); ok {
+ defer func() { require.NoError(t, c.Close()) }()
+ }
+
+ _ = insertBlocks(t, bs, 100)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ ch, err := bs.AllKeysChan(ctx)
+ require.NoError(t, err)
+
+ // consume 2, then cancel context.
+ v, ok := <-ch
+ require.NotEqual(t, cid.Undef, v)
+ require.True(t, ok)
+
+ v, ok = <-ch
+ require.NotEqual(t, cid.Undef, v)
+ require.True(t, ok)
+
+ cancel()
+ // pull one value out to avoid race
+ _, _ = <-ch
+
+ v, ok = <-ch
+ require.Equal(t, cid.Undef, v)
+ require.False(t, ok)
+}
+
+func (s *Suite) TestDoubleClose(t *testing.T) {
+ bs, _ := s.NewBlockstore(t)
+ c, ok := bs.(io.Closer)
+ if !ok {
+ t.SkipNow()
+ }
+ require.NoError(t, c.Close())
+ require.NoError(t, c.Close())
+}
+
+func (s *Suite) TestReopenPutGet(t *testing.T) {
+ bs, path := s.NewBlockstore(t)
+ c, ok := bs.(io.Closer)
+ if !ok {
+ t.SkipNow()
+ }
+
+ orig := blocks.NewBlock([]byte("some data"))
+ err := bs.Put(orig)
+ require.NoError(t, err)
+
+ err = c.Close()
+ require.NoError(t, err)
+
+ bs, err = s.OpenBlockstore(t, path)
+ require.NoError(t, err)
+
+ fetched, err := bs.Get(orig.Cid())
+ require.NoError(t, err)
+ require.Equal(t, orig.RawData(), fetched.RawData())
+
+ err = bs.(io.Closer).Close()
+ require.NoError(t, err)
+}
+
+func (s *Suite) TestPutMany(t *testing.T) {
+ bs, _ := s.NewBlockstore(t)
+ if c, ok := bs.(io.Closer); ok {
+ defer func() { require.NoError(t, c.Close()) }()
+ }
+
+ blks := []blocks.Block{
+ blocks.NewBlock([]byte("foo1")),
+ blocks.NewBlock([]byte("foo2")),
+ blocks.NewBlock([]byte("foo3")),
+ }
+ err := bs.PutMany(blks)
+ require.NoError(t, err)
+
+ for _, blk := range blks {
+ fetched, err := bs.Get(blk.Cid())
+ require.NoError(t, err)
+ require.Equal(t, blk.RawData(), fetched.RawData())
+
+ ok, err := bs.Has(blk.Cid())
+ require.NoError(t, err)
+ require.True(t, ok)
+ }
+
+ ch, err := bs.AllKeysChan(context.Background())
+ require.NoError(t, err)
+
+ cids := collect(ch)
+ require.Len(t, cids, 3)
+}
+
+func (s *Suite) TestDelete(t *testing.T) {
+ bs, _ := s.NewBlockstore(t)
+ if c, ok := bs.(io.Closer); ok {
+ defer func() { require.NoError(t, c.Close()) }()
+ }
+
+ blks := []blocks.Block{
+ blocks.NewBlock([]byte("foo1")),
+ blocks.NewBlock([]byte("foo2")),
+ blocks.NewBlock([]byte("foo3")),
+ }
+ err := bs.PutMany(blks)
+ require.NoError(t, err)
+
+ err = bs.DeleteBlock(blks[1].Cid())
+ require.NoError(t, err)
+
+ ch, err := bs.AllKeysChan(context.Background())
+ require.NoError(t, err)
+
+ cids := collect(ch)
+ require.Len(t, cids, 2)
+ require.ElementsMatch(t, cids, []cid.Cid{
+ cid.NewCidV1(cid.Raw, blks[0].Cid().Hash()),
+ cid.NewCidV1(cid.Raw, blks[2].Cid().Hash()),
+ })
+
+ has, err := bs.Has(blks[1].Cid())
+ require.NoError(t, err)
+ require.False(t, has)
+
+}
+
+func insertBlocks(t *testing.T, bs blockstore.BasicBlockstore, count int) []cid.Cid {
+ keys := make([]cid.Cid, count)
+ for i := 0; i < count; i++ {
+ block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i)))
+ err := bs.Put(block)
+ require.NoError(t, err)
+ // NewBlock assigns a CIDv0; we convert it to CIDv1 because that's what
+ // the store returns.
+ keys[i] = cid.NewCidV1(cid.Raw, block.Multihash())
+ }
+ return keys
+}
+
+func collect(ch <-chan cid.Cid) []cid.Cid {
+ var keys []cid.Cid
+ for k := range ch {
+ keys = append(keys, k)
+ }
+ return keys
+}
diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go
new file mode 100644
index 00000000000..97f9f5f7b58
--- /dev/null
+++ b/blockstore/blockstore.go
@@ -0,0 +1,105 @@
+package blockstore
+
+import (
+ cid "github.com/ipfs/go-cid"
+ ds "github.com/ipfs/go-datastore"
+ logging "github.com/ipfs/go-log/v2"
+
+ blockstore "github.com/ipfs/go-ipfs-blockstore"
+)
+
+var log = logging.Logger("blockstore")
+
+var ErrNotFound = blockstore.ErrNotFound
+
+// Blockstore is the blockstore interface used by Lotus. It is the union
+// of the basic go-ipfs blockstore, with other capabilities required by Lotus,
+// e.g. View or Sync.
+type Blockstore interface {
+ blockstore.Blockstore
+ blockstore.Viewer
+ BatchDeleter
+}
+
+// BasicBlockstore is an alias to the original IPFS Blockstore.
+type BasicBlockstore = blockstore.Blockstore
+
+type Viewer = blockstore.Viewer
+
+type BatchDeleter interface {
+ DeleteMany(cids []cid.Cid) error
+}
+
+// BlockstoreIterator is a trait for efficient iteration
+type BlockstoreIterator interface {
+ ForEachKey(func(cid.Cid) error) error
+}
+
+// BlockstoreGC is a trait for blockstores that support online garbage collection
+type BlockstoreGC interface {
+ CollectGarbage() error
+}
+
+// WrapIDStore wraps the underlying blockstore in an "identity" blockstore.
+// The ID store filters out all puts for blocks with CIDs using the "identity"
+// hash function. It also extracts inlined blocks from CIDs using the identity
+// hash function and returns them on get/has, ignoring the contents of the
+// blockstore.
+func WrapIDStore(bstore blockstore.Blockstore) Blockstore {
+ if is, ok := bstore.(*idstore); ok {
+ // already wrapped
+ return is
+ }
+
+ if bs, ok := bstore.(Blockstore); ok {
+ // the underlying blockstore already implements an (efficient) DeleteMany;
+ // wrap it directly so we don't neuter that method
+ return NewIDStore(bs)
+ }
+
+ // The underlying blockstore does not implement DeleteMany, so we need to shim it.
+ // This is less efficient as it'll iterate and perform single deletes.
+ return NewIDStore(Adapt(bstore))
+}
+
+// FromDatastore creates a new blockstore backed by the given datastore.
+func FromDatastore(dstore ds.Batching) Blockstore {
+ return WrapIDStore(blockstore.NewBlockstore(dstore))
+}
+
+type adaptedBlockstore struct {
+ blockstore.Blockstore
+}
+
+var _ Blockstore = (*adaptedBlockstore)(nil)
+
+func (a *adaptedBlockstore) View(cid cid.Cid, callback func([]byte) error) error {
+ blk, err := a.Get(cid)
+ if err != nil {
+ return err
+ }
+ return callback(blk.RawData())
+}
+
+func (a *adaptedBlockstore) DeleteMany(cids []cid.Cid) error {
+ for _, cid := range cids {
+ err := a.DeleteBlock(cid)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Adapt adapts a standard blockstore to a Lotus blockstore by
+// enriching it with the extra methods that Lotus requires (e.g. View, DeleteMany).
+//
+// View proxies over to Get and calls the callback with the value supplied by Get.
+// DeleteMany is shimmed by iterating the CIDs and deleting each block individually.
+func Adapt(bs blockstore.Blockstore) Blockstore {
+ if ret, ok := bs.(Blockstore); ok {
+ return ret
+ }
+ return &adaptedBlockstore{bs}
+}
diff --git a/blockstore/buffered.go b/blockstore/buffered.go
new file mode 100644
index 00000000000..5d3d38f78f9
--- /dev/null
+++ b/blockstore/buffered.go
@@ -0,0 +1,174 @@
+package blockstore
+
+import (
+ "context"
+ "os"
+
+ block "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-cid"
+)
+
+// buflog is a logger for the buffered blockstore. It is subscoped from the
+// blockstore logger.
+var buflog = log.Named("buf")
+
+type BufferedBlockstore struct {
+ read Blockstore
+ write Blockstore
+}
+
+func NewBuffered(base Blockstore) *BufferedBlockstore {
+ var buf Blockstore
+ if os.Getenv("LOTUS_DISABLE_VM_BUF") == "iknowitsabadidea" {
+ buflog.Warn("VM BLOCKSTORE BUFFERING IS DISABLED")
+ buf = base
+ } else {
+ buf = NewMemory()
+ }
+
+ bs := &BufferedBlockstore{
+ read: base,
+ write: buf,
+ }
+ return bs
+}
+
+func NewTieredBstore(r Blockstore, w Blockstore) *BufferedBlockstore {
+ return &BufferedBlockstore{
+ read: r,
+ write: w,
+ }
+}
+
+var (
+ _ Blockstore = (*BufferedBlockstore)(nil)
+ _ Viewer = (*BufferedBlockstore)(nil)
+)
+
+func (bs *BufferedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+ a, err := bs.read.AllKeysChan(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ b, err := bs.write.AllKeysChan(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ out := make(chan cid.Cid)
+ go func() {
+ defer close(out)
+ for a != nil || b != nil {
+ select {
+ case val, ok := <-a:
+ if !ok {
+ a = nil
+ } else {
+ select {
+ case out <- val:
+ case <-ctx.Done():
+ return
+ }
+ }
+ case val, ok := <-b:
+ if !ok {
+ b = nil
+ } else {
+ select {
+ case out <- val:
+ case <-ctx.Done():
+ return
+ }
+ }
+ }
+ }
+ }()
+
+ return out, nil
+}
+
+func (bs *BufferedBlockstore) DeleteBlock(c cid.Cid) error {
+ if err := bs.read.DeleteBlock(c); err != nil {
+ return err
+ }
+
+ return bs.write.DeleteBlock(c)
+}
+
+func (bs *BufferedBlockstore) DeleteMany(cids []cid.Cid) error {
+ if err := bs.read.DeleteMany(cids); err != nil {
+ return err
+ }
+
+ return bs.write.DeleteMany(cids)
+}
+
+func (bs *BufferedBlockstore) View(c cid.Cid, callback func([]byte) error) error {
+ // both stores are viewable.
+ if err := bs.write.View(c, callback); err == ErrNotFound {
+ // not found in write blockstore; fall through.
+ } else {
+ return err // propagate errors, or nil, i.e. found.
+ }
+ return bs.read.View(c, callback)
+}
+
+func (bs *BufferedBlockstore) Get(c cid.Cid) (block.Block, error) {
+ if out, err := bs.write.Get(c); err != nil {
+ if err != ErrNotFound {
+ return nil, err
+ }
+ } else {
+ return out, nil
+ }
+
+ return bs.read.Get(c)
+}
+
+func (bs *BufferedBlockstore) GetSize(c cid.Cid) (int, error) {
+ s, err := bs.read.GetSize(c)
+ if err == ErrNotFound || s == 0 {
+ return bs.write.GetSize(c)
+ }
+
+ return s, err
+}
+
+func (bs *BufferedBlockstore) Put(blk block.Block) error {
+ has, err := bs.read.Has(blk.Cid()) // TODO: consider dropping this check
+ if err != nil {
+ return err
+ }
+
+ if has {
+ return nil
+ }
+
+ return bs.write.Put(blk)
+}
+
+func (bs *BufferedBlockstore) Has(c cid.Cid) (bool, error) {
+ has, err := bs.write.Has(c)
+ if err != nil {
+ return false, err
+ }
+ if has {
+ return true, nil
+ }
+
+ return bs.read.Has(c)
+}
+
+func (bs *BufferedBlockstore) HashOnRead(hor bool) {
+ bs.read.HashOnRead(hor)
+ bs.write.HashOnRead(hor)
+}
+
+func (bs *BufferedBlockstore) PutMany(blks []block.Block) error {
+ return bs.write.PutMany(blks)
+}
+
+func (bs *BufferedBlockstore) Read() Blockstore {
+ return bs.read
+}
diff --git a/blockstore/discard.go b/blockstore/discard.go
new file mode 100644
index 00000000000..afd0651bc07
--- /dev/null
+++ b/blockstore/discard.go
@@ -0,0 +1,66 @@
+package blockstore
+
+import (
+ "context"
+ "io"
+
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+)
+
+var _ Blockstore = (*discardstore)(nil)
+
+type discardstore struct {
+ bs Blockstore
+}
+
+func NewDiscardStore(bs Blockstore) Blockstore {
+ return &discardstore{bs: bs}
+}
+
+func (b *discardstore) Has(cid cid.Cid) (bool, error) {
+ return b.bs.Has(cid)
+}
+
+func (b *discardstore) HashOnRead(hor bool) {
+ b.bs.HashOnRead(hor)
+}
+
+func (b *discardstore) Get(cid cid.Cid) (blocks.Block, error) {
+ return b.bs.Get(cid)
+}
+
+func (b *discardstore) GetSize(cid cid.Cid) (int, error) {
+ return b.bs.GetSize(cid)
+}
+
+func (b *discardstore) View(cid cid.Cid, f func([]byte) error) error {
+ return b.bs.View(cid, f)
+}
+
+func (b *discardstore) Put(blk blocks.Block) error {
+ return nil
+}
+
+func (b *discardstore) PutMany(blks []blocks.Block) error {
+ return nil
+}
+
+func (b *discardstore) DeleteBlock(cid cid.Cid) error {
+ return nil
+}
+
+func (b *discardstore) DeleteMany(cids []cid.Cid) error {
+ return nil
+}
+
+func (b *discardstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+ return b.bs.AllKeysChan(ctx)
+}
+
+func (b *discardstore) Close() error {
+ if c, ok := b.bs.(io.Closer); ok {
+ return c.Close()
+ }
+ return nil
+}
diff --git a/blockstore/doc.go b/blockstore/doc.go
new file mode 100644
index 00000000000..fea1126f5ca
--- /dev/null
+++ b/blockstore/doc.go
@@ -0,0 +1,9 @@
+// Package blockstore and subpackages contain most of the blockstore
+// implementations used by Lotus.
+//
+// Blockstores not ultimately constructed out of the building blocks in this
+// package may not work properly.
+//
+// This package re-exports parts of the go-ipfs-blockstore package such that
+// no other package needs to import it directly, for ergonomics and traceability.
+package blockstore
diff --git a/blockstore/fallback.go b/blockstore/fallback.go
new file mode 100644
index 00000000000..5f220f941bb
--- /dev/null
+++ b/blockstore/fallback.go
@@ -0,0 +1,106 @@
+package blockstore
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "golang.org/x/xerrors"
+
+ blocks "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-cid"
+)
+
+// UnwrapFallbackStore takes a blockstore, and returns the underlying blockstore
+// if it was a FallbackStore. Otherwise, it just returns the supplied store
+// unmodified.
+func UnwrapFallbackStore(bs Blockstore) (Blockstore, bool) {
+ if fbs, ok := bs.(*FallbackStore); ok {
+ return fbs.Blockstore, true
+ }
+ return bs, false
+}
+
+// FallbackStore is a read-through store that queries another (potentially
+// remote) source if the block is not found locally. If the block is found
+// during the fallback, it stores it in the local store.
+type FallbackStore struct {
+ Blockstore
+
+ lk sync.RWMutex
+ // missFn is the function that will be invoked on a local miss to pull the
+ // block from elsewhere.
+ missFn func(context.Context, cid.Cid) (blocks.Block, error)
+}
+
+var _ Blockstore = (*FallbackStore)(nil)
+
+func (fbs *FallbackStore) SetFallback(missFn func(context.Context, cid.Cid) (blocks.Block, error)) {
+ fbs.lk.Lock()
+ defer fbs.lk.Unlock()
+
+ fbs.missFn = missFn
+}
+
+func (fbs *FallbackStore) getFallback(c cid.Cid) (blocks.Block, error) {
+ log.Warnf("fallbackstore: block not found locally, fetching from the network; cid: %s", c)
+ fbs.lk.RLock()
+ defer fbs.lk.RUnlock()
+
+ if fbs.missFn == nil {
+ // FallbackStore wasn't configured yet (chainstore/bitswap aren't up yet)
+ // Wait for a bit and retry
+ fbs.lk.RUnlock()
+ time.Sleep(5 * time.Second)
+ fbs.lk.RLock()
+
+ if fbs.missFn == nil {
+ log.Errorw("fallbackstore: missFn not configured yet")
+ return nil, ErrNotFound
+ }
+ }
+
+ ctx, cancel := context.WithTimeout(context.TODO(), 120*time.Second)
+ defer cancel()
+
+ b, err := fbs.missFn(ctx, c)
+ if err != nil {
+ return nil, err
+ }
+
+ // chain bitswap puts blocks in temp blockstore which is cleaned up
+ // every few min (to drop any messages we fetched but don't want)
+ // in this case we want to keep this block around
+ if err := fbs.Put(b); err != nil {
+ return nil, xerrors.Errorf("persisting fallback-fetched block: %w", err)
+ }
+ return b, nil
+}
+
+func (fbs *FallbackStore) Get(c cid.Cid) (blocks.Block, error) {
+ b, err := fbs.Blockstore.Get(c)
+ switch err {
+ case nil:
+ return b, nil
+ case ErrNotFound:
+ return fbs.getFallback(c)
+ default:
+ return b, err
+ }
+}
+
+func (fbs *FallbackStore) GetSize(c cid.Cid) (int, error) {
+ sz, err := fbs.Blockstore.GetSize(c)
+ switch err {
+ case nil:
+ return sz, nil
+ case ErrNotFound:
+ b, err := fbs.getFallback(c)
+ if err != nil {
+ return 0, err
+ }
+ return len(b.RawData()), nil
+ default:
+ return sz, err
+ }
+}
diff --git a/blockstore/idstore.go b/blockstore/idstore.go
new file mode 100644
index 00000000000..e6148ff04e2
--- /dev/null
+++ b/blockstore/idstore.go
@@ -0,0 +1,174 @@
+package blockstore
+
+import (
+ "context"
+ "io"
+
+ "golang.org/x/xerrors"
+
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+ mh "github.com/multiformats/go-multihash"
+)
+
+var _ Blockstore = (*idstore)(nil)
+
+type idstore struct {
+ bs Blockstore
+}
+
+func NewIDStore(bs Blockstore) Blockstore {
+ return &idstore{bs: bs}
+}
+
+func decodeCid(cid cid.Cid) (inline bool, data []byte, err error) {
+ if cid.Prefix().MhType != mh.IDENTITY {
+ return false, nil, nil
+ }
+
+ dmh, err := mh.Decode(cid.Hash())
+ if err != nil {
+ return false, nil, err
+ }
+
+ if dmh.Code == mh.IDENTITY {
+ return true, dmh.Digest, nil
+ }
+
+ return false, nil, err
+}
+
+func (b *idstore) Has(cid cid.Cid) (bool, error) {
+ inline, _, err := decodeCid(cid)
+ if err != nil {
+ return false, xerrors.Errorf("error decoding Cid: %w", err)
+ }
+
+ if inline {
+ return true, nil
+ }
+
+ return b.bs.Has(cid)
+}
+
+func (b *idstore) Get(cid cid.Cid) (blocks.Block, error) {
+ inline, data, err := decodeCid(cid)
+ if err != nil {
+ return nil, xerrors.Errorf("error decoding Cid: %w", err)
+ }
+
+ if inline {
+ return blocks.NewBlockWithCid(data, cid)
+ }
+
+ return b.bs.Get(cid)
+}
+
+func (b *idstore) GetSize(cid cid.Cid) (int, error) {
+ inline, data, err := decodeCid(cid)
+ if err != nil {
+ return 0, xerrors.Errorf("error decoding Cid: %w", err)
+ }
+
+ if inline {
+ return len(data), err
+ }
+
+ return b.bs.GetSize(cid)
+}
+
+func (b *idstore) View(cid cid.Cid, cb func([]byte) error) error {
+ inline, data, err := decodeCid(cid)
+ if err != nil {
+ return xerrors.Errorf("error decoding Cid: %w", err)
+ }
+
+ if inline {
+ return cb(data)
+ }
+
+ return b.bs.View(cid, cb)
+}
+
+func (b *idstore) Put(blk blocks.Block) error {
+ inline, _, err := decodeCid(blk.Cid())
+ if err != nil {
+ return xerrors.Errorf("error decoding Cid: %w", err)
+ }
+
+ if inline {
+ return nil
+ }
+
+ return b.bs.Put(blk)
+}
+
+func (b *idstore) PutMany(blks []blocks.Block) error {
+ toPut := make([]blocks.Block, 0, len(blks))
+ for _, blk := range blks {
+ inline, _, err := decodeCid(blk.Cid())
+ if err != nil {
+ return xerrors.Errorf("error decoding Cid: %w", err)
+ }
+
+ if inline {
+ continue
+ }
+ toPut = append(toPut, blk)
+ }
+
+ if len(toPut) > 0 {
+ return b.bs.PutMany(toPut)
+ }
+
+ return nil
+}
+
+func (b *idstore) DeleteBlock(cid cid.Cid) error {
+ inline, _, err := decodeCid(cid)
+ if err != nil {
+ return xerrors.Errorf("error decoding Cid: %w", err)
+ }
+
+ if inline {
+ return nil
+ }
+
+ return b.bs.DeleteBlock(cid)
+}
+
+func (b *idstore) DeleteMany(cids []cid.Cid) error {
+ toDelete := make([]cid.Cid, 0, len(cids))
+ for _, cid := range cids {
+ inline, _, err := decodeCid(cid)
+ if err != nil {
+ return xerrors.Errorf("error decoding Cid: %w", err)
+ }
+
+ if inline {
+ continue
+ }
+ toDelete = append(toDelete, cid)
+ }
+
+ if len(toDelete) > 0 {
+ return b.bs.DeleteMany(toDelete)
+ }
+
+ return nil
+}
+
+func (b *idstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+ return b.bs.AllKeysChan(ctx)
+}
+
+func (b *idstore) HashOnRead(enabled bool) {
+ b.bs.HashOnRead(enabled)
+}
+
+func (b *idstore) Close() error {
+ if c, ok := b.bs.(io.Closer); ok {
+ return c.Close()
+ }
+ return nil
+}
diff --git a/lib/ipfsbstore/ipfsbstore.go b/blockstore/ipfs.go
similarity index 57%
rename from lib/ipfsbstore/ipfsbstore.go
rename to blockstore/ipfs.go
index 748afee5187..51b4bd95123 100644
--- a/lib/ipfsbstore/ipfsbstore.go
+++ b/blockstore/ipfs.go
@@ -1,4 +1,4 @@
-package ipfsbstore
+package blockstore
import (
"bytes"
@@ -16,53 +16,75 @@ import (
iface "github.com/ipfs/interface-go-ipfs-core"
"github.com/ipfs/interface-go-ipfs-core/options"
"github.com/ipfs/interface-go-ipfs-core/path"
-
- "github.com/filecoin-project/lotus/lib/blockstore"
)
-type IpfsBstore struct {
- ctx context.Context
- api iface.CoreAPI
+type IPFSBlockstore struct {
+ ctx context.Context
+ api, offlineAPI iface.CoreAPI
}
-func NewIpfsBstore(ctx context.Context) (*IpfsBstore, error) {
+var _ BasicBlockstore = (*IPFSBlockstore)(nil)
+
+func NewLocalIPFSBlockstore(ctx context.Context, onlineMode bool) (Blockstore, error) {
localApi, err := httpapi.NewLocalApi()
if err != nil {
return nil, xerrors.Errorf("getting local ipfs api: %w", err)
}
- api, err := localApi.WithOptions(options.Api.Offline(true))
+ api, err := localApi.WithOptions(options.Api.Offline(!onlineMode))
if err != nil {
return nil, xerrors.Errorf("setting offline mode: %s", err)
}
- return &IpfsBstore{
- ctx: ctx,
- api: api,
- }, nil
+ offlineAPI := api
+ if onlineMode {
+ offlineAPI, err = localApi.WithOptions(options.Api.Offline(true))
+ if err != nil {
+ return nil, xerrors.Errorf("applying offline mode: %s", err)
+ }
+ }
+
+ bs := &IPFSBlockstore{
+ ctx: ctx,
+ api: api,
+ offlineAPI: offlineAPI,
+ }
+
+ return Adapt(bs), nil
}
-func NewRemoteIpfsBstore(ctx context.Context, maddr multiaddr.Multiaddr) (*IpfsBstore, error) {
+func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineMode bool) (Blockstore, error) {
httpApi, err := httpapi.NewApi(maddr)
if err != nil {
return nil, xerrors.Errorf("setting remote ipfs api: %w", err)
}
- api, err := httpApi.WithOptions(options.Api.Offline(true))
+ api, err := httpApi.WithOptions(options.Api.Offline(!onlineMode))
if err != nil {
return nil, xerrors.Errorf("applying offline mode: %s", err)
}
- return &IpfsBstore{
- ctx: ctx,
- api: api,
- }, nil
+ offlineAPI := api
+ if onlineMode {
+ offlineAPI, err = httpApi.WithOptions(options.Api.Offline(true))
+ if err != nil {
+ return nil, xerrors.Errorf("applying offline mode: %s", err)
+ }
+ }
+
+ bs := &IPFSBlockstore{
+ ctx: ctx,
+ api: api,
+ offlineAPI: offlineAPI,
+ }
+
+ return Adapt(bs), nil
}
-func (i *IpfsBstore) DeleteBlock(cid cid.Cid) error {
+func (i *IPFSBlockstore) DeleteBlock(cid cid.Cid) error {
return xerrors.Errorf("not supported")
}
-func (i *IpfsBstore) Has(cid cid.Cid) (bool, error) {
- _, err := i.api.Block().Stat(i.ctx, path.IpldPath(cid))
+func (i *IPFSBlockstore) Has(cid cid.Cid) (bool, error) {
+ _, err := i.offlineAPI.Block().Stat(i.ctx, path.IpldPath(cid))
if err != nil {
// The underlying client is running in Offline mode.
// Stat() will fail with an err if the block isn't in the
@@ -77,7 +99,7 @@ func (i *IpfsBstore) Has(cid cid.Cid) (bool, error) {
return true, nil
}
-func (i *IpfsBstore) Get(cid cid.Cid) (blocks.Block, error) {
+func (i *IPFSBlockstore) Get(cid cid.Cid) (blocks.Block, error) {
rd, err := i.api.Block().Get(i.ctx, path.IpldPath(cid))
if err != nil {
return nil, xerrors.Errorf("getting ipfs block: %w", err)
@@ -91,7 +113,7 @@ func (i *IpfsBstore) Get(cid cid.Cid) (blocks.Block, error) {
return blocks.NewBlockWithCid(data, cid)
}
-func (i *IpfsBstore) GetSize(cid cid.Cid) (int, error) {
+func (i *IPFSBlockstore) GetSize(cid cid.Cid) (int, error) {
st, err := i.api.Block().Stat(i.ctx, path.IpldPath(cid))
if err != nil {
return 0, xerrors.Errorf("getting ipfs block: %w", err)
@@ -100,7 +122,7 @@ func (i *IpfsBstore) GetSize(cid cid.Cid) (int, error) {
return st.Size(), nil
}
-func (i *IpfsBstore) Put(block blocks.Block) error {
+func (i *IPFSBlockstore) Put(block blocks.Block) error {
mhd, err := multihash.Decode(block.Cid().Hash())
if err != nil {
return err
@@ -112,7 +134,7 @@ func (i *IpfsBstore) Put(block blocks.Block) error {
return err
}
-func (i *IpfsBstore) PutMany(blocks []blocks.Block) error {
+func (i *IPFSBlockstore) PutMany(blocks []blocks.Block) error {
// TODO: could be done in parallel
for _, block := range blocks {
@@ -124,12 +146,10 @@ func (i *IpfsBstore) PutMany(blocks []blocks.Block) error {
return nil
}
-func (i *IpfsBstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+func (i *IPFSBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return nil, xerrors.Errorf("not supported")
}
-func (i *IpfsBstore) HashOnRead(enabled bool) {
+func (i *IPFSBlockstore) HashOnRead(enabled bool) {
return // TODO: We could technically support this, but..
}
-
-var _ blockstore.Blockstore = &IpfsBstore{}
diff --git a/lib/blockstore/memstore.go b/blockstore/mem.go
similarity index 56%
rename from lib/blockstore/memstore.go
rename to blockstore/mem.go
index 9745d6f0395..8ea69d46a49 100644
--- a/lib/blockstore/memstore.go
+++ b/blockstore/mem.go
@@ -5,38 +5,60 @@ import (
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
- blockstore "github.com/ipfs/go-ipfs-blockstore"
)
-type MemStore map[cid.Cid]blocks.Block
+// NewMemory returns a temporary memory-backed blockstore.
+func NewMemory() MemBlockstore {
+ return make(MemBlockstore)
+}
+
+// MemBlockstore is a terminal blockstore that keeps blocks in memory.
+type MemBlockstore map[cid.Cid]blocks.Block
-func (m MemStore) DeleteBlock(k cid.Cid) error {
+func (m MemBlockstore) DeleteBlock(k cid.Cid) error {
delete(m, k)
return nil
}
-func (m MemStore) Has(k cid.Cid) (bool, error) {
+
+func (m MemBlockstore) DeleteMany(ks []cid.Cid) error {
+ for _, k := range ks {
+ delete(m, k)
+ }
+ return nil
+}
+
+func (m MemBlockstore) Has(k cid.Cid) (bool, error) {
_, ok := m[k]
return ok, nil
}
-func (m MemStore) Get(k cid.Cid) (blocks.Block, error) {
+
+func (m MemBlockstore) View(k cid.Cid, callback func([]byte) error) error {
+ b, ok := m[k]
+ if !ok {
+ return ErrNotFound
+ }
+ return callback(b.RawData())
+}
+
+func (m MemBlockstore) Get(k cid.Cid) (blocks.Block, error) {
b, ok := m[k]
if !ok {
- return nil, blockstore.ErrNotFound
+ return nil, ErrNotFound
}
return b, nil
}
// GetSize returns the CIDs mapped BlockSize
-func (m MemStore) GetSize(k cid.Cid) (int, error) {
+func (m MemBlockstore) GetSize(k cid.Cid) (int, error) {
b, ok := m[k]
if !ok {
- return 0, blockstore.ErrNotFound
+ return 0, ErrNotFound
}
return len(b.RawData()), nil
}
// Put puts a given block to the underlying datastore
-func (m MemStore) Put(b blocks.Block) error {
+func (m MemBlockstore) Put(b blocks.Block) error {
// Convert to a basic block for safety, but try to reuse the existing
// block if it's already a basic block.
k := b.Cid()
@@ -54,7 +76,7 @@ func (m MemStore) Put(b blocks.Block) error {
// PutMany puts a slice of blocks at the same time using batching
// capabilities of the underlying datastore whenever possible.
-func (m MemStore) PutMany(bs []blocks.Block) error {
+func (m MemBlockstore) PutMany(bs []blocks.Block) error {
for _, b := range bs {
_ = m.Put(b) // can't fail
}
@@ -64,7 +86,7 @@ func (m MemStore) PutMany(bs []blocks.Block) error {
// AllKeysChan returns a channel from which
// the CIDs in the Blockstore can be read. It should respect
// the given context, closing the channel if it becomes Done.
-func (m MemStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+func (m MemBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
ch := make(chan cid.Cid, len(m))
for k := range m {
ch <- k
@@ -75,6 +97,6 @@ func (m MemStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
// HashOnRead specifies if every read block should be
// rehashed to make sure it matches its CID.
-func (m MemStore) HashOnRead(enabled bool) {
+func (m MemBlockstore) HashOnRead(enabled bool) {
// no-op
}
diff --git a/blockstore/metrics.go b/blockstore/metrics.go
new file mode 100644
index 00000000000..737690a1106
--- /dev/null
+++ b/blockstore/metrics.go
@@ -0,0 +1,154 @@
+package blockstore
+
+import (
+ "time"
+
+ "go.opencensus.io/stats"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+)
+
+//
+// Currently unused, but kept in repo in case we introduce one of the candidate
+// cache implementations (Freecache, Ristretto), both of which report these
+// metrics.
+//
+
+// CacheMetricsEmitInterval is the interval at which metrics are emitted onto
+// OpenCensus.
+var CacheMetricsEmitInterval = 5 * time.Second
+
+var (
+ CacheName, _ = tag.NewKey("cache_name")
+)
+
+// CacheMeasures groups all metrics emitted by the blockstore caches.
+var CacheMeasures = struct {
+ HitRatio *stats.Float64Measure
+ Hits *stats.Int64Measure
+ Misses *stats.Int64Measure
+ Entries *stats.Int64Measure
+ QueriesServed *stats.Int64Measure
+ Adds *stats.Int64Measure
+ Updates *stats.Int64Measure
+ Evictions *stats.Int64Measure
+ CostAdded *stats.Int64Measure
+ CostEvicted *stats.Int64Measure
+ SetsDropped *stats.Int64Measure
+ SetsRejected *stats.Int64Measure
+ QueriesDropped *stats.Int64Measure
+}{
+ HitRatio: stats.Float64("blockstore/cache/hit_ratio", "Hit ratio of blockstore cache", stats.UnitDimensionless),
+ Hits: stats.Int64("blockstore/cache/hits", "Total number of hits at blockstore cache", stats.UnitDimensionless),
+ Misses: stats.Int64("blockstore/cache/misses", "Total number of misses at blockstore cache", stats.UnitDimensionless),
+ Entries: stats.Int64("blockstore/cache/entry_count", "Total number of entries currently in the blockstore cache", stats.UnitDimensionless),
+ QueriesServed: stats.Int64("blockstore/cache/queries_served", "Total number of queries served by the blockstore cache", stats.UnitDimensionless),
+ Adds: stats.Int64("blockstore/cache/adds", "Total number of adds to blockstore cache", stats.UnitDimensionless),
+ Updates: stats.Int64("blockstore/cache/updates", "Total number of updates in blockstore cache", stats.UnitDimensionless),
+ Evictions: stats.Int64("blockstore/cache/evictions", "Total number of evictions from blockstore cache", stats.UnitDimensionless),
+ CostAdded: stats.Int64("blockstore/cache/cost_added", "Total cost (byte size) of entries added into blockstore cache", stats.UnitBytes),
+ CostEvicted: stats.Int64("blockstore/cache/cost_evicted", "Total cost (byte size) of entries evicted by blockstore cache", stats.UnitBytes),
+ SetsDropped: stats.Int64("blockstore/cache/sets_dropped", "Total number of sets dropped by blockstore cache", stats.UnitDimensionless),
+ SetsRejected: stats.Int64("blockstore/cache/sets_rejected", "Total number of sets rejected by blockstore cache", stats.UnitDimensionless),
+ QueriesDropped: stats.Int64("blockstore/cache/queries_dropped", "Total number of queries dropped by blockstore cache", stats.UnitDimensionless),
+}
+
+// CacheViews groups all cache-related default views.
+var CacheViews = struct {
+ HitRatio *view.View
+ Hits *view.View
+ Misses *view.View
+ Entries *view.View
+ QueriesServed *view.View
+ Adds *view.View
+ Updates *view.View
+ Evictions *view.View
+ CostAdded *view.View
+ CostEvicted *view.View
+ SetsDropped *view.View
+ SetsRejected *view.View
+ QueriesDropped *view.View
+}{
+ HitRatio: &view.View{
+ Measure: CacheMeasures.HitRatio,
+ Aggregation: view.LastValue(),
+ TagKeys: []tag.Key{CacheName},
+ },
+ Hits: &view.View{
+ Measure: CacheMeasures.Hits,
+ Aggregation: view.LastValue(),
+ TagKeys: []tag.Key{CacheName},
+ },
+ Misses: &view.View{
+ Measure: CacheMeasures.Misses,
+ Aggregation: view.LastValue(),
+ TagKeys: []tag.Key{CacheName},
+ },
+ Entries: &view.View{
+ Measure: CacheMeasures.Entries,
+ Aggregation: view.LastValue(),
+ TagKeys: []tag.Key{CacheName},
+ },
+ QueriesServed: &view.View{
+ Measure: CacheMeasures.QueriesServed,
+ Aggregation: view.LastValue(),
+ TagKeys: []tag.Key{CacheName},
+ },
+ Adds: &view.View{
+ Measure: CacheMeasures.Adds,
+ Aggregation: view.LastValue(),
+ TagKeys: []tag.Key{CacheName},
+ },
+ Updates: &view.View{
+ Measure: CacheMeasures.Updates,
+ Aggregation: view.LastValue(),
+ TagKeys: []tag.Key{CacheName},
+ },
+ Evictions: &view.View{
+ Measure: CacheMeasures.Evictions,
+ Aggregation: view.LastValue(),
+ TagKeys: []tag.Key{CacheName},
+ },
+ CostAdded: &view.View{
+ Measure: CacheMeasures.CostAdded,
+ Aggregation: view.LastValue(),
+ TagKeys: []tag.Key{CacheName},
+ },
+ CostEvicted: &view.View{
+ Measure: CacheMeasures.CostEvicted,
+ Aggregation: view.LastValue(),
+ TagKeys: []tag.Key{CacheName},
+ },
+ SetsDropped: &view.View{
+ Measure: CacheMeasures.SetsDropped,
+ Aggregation: view.LastValue(),
+ TagKeys: []tag.Key{CacheName},
+ },
+ SetsRejected: &view.View{
+ Measure: CacheMeasures.SetsRejected,
+ Aggregation: view.LastValue(),
+ TagKeys: []tag.Key{CacheName},
+ },
+ QueriesDropped: &view.View{
+ Measure: CacheMeasures.QueriesDropped,
+ Aggregation: view.LastValue(),
+ TagKeys: []tag.Key{CacheName},
+ },
+}
+
+// DefaultViews exports all default views for this package.
+var DefaultViews = []*view.View{
+ CacheViews.HitRatio,
+ CacheViews.Hits,
+ CacheViews.Misses,
+ CacheViews.Entries,
+ CacheViews.QueriesServed,
+ CacheViews.Adds,
+ CacheViews.Updates,
+ CacheViews.Evictions,
+ CacheViews.CostAdded,
+ CacheViews.CostEvicted,
+ CacheViews.SetsDropped,
+ CacheViews.SetsRejected,
+ CacheViews.QueriesDropped,
+}
diff --git a/blockstore/splitstore/README.md b/blockstore/splitstore/README.md
new file mode 100644
index 00000000000..1c6569a34e7
--- /dev/null
+++ b/blockstore/splitstore/README.md
@@ -0,0 +1,72 @@
+# SplitStore: An actively scalable blockstore for the Filecoin chain
+
+The SplitStore was first introduced in lotus v1.5.1, as an experiment
+in reducing the performance impact of large blockstores.
+
+With lotus v1.11.1, we introduce the next iteration in design and
+implementation, which we call SplitStore v1.
+
+The new design (see [#6474](https://github.com/filecoin-project/lotus/pull/6474))
+evolves the splitstore to be a freestanding compacting blockstore that
+allows us to keep a small (60-100GB) working set in a hot blockstore
+and reliably archive out of scope objects in a coldstore. The
+coldstore can also be a discard store, whereby out of scope objects
+are discarded or a regular badger blockstore (the default), which can
+be periodically garbage collected according to configurable user
+retention policies.
+
+To enable the splitstore, edit `.lotus/config.toml` and add the following:
+```
+[Chainstore]
+ EnableSplitstore = true
+```
+
+If you intend to use the discard coldstore, you also need to add the following:
+```
+ [Chainstore.Splitstore]
+ ColdStoreType = "discard"
+```
+In general you _should not_ have to use the discard store, unless you
+are running a network booster or have very constrained hardware with
+not enough disk space to maintain a coldstore, even with garbage
+collection.
+
+
+## Operation
+
+When the splitstore is first enabled, the existing blockstore becomes
+the coldstore and a fresh hotstore is initialized.
+
+The hotstore is warmed up on first startup so as to load all chain
+headers and state roots in the current head. This allows us to
+immediately gain the performance benefits of a smaller blockstore, which
+can be substantial for full archival nodes.
+
+All new writes are directed to the hotstore, while reads first hit the
+hotstore, with fallback to the coldstore.
+
+Once 5 finalities have elapsed, and every finality henceforth, the
+blockstore _compacts_. Compaction is the process of moving all
+unreachable objects within the last 4 finalities from the hotstore to
+the coldstore. If the system is configured with a discard coldstore,
+these objects are discarded. Note that chain headers, all the way to
+genesis, are considered reachable. Stateroots and messages are
+considered reachable only within the last 4 finalities, unless there
+is a live reference to them.
+
+## Compaction
+
+Compaction works transactionally with the following algorithm:
+- We prepare a transaction, whereby all i/o referenced objects through the API are tracked.
+- We walk the chain and mark reachable objects, keeping 4 finalities of state roots and messages and all headers all the way to genesis.
+- Once the chain walk is complete, we begin full transaction protection with concurrent marking; we walk and mark all references created during the chain walk. At the same time, all I/O through the API concurrently marks objects as live references.
+- We collect cold objects by iterating through the hotstore and checking the mark set; if an object is not marked, then it is candidate for purge.
+- When running with a coldstore, we next copy all cold objects to the coldstore.
+- At this point we are ready to begin purging:
+ - We sort cold objects heaviest first, so as to never delete the constituents of a DAG before the DAG itself (which would leave dangling references)
+ - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live
+- We then end the transaction and compact/gc the hotstore.
+
+## Coldstore Garbage Collection
+
+TBD -- see [#6577](https://github.com/filecoin-project/lotus/issues/6577)
diff --git a/blockstore/splitstore/debug.go b/blockstore/splitstore/debug.go
new file mode 100644
index 00000000000..2be85ebfe8d
--- /dev/null
+++ b/blockstore/splitstore/debug.go
@@ -0,0 +1,273 @@
+package splitstore
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime/debug"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/multierr"
+ "golang.org/x/xerrors"
+
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+)
+
+type debugLog struct {
+ readLog, writeLog, deleteLog, stackLog *debugLogOp
+
+ stackMx sync.Mutex
+ stackMap map[string]string
+}
+
+type debugLogOp struct {
+ path string
+ mx sync.Mutex
+ log *os.File
+ count int
+}
+
+func openDebugLog(path string) (*debugLog, error) {
+ basePath := filepath.Join(path, "debug")
+ err := os.MkdirAll(basePath, 0755)
+ if err != nil {
+ return nil, err
+ }
+
+ readLog, err := openDebugLogOp(basePath, "read.log")
+ if err != nil {
+ return nil, err
+ }
+
+ writeLog, err := openDebugLogOp(basePath, "write.log")
+ if err != nil {
+ _ = readLog.Close()
+ return nil, err
+ }
+
+ deleteLog, err := openDebugLogOp(basePath, "delete.log")
+ if err != nil {
+ _ = readLog.Close()
+ _ = writeLog.Close()
+ return nil, err
+ }
+
+ stackLog, err := openDebugLogOp(basePath, "stack.log")
+ if err != nil {
+ _ = readLog.Close()
+ _ = writeLog.Close()
+ _ = deleteLog.Close()
+ return nil, xerrors.Errorf("error opening stack log: %w", err)
+ }
+
+ return &debugLog{
+ readLog: readLog,
+ writeLog: writeLog,
+ deleteLog: deleteLog,
+ stackLog: stackLog,
+ stackMap: make(map[string]string),
+ }, nil
+}
+
+func (d *debugLog) LogReadMiss(cid cid.Cid) {
+ if d == nil {
+ return
+ }
+
+ stack := d.getStack()
+ err := d.readLog.Log("%s %s %s\n", d.timestamp(), cid, stack)
+ if err != nil {
+ log.Warnf("error writing read log: %s", err)
+ }
+}
+
+func (d *debugLog) LogWrite(blk blocks.Block) {
+ if d == nil {
+ return
+ }
+
+ var stack string
+ if enableDebugLogWriteTraces {
+ stack = " " + d.getStack()
+ }
+
+ err := d.writeLog.Log("%s %s%s\n", d.timestamp(), blk.Cid(), stack)
+ if err != nil {
+ log.Warnf("error writing write log: %s", err)
+ }
+}
+
+func (d *debugLog) LogWriteMany(blks []blocks.Block) {
+ if d == nil {
+ return
+ }
+
+ var stack string
+ if enableDebugLogWriteTraces {
+ stack = " " + d.getStack()
+ }
+
+ now := d.timestamp()
+ for _, blk := range blks {
+ err := d.writeLog.Log("%s %s%s\n", now, blk.Cid(), stack)
+ if err != nil {
+ log.Warnf("error writing write log: %s", err)
+ break
+ }
+ }
+}
+
+func (d *debugLog) LogDelete(cids []cid.Cid) {
+ if d == nil {
+ return
+ }
+
+ now := d.timestamp()
+ for _, c := range cids {
+ err := d.deleteLog.Log("%s %s\n", now, c)
+ if err != nil {
+ log.Warnf("error writing delete log: %s", err)
+ break
+ }
+ }
+}
+
+func (d *debugLog) Flush() {
+ if d == nil {
+ return
+ }
+
+ // rotate non-empty logs
+ d.readLog.Rotate()
+ d.writeLog.Rotate()
+ d.deleteLog.Rotate()
+ d.stackLog.Rotate()
+}
+
+func (d *debugLog) Close() error {
+ if d == nil {
+ return nil
+ }
+
+ err1 := d.readLog.Close()
+ err2 := d.writeLog.Close()
+ err3 := d.deleteLog.Close()
+ err4 := d.stackLog.Close()
+
+ return multierr.Combine(err1, err2, err3, err4)
+}
+
+func (d *debugLog) getStack() string {
+ sk := d.getNormalizedStackTrace()
+ hash := sha256.Sum256([]byte(sk))
+ key := string(hash[:])
+
+ d.stackMx.Lock()
+ repr, ok := d.stackMap[key]
+ if !ok {
+ repr = hex.EncodeToString(hash[:])
+ d.stackMap[key] = repr
+
+ err := d.stackLog.Log("%s\n%s\n", repr, sk)
+ if err != nil {
+ log.Warnf("error writing stack trace for %s: %s", repr, err)
+ }
+ }
+ d.stackMx.Unlock()
+
+ return repr
+}
+
+func (d *debugLog) getNormalizedStackTrace() string {
+ sk := string(debug.Stack())
+
+ // Normalization for deduplication
+ // skip first line -- it's the goroutine
+ // for each line that ends in a ), remove the call args -- these are the registers
+ lines := strings.Split(sk, "\n")[1:]
+ for i, line := range lines {
+ if len(line) > 0 && line[len(line)-1] == ')' {
+ idx := strings.LastIndex(line, "(")
+ if idx < 0 {
+ continue
+ }
+ lines[i] = line[:idx]
+ }
+ }
+
+ return strings.Join(lines, "\n")
+}
+
+func (d *debugLog) timestamp() string {
+ ts, _ := time.Now().MarshalText()
+ return string(ts)
+}
+
+func openDebugLogOp(basePath, name string) (*debugLogOp, error) {
+ path := filepath.Join(basePath, name)
+ file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
+ if err != nil {
+ return nil, xerrors.Errorf("error opening %s: %w", name, err)
+ }
+
+ return &debugLogOp{path: path, log: file}, nil
+}
+
+func (d *debugLogOp) Close() error {
+ d.mx.Lock()
+ defer d.mx.Unlock()
+
+ return d.log.Close()
+}
+
+func (d *debugLogOp) Log(template string, arg ...interface{}) error {
+ d.mx.Lock()
+ defer d.mx.Unlock()
+
+ d.count++
+ _, err := fmt.Fprintf(d.log, template, arg...)
+ return err
+}
+
+func (d *debugLogOp) Rotate() {
+ d.mx.Lock()
+ defer d.mx.Unlock()
+
+ if d.count == 0 {
+ return
+ }
+
+ err := d.log.Close()
+ if err != nil {
+ log.Warnf("error closing log (file: %s): %s", d.path, err)
+ return
+ }
+
+ arxivPath := fmt.Sprintf("%s-%d", d.path, time.Now().Unix())
+ err = os.Rename(d.path, arxivPath)
+ if err != nil {
+ log.Warnf("error moving log (file: %s): %s", d.path, err)
+ return
+ }
+
+ go func() {
+ cmd := exec.Command("gzip", arxivPath)
+ err := cmd.Run()
+ if err != nil {
+ log.Warnf("error compressing log (file: %s): %s", arxivPath, err)
+ }
+ }()
+
+ d.count = 0
+ d.log, err = os.OpenFile(d.path, os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ log.Warnf("error opening log (file: %s): %s", d.path, err)
+ return
+ }
+}
diff --git a/blockstore/splitstore/markset.go b/blockstore/splitstore/markset.go
new file mode 100644
index 00000000000..a644e727955
--- /dev/null
+++ b/blockstore/splitstore/markset.go
@@ -0,0 +1,38 @@
+package splitstore
+
+import (
+ "errors"
+
+ "golang.org/x/xerrors"
+
+ cid "github.com/ipfs/go-cid"
+)
+
+var errMarkSetClosed = errors.New("markset closed")
+
+// MarkSet is a utility to keep track of seen CIDs, and later query for them.
+//
+// * If the expected dataset is large, it can be backed by a datastore (e.g. bbolt).
+// * If a probabilistic result is acceptable, it can be backed by a bloom filter
+type MarkSet interface {
+ Mark(cid.Cid) error
+ Has(cid.Cid) (bool, error)
+ Close() error
+ SetConcurrent()
+}
+
+type MarkSetEnv interface {
+ Create(name string, sizeHint int64) (MarkSet, error)
+ Close() error
+}
+
+func OpenMarkSetEnv(path string, mtype string) (MarkSetEnv, error) {
+ switch mtype {
+ case "bloom":
+ return NewBloomMarkSetEnv()
+ case "map":
+ return NewMapMarkSetEnv()
+ default:
+ return nil, xerrors.Errorf("unknown mark set type %s", mtype)
+ }
+}
diff --git a/blockstore/splitstore/markset_bloom.go b/blockstore/splitstore/markset_bloom.go
new file mode 100644
index 00000000000..9261de7c753
--- /dev/null
+++ b/blockstore/splitstore/markset_bloom.go
@@ -0,0 +1,107 @@
+package splitstore
+
+import (
+ "crypto/rand"
+ "crypto/sha256"
+ "sync"
+
+ "golang.org/x/xerrors"
+
+ bbloom "github.com/ipfs/bbloom"
+ cid "github.com/ipfs/go-cid"
+)
+
+const (
+ BloomFilterMinSize = 10_000_000
+ BloomFilterProbability = 0.01
+)
+
+type BloomMarkSetEnv struct{}
+
+var _ MarkSetEnv = (*BloomMarkSetEnv)(nil)
+
+type BloomMarkSet struct {
+ salt []byte
+ mx sync.RWMutex
+ bf *bbloom.Bloom
+ ts bool
+}
+
+var _ MarkSet = (*BloomMarkSet)(nil)
+
+// NewBloomMarkSetEnv creates an environment for bloom-filter-backed mark sets.
+func NewBloomMarkSetEnv() (*BloomMarkSetEnv, error) {
+	return &BloomMarkSetEnv{}, nil
+}
+
+// Create returns a new BloomMarkSet. The filter is sized to the smallest
+// multiple of BloomFilterMinSize that is >= sizeHint, so even small hints
+// get a generously sized filter. The name argument is ignored. A random
+// 4-byte salt is drawn so key placement differs across instances.
+func (e *BloomMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
+	// round the size hint up to a multiple of the minimum filter size
+	size := int64(BloomFilterMinSize)
+	for size < sizeHint {
+		size += BloomFilterMinSize
+	}
+
+	salt := make([]byte, 4)
+	_, err := rand.Read(salt)
+	if err != nil {
+		return nil, xerrors.Errorf("error reading salt: %w", err)
+	}
+
+	bf, err := bbloom.New(float64(size), BloomFilterProbability)
+	if err != nil {
+		return nil, xerrors.Errorf("error creating bloom filter: %w", err)
+	}
+
+	return &BloomMarkSet{salt: salt, bf: bf}, nil
+}
+
+// Close is a no-op; the environment holds no resources of its own.
+func (e *BloomMarkSetEnv) Close() error {
+	return nil
+}
+
+// saltedKey derives the filter key for a CID: sha256(salt || multihash).
+// Rehashing through sha256 yields uniformly distributed keys regardless of
+// the CID's own hash function, and the salt decorrelates instances.
+func (s *BloomMarkSet) saltedKey(cid cid.Cid) []byte {
+	hash := cid.Hash()
+	key := make([]byte, len(s.salt)+len(hash))
+	n := copy(key, s.salt)
+	copy(key[n:], hash)
+	rehash := sha256.Sum256(key)
+	return rehash[:]
+}
+
+// Mark adds the cid to the set. Returns errMarkSetClosed if the set has been
+// closed. Locking is only performed after SetConcurrent has been called.
+func (s *BloomMarkSet) Mark(cid cid.Cid) error {
+	if s.ts {
+		s.mx.Lock()
+		defer s.mx.Unlock()
+	}
+
+	// a nil filter signals a closed set
+	if s.bf == nil {
+		return errMarkSetClosed
+	}
+
+	s.bf.Add(s.saltedKey(cid))
+	return nil
+}
+
+// Has checks membership. Being a bloom filter, false positives are possible;
+// false negatives are not. Returns errMarkSetClosed after Close.
+func (s *BloomMarkSet) Has(cid cid.Cid) (bool, error) {
+	if s.ts {
+		s.mx.RLock()
+		defer s.mx.RUnlock()
+	}
+
+	if s.bf == nil {
+		return false, errMarkSetClosed
+	}
+
+	return s.bf.Has(s.saltedKey(cid)), nil
+}
+
+// Close releases the filter; subsequent Mark/Has calls fail with
+// errMarkSetClosed. Safe to call more than once.
+func (s *BloomMarkSet) Close() error {
+	if s.ts {
+		s.mx.Lock()
+		defer s.mx.Unlock()
+	}
+	s.bf = nil
+	return nil
+}
+
+// SetConcurrent enables internal locking for multi-goroutine use.
+// NOTE(review): this flag write is itself unsynchronized — callers must
+// invoke it before sharing the set across goroutines.
+func (s *BloomMarkSet) SetConcurrent() {
+	s.ts = true
+}
diff --git a/blockstore/splitstore/markset_map.go b/blockstore/splitstore/markset_map.go
new file mode 100644
index 00000000000..197c824242a
--- /dev/null
+++ b/blockstore/splitstore/markset_map.go
@@ -0,0 +1,75 @@
+package splitstore
+
+import (
+ "sync"
+
+ cid "github.com/ipfs/go-cid"
+)
+
+type MapMarkSetEnv struct{}
+
+var _ MarkSetEnv = (*MapMarkSetEnv)(nil)
+
+type MapMarkSet struct {
+ mx sync.RWMutex
+ set map[string]struct{}
+
+ ts bool
+}
+
+var _ MarkSet = (*MapMarkSet)(nil)
+
+// NewMapMarkSetEnv creates an environment for exact, map-backed mark sets.
+func NewMapMarkSetEnv() (*MapMarkSetEnv, error) {
+	return &MapMarkSetEnv{}, nil
+}
+
+// Create returns a new MapMarkSet with its map preallocated to sizeHint
+// entries. The name argument is ignored.
+func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
+	return &MapMarkSet{
+		set: make(map[string]struct{}, sizeHint),
+	}, nil
+}
+
+// Close is a no-op; map mark sets hold no external resources.
+func (e *MapMarkSetEnv) Close() error {
+	return nil
+}
+
+// Mark adds the cid to the set, keyed by its multihash (so CIDs differing
+// only in codec coincide). Returns errMarkSetClosed after Close. Locking is
+// only performed after SetConcurrent has been called.
+func (s *MapMarkSet) Mark(cid cid.Cid) error {
+	if s.ts {
+		s.mx.Lock()
+		defer s.mx.Unlock()
+	}
+
+	// a nil map signals a closed set
+	if s.set == nil {
+		return errMarkSetClosed
+	}
+
+	s.set[string(cid.Hash())] = struct{}{}
+	return nil
+}
+
+// Has checks membership exactly (no false positives, unlike the bloom
+// variant). Returns errMarkSetClosed after Close.
+func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) {
+	if s.ts {
+		s.mx.RLock()
+		defer s.mx.RUnlock()
+	}
+
+	if s.set == nil {
+		return false, errMarkSetClosed
+	}
+
+	_, ok := s.set[string(cid.Hash())]
+	return ok, nil
+}
+
+// Close releases the map; subsequent Mark/Has calls fail with
+// errMarkSetClosed. Safe to call more than once.
+func (s *MapMarkSet) Close() error {
+	if s.ts {
+		s.mx.Lock()
+		defer s.mx.Unlock()
+	}
+	s.set = nil
+	return nil
+}
+
+// SetConcurrent enables internal locking for multi-goroutine use.
+// NOTE(review): must be called before the set is shared across goroutines.
+func (s *MapMarkSet) SetConcurrent() {
+	s.ts = true
+}
diff --git a/blockstore/splitstore/markset_test.go b/blockstore/splitstore/markset_test.go
new file mode 100644
index 00000000000..d5c01e22029
--- /dev/null
+++ b/blockstore/splitstore/markset_test.go
@@ -0,0 +1,138 @@
+package splitstore
+
+import (
+ "io/ioutil"
+ "testing"
+
+ cid "github.com/ipfs/go-cid"
+ "github.com/multiformats/go-multihash"
+)
+
+func TestMapMarkSet(t *testing.T) {
+ testMarkSet(t, "map")
+}
+
+func TestBloomMarkSet(t *testing.T) {
+ testMarkSet(t, "bloom")
+}
+
+// testMarkSet exercises a MarkSet implementation end-to-end: marking,
+// membership queries, and that Close followed by a fresh Create yields an
+// empty set that does not remember previous marks.
+func testMarkSet(t *testing.T, lsType string) {
+	t.Helper()
+
+	// TODO(review): the temp dir is never removed — add defer os.RemoveAll(path)
+	path, err := ioutil.TempDir("", "sweep-test.*")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	env, err := OpenMarkSetEnv(path, lsType)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer env.Close() //nolint:errcheck
+
+	hotSet, err := env.Create("hot", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	coldSet, err := env.Create("cold", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// makeCid derives a deterministic CID from a short string key
+	makeCid := func(key string) cid.Cid {
+		h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		return cid.NewCidV1(cid.Raw, h)
+	}
+
+	mustHave := func(s MarkSet, cid cid.Cid) {
+		has, err := s.Has(cid)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if !has {
+			t.Fatal("mark not found")
+		}
+	}
+
+	mustNotHave := func(s MarkSet, cid cid.Cid) {
+		has, err := s.Has(cid)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if has {
+			t.Fatal("unexpected mark")
+		}
+	}
+
+	k1 := makeCid("a")
+	k2 := makeCid("b")
+	k3 := makeCid("c")
+	k4 := makeCid("d")
+
+	hotSet.Mark(k1)  //nolint
+	hotSet.Mark(k2)  //nolint
+	coldSet.Mark(k3) //nolint
+
+	mustHave(hotSet, k1)
+	mustHave(hotSet, k2)
+	mustNotHave(hotSet, k3)
+	mustNotHave(hotSet, k4)
+
+	mustNotHave(coldSet, k1)
+	mustNotHave(coldSet, k2)
+	mustHave(coldSet, k3)
+	mustNotHave(coldSet, k4)
+
+	// close them and reopen to redo the dance
+
+	err = hotSet.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = coldSet.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	hotSet, err = env.Create("hot", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	coldSet, err = env.Create("cold", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	hotSet.Mark(k3)  //nolint
+	hotSet.Mark(k4)  //nolint
+	coldSet.Mark(k1) //nolint
+
+	// the recreated sets must only contain the new marks
+	mustNotHave(hotSet, k1)
+	mustNotHave(hotSet, k2)
+	mustHave(hotSet, k3)
+	mustHave(hotSet, k4)
+
+	mustHave(coldSet, k1)
+	mustNotHave(coldSet, k2)
+	mustNotHave(coldSet, k3)
+	mustNotHave(coldSet, k4)
+
+	err = hotSet.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = coldSet.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go
new file mode 100644
index 00000000000..821ebb2b6c2
--- /dev/null
+++ b/blockstore/splitstore/splitstore.go
@@ -0,0 +1,568 @@
+package splitstore
+
+import (
+ "context"
+ "errors"
+ "os"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.uber.org/multierr"
+ "golang.org/x/xerrors"
+
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+ dstore "github.com/ipfs/go-datastore"
+ logging "github.com/ipfs/go-log/v2"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ bstore "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/metrics"
+
+ "go.opencensus.io/stats"
+)
+
+var (
+ // baseEpochKey stores the base epoch (last compaction epoch) in the
+ // metadata store.
+ baseEpochKey = dstore.NewKey("/splitstore/baseEpoch")
+
+ // warmupEpochKey stores whether a hot store warmup has been performed.
+ // On first start, the splitstore will walk the state tree and will copy
+ // all active blocks into the hotstore.
+ warmupEpochKey = dstore.NewKey("/splitstore/warmupEpoch")
+
+ // markSetSizeKey stores the current estimate for the mark set size.
+ // this is first computed at warmup and updated in every compaction
+ markSetSizeKey = dstore.NewKey("/splitstore/markSetSize")
+
+ // compactionIndexKey stores the compaction index (serial number)
+ compactionIndexKey = dstore.NewKey("/splitstore/compactionIndex")
+
+ log = logging.Logger("splitstore")
+
+ // set this to true if you are debugging the splitstore to enable debug logging
+ enableDebugLog = false
+ // set this to true if you want to track origin stack traces in the write log
+ enableDebugLogWriteTraces = false
+)
+
+func init() {
+ if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG") == "1" {
+ enableDebugLog = true
+ }
+
+ if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG_WRITE_TRACES") == "1" {
+ enableDebugLogWriteTraces = true
+ }
+}
+
+type Config struct {
+ // MarkSetType is the type of mark set to use.
+ //
+ // Only current sane value is "map", but we may add an option for a disk-backed
+ // markset for memory-constrained situations.
+ MarkSetType string
+
+ // DiscardColdBlocks indicates whether to skip moving cold blocks to the coldstore.
+ // If the splitstore is running with a noop coldstore then this option is set to true
+ // which skips moving (as it is a noop, but still takes time to read all the cold objects)
+ // and directly purges cold blocks.
+ DiscardColdBlocks bool
+
+ // HotstoreMessageRetention indicates the hotstore retention policy for messages.
+ // It has the following semantics:
+ // - a value of 0 will only retain messages within the compaction boundary (4 finalities)
+ // - a positive integer indicates the number of finalities, outside the compaction boundary,
+ // for which messages will be retained in the hotstore.
+ HotStoreMessageRetention uint64
+}
+
+// ChainAccessor allows the Splitstore to access the chain. It will most likely
+// be a ChainStore at runtime.
+type ChainAccessor interface {
+ GetTipsetByHeight(context.Context, abi.ChainEpoch, *types.TipSet, bool) (*types.TipSet, error)
+ GetHeaviestTipSet() *types.TipSet
+ SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error)
+}
+
+// hotstore is the interface that must be satisfied by the hot blockstore; it is an extension
+// of the Blockstore interface with the traits we need for compaction.
+type hotstore interface {
+ bstore.Blockstore
+ bstore.BlockstoreIterator
+}
+
+type SplitStore struct {
+ compacting int32 // compaction/prune/warmup in progress
+ closing int32 // the splitstore is closing
+
+ cfg *Config
+
+ mx sync.Mutex
+ warmupEpoch abi.ChainEpoch // protected by mx
+ baseEpoch abi.ChainEpoch // protected by compaction lock
+
+ headChangeMx sync.Mutex
+
+ coldPurgeSize int
+
+ chain ChainAccessor
+ ds dstore.Datastore
+ cold bstore.Blockstore
+ hot hotstore
+
+ markSetEnv MarkSetEnv
+ markSetSize int64
+
+ compactionIndex int64
+
+ ctx context.Context
+ cancel func()
+
+ debug *debugLog
+
+ // transactional protection for concurrent read/writes during compaction
+ txnLk sync.RWMutex
+ txnViewsMx sync.Mutex
+ txnViewsCond sync.Cond
+ txnViews int
+ txnViewsWaiting bool
+ txnActive bool
+ txnProtect MarkSet
+ txnRefsMx sync.Mutex
+ txnRefs map[cid.Cid]struct{}
+ txnMissing map[cid.Cid]struct{}
+
+ // registered protectors
+ protectors []func(func(cid.Cid) error) error
+}
+
+var _ bstore.Blockstore = (*SplitStore)(nil)
+
+// Open opens an existing splitstore, or creates a new splitstore. The splitstore
+// is backed by the provided hot and cold stores. The returned SplitStore MUST be
+// attached to the ChainStore with Start in order to trigger compaction.
+func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Config) (*SplitStore, error) {
+ // hot blockstore must support the hotstore interface
+ hots, ok := hot.(hotstore)
+ if !ok {
+ // be specific about what is missing
+ if _, ok := hot.(bstore.BlockstoreIterator); !ok {
+ return nil, xerrors.Errorf("hot blockstore does not support efficient iteration: %T", hot)
+ }
+
+ return nil, xerrors.Errorf("hot blockstore does not support the necessary traits: %T", hot)
+ }
+
+ // the markset env
+ markSetEnv, err := OpenMarkSetEnv(path, cfg.MarkSetType)
+ if err != nil {
+ return nil, err
+ }
+
+ // and now we can make a SplitStore
+ ss := &SplitStore{
+ cfg: cfg,
+ ds: ds,
+ cold: cold,
+ hot: hots,
+ markSetEnv: markSetEnv,
+
+ coldPurgeSize: defaultColdPurgeSize,
+ }
+
+ ss.txnViewsCond.L = &ss.txnViewsMx
+ ss.ctx, ss.cancel = context.WithCancel(context.Background())
+
+ if enableDebugLog {
+ ss.debug, err = openDebugLog(path)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return ss, nil
+}
+
+// Blockstore interface
+func (s *SplitStore) DeleteBlock(_ cid.Cid) error {
+ // afaict we don't seem to be using this method, so it's not implemented
+ return errors.New("DeleteBlock not implemented on SplitStore; don't do this Luke!") //nolint
+}
+
+func (s *SplitStore) DeleteMany(_ []cid.Cid) error {
+ // afaict we don't seem to be using this method, so it's not implemented
+ return errors.New("DeleteMany not implemented on SplitStore; don't do this Luke!") //nolint
+}
+
+// Has reports whether either store holds the cid, checking hot first.
+// Identity CIDs are trivially present (their data is inlined in the CID).
+// A hotstore hit is tracked as a transactional reference so a concurrent
+// compaction will not purge it.
+func (s *SplitStore) Has(cid cid.Cid) (bool, error) {
+	if isIdentiyCid(cid) {
+		return true, nil
+	}
+
+	// hold the txn read lock so compaction's purge cannot interleave
+	s.txnLk.RLock()
+	defer s.txnLk.RUnlock()
+
+	has, err := s.hot.Has(cid)
+
+	if err != nil {
+		return has, err
+	}
+
+	if has {
+		s.trackTxnRef(cid)
+		return true, nil
+	}
+
+	return s.cold.Has(cid)
+}
+
+// Get retrieves a block, trying the hotstore first and falling back to the
+// coldstore on ErrNotFound (recording a splitstore-miss metric). Identity
+// CIDs are decoded directly from the CID. A hotstore hit is tracked as a
+// transactional reference to protect it from concurrent compaction.
+func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) {
+	if isIdentiyCid(cid) {
+		data, err := decodeIdentityCid(cid)
+		if err != nil {
+			return nil, err
+		}
+
+		return blocks.NewBlockWithCid(data, cid)
+	}
+
+	s.txnLk.RLock()
+	defer s.txnLk.RUnlock()
+
+	blk, err := s.hot.Get(cid)
+
+	switch err {
+	case nil:
+		s.trackTxnRef(cid)
+		return blk, nil
+
+	case bstore.ErrNotFound:
+		// only log misses once warm: before warmup, misses are expected
+		if s.isWarm() {
+			s.debug.LogReadMiss(cid)
+		}
+
+		blk, err = s.cold.Get(cid)
+		if err == nil {
+			stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
+
+		}
+		return blk, err
+
+	default:
+		return nil, err
+	}
+}
+
+func (s *SplitStore) GetSize(cid cid.Cid) (int, error) {
+ if isIdentiyCid(cid) {
+ data, err := decodeIdentityCid(cid)
+ if err != nil {
+ return 0, err
+ }
+
+ return len(data), nil
+ }
+
+ s.txnLk.RLock()
+ defer s.txnLk.RUnlock()
+
+ size, err := s.hot.GetSize(cid)
+
+ switch err {
+ case nil:
+ s.trackTxnRef(cid)
+ return size, nil
+
+ case bstore.ErrNotFound:
+ if s.isWarm() {
+ s.debug.LogReadMiss(cid)
+ }
+
+ size, err = s.cold.GetSize(cid)
+ if err == nil {
+ stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
+ }
+ return size, err
+
+ default:
+ return 0, err
+ }
+}
+
+func (s *SplitStore) Put(blk blocks.Block) error {
+ if isIdentiyCid(blk.Cid()) {
+ return nil
+ }
+
+ s.txnLk.RLock()
+ defer s.txnLk.RUnlock()
+
+ err := s.hot.Put(blk)
+ if err != nil {
+ return err
+ }
+
+ s.debug.LogWrite(blk)
+
+ s.trackTxnRef(blk.Cid())
+ return nil
+}
+
+func (s *SplitStore) PutMany(blks []blocks.Block) error {
+ // filter identities
+ idcids := 0
+ for _, blk := range blks {
+ if isIdentiyCid(blk.Cid()) {
+ idcids++
+ }
+ }
+
+ if idcids > 0 {
+ if idcids == len(blks) {
+ // it's all identities
+ return nil
+ }
+
+ filtered := make([]blocks.Block, 0, len(blks)-idcids)
+ for _, blk := range blks {
+ if isIdentiyCid(blk.Cid()) {
+ continue
+ }
+ filtered = append(filtered, blk)
+ }
+
+ blks = filtered
+ }
+
+ batch := make([]cid.Cid, 0, len(blks))
+ for _, blk := range blks {
+ batch = append(batch, blk.Cid())
+ }
+
+ s.txnLk.RLock()
+ defer s.txnLk.RUnlock()
+
+ err := s.hot.PutMany(blks)
+ if err != nil {
+ return err
+ }
+
+ s.debug.LogWriteMany(blks)
+
+ s.trackTxnRefMany(batch)
+ return nil
+}
+
+func (s *SplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+ ctx, cancel := context.WithCancel(ctx)
+
+ chHot, err := s.hot.AllKeysChan(ctx)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+
+ chCold, err := s.cold.AllKeysChan(ctx)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+
+ seen := cid.NewSet()
+ ch := make(chan cid.Cid, 8) // buffer is arbitrary, just enough to avoid context switches
+ go func() {
+ defer cancel()
+ defer close(ch)
+
+ for _, in := range []<-chan cid.Cid{chHot, chCold} {
+ for c := range in {
+ // ensure we only emit each key once
+ if !seen.Visit(c) {
+ continue
+ }
+
+ select {
+ case ch <- c:
+ case <-ctx.Done():
+ return
+ }
+ }
+ }
+ }()
+
+ return ch, nil
+}
+
+func (s *SplitStore) HashOnRead(enabled bool) {
+ s.hot.HashOnRead(enabled)
+ s.cold.HashOnRead(enabled)
+}
+
+func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error {
+ if isIdentiyCid(cid) {
+ data, err := decodeIdentityCid(cid)
+ if err != nil {
+ return err
+ }
+
+ return cb(data)
+ }
+
+ // views are (optimistically) protected two-fold:
+ // - if there is an active transaction, then the reference is protected.
+ // - if there is no active transaction, active views are tracked in a
+ // wait group and compaction is inhibited from starting until they
+ // have all completed. this is necessary to ensure that a (very) long-running
+ // view can't have its data pointer deleted, which would be catastrophic.
+ // Note that we can't just RLock for the duration of the view, as this could
+ // lead to deadlock with recursive views.
+ s.protectView(cid)
+ defer s.viewDone()
+
+ err := s.hot.View(cid, cb)
+ switch err {
+ case bstore.ErrNotFound:
+ if s.isWarm() {
+ s.debug.LogReadMiss(cid)
+ }
+
+ err = s.cold.View(cid, cb)
+ if err == nil {
+ stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
+ }
+ return err
+
+ default:
+ return err
+ }
+}
+
+func (s *SplitStore) isWarm() bool {
+ s.mx.Lock()
+ defer s.mx.Unlock()
+ return s.warmupEpoch > 0
+}
+
+// Start attaches the splitstore to the chain: it restores persisted state
+// (base epoch, warmup epoch, mark set size estimate, compaction index) from
+// the metadata datastore, runs a warmup when needed, and subscribes to head
+// changes so that HeadChange can drive compaction.
+func (s *SplitStore) Start(chain ChainAccessor) error {
+	s.chain = chain
+	curTs := chain.GetHeaviestTipSet()
+
+	// should we warmup
+	warmup := false
+
+	// load base epoch from metadata ds
+	// if none, then use current epoch because it's a fresh start
+	bs, err := s.ds.Get(baseEpochKey)
+	switch err {
+	case nil:
+		s.baseEpoch = bytesToEpoch(bs)
+
+	case dstore.ErrNotFound:
+		if curTs == nil {
+			// this can happen in some tests
+			break
+		}
+
+		err = s.setBaseEpoch(curTs.Height())
+		if err != nil {
+			return xerrors.Errorf("error saving base epoch: %w", err)
+		}
+
+	default:
+		return xerrors.Errorf("error loading base epoch: %w", err)
+	}
+
+	// load warmup epoch from metadata ds
+	bs, err = s.ds.Get(warmupEpochKey)
+	switch err {
+	case nil:
+		s.warmupEpoch = bytesToEpoch(bs)
+
+	case dstore.ErrNotFound:
+		// no warmup recorded: fresh start, schedule one
+		warmup = true
+
+	default:
+		return xerrors.Errorf("error loading warmup epoch: %w", err)
+	}
+
+	// load markSetSize from metadata ds to provide a size hint for marksets
+	bs, err = s.ds.Get(markSetSizeKey)
+	switch err {
+	case nil:
+		s.markSetSize = bytesToInt64(bs)
+
+	case dstore.ErrNotFound:
+	default:
+		return xerrors.Errorf("error loading mark set size: %w", err)
+	}
+
+	// load compactionIndex from metadata ds to provide a hint as to when to perform moving gc
+	bs, err = s.ds.Get(compactionIndexKey)
+	switch err {
+	case nil:
+		s.compactionIndex = bytesToInt64(bs)
+
+	case dstore.ErrNotFound:
+		// this is potentially an upgrade from splitstore v0; schedule a warmup as v0 has
+		// some issues with hot references leaking into the coldstore.
+		warmup = true
+	default:
+		return xerrors.Errorf("error loading compaction index: %w", err)
+	}
+
+	log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch)
+
+	if warmup {
+		err = s.warmup(curTs)
+		if err != nil {
+			return xerrors.Errorf("error starting warmup: %w", err)
+		}
+	}
+
+	// watch the chain
+	chain.SubscribeHeadChanges(s.HeadChange)
+
+	return nil
+}
+
+func (s *SplitStore) AddProtector(protector func(func(cid.Cid) error) error) {
+ s.mx.Lock()
+ defer s.mx.Unlock()
+
+ s.protectors = append(s.protectors, protector)
+}
+
+// Close shuts the splitstore down. It is idempotent (guarded by a CAS on
+// the closing flag), busy-waits for any in-flight compaction to finish,
+// then cancels the background context and closes the markset environment
+// and debug log, combining their errors.
+func (s *SplitStore) Close() error {
+	if !atomic.CompareAndSwapInt32(&s.closing, 0, 1) {
+		// already closing
+		return nil
+	}
+
+	if atomic.LoadInt32(&s.compacting) == 1 {
+		// compaction checks the closing flag at its checkpoints, so this
+		// wait is bounded by the time to reach the next checkpoint
+		log.Warn("close with ongoing compaction in progress; waiting for it to finish...")
+		for atomic.LoadInt32(&s.compacting) == 1 {
+			time.Sleep(time.Second)
+		}
+	}
+
+	s.cancel()
+	return multierr.Combine(s.markSetEnv.Close(), s.debug.Close())
+}
+
+// checkClosing returns an error if Close has been initiated; compaction
+// calls this at checkpoints so it can abort promptly on shutdown.
+func (s *SplitStore) checkClosing() error {
+	if atomic.LoadInt32(&s.closing) == 1 {
+		return xerrors.Errorf("splitstore is closing")
+	}
+
+	return nil
+}
+
+func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error {
+ s.baseEpoch = epoch
+ return s.ds.Put(baseEpochKey, epochToBytes(epoch))
+}
diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go
new file mode 100644
index 00000000000..86f035e6f4a
--- /dev/null
+++ b/blockstore/splitstore/splitstore_compact.go
@@ -0,0 +1,1144 @@
+package splitstore
+
+import (
+ "bytes"
+ "errors"
+ "runtime"
+ "sort"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/xerrors"
+
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ bstore "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/metrics"
+
+ "go.opencensus.io/stats"
+)
+
+var (
+ // CompactionThreshold is the number of epochs that need to have elapsed
+ // from the previously compacted epoch to trigger a new compaction.
+ //
+ // |················· CompactionThreshold ··················|
+ // | |
+ // =======‖≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡‖------------------------»
+ // | | chain --> ↑__ current epoch
+ // | archived epochs ___↑
+ // ↑________ CompactionBoundary
+ //
+ // === :: cold (already archived)
+ // ≡≡≡ :: to be archived in this compaction
+ // --- :: hot
+ CompactionThreshold = 5 * build.Finality
+
+ // CompactionBoundary is the number of epochs from the current epoch at which
+ // we will walk the chain for live objects.
+ CompactionBoundary = 4 * build.Finality
+
+ // SyncGapTime is the time delay from a tipset's min timestamp before we decide
+ // there is a sync gap
+ SyncGapTime = time.Minute
+)
+
+var (
+ // used to signal end of walk
+ errStopWalk = errors.New("stop walk")
+)
+
+const (
+ batchSize = 16384
+
+ defaultColdPurgeSize = 7_000_000
+)
+
+// HeadChange is the chainstore head-change callback. When a compaction is
+// already running it only protects the newly applied tipsets; otherwise it
+// starts a compaction in a goroutine once the head has advanced more than
+// CompactionThreshold epochs past the base epoch and the node is in sync.
+func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
+	s.headChangeMx.Lock()
+	defer s.headChangeMx.Unlock()
+
+	// Revert only.
+	if len(apply) == 0 {
+		return nil
+	}
+
+	curTs := apply[len(apply)-1]
+	epoch := curTs.Height()
+
+	// NOTE: there is an implicit invariant assumption that HeadChange is invoked
+	// synchronously and no other HeadChange can be invoked while one is in
+	// progress.
+	// this is guaranteed by the chainstore, and it is pervasive in all lotus
+	// -- if that ever changes then all hell will break loose in general and
+	// we will have a race to protectTipSets here.
+	// Regardless, we put a mutex in HeadChange just to be safe
+
+	if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) {
+		// we are currently compacting -- protect the new tipset(s)
+		s.protectTipSets(apply)
+		return nil
+	}
+
+	// check if we are actually closing first
+	if atomic.LoadInt32(&s.closing) == 1 {
+		atomic.StoreInt32(&s.compacting, 0)
+		return nil
+	}
+
+	timestamp := time.Unix(int64(curTs.MinTimestamp()), 0)
+	if time.Since(timestamp) > SyncGapTime {
+		// don't attempt compaction before we have caught up syncing
+		atomic.StoreInt32(&s.compacting, 0)
+		return nil
+	}
+
+	if epoch-s.baseEpoch > CompactionThreshold {
+		// it's time to compact -- prepare the transaction and go!
+		s.beginTxnProtect()
+		go func() {
+			defer atomic.StoreInt32(&s.compacting, 0)
+			defer s.endTxnProtect()
+
+			log.Info("compacting splitstore")
+			start := time.Now()
+
+			s.compact(curTs)
+
+			log.Infow("compaction done", "took", time.Since(start))
+		}()
+	} else {
+		// no compaction necessary
+		atomic.StoreInt32(&s.compacting, 0)
+	}
+
+	return nil
+}
+
+// transactionally protect incoming tipsets
+func (s *SplitStore) protectTipSets(apply []*types.TipSet) {
+ s.txnLk.RLock()
+ defer s.txnLk.RUnlock()
+
+ if !s.txnActive {
+ return
+ }
+
+ var cids []cid.Cid
+ for _, ts := range apply {
+ cids = append(cids, ts.Cids()...)
+ }
+
+ s.trackTxnRefMany(cids)
+}
+
+// transactionally protect a view
+func (s *SplitStore) protectView(c cid.Cid) {
+ s.txnLk.RLock()
+ defer s.txnLk.RUnlock()
+
+ if s.txnActive {
+ s.trackTxnRef(c)
+ }
+
+ s.txnViewsMx.Lock()
+ s.txnViews++
+ s.txnViewsMx.Unlock()
+}
+
+func (s *SplitStore) viewDone() {
+ s.txnViewsMx.Lock()
+ defer s.txnViewsMx.Unlock()
+
+ s.txnViews--
+ if s.txnViews == 0 && s.txnViewsWaiting {
+ s.txnViewsCond.Broadcast()
+ }
+}
+
+func (s *SplitStore) viewWait() {
+ s.txnViewsMx.Lock()
+ defer s.txnViewsMx.Unlock()
+
+ s.txnViewsWaiting = true
+ for s.txnViews > 0 {
+ s.txnViewsCond.Wait()
+ }
+ s.txnViewsWaiting = false
+}
+
+// trackTxnRef records a transactional reference to a single object so it
+// survives a concurrent compaction. It is a no-op when no transaction is
+// active or the cid is a unitary object; already-marked cids are skipped.
+// Callers are expected to hold the txn read lock.
+func (s *SplitStore) trackTxnRef(c cid.Cid) {
+	if !s.txnActive {
+		// not compacting
+		return
+	}
+
+	if isUnitaryObject(c) {
+		return
+	}
+
+	if s.txnProtect != nil {
+		mark, err := s.txnProtect.Has(c)
+		if err != nil {
+			log.Warnf("error checking markset: %s", err)
+			// track it anyways
+		} else if mark {
+			return
+		}
+	}
+
+	s.txnRefsMx.Lock()
+	s.txnRefs[c] = struct{}{}
+	s.txnRefsMx.Unlock()
+}
+
+// trackTxnRefMany is the batch form of trackTxnRef; it takes the refs lock
+// once for the whole batch and logs at most one markset error per call.
+func (s *SplitStore) trackTxnRefMany(cids []cid.Cid) {
+	if !s.txnActive {
+		// not compacting
+		return
+	}
+
+	s.txnRefsMx.Lock()
+	defer s.txnRefsMx.Unlock()
+
+	quiet := false
+	for _, c := range cids {
+		if isUnitaryObject(c) {
+			continue
+		}
+
+		if s.txnProtect != nil {
+			mark, err := s.txnProtect.Has(c)
+			if err != nil {
+				if !quiet {
+					quiet = true
+					log.Warnf("error checking markset: %s", err)
+				}
+				// track it anyways; mark is false on error, so we fall through
+			}
+
+			if mark {
+				continue
+			}
+		}
+
+		s.txnRefs[c] = struct{}{}
+	}
+
+	// NOTE(review): redundant bare return on a void function
+	return
+}
+
+// protect all pending transactional references
+func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
+ for {
+ var txnRefs map[cid.Cid]struct{}
+
+ s.txnRefsMx.Lock()
+ if len(s.txnRefs) > 0 {
+ txnRefs = s.txnRefs
+ s.txnRefs = make(map[cid.Cid]struct{})
+ }
+ s.txnRefsMx.Unlock()
+
+ if len(txnRefs) == 0 {
+ return nil
+ }
+
+ log.Infow("protecting transactional references", "refs", len(txnRefs))
+ count := 0
+ workch := make(chan cid.Cid, len(txnRefs))
+ startProtect := time.Now()
+
+ for c := range txnRefs {
+ mark, err := markSet.Has(c)
+ if err != nil {
+ return xerrors.Errorf("error checking markset: %w", err)
+ }
+
+ if mark {
+ continue
+ }
+
+ workch <- c
+ count++
+ }
+ close(workch)
+
+ if count == 0 {
+ return nil
+ }
+
+ workers := runtime.NumCPU() / 2
+ if workers < 2 {
+ workers = 2
+ }
+ if workers > count {
+ workers = count
+ }
+
+ worker := func() error {
+ for c := range workch {
+ err := s.doTxnProtect(c, markSet)
+ if err != nil {
+ return xerrors.Errorf("error protecting transactional references to %s: %w", c, err)
+ }
+ }
+ return nil
+ }
+
+ g := new(errgroup.Group)
+ for i := 0; i < workers; i++ {
+ g.Go(worker)
+ }
+
+ if err := g.Wait(); err != nil {
+ return err
+ }
+
+ log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count)
+ }
+}
+
+// transactionally protect a reference by walking the object and marking.
+// concurrent markings are short circuited by checking the markset.
+func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error {
+ if err := s.checkClosing(); err != nil {
+ return err
+ }
+
+ // Note: cold objects are deleted heaviest first, so the constituents of an object
+ // cannot be deleted before the object itself.
+ return s.walkObjectIncomplete(root, cid.NewSet(),
+ func(c cid.Cid) error {
+ if isUnitaryObject(c) {
+ return errStopWalk
+ }
+
+ mark, err := markSet.Has(c)
+ if err != nil {
+ return xerrors.Errorf("error checking markset: %w", err)
+ }
+
+ // it's marked, nothing to do
+ if mark {
+ return errStopWalk
+ }
+
+ return markSet.Mark(c)
+ },
+ func(c cid.Cid) error {
+ if s.txnMissing != nil {
+ log.Warnf("missing object reference %s in %s", c, root)
+ s.txnRefsMx.Lock()
+ s.txnMissing[c] = struct{}{}
+ s.txnRefsMx.Unlock()
+ }
+ return errStopWalk
+ })
+}
+
+// applyProtectors invokes every registered protector, tracking each cid it
+// reports as a transactional reference so compaction will not purge it.
+// Returns the first protector error encountered. Holds s.mx to serialize
+// with AddProtector.
+func (s *SplitStore) applyProtectors() error {
+	s.mx.Lock()
+	defer s.mx.Unlock()
+
+	count := 0
+	for _, protect := range s.protectors {
+		err := protect(func(c cid.Cid) error {
+			s.trackTxnRef(c)
+			count++
+			return nil
+		})
+
+		if err != nil {
+			return xerrors.Errorf("error applying protector: %w", err)
+		}
+	}
+
+	if count > 0 {
+		log.Infof("protected %d references through %d protectors", count, len(s.protectors))
+	}
+
+	return nil
+}
+
+// --- Compaction ---
+// Compaction works transactionally with the following algorithm:
+// - We prepare a transaction, whereby all i/o referenced objects through the API are tracked.
+// - We walk the chain and mark reachable objects, keeping 4 finalities of state roots and messages and all headers all the way to genesis.
+// - Once the chain walk is complete, we begin full transaction protection with concurrent marking; we walk and mark all references created during the chain walk. On the same time, all I/O through the API concurrently marks objects as live references.
+// - We collect cold objects by iterating through the hotstore and checking the mark set; if an object is not marked, then it is candidate for purge.
+// - When running with a coldstore, we next copy all cold objects to the coldstore.
+// - At this point we are ready to begin purging:
+// - We sort cold objects heaviest first, so as to never delete the constituents of a DAG before the DAG itself (which would leave dangling references)
+// - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live
+// - We then end the transaction and compact/gc the hotstore.
+// compact drains in-flight views (viewWait), then runs a full compaction
+// pass via doCompact, recording its duration as a metric. Errors are
+// logged, not returned — the caller runs this fire-and-forget.
+func (s *SplitStore) compact(curTs *types.TipSet) {
+	log.Info("waiting for active views to complete")
+	start := time.Now()
+	s.viewWait()
+	log.Infow("waiting for active views done", "took", time.Since(start))
+
+	start = time.Now()
+	err := s.doCompact(curTs)
+	took := time.Since(start).Milliseconds()
+	stats.Record(s.ctx, metrics.SplitstoreCompactionTimeSeconds.M(float64(took)/1e3))
+
+	if err != nil {
+		log.Errorf("COMPACTION ERROR: %s", err)
+	}
+}
+
+func (s *SplitStore) doCompact(curTs *types.TipSet) error {
+ currentEpoch := curTs.Height()
+ boundaryEpoch := currentEpoch - CompactionBoundary
+
+ var inclMsgsEpoch abi.ChainEpoch
+ inclMsgsRange := abi.ChainEpoch(s.cfg.HotStoreMessageRetention) * build.Finality
+ if inclMsgsRange < boundaryEpoch {
+ inclMsgsEpoch = boundaryEpoch - inclMsgsRange
+ }
+
+ log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "inclMsgsEpoch", inclMsgsEpoch, "compactionIndex", s.compactionIndex)
+
+ markSet, err := s.markSetEnv.Create("live", s.markSetSize)
+ if err != nil {
+ return xerrors.Errorf("error creating mark set: %w", err)
+ }
+ defer markSet.Close() //nolint:errcheck
+ defer s.debug.Flush()
+
+ if err := s.checkClosing(); err != nil {
+ return err
+ }
+
+ // we are ready for concurrent marking
+ s.beginTxnMarking(markSet)
+
+ // 0. track all protected references at beginning of compaction; anything added later should
+ // be transactionally protected by the write
+ log.Info("protecting references with registered protectors")
+ err = s.applyProtectors()
+ if err != nil {
+ return err
+ }
+
+ // 1. mark reachable objects by walking the chain from the current epoch; we keep state roots
+ // and messages until the boundary epoch.
+ log.Info("marking reachable objects")
+ startMark := time.Now()
+
+ var count int64
+ err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch,
+ func(c cid.Cid) error {
+ if isUnitaryObject(c) {
+ return errStopWalk
+ }
+
+ count++
+ return markSet.Mark(c)
+ })
+
+ if err != nil {
+ return xerrors.Errorf("error marking: %w", err)
+ }
+
+ s.markSetSize = count + count>>2 // overestimate a bit
+
+ log.Infow("marking done", "took", time.Since(startMark), "marked", count)
+
+ if err := s.checkClosing(); err != nil {
+ return err
+ }
+
+ // 1.1 protect transactional refs
+ err = s.protectTxnRefs(markSet)
+ if err != nil {
+ return xerrors.Errorf("error protecting transactional refs: %w", err)
+ }
+
+ if err := s.checkClosing(); err != nil {
+ return err
+ }
+
+ // 2. iterate through the hotstore to collect cold objects
+ log.Info("collecting cold objects")
+ startCollect := time.Now()
+
+ // some stats for logging
+ var hotCnt, coldCnt int
+
+ cold := make([]cid.Cid, 0, s.coldPurgeSize)
+ err = s.hot.ForEachKey(func(c cid.Cid) error {
+ // was it marked?
+ mark, err := markSet.Has(c)
+ if err != nil {
+ return xerrors.Errorf("error checking mark set for %s: %w", c, err)
+ }
+
+ if mark {
+ hotCnt++
+ return nil
+ }
+
+ // it's cold, mark it as candidate for move
+ cold = append(cold, c)
+ coldCnt++
+
+ return nil
+ })
+
+ if err != nil {
+ return xerrors.Errorf("error collecting cold objects: %w", err)
+ }
+
+ log.Infow("cold collection done", "took", time.Since(startCollect))
+
+ if coldCnt > 0 {
+ s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit
+ }
+
+ log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt)
+ stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt)))
+ stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt)))
+
+ if err := s.checkClosing(); err != nil {
+ return err
+ }
+
+ // now that we have collected cold objects, check for missing references from transactional i/o
+ // and disable further collection of such references (they will not be acted upon as we can't
+ // possibly delete objects we didn't have when we were collecting cold objects)
+ s.waitForMissingRefs(markSet)
+
+ if err := s.checkClosing(); err != nil {
+ return err
+ }
+
+ // 3. copy the cold objects to the coldstore -- if we have one
+ if !s.cfg.DiscardColdBlocks {
+ log.Info("moving cold objects to the coldstore")
+ startMove := time.Now()
+ err = s.moveColdBlocks(cold)
+ if err != nil {
+ return xerrors.Errorf("error moving cold objects: %w", err)
+ }
+ log.Infow("moving done", "took", time.Since(startMove))
+
+ if err := s.checkClosing(); err != nil {
+ return err
+ }
+ }
+
+ // 4. sort cold objects so that the dags with most references are deleted first;
+ // this ensures that we can't refer to a dag with its constituents already deleted, i.e.
+ // we leave no dangling references.
+ log.Info("sorting cold objects")
+ startSort := time.Now()
+ err = s.sortObjects(cold)
+ if err != nil {
+ return xerrors.Errorf("error sorting objects: %w", err)
+ }
+ log.Infow("sorting done", "took", time.Since(startSort))
+
+ // 4.1 protect transactional refs once more
+ // strictly speaking, this is not necessary as purge will do it before deleting each
+ // batch. however, there is likely a largish number of references accumulated during
+ // the sort and this protects before entering purge context.
+ err = s.protectTxnRefs(markSet)
+ if err != nil {
+ return xerrors.Errorf("error protecting transactional refs: %w", err)
+ }
+
+ if err := s.checkClosing(); err != nil {
+ return err
+ }
+
+ // 5. purge cold objects from the hotstore, taking protected references into account
+ log.Info("purging cold objects from the hotstore")
+ startPurge := time.Now()
+ err = s.purge(cold, markSet)
+ if err != nil {
+ return xerrors.Errorf("error purging cold blocks: %w", err)
+ }
+ log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge))
+
+ // we are done; do some housekeeping
+ s.endTxnProtect()
+ s.gcHotstore()
+
+ err = s.setBaseEpoch(boundaryEpoch)
+ if err != nil {
+ return xerrors.Errorf("error saving base epoch: %w", err)
+ }
+
+ err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize))
+ if err != nil {
+ return xerrors.Errorf("error saving mark set size: %w", err)
+ }
+
+ s.compactionIndex++
+ err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex))
+ if err != nil {
+ return xerrors.Errorf("error saving compaction index: %w", err)
+ }
+
+ return nil
+}
+
+func (s *SplitStore) beginTxnProtect() {
+ log.Info("preparing compaction transaction")
+
+ s.txnLk.Lock()
+ defer s.txnLk.Unlock()
+
+ s.txnActive = true
+ s.txnRefs = make(map[cid.Cid]struct{})
+ s.txnMissing = make(map[cid.Cid]struct{})
+}
+
+func (s *SplitStore) beginTxnMarking(markSet MarkSet) {
+ markSet.SetConcurrent()
+
+ s.txnLk.Lock()
+ s.txnProtect = markSet
+ s.txnLk.Unlock()
+}
+
+func (s *SplitStore) endTxnProtect() {
+ s.txnLk.Lock()
+ defer s.txnLk.Unlock()
+
+ if !s.txnActive {
+ return
+ }
+
+ // release markset memory
+ if s.txnProtect != nil {
+ _ = s.txnProtect.Close()
+ }
+
+ s.txnActive = false
+ s.txnProtect = nil
+ s.txnRefs = nil
+ s.txnMissing = nil
+}
+
+func (s *SplitStore) walkChain(ts *types.TipSet, inclState abi.ChainEpoch, inclMsgs abi.ChainEpoch,
+ f func(cid.Cid) error) error {
+ visited := cid.NewSet()
+ walked := cid.NewSet()
+ toWalk := ts.Cids()
+ walkCnt := 0
+ scanCnt := 0
+
+ walkBlock := func(c cid.Cid) error {
+ if !visited.Visit(c) {
+ return nil
+ }
+
+ walkCnt++
+
+ if err := f(c); err != nil {
+ return err
+ }
+
+ var hdr types.BlockHeader
+ err := s.view(c, func(data []byte) error {
+ return hdr.UnmarshalCBOR(bytes.NewBuffer(data))
+ })
+
+ if err != nil {
+ return xerrors.Errorf("error unmarshaling block header (cid: %s): %w", c, err)
+ }
+
+ // messages are retained if within the inclMsgs boundary
+ if hdr.Height >= inclMsgs && hdr.Height > 0 {
+ if inclMsgs < inclState {
+ // we need to use walkObjectIncomplete here, as messages may be missing early on if we
+ // synced from snapshot and have a long HotStoreMessageRetentionPolicy.
+ stopWalk := func(_ cid.Cid) error { return errStopWalk }
+ if err := s.walkObjectIncomplete(hdr.Messages, walked, f, stopWalk); err != nil {
+ return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
+ }
+ } else {
+ if err := s.walkObject(hdr.Messages, walked, f); err != nil {
+ return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
+ }
+ }
+ }
+
+ // state and message receipts are only retained if within the inclState boundary
+ if hdr.Height >= inclState || hdr.Height == 0 {
+ if hdr.Height > 0 {
+ if err := s.walkObject(hdr.ParentMessageReceipts, walked, f); err != nil {
+ return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
+ }
+ }
+
+ if err := s.walkObject(hdr.ParentStateRoot, walked, f); err != nil {
+ return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err)
+ }
+ scanCnt++
+ }
+
+ if hdr.Height > 0 {
+ toWalk = append(toWalk, hdr.Parents...)
+ }
+
+ return nil
+ }
+
+ for len(toWalk) > 0 {
+ // walking can take a while, so check this with every opportunity
+ if err := s.checkClosing(); err != nil {
+ return err
+ }
+
+ walking := toWalk
+ toWalk = nil
+ for _, c := range walking {
+ if err := walkBlock(c); err != nil {
+ return xerrors.Errorf("error walking block (cid: %s): %w", c, err)
+ }
+ }
+ }
+
+ log.Infow("chain walk done", "walked", walkCnt, "scanned", scanCnt)
+
+ return nil
+}
+
+func (s *SplitStore) walkObject(c cid.Cid, walked *cid.Set, f func(cid.Cid) error) error {
+ if !walked.Visit(c) {
+ return nil
+ }
+
+ if err := f(c); err != nil {
+ if err == errStopWalk {
+ return nil
+ }
+
+ return err
+ }
+
+ if c.Prefix().Codec != cid.DagCBOR {
+ return nil
+ }
+
+ // check this before recursing
+ if err := s.checkClosing(); err != nil {
+ return err
+ }
+
+ var links []cid.Cid
+ err := s.view(c, func(data []byte) error {
+ return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
+ links = append(links, c)
+ })
+ })
+
+ if err != nil {
+ return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err)
+ }
+
+ for _, c := range links {
+ err := s.walkObject(c, walked, f)
+ if err != nil {
+ return xerrors.Errorf("error walking link (cid: %s): %w", c, err)
+ }
+ }
+
+ return nil
+}
+
+// like walkObject, but the object may be potentially incomplete (references missing)
+func (s *SplitStore) walkObjectIncomplete(c cid.Cid, walked *cid.Set, f, missing func(cid.Cid) error) error {
+ if !walked.Visit(c) {
+ return nil
+ }
+
+ // occurs check -- only for DAGs
+ if c.Prefix().Codec == cid.DagCBOR {
+ has, err := s.has(c)
+ if err != nil {
+ return xerrors.Errorf("error occur checking %s: %w", c, err)
+ }
+
+ if !has {
+ err = missing(c)
+ if err == errStopWalk {
+ return nil
+ }
+
+ return err
+ }
+ }
+
+ if err := f(c); err != nil {
+ if err == errStopWalk {
+ return nil
+ }
+
+ return err
+ }
+
+ if c.Prefix().Codec != cid.DagCBOR {
+ return nil
+ }
+
+ // check this before recursing
+ if err := s.checkClosing(); err != nil {
+ return err
+ }
+
+ var links []cid.Cid
+ err := s.view(c, func(data []byte) error {
+ return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
+ links = append(links, c)
+ })
+ })
+
+ if err != nil {
+ return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err)
+ }
+
+ for _, c := range links {
+ err := s.walkObjectIncomplete(c, walked, f, missing)
+ if err != nil {
+ return xerrors.Errorf("error walking link (cid: %s): %w", c, err)
+ }
+ }
+
+ return nil
+}
+
+// internal version used by walk
+func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error {
+ if isIdentiyCid(c) {
+ data, err := decodeIdentityCid(c)
+ if err != nil {
+ return err
+ }
+
+ return cb(data)
+ }
+
+ err := s.hot.View(c, cb)
+ switch err {
+ case bstore.ErrNotFound:
+ return s.cold.View(c, cb)
+
+ default:
+ return err
+ }
+}
+
+func (s *SplitStore) has(c cid.Cid) (bool, error) {
+ if isIdentiyCid(c) {
+ return true, nil
+ }
+
+ has, err := s.hot.Has(c)
+
+ if has || err != nil {
+ return has, err
+ }
+
+ return s.cold.Has(c)
+}
+
+func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
+ batch := make([]blocks.Block, 0, batchSize)
+
+ for _, c := range cold {
+ if err := s.checkClosing(); err != nil {
+ return err
+ }
+
+ blk, err := s.hot.Get(c)
+ if err != nil {
+ if err == bstore.ErrNotFound {
+ log.Warnf("hotstore missing block %s", c)
+ continue
+ }
+
+ return xerrors.Errorf("error retrieving block %s from hotstore: %w", c, err)
+ }
+
+ batch = append(batch, blk)
+ if len(batch) == batchSize {
+ err = s.cold.PutMany(batch)
+ if err != nil {
+ return xerrors.Errorf("error putting batch to coldstore: %w", err)
+ }
+ batch = batch[:0]
+ }
+ }
+
+ if len(batch) > 0 {
+ err := s.cold.PutMany(batch)
+ if err != nil {
+ return xerrors.Errorf("error putting batch to coldstore: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// sorts a slice of objects heaviest first -- it's a little expensive but worth the
+// guarantee that we don't leave dangling references behind, e.g. if we die in the middle
+// of a purge.
+func (s *SplitStore) sortObjects(cids []cid.Cid) error {
+ // we cache the keys to avoid making a gazillion of strings
+ keys := make(map[cid.Cid]string)
+ key := func(c cid.Cid) string {
+ s, ok := keys[c]
+ if !ok {
+ s = string(c.Hash())
+ keys[c] = s
+ }
+ return s
+ }
+
+ // compute sorting weights as the cumulative number of DAG links
+ weights := make(map[string]int)
+ for _, c := range cids {
+ // this can take quite a while, so check for shutdown with every opportunity
+ if err := s.checkClosing(); err != nil {
+ return err
+ }
+
+ w := s.getObjectWeight(c, weights, key)
+ weights[key(c)] = w
+ }
+
+ // sort!
+ sort.Slice(cids, func(i, j int) bool {
+ wi := weights[key(cids[i])]
+ wj := weights[key(cids[j])]
+ if wi == wj {
+ return bytes.Compare(cids[i].Hash(), cids[j].Hash()) > 0
+ }
+
+ return wi > wj
+ })
+
+ return nil
+}
+
+func (s *SplitStore) getObjectWeight(c cid.Cid, weights map[string]int, key func(cid.Cid) string) int {
+ w, ok := weights[key(c)]
+ if ok {
+ return w
+ }
+
+ // we treat block headers specially to avoid walking the entire chain
+ var hdr types.BlockHeader
+ err := s.view(c, func(data []byte) error {
+ return hdr.UnmarshalCBOR(bytes.NewBuffer(data))
+ })
+ if err == nil {
+ w1 := s.getObjectWeight(hdr.ParentStateRoot, weights, key)
+ weights[key(hdr.ParentStateRoot)] = w1
+
+ w2 := s.getObjectWeight(hdr.Messages, weights, key)
+ weights[key(hdr.Messages)] = w2
+
+ return 1 + w1 + w2
+ }
+
+ var links []cid.Cid
+ err = s.view(c, func(data []byte) error {
+ return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
+ links = append(links, c)
+ })
+ })
+ if err != nil {
+ return 1
+ }
+
+ w = 1
+ for _, c := range links {
+ // these are internal refs, so dags will be dags
+ if c.Prefix().Codec != cid.DagCBOR {
+ w++
+ continue
+ }
+
+ wc := s.getObjectWeight(c, weights, key)
+ weights[key(c)] = wc
+
+ w += wc
+ }
+
+ return w
+}
+
+func (s *SplitStore) purgeBatch(cids []cid.Cid, deleteBatch func([]cid.Cid) error) error {
+ if len(cids) == 0 {
+ return nil
+ }
+
+ // we don't delete one giant batch of millions of objects, but rather do smaller batches
+ // so that we don't stop the world for an extended period of time
+ done := false
+ for i := 0; !done; i++ {
+ start := i * batchSize
+ end := start + batchSize
+ if end >= len(cids) {
+ end = len(cids)
+ done = true
+ }
+
+ err := deleteBatch(cids[start:end])
+ if err != nil {
+ return xerrors.Errorf("error deleting batch: %w", err)
+ }
+ }
+
+ return nil
+}
+
+func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSet) error {
+ deadCids := make([]cid.Cid, 0, batchSize)
+ var purgeCnt, liveCnt int
+ defer func() {
+ log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt)
+ }()
+
+ return s.purgeBatch(cids,
+ func(cids []cid.Cid) error {
+ deadCids := deadCids[:0]
+
+ for {
+ if err := s.checkClosing(); err != nil {
+ return err
+ }
+
+ s.txnLk.Lock()
+ if len(s.txnRefs) == 0 {
+ // keep the lock!
+ break
+ }
+
+ // unlock and protect
+ s.txnLk.Unlock()
+
+ err := s.protectTxnRefs(markSet)
+ if err != nil {
+ return xerrors.Errorf("error protecting transactional refs: %w", err)
+ }
+ }
+
+ defer s.txnLk.Unlock()
+
+ for _, c := range cids {
+ live, err := markSet.Has(c)
+ if err != nil {
+ return xerrors.Errorf("error checking for liveness: %w", err)
+ }
+
+ if live {
+ liveCnt++
+ continue
+ }
+
+ deadCids = append(deadCids, c)
+ }
+
+ err := s.hot.DeleteMany(deadCids)
+ if err != nil {
+ return xerrors.Errorf("error purging cold objects: %w", err)
+ }
+
+ s.debug.LogDelete(deadCids)
+
+ purgeCnt += len(deadCids)
+ return nil
+ })
+}
+
+// I really don't like having this code, but we seem to have some occasional DAG references with
+// missing constituents. During testing in mainnet *some* of these references *sometimes* appeared
+// after a little bit.
+// We need to figure out where they are coming from and eliminate that vector, but until then we
+// have this gem[TM].
+// My best guess is that they are parent message receipts or yet to be computed state roots; magik
+// thinks the cause may be block validation.
+func (s *SplitStore) waitForMissingRefs(markSet MarkSet) {
+ s.txnLk.Lock()
+ missing := s.txnMissing
+ s.txnMissing = nil
+ s.txnLk.Unlock()
+
+ if len(missing) == 0 {
+ return
+ }
+
+ log.Info("waiting for missing references")
+ start := time.Now()
+ count := 0
+ defer func() {
+ log.Infow("waiting for missing references done", "took", time.Since(start), "marked", count)
+ }()
+
+ for i := 0; i < 3 && len(missing) > 0; i++ {
+ if err := s.checkClosing(); err != nil {
+ return
+ }
+
+ wait := time.Duration(i) * time.Minute
+ log.Infof("retrying for %d missing references in %s (attempt: %d)", len(missing), wait, i+1)
+ if wait > 0 {
+ time.Sleep(wait)
+ }
+
+ towalk := missing
+ walked := cid.NewSet()
+ missing = make(map[cid.Cid]struct{})
+
+ for c := range towalk {
+ err := s.walkObjectIncomplete(c, walked,
+ func(c cid.Cid) error {
+ if isUnitaryObject(c) {
+ return errStopWalk
+ }
+
+ mark, err := markSet.Has(c)
+ if err != nil {
+ return xerrors.Errorf("error checking markset for %s: %w", c, err)
+ }
+
+ if mark {
+ return errStopWalk
+ }
+
+ count++
+ return markSet.Mark(c)
+ },
+ func(c cid.Cid) error {
+ missing[c] = struct{}{}
+ return errStopWalk
+ })
+
+ if err != nil {
+ log.Warnf("error marking: %s", err)
+ }
+ }
+ }
+
+ if len(missing) > 0 {
+ log.Warnf("still missing %d references", len(missing))
+ for c := range missing {
+ log.Warnf("unresolved missing reference: %s", c)
+ }
+ }
+}
diff --git a/blockstore/splitstore/splitstore_expose.go b/blockstore/splitstore/splitstore_expose.go
new file mode 100644
index 00000000000..1065e460c2d
--- /dev/null
+++ b/blockstore/splitstore/splitstore_expose.go
@@ -0,0 +1,114 @@
+package splitstore
+
+import (
+ "context"
+ "errors"
+
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+
+ bstore "github.com/filecoin-project/lotus/blockstore"
+)
+
+type exposedSplitStore struct {
+ s *SplitStore
+}
+
+var _ bstore.Blockstore = (*exposedSplitStore)(nil)
+
+func (s *SplitStore) Expose() bstore.Blockstore {
+ return &exposedSplitStore{s: s}
+}
+
+func (es *exposedSplitStore) DeleteBlock(_ cid.Cid) error {
+ return errors.New("DeleteBlock: operation not supported")
+}
+
+func (es *exposedSplitStore) DeleteMany(_ []cid.Cid) error {
+ return errors.New("DeleteMany: operation not supported")
+}
+
+func (es *exposedSplitStore) Has(c cid.Cid) (bool, error) {
+ if isIdentiyCid(c) {
+ return true, nil
+ }
+
+ has, err := es.s.hot.Has(c)
+ if has || err != nil {
+ return has, err
+ }
+
+ return es.s.cold.Has(c)
+}
+
+func (es *exposedSplitStore) Get(c cid.Cid) (blocks.Block, error) {
+ if isIdentiyCid(c) {
+ data, err := decodeIdentityCid(c)
+ if err != nil {
+ return nil, err
+ }
+
+ return blocks.NewBlockWithCid(data, c)
+ }
+
+ blk, err := es.s.hot.Get(c)
+ switch err {
+ case bstore.ErrNotFound:
+ return es.s.cold.Get(c)
+ default:
+ return blk, err
+ }
+}
+
+func (es *exposedSplitStore) GetSize(c cid.Cid) (int, error) {
+ if isIdentiyCid(c) {
+ data, err := decodeIdentityCid(c)
+ if err != nil {
+ return 0, err
+ }
+
+ return len(data), nil
+ }
+
+ size, err := es.s.hot.GetSize(c)
+ switch err {
+ case bstore.ErrNotFound:
+ return es.s.cold.GetSize(c)
+ default:
+ return size, err
+ }
+}
+
+func (es *exposedSplitStore) Put(blk blocks.Block) error {
+ return es.s.Put(blk)
+}
+
+func (es *exposedSplitStore) PutMany(blks []blocks.Block) error {
+ return es.s.PutMany(blks)
+}
+
+func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+ return es.s.AllKeysChan(ctx)
+}
+
+func (es *exposedSplitStore) HashOnRead(enabled bool) {}
+
+func (es *exposedSplitStore) View(c cid.Cid, f func([]byte) error) error {
+ if isIdentiyCid(c) {
+ data, err := decodeIdentityCid(c)
+ if err != nil {
+ return err
+ }
+
+ return f(data)
+ }
+
+ err := es.s.hot.View(c, f)
+ switch err {
+ case bstore.ErrNotFound:
+ return es.s.cold.View(c, f)
+
+ default:
+ return err
+ }
+}
diff --git a/blockstore/splitstore/splitstore_gc.go b/blockstore/splitstore/splitstore_gc.go
new file mode 100644
index 00000000000..46668167ccc
--- /dev/null
+++ b/blockstore/splitstore/splitstore_gc.go
@@ -0,0 +1,30 @@
+package splitstore
+
+import (
+ "fmt"
+ "time"
+
+ bstore "github.com/filecoin-project/lotus/blockstore"
+)
+
+func (s *SplitStore) gcHotstore() {
+ if err := s.gcBlockstoreOnline(s.hot); err != nil {
+ log.Warnf("error garbage collecting hostore: %s", err)
+ }
+}
+
+func (s *SplitStore) gcBlockstoreOnline(b bstore.Blockstore) error {
+ if gc, ok := b.(bstore.BlockstoreGC); ok {
+ log.Info("garbage collecting blockstore")
+ startGC := time.Now()
+
+ if err := gc.CollectGarbage(); err != nil {
+ return err
+ }
+
+ log.Infow("garbage collecting hotstore done", "took", time.Since(startGC))
+ return nil
+ }
+
+ return fmt.Errorf("blockstore doesn't support online gc: %T", b)
+}
diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go
new file mode 100644
index 00000000000..26e5c3cc0b6
--- /dev/null
+++ b/blockstore/splitstore/splitstore_test.go
@@ -0,0 +1,381 @@
+package splitstore
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/types/mock"
+
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+ datastore "github.com/ipfs/go-datastore"
+ dssync "github.com/ipfs/go-datastore/sync"
+ logging "github.com/ipfs/go-log/v2"
+)
+
+func init() {
+ CompactionThreshold = 5
+ CompactionBoundary = 2
+ logging.SetLogLevel("splitstore", "DEBUG")
+}
+
+func testSplitStore(t *testing.T, cfg *Config) {
+ chain := &mockChain{t: t}
+
+ // the myriads of stores
+ ds := dssync.MutexWrap(datastore.NewMapDatastore())
+ hot := newMockStore()
+ cold := newMockStore()
+
+ // this is necessary to avoid the garbage mock puts in the blocks
+ garbage := blocks.NewBlock([]byte{1, 2, 3})
+ err := cold.Put(garbage)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // genesis
+ genBlock := mock.MkBlock(nil, 0, 0)
+ genBlock.Messages = garbage.Cid()
+ genBlock.ParentMessageReceipts = garbage.Cid()
+ genBlock.ParentStateRoot = garbage.Cid()
+ genBlock.Timestamp = uint64(time.Now().Unix())
+
+ genTs := mock.TipSet(genBlock)
+ chain.push(genTs)
+
+ // put the genesis block to cold store
+ blk, err := genBlock.ToStorageBlock()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = cold.Put(blk)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // create a garbage block that is protected with a registered protector
+ protected := blocks.NewBlock([]byte("protected!"))
+ err = hot.Put(protected)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // and another one that is not protected
+ unprotected := blocks.NewBlock([]byte("unprotected!"))
+ err = hot.Put(unprotected)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // open the splitstore
+ ss, err := Open("", ds, hot, cold, cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ss.Close() //nolint
+
+ // register our protector
+ ss.AddProtector(func(protect func(cid.Cid) error) error {
+ return protect(protected.Cid())
+ })
+
+ err = ss.Start(chain)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // make some tipsets, but not enough to cause compaction
+ mkBlock := func(curTs *types.TipSet, i int, stateRoot blocks.Block) *types.TipSet {
+ blk := mock.MkBlock(curTs, uint64(i), uint64(i))
+
+ blk.Messages = garbage.Cid()
+ blk.ParentMessageReceipts = garbage.Cid()
+ blk.ParentStateRoot = stateRoot.Cid()
+ blk.Timestamp = uint64(time.Now().Unix())
+
+ sblk, err := blk.ToStorageBlock()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ss.Put(stateRoot)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ss.Put(sblk)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ts := mock.TipSet(blk)
+ chain.push(ts)
+
+ return ts
+ }
+
+ waitForCompaction := func() {
+ for atomic.LoadInt32(&ss.compacting) == 1 {
+ time.Sleep(100 * time.Millisecond)
+ }
+ }
+
+ curTs := genTs
+ for i := 1; i < 5; i++ {
+ stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7})
+ curTs = mkBlock(curTs, i, stateRoot)
+ waitForCompaction()
+ }
+
+ // count objects in the cold and hot stores
+ countBlocks := func(bs blockstore.Blockstore) int {
+ count := 0
+ _ = bs.(blockstore.BlockstoreIterator).ForEachKey(func(_ cid.Cid) error {
+ count++
+ return nil
+ })
+ return count
+ }
+
+ coldCnt := countBlocks(cold)
+ hotCnt := countBlocks(hot)
+
+ if coldCnt != 2 {
+ t.Errorf("expected %d blocks, but got %d", 2, coldCnt)
+ }
+
+ if hotCnt != 12 {
+ t.Errorf("expected %d blocks, but got %d", 12, hotCnt)
+ }
+
+ // trigger a compaction
+ for i := 5; i < 10; i++ {
+ stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7})
+ curTs = mkBlock(curTs, i, stateRoot)
+ waitForCompaction()
+ }
+
+ coldCnt = countBlocks(cold)
+ hotCnt = countBlocks(hot)
+
+ if coldCnt != 6 {
+ t.Errorf("expected %d cold blocks, but got %d", 6, coldCnt)
+ }
+
+ if hotCnt != 18 {
+ t.Errorf("expected %d hot blocks, but got %d", 18, hotCnt)
+ }
+
+ // ensure our protected block is still there
+ has, err := hot.Has(protected.Cid())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !has {
+ t.Fatal("protected block is missing from hotstore")
+ }
+
+ // ensure our unprotected block is in the coldstore now
+ has, err = hot.Has(unprotected.Cid())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if has {
+ t.Fatal("unprotected block is still in hotstore")
+ }
+
+ has, err = cold.Has(unprotected.Cid())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !has {
+ t.Fatal("unprotected block is missing from coldstore")
+ }
+
+ // Make sure we can revert without panicking.
+ chain.revert(2)
+}
+
+func TestSplitStoreCompaction(t *testing.T) {
+ testSplitStore(t, &Config{MarkSetType: "map"})
+}
+
+type mockChain struct {
+ t testing.TB
+
+ sync.Mutex
+ genesis *types.BlockHeader
+ tipsets []*types.TipSet
+ listener func(revert []*types.TipSet, apply []*types.TipSet) error
+}
+
+func (c *mockChain) push(ts *types.TipSet) {
+ c.Lock()
+ c.tipsets = append(c.tipsets, ts)
+ if c.genesis == nil {
+ c.genesis = ts.Blocks()[0]
+ }
+ c.Unlock()
+
+ if c.listener != nil {
+ err := c.listener(nil, []*types.TipSet{ts})
+ if err != nil {
+ c.t.Errorf("mockchain: error dispatching listener: %s", err)
+ }
+ }
+}
+
+func (c *mockChain) revert(count int) {
+ c.Lock()
+ revert := make([]*types.TipSet, count)
+ if count > len(c.tipsets) {
+ c.Unlock()
+ c.t.Fatalf("not enough tipsets to revert")
+ }
+ copy(revert, c.tipsets[len(c.tipsets)-count:])
+ c.tipsets = c.tipsets[:len(c.tipsets)-count]
+ c.Unlock()
+
+ if c.listener != nil {
+ err := c.listener(revert, nil)
+ if err != nil {
+ c.t.Errorf("mockchain: error dispatching listener: %s", err)
+ }
+ }
+}
+
+func (c *mockChain) GetTipsetByHeight(_ context.Context, epoch abi.ChainEpoch, _ *types.TipSet, _ bool) (*types.TipSet, error) {
+ c.Lock()
+ defer c.Unlock()
+
+ iEpoch := int(epoch)
+ if iEpoch > len(c.tipsets) {
+ return nil, fmt.Errorf("bad epoch %d", epoch)
+ }
+
+ return c.tipsets[iEpoch], nil
+}
+
+func (c *mockChain) GetHeaviestTipSet() *types.TipSet {
+ c.Lock()
+ defer c.Unlock()
+
+ return c.tipsets[len(c.tipsets)-1]
+}
+
+func (c *mockChain) SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error) {
+ c.listener = change
+}
+
+type mockStore struct {
+ mx sync.Mutex
+ set map[cid.Cid]blocks.Block
+}
+
+func newMockStore() *mockStore {
+ return &mockStore{set: make(map[cid.Cid]blocks.Block)}
+}
+
+func (b *mockStore) Has(cid cid.Cid) (bool, error) {
+ b.mx.Lock()
+ defer b.mx.Unlock()
+ _, ok := b.set[cid]
+ return ok, nil
+}
+
+func (b *mockStore) HashOnRead(hor bool) {}
+
+func (b *mockStore) Get(cid cid.Cid) (blocks.Block, error) {
+ b.mx.Lock()
+ defer b.mx.Unlock()
+
+ blk, ok := b.set[cid]
+ if !ok {
+ return nil, blockstore.ErrNotFound
+ }
+ return blk, nil
+}
+
+func (b *mockStore) GetSize(cid cid.Cid) (int, error) {
+ blk, err := b.Get(cid)
+ if err != nil {
+ return 0, err
+ }
+
+ return len(blk.RawData()), nil
+}
+
+func (b *mockStore) View(cid cid.Cid, f func([]byte) error) error {
+ blk, err := b.Get(cid)
+ if err != nil {
+ return err
+ }
+ return f(blk.RawData())
+}
+
+func (b *mockStore) Put(blk blocks.Block) error {
+ b.mx.Lock()
+ defer b.mx.Unlock()
+
+ b.set[blk.Cid()] = blk
+ return nil
+}
+
+func (b *mockStore) PutMany(blks []blocks.Block) error {
+ b.mx.Lock()
+ defer b.mx.Unlock()
+
+ for _, blk := range blks {
+ b.set[blk.Cid()] = blk
+ }
+ return nil
+}
+
+func (b *mockStore) DeleteBlock(cid cid.Cid) error {
+ b.mx.Lock()
+ defer b.mx.Unlock()
+
+ delete(b.set, cid)
+ return nil
+}
+
+func (b *mockStore) DeleteMany(cids []cid.Cid) error {
+ b.mx.Lock()
+ defer b.mx.Unlock()
+
+ for _, c := range cids {
+ delete(b.set, c)
+ }
+ return nil
+}
+
+func (b *mockStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+ return nil, errors.New("not implemented")
+}
+
+func (b *mockStore) ForEachKey(f func(cid.Cid) error) error {
+ b.mx.Lock()
+ defer b.mx.Unlock()
+
+ for c := range b.set {
+ err := f(c)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *mockStore) Close() error {
+ return nil
+}
diff --git a/blockstore/splitstore/splitstore_util.go b/blockstore/splitstore/splitstore_util.go
new file mode 100644
index 00000000000..aef845832c0
--- /dev/null
+++ b/blockstore/splitstore/splitstore_util.go
@@ -0,0 +1,67 @@
+package splitstore
+
+import (
+ "encoding/binary"
+
+ "golang.org/x/xerrors"
+
+ cid "github.com/ipfs/go-cid"
+ mh "github.com/multiformats/go-multihash"
+
+ "github.com/filecoin-project/go-state-types/abi"
+)
+
+func epochToBytes(epoch abi.ChainEpoch) []byte {
+ return uint64ToBytes(uint64(epoch))
+}
+
+func bytesToEpoch(buf []byte) abi.ChainEpoch {
+ return abi.ChainEpoch(bytesToUint64(buf))
+}
+
+func int64ToBytes(i int64) []byte {
+ return uint64ToBytes(uint64(i))
+}
+
+func bytesToInt64(buf []byte) int64 {
+ return int64(bytesToUint64(buf))
+}
+
+func uint64ToBytes(i uint64) []byte {
+ buf := make([]byte, 16)
+ n := binary.PutUvarint(buf, i)
+ return buf[:n]
+}
+
+func bytesToUint64(buf []byte) uint64 {
+ i, _ := binary.Uvarint(buf)
+ return i
+}
+
+func isUnitaryObject(c cid.Cid) bool {
+ pre := c.Prefix()
+ switch pre.Codec {
+ case cid.FilCommitmentSealed, cid.FilCommitmentUnsealed:
+ return true
+ default:
+ return pre.MhType == mh.IDENTITY
+ }
+}
+
+func isIdentiyCid(c cid.Cid) bool {
+ return c.Prefix().MhType == mh.IDENTITY
+}
+
+func decodeIdentityCid(c cid.Cid) ([]byte, error) {
+ dmh, err := mh.Decode(c.Hash())
+ if err != nil {
+ return nil, xerrors.Errorf("error decoding identity cid %s: %w", c, err)
+ }
+
+ // sanity check
+ if dmh.Code != mh.IDENTITY {
+ return nil, xerrors.Errorf("error decoding identity cid %s: hash type is not identity", c)
+ }
+
+ return dmh.Digest, nil
+}
diff --git a/blockstore/splitstore/splitstore_warmup.go b/blockstore/splitstore/splitstore_warmup.go
new file mode 100644
index 00000000000..55fa94c6ffb
--- /dev/null
+++ b/blockstore/splitstore/splitstore_warmup.go
@@ -0,0 +1,126 @@
+package splitstore
+
+import (
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/xerrors"
+
+ blocks "github.com/ipfs/go-block-format"
+ cid "github.com/ipfs/go-cid"
+
+ bstore "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+// warmup acquires the compaction lock and spawns a goroutine to warm up the hotstore;
+// this is necessary when we sync from a snapshot or when we enable the splitstore
+// on top of an existing blockstore (which becomes the coldstore).
+func (s *SplitStore) warmup(curTs *types.TipSet) error {
+ if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) {
+ return xerrors.Errorf("error locking compaction")
+ }
+
+ go func() {
+ defer atomic.StoreInt32(&s.compacting, 0)
+
+ log.Info("warming up hotstore")
+ start := time.Now()
+
+ err := s.doWarmup(curTs)
+ if err != nil {
+ log.Errorf("error warming up hotstore: %s", err)
+ return
+ }
+
+ log.Infow("warm up done", "took", time.Since(start))
+ }()
+
+ return nil
+}
+
+// the actual warmup procedure; it walks the chain loading all state roots at the boundary
+// and headers all the way up to genesis.
+// objects are written in batches so as to minimize overhead.
+func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
+ epoch := curTs.Height()
+ batchHot := make([]blocks.Block, 0, batchSize)
+ count := int64(0)
+ xcount := int64(0)
+ missing := int64(0)
+ err := s.walkChain(curTs, epoch, epoch+1, // we don't load messages in warmup
+ func(c cid.Cid) error {
+ if isUnitaryObject(c) {
+ return errStopWalk
+ }
+
+ count++
+
+ has, err := s.hot.Has(c)
+ if err != nil {
+ return err
+ }
+
+ if has {
+ return nil
+ }
+
+ blk, err := s.cold.Get(c)
+ if err != nil {
+ if err == bstore.ErrNotFound {
+ missing++
+ return nil
+ }
+ return err
+ }
+
+ xcount++
+
+ batchHot = append(batchHot, blk)
+ if len(batchHot) == batchSize {
+ err = s.hot.PutMany(batchHot)
+ if err != nil {
+ return err
+ }
+ batchHot = batchHot[:0]
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return err
+ }
+
+ if len(batchHot) > 0 {
+ err = s.hot.PutMany(batchHot)
+ if err != nil {
+ return err
+ }
+ }
+
+ log.Infow("warmup stats", "visited", count, "warm", xcount, "missing", missing)
+
+ s.markSetSize = count + count>>2 // overestimate a bit
+ err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize))
+ if err != nil {
+ log.Warnf("error saving mark set size: %s", err)
+ }
+
+ // save the warmup epoch
+ err = s.ds.Put(warmupEpochKey, epochToBytes(epoch))
+ if err != nil {
+ return xerrors.Errorf("error saving warm up epoch: %w", err)
+ }
+ s.mx.Lock()
+ s.warmupEpoch = epoch
+ s.mx.Unlock()
+
+ // also save the compactionIndex, as this is used as an indicator of warmup for upgraded nodes
+ err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex))
+ if err != nil {
+ return xerrors.Errorf("error saving compaction index: %w", err)
+ }
+
+ return nil
+}
diff --git a/blockstore/sync.go b/blockstore/sync.go
new file mode 100644
index 00000000000..848ccd19d2b
--- /dev/null
+++ b/blockstore/sync.go
@@ -0,0 +1,81 @@
+package blockstore
+
+import (
+ "context"
+ "sync"
+
+ blocks "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-cid"
+)
+
+// NewMemorySync returns a thread-safe in-memory blockstore.
+func NewMemorySync() *SyncBlockstore {
+ return &SyncBlockstore{bs: make(MemBlockstore)}
+}
+
+// SyncBlockstore is a terminal blockstore that is a synchronized version
+// of MemBlockstore.
+type SyncBlockstore struct {
+ mu sync.RWMutex
+ bs MemBlockstore // specifically use a memStore to save indirection overhead.
+}
+
+func (m *SyncBlockstore) DeleteBlock(k cid.Cid) error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ return m.bs.DeleteBlock(k)
+}
+
+func (m *SyncBlockstore) DeleteMany(ks []cid.Cid) error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ return m.bs.DeleteMany(ks)
+}
+
+func (m *SyncBlockstore) Has(k cid.Cid) (bool, error) {
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+ return m.bs.Has(k)
+}
+
+func (m *SyncBlockstore) View(k cid.Cid, callback func([]byte) error) error {
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+
+ return m.bs.View(k, callback)
+}
+
+func (m *SyncBlockstore) Get(k cid.Cid) (blocks.Block, error) {
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+ return m.bs.Get(k)
+}
+
+func (m *SyncBlockstore) GetSize(k cid.Cid) (int, error) {
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+ return m.bs.GetSize(k)
+}
+
+func (m *SyncBlockstore) Put(b blocks.Block) error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ return m.bs.Put(b)
+}
+
+func (m *SyncBlockstore) PutMany(bs []blocks.Block) error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ return m.bs.PutMany(bs)
+}
+
+func (m *SyncBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+ // this blockstore implementation doesn't do any async work.
+ return m.bs.AllKeysChan(ctx)
+}
+
+func (m *SyncBlockstore) HashOnRead(enabled bool) {
+ // noop
+}
diff --git a/lib/timedbs/timedbs.go b/blockstore/timed.go
similarity index 51%
rename from lib/timedbs/timedbs.go
rename to blockstore/timed.go
index c5c1a8fe003..80e6c8a080f 100644
--- a/lib/timedbs/timedbs.go
+++ b/blockstore/timed.go
@@ -1,4 +1,4 @@
-package timedbs
+package blockstore
import (
"context"
@@ -10,37 +10,37 @@ import (
"github.com/ipfs/go-cid"
"github.com/raulk/clock"
"go.uber.org/multierr"
-
- "github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/lib/blockstore"
)
-// TimedCacheBS is a blockstore that keeps blocks for at least the specified
-// caching interval before discarding them. Garbage collection must be started
-// and stopped by calling Start/Stop.
+// TimedCacheBlockstore is a blockstore that keeps blocks for at least the
+// specified caching interval before discarding them. Garbage collection must
+// be started and stopped by calling Start/Stop.
//
// Under the covers, it's implemented with an active and an inactive blockstore
// that are rotated every cache time interval. This means all blocks will be
// stored at most 2x the cache interval.
-type TimedCacheBS struct {
+//
+// Create a new instance by calling the NewTimedCacheBlockstore constructor.
+type TimedCacheBlockstore struct {
mu sync.RWMutex
- active, inactive blockstore.MemStore
+ active, inactive MemBlockstore
clock clock.Clock
interval time.Duration
closeCh chan struct{}
doneRotatingCh chan struct{}
}
-func NewTimedCacheBS(cacheTime time.Duration) *TimedCacheBS {
- return &TimedCacheBS{
- active: blockstore.NewTemporary(),
- inactive: blockstore.NewTemporary(),
- interval: cacheTime,
- clock: build.Clock,
+func NewTimedCacheBlockstore(interval time.Duration) *TimedCacheBlockstore {
+ b := &TimedCacheBlockstore{
+ active: NewMemory(),
+ inactive: NewMemory(),
+ interval: interval,
+ clock: clock.New(),
}
+ return b
}
-func (t *TimedCacheBS) Start(ctx context.Context) error {
+func (t *TimedCacheBlockstore) Start(_ context.Context) error {
t.mu.Lock()
defer t.mu.Unlock()
if t.closeCh != nil {
@@ -65,11 +65,11 @@ func (t *TimedCacheBS) Start(ctx context.Context) error {
return nil
}
-func (t *TimedCacheBS) Stop(ctx context.Context) error {
+func (t *TimedCacheBlockstore) Stop(_ context.Context) error {
t.mu.Lock()
defer t.mu.Unlock()
if t.closeCh == nil {
- return fmt.Errorf("not started started")
+ return fmt.Errorf("not started")
}
select {
case <-t.closeCh:
@@ -80,15 +80,15 @@ func (t *TimedCacheBS) Stop(ctx context.Context) error {
return nil
}
-func (t *TimedCacheBS) rotate() {
- newBs := blockstore.NewTemporary()
+func (t *TimedCacheBlockstore) rotate() {
+ newBs := NewMemory()
t.mu.Lock()
t.inactive, t.active = t.active, newBs
t.mu.Unlock()
}
-func (t *TimedCacheBS) Put(b blocks.Block) error {
+func (t *TimedCacheBlockstore) Put(b blocks.Block) error {
// Don't check the inactive set here. We want to keep this block for at
// least one interval.
t.mu.Lock()
@@ -96,33 +96,50 @@ func (t *TimedCacheBS) Put(b blocks.Block) error {
return t.active.Put(b)
}
-func (t *TimedCacheBS) PutMany(bs []blocks.Block) error {
+func (t *TimedCacheBlockstore) PutMany(bs []blocks.Block) error {
t.mu.Lock()
defer t.mu.Unlock()
return t.active.PutMany(bs)
}
-func (t *TimedCacheBS) Get(k cid.Cid) (blocks.Block, error) {
+func (t *TimedCacheBlockstore) View(k cid.Cid, callback func([]byte) error) error {
+ // The underlying blockstore is always a "mem" blockstore so there's no difference,
+ // from a performance perspective, between view & get. So we call Get to avoid
+ // calling an arbitrary callback while holding a lock.
+ t.mu.RLock()
+ block, err := t.active.Get(k)
+ if err == ErrNotFound {
+ block, err = t.inactive.Get(k)
+ }
+ t.mu.RUnlock()
+
+ if err != nil {
+ return err
+ }
+ return callback(block.RawData())
+}
+
+func (t *TimedCacheBlockstore) Get(k cid.Cid) (blocks.Block, error) {
t.mu.RLock()
defer t.mu.RUnlock()
b, err := t.active.Get(k)
- if err == blockstore.ErrNotFound {
+ if err == ErrNotFound {
b, err = t.inactive.Get(k)
}
return b, err
}
-func (t *TimedCacheBS) GetSize(k cid.Cid) (int, error) {
+func (t *TimedCacheBlockstore) GetSize(k cid.Cid) (int, error) {
t.mu.RLock()
defer t.mu.RUnlock()
size, err := t.active.GetSize(k)
- if err == blockstore.ErrNotFound {
+ if err == ErrNotFound {
size, err = t.inactive.GetSize(k)
}
return size, err
}
-func (t *TimedCacheBS) Has(k cid.Cid) (bool, error) {
+func (t *TimedCacheBlockstore) Has(k cid.Cid) (bool, error) {
t.mu.RLock()
defer t.mu.RUnlock()
if has, err := t.active.Has(k); err != nil {
@@ -133,17 +150,23 @@ func (t *TimedCacheBS) Has(k cid.Cid) (bool, error) {
return t.inactive.Has(k)
}
-func (t *TimedCacheBS) HashOnRead(_ bool) {
+func (t *TimedCacheBlockstore) HashOnRead(_ bool) {
// no-op
}
-func (t *TimedCacheBS) DeleteBlock(k cid.Cid) error {
+func (t *TimedCacheBlockstore) DeleteBlock(k cid.Cid) error {
t.mu.Lock()
defer t.mu.Unlock()
return multierr.Combine(t.active.DeleteBlock(k), t.inactive.DeleteBlock(k))
}
-func (t *TimedCacheBS) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+func (t *TimedCacheBlockstore) DeleteMany(ks []cid.Cid) error {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ return multierr.Combine(t.active.DeleteMany(ks), t.inactive.DeleteMany(ks))
+}
+
+func (t *TimedCacheBlockstore) AllKeysChan(_ context.Context) (<-chan cid.Cid, error) {
t.mu.RLock()
defer t.mu.RUnlock()
diff --git a/lib/timedbs/timedbs_test.go b/blockstore/timed_test.go
similarity index 93%
rename from lib/timedbs/timedbs_test.go
rename to blockstore/timed_test.go
index e01215bbdb0..d5fefff9461 100644
--- a/lib/timedbs/timedbs_test.go
+++ b/blockstore/timed_test.go
@@ -1,4 +1,4 @@
-package timedbs
+package blockstore
import (
"context"
@@ -12,8 +12,8 @@ import (
"github.com/ipfs/go-cid"
)
-func TestTimedBSSimple(t *testing.T) {
- tc := NewTimedCacheBS(10 * time.Millisecond)
+func TestTimedCacheBlockstoreSimple(t *testing.T) {
+ tc := NewTimedCacheBlockstore(10 * time.Millisecond)
mClock := clock.NewMock()
mClock.Set(time.Now())
tc.clock = mClock
diff --git a/blockstore/union.go b/blockstore/union.go
new file mode 100644
index 00000000000..a99ba259133
--- /dev/null
+++ b/blockstore/union.go
@@ -0,0 +1,119 @@
+package blockstore
+
+import (
+ "context"
+
+ blocks "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-cid"
+)
+
+type unionBlockstore []Blockstore
+
+// Union returns an unioned blockstore.
+//
+// * Reads return from the first blockstore that has the value, querying in the
+// supplied order.
+// * Writes (puts and deletes) are broadcast to all stores.
+//
+func Union(stores ...Blockstore) Blockstore {
+ return unionBlockstore(stores)
+}
+
+func (m unionBlockstore) Has(cid cid.Cid) (has bool, err error) {
+ for _, bs := range m {
+ if has, err = bs.Has(cid); has || err != nil {
+ break
+ }
+ }
+ return has, err
+}
+
+func (m unionBlockstore) Get(cid cid.Cid) (blk blocks.Block, err error) {
+ for _, bs := range m {
+ if blk, err = bs.Get(cid); err == nil || err != ErrNotFound {
+ break
+ }
+ }
+ return blk, err
+}
+
+func (m unionBlockstore) View(cid cid.Cid, callback func([]byte) error) (err error) {
+ for _, bs := range m {
+ if err = bs.View(cid, callback); err == nil || err != ErrNotFound {
+ break
+ }
+ }
+ return err
+}
+
+func (m unionBlockstore) GetSize(cid cid.Cid) (size int, err error) {
+ for _, bs := range m {
+ if size, err = bs.GetSize(cid); err == nil || err != ErrNotFound {
+ break
+ }
+ }
+ return size, err
+}
+
+func (m unionBlockstore) Put(block blocks.Block) (err error) {
+ for _, bs := range m {
+ if err = bs.Put(block); err != nil {
+ break
+ }
+ }
+ return err
+}
+
+func (m unionBlockstore) PutMany(blks []blocks.Block) (err error) {
+ for _, bs := range m {
+ if err = bs.PutMany(blks); err != nil {
+ break
+ }
+ }
+ return err
+}
+
+func (m unionBlockstore) DeleteBlock(cid cid.Cid) (err error) {
+ for _, bs := range m {
+ if err = bs.DeleteBlock(cid); err != nil {
+ break
+ }
+ }
+ return err
+}
+
+func (m unionBlockstore) DeleteMany(cids []cid.Cid) (err error) {
+ for _, bs := range m {
+ if err = bs.DeleteMany(cids); err != nil {
+ break
+ }
+ }
+ return err
+}
+
+func (m unionBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+ // this does not deduplicate; this interface needs to be revisited.
+ outCh := make(chan cid.Cid)
+
+ go func() {
+ defer close(outCh)
+
+ for _, bs := range m {
+ ch, err := bs.AllKeysChan(ctx)
+ if err != nil {
+ return
+ }
+ for cid := range ch {
+ outCh <- cid
+ }
+ }
+ }()
+
+ return outCh, nil
+}
+
+func (m unionBlockstore) HashOnRead(enabled bool) {
+ for _, bs := range m {
+ bs.HashOnRead(enabled)
+ }
+}
diff --git a/blockstore/union_test.go b/blockstore/union_test.go
new file mode 100644
index 00000000000..b6202689227
--- /dev/null
+++ b/blockstore/union_test.go
@@ -0,0 +1,102 @@
+package blockstore
+
+import (
+ "context"
+ "testing"
+
+ blocks "github.com/ipfs/go-block-format"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ b0 = blocks.NewBlock([]byte("abc"))
+ b1 = blocks.NewBlock([]byte("foo"))
+ b2 = blocks.NewBlock([]byte("bar"))
+)
+
+func TestUnionBlockstore_Get(t *testing.T) {
+ m1 := NewMemory()
+ m2 := NewMemory()
+
+ _ = m1.Put(b1)
+ _ = m2.Put(b2)
+
+ u := Union(m1, m2)
+
+ v1, err := u.Get(b1.Cid())
+ require.NoError(t, err)
+ require.Equal(t, b1.RawData(), v1.RawData())
+
+ v2, err := u.Get(b2.Cid())
+ require.NoError(t, err)
+ require.Equal(t, b2.RawData(), v2.RawData())
+}
+
+func TestUnionBlockstore_Put_PutMany_Delete_AllKeysChan(t *testing.T) {
+ m1 := NewMemory()
+ m2 := NewMemory()
+
+ u := Union(m1, m2)
+
+ err := u.Put(b0)
+ require.NoError(t, err)
+
+ var has bool
+
+ // write was broadcast to all stores.
+ has, _ = m1.Has(b0.Cid())
+ require.True(t, has)
+
+ has, _ = m2.Has(b0.Cid())
+ require.True(t, has)
+
+ has, _ = u.Has(b0.Cid())
+ require.True(t, has)
+
+ // put many.
+ err = u.PutMany([]blocks.Block{b1, b2})
+ require.NoError(t, err)
+
+ // write was broadcast to all stores.
+ has, _ = m1.Has(b1.Cid())
+ require.True(t, has)
+
+ has, _ = m1.Has(b2.Cid())
+ require.True(t, has)
+
+ has, _ = m2.Has(b1.Cid())
+ require.True(t, has)
+
+ has, _ = m2.Has(b2.Cid())
+ require.True(t, has)
+
+ // also in the union store.
+ has, _ = u.Has(b1.Cid())
+ require.True(t, has)
+
+ has, _ = u.Has(b2.Cid())
+ require.True(t, has)
+
+ // deleted from all stores.
+ err = u.DeleteBlock(b1.Cid())
+ require.NoError(t, err)
+
+ has, _ = u.Has(b1.Cid())
+ require.False(t, has)
+
+ has, _ = m1.Has(b1.Cid())
+ require.False(t, has)
+
+ has, _ = m2.Has(b1.Cid())
+ require.False(t, has)
+
+ // check that AllKeysChan returns b0 and b2, twice (once per backing store)
+ ch, err := u.AllKeysChan(context.Background())
+ require.NoError(t, err)
+
+ var i int
+ for range ch {
+ i++
+ }
+ require.Equal(t, 4, i)
+}
diff --git a/build/bootstrap.go b/build/bootstrap.go
index 80c1529ff6c..98fa2e2f9cf 100644
--- a/build/bootstrap.go
+++ b/build/bootstrap.go
@@ -2,39 +2,33 @@ package build
import (
"context"
- "os"
+ "embed"
+ "path"
"strings"
"github.com/filecoin-project/lotus/lib/addrutil"
- "golang.org/x/xerrors"
- rice "github.com/GeertJohan/go.rice"
"github.com/libp2p/go-libp2p-core/peer"
)
+//go:embed bootstrap
+var bootstrapfs embed.FS
+
func BuiltinBootstrap() ([]peer.AddrInfo, error) {
if DisableBuiltinAssets {
return nil, nil
}
-
- var out []peer.AddrInfo
-
- b := rice.MustFindBox("bootstrap")
- err := b.Walk("", func(path string, info os.FileInfo, err error) error {
+ if BootstrappersFile != "" {
+ spi, err := bootstrapfs.ReadFile(path.Join("bootstrap", BootstrappersFile))
if err != nil {
- return xerrors.Errorf("failed to walk box: %w", err)
- }
-
- if !strings.HasSuffix(path, ".pi") {
- return nil
+ return nil, err
}
- spi := b.MustString(path)
- if spi == "" {
- return nil
+ if len(spi) == 0 {
+ return nil, nil
}
- pi, err := addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(spi), "\n"))
- out = append(out, pi...)
- return err
- })
- return out, err
+
+ return addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(string(spi)), "\n"))
+ }
+
+ return nil, nil
}
diff --git a/build/bootstrap/butterflynet.pi b/build/bootstrap/butterflynet.pi
new file mode 100644
index 00000000000..cc4ce4f1d22
--- /dev/null
+++ b/build/bootstrap/butterflynet.pi
@@ -0,0 +1,2 @@
+/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBbZd7Su9XfLUQ12RynGQ3ZmGY1nGqFntmqop9pLNJE6g
+/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWGKRzEY4tJFTmAmrYUpa1CVVohmV9YjJbC9v5XWY2gUji
diff --git a/build/bootstrap/calibnet.pi b/build/bootstrap/calibnet.pi
new file mode 100644
index 00000000000..20473eaaa61
--- /dev/null
+++ b/build/bootstrap/calibnet.pi
@@ -0,0 +1,4 @@
+/dns4/bootstrap-0.calibration.fildev.network/tcp/1347/p2p/12D3KooWJkikQQkxS58spo76BYzFt4fotaT5NpV2zngvrqm4u5ow
+/dns4/bootstrap-1.calibration.fildev.network/tcp/1347/p2p/12D3KooWLce5FDHR4EX4CrYavphA5xS3uDsX6aoowXh5tzDUxJav
+/dns4/bootstrap-2.calibration.fildev.network/tcp/1347/p2p/12D3KooWA9hFfQG9GjP6bHeuQQbMD3FDtZLdW1NayxKXUT26PQZu
+/dns4/bootstrap-3.calibration.fildev.network/tcp/1347/p2p/12D3KooWMHDi3LVTFG8Szqogt7RkNXvonbQYqSazxBx41A5aeuVz
diff --git a/build/bootstrap/interopnet.pi b/build/bootstrap/interopnet.pi
new file mode 100644
index 00000000000..923653d94e3
--- /dev/null
+++ b/build/bootstrap/interopnet.pi
@@ -0,0 +1,2 @@
+/dns4/bootstrap-0.interop.fildev.network/tcp/1347/p2p/12D3KooWLGPq9JL1xwL6gHok7HSNxtK1Q5kyfg4Hk69ifRPghn4i
+/dns4/bootstrap-1.interop.fildev.network/tcp/1347/p2p/12D3KooWFYS1f31zafv8mqqYu8U3hEqYvaZ6avWzYU3BmZdpyH3h
diff --git a/build/bootstrap/bootstrappers.pi b/build/bootstrap/mainnet.pi
similarity index 93%
rename from build/bootstrap/bootstrappers.pi
rename to build/bootstrap/mainnet.pi
index 886ac8e9991..370e954bd4a 100644
--- a/build/bootstrap/bootstrappers.pi
+++ b/build/bootstrap/mainnet.pi
@@ -7,7 +7,7 @@
/dns4/bootstrap-6.mainnet.filops.net/tcp/1347/p2p/12D3KooWP5MwCiqdMETF9ub1P3MbCvQCcfconnYHbWg6sUJcDRQQ
/dns4/bootstrap-7.mainnet.filops.net/tcp/1347/p2p/12D3KooWRs3aY1p3juFjPy8gPN95PEQChm2QKGUCAdcDCC4EBMKf
/dns4/bootstrap-8.mainnet.filops.net/tcp/1347/p2p/12D3KooWScFR7385LTyR4zU1bYdzSiiAb5rnNABfVahPvVSzyTkR
-/dns4/lotus-bootstrap.forceup.cn/tcp/41778/p2p/12D3KooWFQsv3nRMUevZNWWsY1Wu6NUzUbawnWU5NcRhgKuJA37C
+/dns4/lotus-bootstrap.ipfsforce.com/tcp/41778/p2p/12D3KooWGhufNmZHF3sv48aQeS13ng5XVJZ9E6qy2Ms4VzqeUsHk
/dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz
/dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u
/dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt
diff --git a/build/bootstrap/nerpanet.pi b/build/bootstrap/nerpanet.pi
new file mode 100644
index 00000000000..83ad1d184b8
--- /dev/null
+++ b/build/bootstrap/nerpanet.pi
@@ -0,0 +1,4 @@
+/dns4/bootstrap-2.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWQcL6ReWmR6ASWx4iT7EiAmxKDQpvgq1MKNTQZp5NPnWW
+/dns4/bootstrap-0.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWGyJCwCm7EfupM15CFPXM4c7zRVHwwwjcuy9umaGeztMX
+/dns4/bootstrap-3.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWNK9RmfksKXSCQj7ZwAM7L6roqbN4kwJteihq7yPvSgPs
+/dns4/bootstrap-1.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWCWSaH6iUyXYspYxELjDfzToBsyVGVz3QvC7ysXv7wESo
diff --git a/build/genesis.go b/build/genesis.go
index dc4ded27365..6d94b38cf68 100644
--- a/build/genesis.go
+++ b/build/genesis.go
@@ -1,23 +1,23 @@
package build
import (
- rice "github.com/GeertJohan/go.rice"
+ "embed"
+ "path"
+
logging "github.com/ipfs/go-log/v2"
)
// moved from now-defunct build/paramfetch.go
var log = logging.Logger("build")
+//go:embed genesis
+var genesisfs embed.FS
+
func MaybeGenesis() []byte {
- builtinGen, err := rice.FindBox("genesis")
+ genBytes, err := genesisfs.ReadFile(path.Join("genesis", GenesisFile))
if err != nil {
log.Warnf("loading built-in genesis: %s", err)
return nil
}
- genBytes, err := builtinGen.Bytes("devnet.car")
- if err != nil {
- log.Warnf("loading built-in genesis: %s", err)
- }
-
return genBytes
}
diff --git a/build/genesis/butterflynet.car b/build/genesis/butterflynet.car
new file mode 100644
index 00000000000..7c2d19251f7
Binary files /dev/null and b/build/genesis/butterflynet.car differ
diff --git a/build/genesis/calibnet.car b/build/genesis/calibnet.car
new file mode 100644
index 00000000000..cbade953f86
Binary files /dev/null and b/build/genesis/calibnet.car differ
diff --git a/build/genesis/interopnet.car b/build/genesis/interopnet.car
new file mode 100644
index 00000000000..2c7c2a49873
Binary files /dev/null and b/build/genesis/interopnet.car differ
diff --git a/build/genesis/devnet.car b/build/genesis/mainnet.car
similarity index 100%
rename from build/genesis/devnet.car
rename to build/genesis/mainnet.car
diff --git a/build/genesis/nerpanet.car b/build/genesis/nerpanet.car
new file mode 100644
index 00000000000..c32e0171bce
Binary files /dev/null and b/build/genesis/nerpanet.car differ
diff --git a/build/isnearupgrade.go b/build/isnearupgrade.go
new file mode 100644
index 00000000000..4273f0e9e3f
--- /dev/null
+++ b/build/isnearupgrade.go
@@ -0,0 +1,9 @@
+package build
+
+import (
+ "github.com/filecoin-project/go-state-types/abi"
+)
+
+func IsNearUpgrade(epoch, upgradeEpoch abi.ChainEpoch) bool {
+ return epoch > upgradeEpoch-Finality && epoch < upgradeEpoch+Finality
+}
diff --git a/build/openrpc.go b/build/openrpc.go
new file mode 100644
index 00000000000..ac951c17287
--- /dev/null
+++ b/build/openrpc.go
@@ -0,0 +1,54 @@
+package build
+
+import (
+ "bytes"
+ "compress/gzip"
+ "embed"
+ "encoding/json"
+
+ apitypes "github.com/filecoin-project/lotus/api/types"
+)
+
+//go:embed openrpc
+var openrpcfs embed.FS
+
+func mustReadGzippedOpenRPCDocument(data []byte) apitypes.OpenRPCDocument {
+ zr, err := gzip.NewReader(bytes.NewBuffer(data))
+ if err != nil {
+ log.Fatal(err)
+ }
+ m := apitypes.OpenRPCDocument{}
+ err = json.NewDecoder(zr).Decode(&m)
+ if err != nil {
+ log.Fatal(err)
+ }
+ err = zr.Close()
+ if err != nil {
+ log.Fatal(err)
+ }
+ return m
+}
+
+func OpenRPCDiscoverJSON_Full() apitypes.OpenRPCDocument {
+ data, err := openrpcfs.ReadFile("openrpc/full.json.gz")
+ if err != nil {
+ panic(err)
+ }
+ return mustReadGzippedOpenRPCDocument(data)
+}
+
+func OpenRPCDiscoverJSON_Miner() apitypes.OpenRPCDocument {
+ data, err := openrpcfs.ReadFile("openrpc/miner.json.gz")
+ if err != nil {
+ panic(err)
+ }
+ return mustReadGzippedOpenRPCDocument(data)
+}
+
+func OpenRPCDiscoverJSON_Worker() apitypes.OpenRPCDocument {
+ data, err := openrpcfs.ReadFile("openrpc/worker.json.gz")
+ if err != nil {
+ panic(err)
+ }
+ return mustReadGzippedOpenRPCDocument(data)
+}
diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz
new file mode 100644
index 00000000000..56feb6ee5e4
Binary files /dev/null and b/build/openrpc/full.json.gz differ
diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz
new file mode 100644
index 00000000000..aa8ba625d7a
Binary files /dev/null and b/build/openrpc/miner.json.gz differ
diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz
new file mode 100644
index 00000000000..593a45b6acd
Binary files /dev/null and b/build/openrpc/worker.json.gz differ
diff --git a/build/openrpc_test.go b/build/openrpc_test.go
new file mode 100644
index 00000000000..20c77533193
--- /dev/null
+++ b/build/openrpc_test.go
@@ -0,0 +1,23 @@
+package build
+
+import (
+ "testing"
+
+ apitypes "github.com/filecoin-project/lotus/api/types"
+)
+
+func TestOpenRPCDiscoverJSON_Version(t *testing.T) {
+ // openRPCDocVersion is the current OpenRPC version of the API docs.
+ openRPCDocVersion := "1.2.6"
+
+ for i, docFn := range []func() apitypes.OpenRPCDocument{
+ OpenRPCDiscoverJSON_Full,
+ OpenRPCDiscoverJSON_Miner,
+ OpenRPCDiscoverJSON_Worker,
+ } {
+ doc := docFn()
+ if got, ok := doc["openrpc"]; !ok || got != openRPCDocVersion {
+ t.Fatalf("case: %d, want: %s, got: %v, doc: %v", i, openRPCDocVersion, got, doc)
+ }
+ }
+}
diff --git a/build/parameters.go b/build/parameters.go
index 7d34a783122..9e60f12a6a3 100644
--- a/build/parameters.go
+++ b/build/parameters.go
@@ -1,7 +1,19 @@
package build
-import rice "github.com/GeertJohan/go.rice"
+import (
+ _ "embed"
+)
+
+//go:embed proof-params/parameters.json
+var params []byte
+
+//go:embed proof-params/srs-inner-product.json
+var srs []byte
func ParametersJSON() []byte {
- return rice.MustFindBox("proof-params").MustBytes("parameters.json")
+ return params
+}
+
+func SrsJSON() []byte {
+ return srs
}
diff --git a/build/params_2k.go b/build/params_2k.go
index 5a0e8fd612c..387d2da0bbd 100644
--- a/build/params_2k.go
+++ b/build/params_2k.go
@@ -3,41 +3,85 @@
package build
import (
- "math"
"os"
+ "strconv"
- "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/policy"
)
-const UpgradeBreezeHeight = -1
+const BootstrappersFile = ""
+const GenesisFile = ""
+
+var UpgradeBreezeHeight = abi.ChainEpoch(-1)
+
const BreezeGasTampingDuration = 0
-const UpgradeSmokeHeight = -1
-const UpgradeIgnitionHeight = -2
-const UpgradeRefuelHeight = -3
-const UpgradeTapeHeight = -4
+var UpgradeSmokeHeight = abi.ChainEpoch(-1)
+var UpgradeIgnitionHeight = abi.ChainEpoch(-2)
+var UpgradeRefuelHeight = abi.ChainEpoch(-3)
+var UpgradeTapeHeight = abi.ChainEpoch(-4)
+
+var UpgradeAssemblyHeight = abi.ChainEpoch(-5)
+var UpgradeLiftoffHeight = abi.ChainEpoch(-6)
+
+var UpgradeKumquatHeight = abi.ChainEpoch(-7)
+var UpgradeCalicoHeight = abi.ChainEpoch(-8)
+var UpgradePersianHeight = abi.ChainEpoch(-9)
+var UpgradeOrangeHeight = abi.ChainEpoch(-10)
+var UpgradeClausHeight = abi.ChainEpoch(-11)
+
+var UpgradeTrustHeight = abi.ChainEpoch(-12)
-var UpgradeActorsV2Height = abi.ChainEpoch(10)
-var UpgradeLiftoffHeight = abi.ChainEpoch(-5)
+var UpgradeNorwegianHeight = abi.ChainEpoch(-13)
-const UpgradeKumquatHeight = -6
+var UpgradeTurboHeight = abi.ChainEpoch(-14)
+
+var UpgradeHyperdriveHeight = abi.ChainEpoch(-15)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
}
func init() {
- policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
+ policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1, abi.RegisteredSealProof_StackedDrg8MiBV1)
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
+ policy.SetPreCommitChallengeDelay(abi.ChainEpoch(10))
+
+ getUpgradeHeight := func(ev string, def abi.ChainEpoch) abi.ChainEpoch {
+ hs, found := os.LookupEnv(ev)
+ if found {
+ h, err := strconv.Atoi(hs)
+ if err != nil {
+ log.Panicf("failed to parse %s env var", ev)
+ }
+
+ return abi.ChainEpoch(h)
+ }
- if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" {
- UpgradeActorsV2Height = math.MaxInt64
- UpgradeLiftoffHeight = 11
+ return def
}
+ UpgradeBreezeHeight = getUpgradeHeight("LOTUS_BREEZE_HEIGHT", UpgradeBreezeHeight)
+ UpgradeSmokeHeight = getUpgradeHeight("LOTUS_SMOKE_HEIGHT", UpgradeSmokeHeight)
+ UpgradeIgnitionHeight = getUpgradeHeight("LOTUS_IGNITION_HEIGHT", UpgradeIgnitionHeight)
+ UpgradeRefuelHeight = getUpgradeHeight("LOTUS_REFUEL_HEIGHT", UpgradeRefuelHeight)
+ UpgradeTapeHeight = getUpgradeHeight("LOTUS_TAPE_HEIGHT", UpgradeTapeHeight)
+ UpgradeAssemblyHeight = getUpgradeHeight("LOTUS_ACTORSV2_HEIGHT", UpgradeAssemblyHeight)
+ UpgradeLiftoffHeight = getUpgradeHeight("LOTUS_LIFTOFF_HEIGHT", UpgradeLiftoffHeight)
+ UpgradeKumquatHeight = getUpgradeHeight("LOTUS_KUMQUAT_HEIGHT", UpgradeKumquatHeight)
+ UpgradeCalicoHeight = getUpgradeHeight("LOTUS_CALICO_HEIGHT", UpgradeCalicoHeight)
+ UpgradePersianHeight = getUpgradeHeight("LOTUS_PERSIAN_HEIGHT", UpgradePersianHeight)
+ UpgradeOrangeHeight = getUpgradeHeight("LOTUS_ORANGE_HEIGHT", UpgradeOrangeHeight)
+ UpgradeClausHeight = getUpgradeHeight("LOTUS_CLAUS_HEIGHT", UpgradeClausHeight)
+ UpgradeTrustHeight = getUpgradeHeight("LOTUS_ACTORSV3_HEIGHT", UpgradeTrustHeight)
+ UpgradeNorwegianHeight = getUpgradeHeight("LOTUS_NORWEGIAN_HEIGHT", UpgradeNorwegianHeight)
+ UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight)
+ UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight)
+
BuildType |= Build2k
}
@@ -53,3 +97,7 @@ const SlashablePowerDelay = 20
// Epochs
const InteractivePoRepConfidence = 6
+
+const BootstrapPeerThreshold = 1
+
+var WhitelistedBlock = cid.Undef
diff --git a/build/params_butterfly.go b/build/params_butterfly.go
new file mode 100644
index 00000000000..258f6ab0f2e
--- /dev/null
+++ b/build/params_butterfly.go
@@ -0,0 +1,58 @@
+// +build butterflynet
+
+package build
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ "github.com/ipfs/go-cid"
+)
+
+var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
+ 0: DrandMainnet,
+}
+
+const BootstrappersFile = "butterflynet.pi"
+const GenesisFile = "butterflynet.car"
+
+const UpgradeBreezeHeight = -1
+const BreezeGasTampingDuration = 120
+const UpgradeSmokeHeight = -2
+const UpgradeIgnitionHeight = -3
+const UpgradeRefuelHeight = -4
+
+var UpgradeAssemblyHeight = abi.ChainEpoch(30)
+
+const UpgradeTapeHeight = 60
+const UpgradeLiftoffHeight = -5
+const UpgradeKumquatHeight = 90
+const UpgradeCalicoHeight = 120
+const UpgradePersianHeight = 150
+const UpgradeClausHeight = 180
+const UpgradeOrangeHeight = 210
+const UpgradeTrustHeight = 240
+const UpgradeNorwegianHeight = UpgradeTrustHeight + (builtin2.EpochsInHour * 12)
+const UpgradeTurboHeight = 8922
+const UpgradeHyperdriveHeight = 9999999
+
+func init() {
+ policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30))
+ policy.SetSupportedProofTypes(
+ abi.RegisteredSealProof_StackedDrg512MiBV1,
+ )
+
+ SetAddressNetwork(address.Testnet)
+
+ Devnet = true
+}
+
+const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
+
+const PropagationDelaySecs = uint64(6)
+
+// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
+const BootstrapPeerThreshold = 2
+
+var WhitelistedBlock = cid.Undef
diff --git a/build/params_calibnet.go b/build/params_calibnet.go
new file mode 100644
index 00000000000..df334a516a7
--- /dev/null
+++ b/build/params_calibnet.go
@@ -0,0 +1,72 @@
+// +build calibnet
+
+package build
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ "github.com/ipfs/go-cid"
+)
+
+var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
+ 0: DrandMainnet,
+}
+
+const BootstrappersFile = "calibnet.pi"
+const GenesisFile = "calibnet.car"
+
+const UpgradeBreezeHeight = -1
+const BreezeGasTampingDuration = 120
+
+const UpgradeSmokeHeight = -2
+
+const UpgradeIgnitionHeight = -3
+const UpgradeRefuelHeight = -4
+
+var UpgradeAssemblyHeight = abi.ChainEpoch(30)
+
+const UpgradeTapeHeight = 60
+
+const UpgradeLiftoffHeight = -5
+
+const UpgradeKumquatHeight = 90
+
+const UpgradeCalicoHeight = 120
+const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 1)
+
+const UpgradeClausHeight = 270
+
+const UpgradeOrangeHeight = 300
+
+const UpgradeTrustHeight = 330
+
+const UpgradeNorwegianHeight = 360
+
+const UpgradeTurboHeight = 390
+
+const UpgradeHyperdriveHeight = 420
+
+func init() {
+ policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))
+ policy.SetSupportedProofTypes(
+ abi.RegisteredSealProof_StackedDrg32GiBV1,
+ abi.RegisteredSealProof_StackedDrg64GiBV1,
+ )
+
+ SetAddressNetwork(address.Testnet)
+
+ Devnet = true
+
+ BuildType = BuildCalibnet
+}
+
+const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
+
+const PropagationDelaySecs = uint64(6)
+
+// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
+const BootstrapPeerThreshold = 4
+
+var WhitelistedBlock = cid.Undef
diff --git a/build/params_interop.go b/build/params_interop.go
new file mode 100644
index 00000000000..73cc1c7d9ca
--- /dev/null
+++ b/build/params_interop.go
@@ -0,0 +1,104 @@
+// +build interopnet
+
+package build
+
+import (
+ "os"
+ "strconv"
+
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+)
+
+const BootstrappersFile = "interopnet.pi"
+const GenesisFile = "interopnet.car"
+
+var UpgradeBreezeHeight = abi.ChainEpoch(-1)
+
+const BreezeGasTampingDuration = 0
+
+var UpgradeSmokeHeight = abi.ChainEpoch(-1)
+var UpgradeIgnitionHeight = abi.ChainEpoch(-2)
+var UpgradeRefuelHeight = abi.ChainEpoch(-3)
+var UpgradeTapeHeight = abi.ChainEpoch(-4)
+
+var UpgradeAssemblyHeight = abi.ChainEpoch(-5)
+var UpgradeLiftoffHeight = abi.ChainEpoch(-6)
+
+var UpgradeKumquatHeight = abi.ChainEpoch(-7)
+var UpgradeCalicoHeight = abi.ChainEpoch(-8)
+var UpgradePersianHeight = abi.ChainEpoch(-9)
+var UpgradeOrangeHeight = abi.ChainEpoch(-10)
+var UpgradeClausHeight = abi.ChainEpoch(-11)
+
+var UpgradeTrustHeight = abi.ChainEpoch(-12)
+
+var UpgradeNorwegianHeight = abi.ChainEpoch(-13)
+
+var UpgradeTurboHeight = abi.ChainEpoch(-14)
+
+var UpgradeHyperdriveHeight = abi.ChainEpoch(-15)
+
+var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
+ 0: DrandMainnet,
+}
+
+func init() {
+ policy.SetSupportedProofTypes(
+ abi.RegisteredSealProof_StackedDrg2KiBV1,
+ abi.RegisteredSealProof_StackedDrg8MiBV1,
+ abi.RegisteredSealProof_StackedDrg512MiBV1,
+ )
+ policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
+ policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
+ policy.SetPreCommitChallengeDelay(abi.ChainEpoch(10))
+
+ getUpgradeHeight := func(ev string, def abi.ChainEpoch) abi.ChainEpoch {
+ hs, found := os.LookupEnv(ev)
+ if found {
+ h, err := strconv.Atoi(hs)
+ if err != nil {
+ log.Panicf("failed to parse %s env var", ev)
+ }
+
+ return abi.ChainEpoch(h)
+ }
+
+ return def
+ }
+
+ UpgradeBreezeHeight = getUpgradeHeight("LOTUS_BREEZE_HEIGHT", UpgradeBreezeHeight)
+ UpgradeSmokeHeight = getUpgradeHeight("LOTUS_SMOKE_HEIGHT", UpgradeSmokeHeight)
+ UpgradeIgnitionHeight = getUpgradeHeight("LOTUS_IGNITION_HEIGHT", UpgradeIgnitionHeight)
+ UpgradeRefuelHeight = getUpgradeHeight("LOTUS_REFUEL_HEIGHT", UpgradeRefuelHeight)
+ UpgradeTapeHeight = getUpgradeHeight("LOTUS_TAPE_HEIGHT", UpgradeTapeHeight)
+ UpgradeAssemblyHeight = getUpgradeHeight("LOTUS_ACTORSV2_HEIGHT", UpgradeAssemblyHeight)
+ UpgradeLiftoffHeight = getUpgradeHeight("LOTUS_LIFTOFF_HEIGHT", UpgradeLiftoffHeight)
+ UpgradeKumquatHeight = getUpgradeHeight("LOTUS_KUMQUAT_HEIGHT", UpgradeKumquatHeight)
+ UpgradeCalicoHeight = getUpgradeHeight("LOTUS_CALICO_HEIGHT", UpgradeCalicoHeight)
+ UpgradePersianHeight = getUpgradeHeight("LOTUS_PERSIAN_HEIGHT", UpgradePersianHeight)
+ UpgradeOrangeHeight = getUpgradeHeight("LOTUS_ORANGE_HEIGHT", UpgradeOrangeHeight)
+ UpgradeClausHeight = getUpgradeHeight("LOTUS_CLAUS_HEIGHT", UpgradeClausHeight)
+ UpgradeTrustHeight = getUpgradeHeight("LOTUS_ACTORSV3_HEIGHT", UpgradeTrustHeight)
+ UpgradeNorwegianHeight = getUpgradeHeight("LOTUS_NORWEGIAN_HEIGHT", UpgradeNorwegianHeight)
+ UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight)
+ UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight)
+
+ BuildType |= BuildInteropnet
+ SetAddressNetwork(address.Testnet)
+ Devnet = true
+}
+
+const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
+
+const PropagationDelaySecs = uint64(6)
+
+// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
+const BootstrapPeerThreshold = 2
+
+var WhitelistedBlock = cid.Undef
diff --git a/build/params_mainnet.go b/build/params_mainnet.go
index 94deedfec33..1c9b6946267 100644
--- a/build/params_mainnet.go
+++ b/build/params_mainnet.go
@@ -1,6 +1,10 @@
// +build !debug
// +build !2k
// +build !testground
+// +build !calibnet
+// +build !nerpanet
+// +build !butterflynet
+// +build !interopnet
package build
@@ -10,8 +14,6 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/lotus/chain/actors/policy"
-
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
)
@@ -20,7 +22,11 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
UpgradeSmokeHeight: DrandMainnet,
}
+const BootstrappersFile = "mainnet.pi"
+const GenesisFile = "mainnet.car"
+
const UpgradeBreezeHeight = 41280
+
const BreezeGasTampingDuration = 120
const UpgradeSmokeHeight = 51000
@@ -28,7 +34,7 @@ const UpgradeSmokeHeight = 51000
const UpgradeIgnitionHeight = 94000
const UpgradeRefuelHeight = 130800
-var UpgradeActorsV2Height = abi.ChainEpoch(138720)
+const UpgradeAssemblyHeight = 138720
const UpgradeTapeHeight = 140760
@@ -39,24 +45,46 @@ const UpgradeLiftoffHeight = 148888
const UpgradeKumquatHeight = 170000
-func init() {
- policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40))
- policy.SetSupportedProofTypes(
- abi.RegisteredSealProof_StackedDrg32GiBV1,
- abi.RegisteredSealProof_StackedDrg64GiBV1,
- )
+const UpgradeCalicoHeight = 265200
+const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)
+
+const UpgradeOrangeHeight = 336458
+
+// 2020-12-22T02:00:00Z
+var UpgradeClausHeight = abi.ChainEpoch(343200)
+
+// 2021-03-04T00:00:30Z
+const UpgradeTrustHeight = 550321
+// 2021-04-12T22:00:00Z
+const UpgradeNorwegianHeight = 665280
+
+// 2021-04-29T06:00:00Z
+const UpgradeTurboHeight = 712320
+
+// 2021-06-30T22:00:00Z
+var UpgradeHyperdriveHeight = abi.ChainEpoch(892800)
+
+func init() {
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
SetAddressNetwork(address.Mainnet)
}
- if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" {
- UpgradeActorsV2Height = math.MaxInt64
+ if os.Getenv("LOTUS_DISABLE_HYPERDRIVE") == "1" {
+ UpgradeHyperdriveHeight = math.MaxInt64
}
Devnet = false
+
+ BuildType = BuildMainnet
}
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
const PropagationDelaySecs = uint64(6)
+
+// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
+const BootstrapPeerThreshold = 4
+
+// we skip checks on message validity in this block to sidestep the zero-bls signature
+var WhitelistedBlock = MustParseCid("bafy2bzaceapyg2uyzk7vueh3xccxkuwbz3nxewjyguoxvhx77malc2lzn2ybi")
diff --git a/build/params_nerpanet.go b/build/params_nerpanet.go
new file mode 100644
index 00000000000..6663a91628a
--- /dev/null
+++ b/build/params_nerpanet.go
@@ -0,0 +1,78 @@
+// +build nerpanet
+
+package build
+
+import (
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+ "github.com/ipfs/go-cid"
+
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+)
+
+var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
+ 0: DrandMainnet,
+}
+
+const BootstrappersFile = "nerpanet.pi"
+const GenesisFile = "nerpanet.car"
+
+const UpgradeBreezeHeight = -1
+const BreezeGasTampingDuration = 0
+
+const UpgradeSmokeHeight = -1
+
+const UpgradeIgnitionHeight = -2
+const UpgradeRefuelHeight = -3
+
+const UpgradeLiftoffHeight = -5
+
+const UpgradeAssemblyHeight = 30 // critical: the network can bootstrap from v1 only
+const UpgradeTapeHeight = 60
+
+const UpgradeKumquatHeight = 90
+
+const UpgradeCalicoHeight = 100
+const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 1)
+
+const UpgradeClausHeight = 250
+
+const UpgradeOrangeHeight = 300
+
+const UpgradeTrustHeight = 600
+const UpgradeNorwegianHeight = 201000
+const UpgradeTurboHeight = 203000
+const UpgradeHyperdriveHeight = 999999999
+
+func init() {
+ // Minimum block production power is set to 4 TiB
+ // Rationale is to discourage small-scale miners from trying to take over the network
+ // One needs to invest in ~2.3x the compute to break consensus, making it not worth it
+ //
+ // DOWNSIDE: the fake-seals need to be kept alive/protected, otherwise network will seize
+ //
+ policy.SetConsensusMinerMinPower(abi.NewStoragePower(4 << 40))
+
+ policy.SetSupportedProofTypes(
+ abi.RegisteredSealProof_StackedDrg512MiBV1,
+ abi.RegisteredSealProof_StackedDrg32GiBV1,
+ abi.RegisteredSealProof_StackedDrg64GiBV1,
+ )
+
+ // Lower the most time-consuming parts of PoRep
+ policy.SetPreCommitChallengeDelay(10)
+
+ // TODO - make this a variable
+ //miner.WPoStChallengeLookback = abi.ChainEpoch(2)
+
+ Devnet = false
+}
+
+const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
+
+const PropagationDelaySecs = uint64(6)
+
+// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
+const BootstrapPeerThreshold = 4
+
+var WhitelistedBlock = cid.Undef
diff --git a/build/params_shared_funcs.go b/build/params_shared_funcs.go
index 77fd9256d44..f59fee653e9 100644
--- a/build/params_shared_funcs.go
+++ b/build/params_shared_funcs.go
@@ -2,6 +2,7 @@ package build
import (
"github.com/filecoin-project/go-address"
+ "github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/protocol"
@@ -19,3 +20,21 @@ func DhtProtocolName(netName dtypes.NetworkName) protocol.ID {
func SetAddressNetwork(n address.Network) {
address.CurrentNetwork = n
}
+
+func MustParseAddress(addr string) address.Address {
+ ret, err := address.NewFromString(addr)
+ if err != nil {
+ panic(err)
+ }
+
+ return ret
+}
+
+func MustParseCid(c string) cid.Cid {
+ ret, err := cid.Decode(c)
+ if err != nil {
+ panic(err)
+ }
+
+ return ret
+}
diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go
index 5070777bd6f..e4240ccce12 100644
--- a/build/params_shared_vals.go
+++ b/build/params_shared_vals.go
@@ -25,7 +25,7 @@ const UnixfsLinksPerLevel = 1024
// Consensus / Network
const AllowableClockDriftSecs = uint64(1)
-const NewestNetworkVersion = network.Version6
+const NewestNetworkVersion = network.Version13
const ActorUpgradeNetworkVersion = network.Version4
// Epochs
@@ -61,6 +61,9 @@ const TicketRandomnessLookback = abi.ChainEpoch(1)
const AddressMainnetEnvVar = "_mainnet_"
+// the 'f' prefix doesn't matter
+var ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a")
+
// /////
// Devnet settings
@@ -115,5 +118,5 @@ const PackingEfficiencyNum = 4
const PackingEfficiencyDenom = 5
// Actor consts
-// TODO: Pull from actors when its made not private
-var MinDealDuration = abi.ChainEpoch(180 * builtin2.EpochsInDay)
+// TODO: pieceSize unused from actors
+var MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0)
diff --git a/build/params_testground.go b/build/params_testground.go
index d9893a5f5ea..252d23e759e 100644
--- a/build/params_testground.go
+++ b/build/params_testground.go
@@ -12,6 +12,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
+ "github.com/ipfs/go-cid"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
@@ -72,8 +73,8 @@ var (
}()
// Actor consts
- // TODO: Pull from actors when its made not private
- MinDealDuration = abi.ChainEpoch(180 * builtin2.EpochsInDay)
+ // TODO: pieceSize unused from actors
+ MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0)
PackingEfficiencyNum int64 = 4
PackingEfficiencyDenom int64 = 5
@@ -81,20 +82,35 @@ var (
UpgradeBreezeHeight abi.ChainEpoch = -1
BreezeGasTampingDuration abi.ChainEpoch = 0
- UpgradeSmokeHeight abi.ChainEpoch = -1
- UpgradeIgnitionHeight abi.ChainEpoch = -2
- UpgradeRefuelHeight abi.ChainEpoch = -3
- UpgradeTapeHeight abi.ChainEpoch = -4
- UpgradeActorsV2Height abi.ChainEpoch = 10
- UpgradeLiftoffHeight abi.ChainEpoch = -5
- UpgradeKumquatHeight abi.ChainEpoch = -6
+ UpgradeSmokeHeight abi.ChainEpoch = -1
+ UpgradeIgnitionHeight abi.ChainEpoch = -2
+ UpgradeRefuelHeight abi.ChainEpoch = -3
+ UpgradeTapeHeight abi.ChainEpoch = -4
+ UpgradeAssemblyHeight abi.ChainEpoch = 10
+ UpgradeLiftoffHeight abi.ChainEpoch = -5
+ UpgradeKumquatHeight abi.ChainEpoch = -6
+ UpgradeCalicoHeight abi.ChainEpoch = -7
+ UpgradePersianHeight abi.ChainEpoch = -8
+ UpgradeOrangeHeight abi.ChainEpoch = -9
+ UpgradeClausHeight abi.ChainEpoch = -10
+ UpgradeTrustHeight abi.ChainEpoch = -11
+ UpgradeNorwegianHeight abi.ChainEpoch = -12
+ UpgradeTurboHeight abi.ChainEpoch = -13
+ UpgradeHyperdriveHeight abi.ChainEpoch = -13
DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
}
- NewestNetworkVersion = network.Version5
+ NewestNetworkVersion = network.Version11
ActorUpgradeNetworkVersion = network.Version4
- Devnet = true
+ Devnet = true
+ ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a")
+
+ WhitelistedBlock = cid.Undef
+ BootstrappersFile = ""
+ GenesisFile = ""
)
+
+const BootstrapPeerThreshold = 1
diff --git a/build/proof-params/srs-inner-product.json b/build/proof-params/srs-inner-product.json
new file mode 100644
index 00000000000..8566bf5fd89
--- /dev/null
+++ b/build/proof-params/srs-inner-product.json
@@ -0,0 +1,7 @@
+{
+ "v28-fil-inner-product-v1.srs": {
+ "cid": "Qmdq44DjcQnFfU3PJcdX7J49GCqcUYszr1TxMbHtAkvQ3g",
+ "digest": "ae20310138f5ba81451d723f858e3797",
+ "sector_size": 0
+ }
+}
diff --git a/build/tools.go b/build/tools.go
index 638c335a669..57b6e7d1f36 100644
--- a/build/tools.go
+++ b/build/tools.go
@@ -3,5 +3,8 @@
package build
import (
+ _ "github.com/GeertJohan/go.rice/rice"
+ _ "github.com/golang/mock/mockgen"
_ "github.com/whyrusleeping/bencher"
+ _ "golang.org/x/tools/cmd/stringer"
)
diff --git a/build/version.go b/build/version.go
index 80977f2f147..c6a1be3e2dc 100644
--- a/build/version.go
+++ b/build/version.go
@@ -1,100 +1,45 @@
package build
-import (
- "fmt"
-
- "golang.org/x/xerrors"
-)
+import "os"
var CurrentCommit string
var BuildType int
const (
- BuildDefault = 0
- Build2k = 0x1
- BuildDebug = 0x3
+ BuildDefault = 0
+ BuildMainnet = 0x1
+ Build2k = 0x2
+ BuildDebug = 0x3
+ BuildCalibnet = 0x4
+ BuildInteropnet = 0x5
)
func buildType() string {
switch BuildType {
case BuildDefault:
return ""
- case BuildDebug:
- return "+debug"
+ case BuildMainnet:
+ return "+mainnet"
case Build2k:
return "+2k"
+ case BuildDebug:
+ return "+debug"
+ case BuildCalibnet:
+ return "+calibnet"
+ case BuildInteropnet:
+ return "+interopnet"
default:
return "+huh?"
}
}
// BuildVersion is the local build version, set by build system
-const BuildVersion = "1.1.2"
+const BuildVersion = "1.11.1-dev"
func UserVersion() string {
- return BuildVersion + buildType() + CurrentCommit
-}
-
-type Version uint32
-
-func newVer(major, minor, patch uint8) Version {
- return Version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch))
-}
-
-// Ints returns (major, minor, patch) versions
-func (ve Version) Ints() (uint32, uint32, uint32) {
- v := uint32(ve)
- return (v & majorOnlyMask) >> 16, (v & minorOnlyMask) >> 8, v & patchOnlyMask
-}
-
-func (ve Version) String() string {
- vmj, vmi, vp := ve.Ints()
- return fmt.Sprintf("%d.%d.%d", vmj, vmi, vp)
-}
-
-func (ve Version) EqMajorMinor(v2 Version) bool {
- return ve&minorMask == v2&minorMask
-}
-
-type NodeType int
-
-const (
- NodeUnknown NodeType = iota
-
- NodeFull
- NodeMiner
- NodeWorker
-)
-
-var RunningNodeType NodeType
-
-func VersionForType(nodeType NodeType) (Version, error) {
- switch nodeType {
- case NodeFull:
- return FullAPIVersion, nil
- case NodeMiner:
- return MinerAPIVersion, nil
- case NodeWorker:
- return WorkerAPIVersion, nil
- default:
- return Version(0), xerrors.Errorf("unknown node type %d", nodeType)
+ if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
+ return BuildVersion
}
-}
-
-// semver versions of the rpc api exposed
-var (
- FullAPIVersion = newVer(0, 17, 0)
- MinerAPIVersion = newVer(0, 17, 0)
- WorkerAPIVersion = newVer(0, 16, 0)
-)
-
-//nolint:varcheck,deadcode
-const (
- majorMask = 0xff0000
- minorMask = 0xffff00
- patchMask = 0xffffff
- majorOnlyMask = 0xff0000
- minorOnlyMask = 0x00ff00
- patchOnlyMask = 0x0000ff
-)
+ return BuildVersion + buildType() + CurrentCommit
+}
diff --git a/chain/actors/adt/adt.go b/chain/actors/adt/adt.go
index 6a454ac2657..084471bb8ff 100644
--- a/chain/actors/adt/adt.go
+++ b/chain/actors/adt/adt.go
@@ -2,16 +2,9 @@ package adt
import (
"github.com/ipfs/go-cid"
- "golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/cbor"
- "github.com/filecoin-project/go-state-types/network"
-
- "github.com/filecoin-project/lotus/chain/actors"
-
- adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
- adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
)
type Map interface {
@@ -24,26 +17,6 @@ type Map interface {
ForEach(v cbor.Unmarshaler, fn func(key string) error) error
}
-func AsMap(store Store, root cid.Cid, version actors.Version) (Map, error) {
- switch version {
- case actors.Version0:
- return adt0.AsMap(store, root)
- case actors.Version2:
- return adt2.AsMap(store, root)
- }
- return nil, xerrors.Errorf("unknown network version: %d", version)
-}
-
-func NewMap(store Store, version actors.Version) (Map, error) {
- switch version {
- case actors.Version0:
- return adt0.MakeEmptyMap(store), nil
- case actors.Version2:
- return adt2.MakeEmptyMap(store), nil
- }
- return nil, xerrors.Errorf("unknown network version: %d", version)
-}
-
type Array interface {
Root() (cid.Cid, error)
@@ -54,23 +27,3 @@ type Array interface {
ForEach(v cbor.Unmarshaler, fn func(idx int64) error) error
}
-
-func AsArray(store Store, root cid.Cid, version network.Version) (Array, error) {
- switch actors.VersionForNetwork(version) {
- case actors.Version0:
- return adt0.AsArray(store, root)
- case actors.Version2:
- return adt2.AsArray(store, root)
- }
- return nil, xerrors.Errorf("unknown network version: %d", version)
-}
-
-func NewArray(store Store, version actors.Version) (Array, error) {
- switch version {
- case actors.Version0:
- return adt0.MakeEmptyArray(store), nil
- case actors.Version2:
- return adt2.MakeEmptyArray(store), nil
- }
- return nil, xerrors.Errorf("unknown network version: %d", version)
-}
diff --git a/chain/actors/adt/diff_adt_test.go b/chain/actors/adt/diff_adt_test.go
index a187c9f3568..b0e01b78d31 100644
--- a/chain/actors/adt/diff_adt_test.go
+++ b/chain/actors/adt/diff_adt_test.go
@@ -16,7 +16,7 @@ import (
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
+ bstore "github.com/filecoin-project/lotus/blockstore"
)
func TestDiffAdtArray(t *testing.T) {
@@ -295,7 +295,7 @@ func (t *TestDiffArray) Remove(key uint64, val *typegen.Deferred) error {
func newContextStore() Store {
ctx := context.Background()
- bs := bstore.NewTemporarySync()
+ bs := bstore.NewMemorySync()
store := cbornode.NewCborStore(bs)
return WrapStore(ctx, store)
}
diff --git a/chain/actors/agen/main.go b/chain/actors/agen/main.go
new file mode 100644
index 00000000000..9a3b8fd20f8
--- /dev/null
+++ b/chain/actors/agen/main.go
@@ -0,0 +1,224 @@
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "text/template"
+
+ lotusactors "github.com/filecoin-project/lotus/chain/actors"
+
+ "golang.org/x/xerrors"
+)
+
+var actors = map[string][]int{
+ "account": lotusactors.Versions,
+ "cron": lotusactors.Versions,
+ "init": lotusactors.Versions,
+ "market": lotusactors.Versions,
+ "miner": lotusactors.Versions,
+ "multisig": lotusactors.Versions,
+ "paych": lotusactors.Versions,
+ "power": lotusactors.Versions,
+ "system": lotusactors.Versions,
+ "reward": lotusactors.Versions,
+ "verifreg": lotusactors.Versions,
+}
+
+func main() {
+ if err := generateAdapters(); err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ if err := generatePolicy("chain/actors/policy/policy.go"); err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ if err := generateBuiltin("chain/actors/builtin/builtin.go"); err != nil {
+ fmt.Println(err)
+ return
+ }
+}
+
+func generateAdapters() error {
+ for act, versions := range actors {
+ actDir := filepath.Join("chain/actors/builtin", act)
+
+ if err := generateState(actDir); err != nil {
+ return err
+ }
+
+ if err := generateMessages(actDir); err != nil {
+ return err
+ }
+
+ {
+ af, err := ioutil.ReadFile(filepath.Join(actDir, "actor.go.template"))
+ if err != nil {
+ return xerrors.Errorf("loading actor template: %w", err)
+ }
+
+ tpl := template.Must(template.New("").Funcs(template.FuncMap{
+ "import": func(v int) string { return getVersionImports()[v] },
+ }).Parse(string(af)))
+
+ var b bytes.Buffer
+
+ err = tpl.Execute(&b, map[string]interface{}{
+ "versions": versions,
+ "latestVersion": lotusactors.LatestVersion,
+ })
+ if err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(filepath.Join(actDir, fmt.Sprintf("%s.go", act)), b.Bytes(), 0666); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func generateState(actDir string) error {
+ af, err := ioutil.ReadFile(filepath.Join(actDir, "state.go.template"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil // skip
+ }
+
+ return xerrors.Errorf("loading state adapter template: %w", err)
+ }
+
+ for _, version := range lotusactors.Versions {
+ tpl := template.Must(template.New("").Funcs(template.FuncMap{}).Parse(string(af)))
+
+ var b bytes.Buffer
+
+ err := tpl.Execute(&b, map[string]interface{}{
+ "v": version,
+ "import": getVersionImports()[version],
+ })
+ if err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(filepath.Join(actDir, fmt.Sprintf("v%d.go", version)), b.Bytes(), 0666); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func generateMessages(actDir string) error {
+ af, err := ioutil.ReadFile(filepath.Join(actDir, "message.go.template"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil // skip
+ }
+
+ return xerrors.Errorf("loading message adapter template: %w", err)
+ }
+
+ for _, version := range lotusactors.Versions {
+ tpl := template.Must(template.New("").Funcs(template.FuncMap{}).Parse(string(af)))
+
+ var b bytes.Buffer
+
+ err := tpl.Execute(&b, map[string]interface{}{
+ "v": version,
+ "import": getVersionImports()[version],
+ })
+ if err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(filepath.Join(actDir, fmt.Sprintf("message%d.go", version)), b.Bytes(), 0666); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func generatePolicy(policyPath string) error {
+
+ pf, err := ioutil.ReadFile(policyPath + ".template")
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil // skip
+ }
+
+ return xerrors.Errorf("loading policy template file: %w", err)
+ }
+
+ tpl := template.Must(template.New("").Funcs(template.FuncMap{
+ "import": func(v int) string { return getVersionImports()[v] },
+ }).Parse(string(pf)))
+ var b bytes.Buffer
+
+ err = tpl.Execute(&b, map[string]interface{}{
+ "versions": lotusactors.Versions,
+ "latestVersion": lotusactors.LatestVersion,
+ })
+ if err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(policyPath, b.Bytes(), 0666); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func generateBuiltin(builtinPath string) error {
+
+ bf, err := ioutil.ReadFile(builtinPath + ".template")
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil // skip
+ }
+
+ return xerrors.Errorf("loading builtin template file: %w", err)
+ }
+
+ tpl := template.Must(template.New("").Funcs(template.FuncMap{
+ "import": func(v int) string { return getVersionImports()[v] },
+ }).Parse(string(bf)))
+ var b bytes.Buffer
+
+ err = tpl.Execute(&b, map[string]interface{}{
+ "versions": lotusactors.Versions,
+ "latestVersion": lotusactors.LatestVersion,
+ })
+ if err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(builtinPath, b.Bytes(), 0666); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func getVersionImports() map[int]string {
+ versionImports := make(map[int]string, lotusactors.LatestVersion)
+ for _, v := range lotusactors.Versions {
+ if v == 0 {
+ versionImports[v] = "/"
+ } else {
+ versionImports[v] = "/v" + strconv.Itoa(v) + "/"
+ }
+ }
+
+ return versionImports
+}
diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go
index 38ed2654b7b..04c82b340f4 100644
--- a/chain/actors/builtin/account/account.go
+++ b/chain/actors/builtin/account/account.go
@@ -1,6 +1,7 @@
package account
import (
+ "github.com/filecoin-project/lotus/chain/actors"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
@@ -12,32 +13,111 @@ import (
"github.com/filecoin-project/lotus/chain/types"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
)
func init() {
+
builtin.RegisterActorState(builtin0.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load0(store, root)
})
+
builtin.RegisterActorState(builtin2.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load2(store, root)
})
+
+ builtin.RegisterActorState(builtin3.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load3(store, root)
+ })
+
+ builtin.RegisterActorState(builtin4.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load4(store, root)
+ })
+
+ builtin.RegisterActorState(builtin5.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load5(store, root)
+ })
}
-var Methods = builtin2.MethodsAccount
+var Methods = builtin4.MethodsAccount
func Load(store adt.Store, act *types.Actor) (State, error) {
switch act.Code {
+
case builtin0.AccountActorCodeID:
return load0(store, act.Head)
+
case builtin2.AccountActorCodeID:
return load2(store, act.Head)
+
+ case builtin3.AccountActorCodeID:
+ return load3(store, act.Head)
+
+ case builtin4.AccountActorCodeID:
+ return load4(store, act.Head)
+
+ case builtin5.AccountActorCodeID:
+ return load5(store, act.Head)
+
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
+func MakeState(store adt.Store, av actors.Version, addr address.Address) (State, error) {
+ switch av {
+
+ case actors.Version0:
+ return make0(store, addr)
+
+ case actors.Version2:
+ return make2(store, addr)
+
+ case actors.Version3:
+ return make3(store, addr)
+
+ case actors.Version4:
+ return make4(store, addr)
+
+ case actors.Version5:
+ return make5(store, addr)
+
+ }
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+
+ case actors.Version0:
+ return builtin0.AccountActorCodeID, nil
+
+ case actors.Version2:
+ return builtin2.AccountActorCodeID, nil
+
+ case actors.Version3:
+ return builtin3.AccountActorCodeID, nil
+
+ case actors.Version4:
+ return builtin4.AccountActorCodeID, nil
+
+ case actors.Version5:
+ return builtin5.AccountActorCodeID, nil
+
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
type State interface {
cbor.Marshaler
PubkeyAddress() (address.Address, error)
+ GetState() interface{}
}
diff --git a/chain/actors/builtin/account/actor.go.template b/chain/actors/builtin/account/actor.go.template
new file mode 100644
index 00000000000..53962cc9412
--- /dev/null
+++ b/chain/actors/builtin/account/actor.go.template
@@ -0,0 +1,64 @@
+package account
+
+import (
+ "github.com/filecoin-project/lotus/chain/actors"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+{{range .versions}}
+ builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
+{{end}}
+)
+
+func init() {
+{{range .versions}}
+ builtin.RegisterActorState(builtin{{.}}.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load{{.}}(store, root)
+ })
+{{end}}}
+
+var Methods = builtin4.MethodsAccount
+
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+{{range .versions}}
+ case builtin{{.}}.AccountActorCodeID:
+ return load{{.}}(store, act.Head)
+{{end}}
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+func MakeState(store adt.Store, av actors.Version, addr address.Address) (State, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return make{{.}}(store, addr)
+{{end}}
+}
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return builtin{{.}}.AccountActorCodeID, nil
+{{end}}
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
+type State interface {
+ cbor.Marshaler
+
+ PubkeyAddress() (address.Address, error)
+ GetState() interface{}
+}
diff --git a/chain/actors/builtin/account/state.go.template b/chain/actors/builtin/account/state.go.template
new file mode 100644
index 00000000000..5be262eceb9
--- /dev/null
+++ b/chain/actors/builtin/account/state.go.template
@@ -0,0 +1,40 @@
+package account
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ account{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/account"
+)
+
+var _ State = (*state{{.v}})(nil)
+
+func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
+ out := state{{.v}}{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make{{.v}}(store adt.Store, addr address.Address) (State, error) {
+ out := state{{.v}}{store: store}
+ out.State = account{{.v}}.State{Address:addr}
+ return &out, nil
+}
+
+type state{{.v}} struct {
+ account{{.v}}.State
+ store adt.Store
+}
+
+func (s *state{{.v}}) PubkeyAddress() (address.Address, error) {
+ return s.Address, nil
+}
+
+func (s *state{{.v}}) GetState() interface{} {
+ return &s.State
+}
\ No newline at end of file
diff --git a/chain/actors/builtin/account/v0.go b/chain/actors/builtin/account/v0.go
index 67c555c5dcf..bdfca2fd705 100644
--- a/chain/actors/builtin/account/v0.go
+++ b/chain/actors/builtin/account/v0.go
@@ -20,6 +20,12 @@ func load0(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make0(store adt.Store, addr address.Address) (State, error) {
+ out := state0{store: store}
+ out.State = account0.State{Address: addr}
+ return &out, nil
+}
+
type state0 struct {
account0.State
store adt.Store
@@ -28,3 +34,7 @@ type state0 struct {
func (s *state0) PubkeyAddress() (address.Address, error) {
return s.Address, nil
}
+
+func (s *state0) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/account/v2.go b/chain/actors/builtin/account/v2.go
index 2664631bc92..66618e06a4e 100644
--- a/chain/actors/builtin/account/v2.go
+++ b/chain/actors/builtin/account/v2.go
@@ -20,6 +20,12 @@ func load2(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make2(store adt.Store, addr address.Address) (State, error) {
+ out := state2{store: store}
+ out.State = account2.State{Address: addr}
+ return &out, nil
+}
+
type state2 struct {
account2.State
store adt.Store
@@ -28,3 +34,7 @@ type state2 struct {
func (s *state2) PubkeyAddress() (address.Address, error) {
return s.Address, nil
}
+
+func (s *state2) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/account/v3.go b/chain/actors/builtin/account/v3.go
new file mode 100644
index 00000000000..dbe100a4f83
--- /dev/null
+++ b/chain/actors/builtin/account/v3.go
@@ -0,0 +1,40 @@
+package account
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ account3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/account"
+)
+
+var _ State = (*state3)(nil)
+
+func load3(store adt.Store, root cid.Cid) (State, error) {
+ out := state3{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make3(store adt.Store, addr address.Address) (State, error) {
+ out := state3{store: store}
+ out.State = account3.State{Address: addr}
+ return &out, nil
+}
+
+type state3 struct {
+ account3.State
+ store adt.Store
+}
+
+func (s *state3) PubkeyAddress() (address.Address, error) {
+ return s.Address, nil
+}
+
+func (s *state3) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/account/v4.go b/chain/actors/builtin/account/v4.go
new file mode 100644
index 00000000000..53f71dcc5e9
--- /dev/null
+++ b/chain/actors/builtin/account/v4.go
@@ -0,0 +1,40 @@
+package account
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ account4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/account"
+)
+
+var _ State = (*state4)(nil)
+
+func load4(store adt.Store, root cid.Cid) (State, error) {
+ out := state4{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make4(store adt.Store, addr address.Address) (State, error) {
+ out := state4{store: store}
+ out.State = account4.State{Address: addr}
+ return &out, nil
+}
+
+type state4 struct {
+ account4.State
+ store adt.Store
+}
+
+func (s *state4) PubkeyAddress() (address.Address, error) {
+ return s.Address, nil
+}
+
+func (s *state4) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/account/v5.go b/chain/actors/builtin/account/v5.go
new file mode 100644
index 00000000000..538f5698750
--- /dev/null
+++ b/chain/actors/builtin/account/v5.go
@@ -0,0 +1,40 @@
+package account
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ account5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/account"
+)
+
+var _ State = (*state5)(nil)
+
+func load5(store adt.Store, root cid.Cid) (State, error) {
+ out := state5{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make5(store adt.Store, addr address.Address) (State, error) {
+ out := state5{store: store}
+ out.State = account5.State{Address: addr}
+ return &out, nil
+}
+
+type state5 struct {
+ account5.State
+ store adt.Store
+}
+
+func (s *state5) PubkeyAddress() (address.Address, error) {
+ return s.Address, nil
+}
+
+func (s *state5) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/builtin.go b/chain/actors/builtin/builtin.go
index afba8efe88e..74d6228193b 100644
--- a/chain/actors/builtin/builtin.go
+++ b/chain/actors/builtin/builtin.go
@@ -2,12 +2,23 @@ package builtin
import (
"github.com/filecoin-project/go-address"
- builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
- smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing"
+
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+ smoothing3 "github.com/filecoin-project/specs-actors/v3/actors/util/smoothing"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+ smoothing4 "github.com/filecoin-project/specs-actors/v4/actors/util/smoothing"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+ smoothing5 "github.com/filecoin-project/specs-actors/v5/actors/util/smoothing"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/cbor"
@@ -15,49 +26,70 @@ import (
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/types"
- miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof"
- smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing"
+ miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
+ proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
)
-var SystemActorAddr = builtin0.SystemActorAddr
-var BurntFundsActorAddr = builtin0.BurntFundsActorAddr
-var CronActorAddr = builtin0.CronActorAddr
+var SystemActorAddr = builtin5.SystemActorAddr
+var BurntFundsActorAddr = builtin5.BurntFundsActorAddr
+var CronActorAddr = builtin5.CronActorAddr
var SaftAddress = makeAddress("t0122")
var ReserveAddress = makeAddress("t090")
var RootVerifierAddress = makeAddress("t080")
var (
- ExpectedLeadersPerEpoch = builtin0.ExpectedLeadersPerEpoch
+ ExpectedLeadersPerEpoch = builtin5.ExpectedLeadersPerEpoch
)
const (
- EpochDurationSeconds = builtin0.EpochDurationSeconds
- EpochsInDay = builtin0.EpochsInDay
- SecondsInDay = builtin0.SecondsInDay
+ EpochDurationSeconds = builtin5.EpochDurationSeconds
+ EpochsInDay = builtin5.EpochsInDay
+ SecondsInDay = builtin5.SecondsInDay
)
const (
- MethodSend = builtin2.MethodSend
- MethodConstructor = builtin2.MethodConstructor
+ MethodSend = builtin5.MethodSend
+ MethodConstructor = builtin5.MethodConstructor
)
-// TODO: Why does actors have 2 different versions of this?
-type SectorInfo = proof0.SectorInfo
-type PoStProof = proof0.PoStProof
+// These are all just type aliases across actor versions. In the future, that might change
+// and we might need to do something fancier.
+type SectorInfo = proof5.SectorInfo
+type PoStProof = proof5.PoStProof
type FilterEstimate = smoothing0.FilterEstimate
+func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
+ return miner5.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
+}
+
func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate {
- return (FilterEstimate)(v0)
+
+ return (FilterEstimate)(v0) //nolint:unconvert
+
}
-// Doesn't change between actors v0 and v1
-func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
- return miner0.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
+func FromV2FilterEstimate(v2 smoothing2.FilterEstimate) FilterEstimate {
+
+ return (FilterEstimate)(v2)
+
+}
+
+func FromV3FilterEstimate(v3 smoothing3.FilterEstimate) FilterEstimate {
+
+ return (FilterEstimate)(v3)
+
}
-func FromV2FilterEstimate(v1 smoothing2.FilterEstimate) FilterEstimate {
- return (FilterEstimate)(v1)
+func FromV4FilterEstimate(v4 smoothing4.FilterEstimate) FilterEstimate {
+
+ return (FilterEstimate)(v4)
+
+}
+
+func FromV5FilterEstimate(v5 smoothing5.FilterEstimate) FilterEstimate {
+
+ return (FilterEstimate)(v5)
+
}
type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error)
@@ -78,34 +110,150 @@ func Load(store adt.Store, act *types.Actor) (cbor.Marshaler, error) {
func ActorNameByCode(c cid.Cid) string {
switch {
+
case builtin0.IsBuiltinActor(c):
return builtin0.ActorNameByCode(c)
+
case builtin2.IsBuiltinActor(c):
return builtin2.ActorNameByCode(c)
+
+ case builtin3.IsBuiltinActor(c):
+ return builtin3.ActorNameByCode(c)
+
+ case builtin4.IsBuiltinActor(c):
+ return builtin4.ActorNameByCode(c)
+
+ case builtin5.IsBuiltinActor(c):
+ return builtin5.ActorNameByCode(c)
+
default:
return ""
}
}
func IsBuiltinActor(c cid.Cid) bool {
- return builtin0.IsBuiltinActor(c) || builtin2.IsBuiltinActor(c)
+
+ if builtin0.IsBuiltinActor(c) {
+ return true
+ }
+
+ if builtin2.IsBuiltinActor(c) {
+ return true
+ }
+
+ if builtin3.IsBuiltinActor(c) {
+ return true
+ }
+
+ if builtin4.IsBuiltinActor(c) {
+ return true
+ }
+
+ if builtin5.IsBuiltinActor(c) {
+ return true
+ }
+
+ return false
}
func IsAccountActor(c cid.Cid) bool {
- return c == builtin0.AccountActorCodeID || c == builtin2.AccountActorCodeID
+
+ if c == builtin0.AccountActorCodeID {
+ return true
+ }
+
+ if c == builtin2.AccountActorCodeID {
+ return true
+ }
+
+ if c == builtin3.AccountActorCodeID {
+ return true
+ }
+
+ if c == builtin4.AccountActorCodeID {
+ return true
+ }
+
+ if c == builtin5.AccountActorCodeID {
+ return true
+ }
+
+ return false
}
func IsStorageMinerActor(c cid.Cid) bool {
- return c == builtin0.StorageMinerActorCodeID || c == builtin2.StorageMinerActorCodeID
+
+ if c == builtin0.StorageMinerActorCodeID {
+ return true
+ }
+
+ if c == builtin2.StorageMinerActorCodeID {
+ return true
+ }
+
+ if c == builtin3.StorageMinerActorCodeID {
+ return true
+ }
+
+ if c == builtin4.StorageMinerActorCodeID {
+ return true
+ }
+
+ if c == builtin5.StorageMinerActorCodeID {
+ return true
+ }
+
+ return false
}
func IsMultisigActor(c cid.Cid) bool {
- return c == builtin0.MultisigActorCodeID || c == builtin2.MultisigActorCodeID
+ if c == builtin0.MultisigActorCodeID {
+ return true
+ }
+
+ if c == builtin2.MultisigActorCodeID {
+ return true
+ }
+
+ if c == builtin3.MultisigActorCodeID {
+ return true
+ }
+
+ if c == builtin4.MultisigActorCodeID {
+ return true
+ }
+
+ if c == builtin5.MultisigActorCodeID {
+ return true
+ }
+
+ return false
}
func IsPaymentChannelActor(c cid.Cid) bool {
- return c == builtin0.PaymentChannelActorCodeID || c == builtin2.PaymentChannelActorCodeID
+
+ if c == builtin0.PaymentChannelActorCodeID {
+ return true
+ }
+
+ if c == builtin2.PaymentChannelActorCodeID {
+ return true
+ }
+
+ if c == builtin3.PaymentChannelActorCodeID {
+ return true
+ }
+
+ if c == builtin4.PaymentChannelActorCodeID {
+ return true
+ }
+
+ if c == builtin5.PaymentChannelActorCodeID {
+ return true
+ }
+
+ return false
}
func makeAddress(addr string) address.Address {
diff --git a/chain/actors/builtin/builtin.go.template b/chain/actors/builtin/builtin.go.template
new file mode 100644
index 00000000000..031c05182e4
--- /dev/null
+++ b/chain/actors/builtin/builtin.go.template
@@ -0,0 +1,144 @@
+package builtin
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ {{range .versions}}
+ builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
+ smoothing{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/util/smoothing"
+ {{end}}
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/types"
+
+ miner{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/miner"
+ proof{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/runtime/proof"
+)
+
+var SystemActorAddr = builtin{{.latestVersion}}.SystemActorAddr
+var BurntFundsActorAddr = builtin{{.latestVersion}}.BurntFundsActorAddr
+var CronActorAddr = builtin{{.latestVersion}}.CronActorAddr
+var SaftAddress = makeAddress("t0122")
+var ReserveAddress = makeAddress("t090")
+var RootVerifierAddress = makeAddress("t080")
+
+var (
+ ExpectedLeadersPerEpoch = builtin{{.latestVersion}}.ExpectedLeadersPerEpoch
+)
+
+const (
+ EpochDurationSeconds = builtin{{.latestVersion}}.EpochDurationSeconds
+ EpochsInDay = builtin{{.latestVersion}}.EpochsInDay
+ SecondsInDay = builtin{{.latestVersion}}.SecondsInDay
+)
+
+const (
+ MethodSend = builtin{{.latestVersion}}.MethodSend
+ MethodConstructor = builtin{{.latestVersion}}.MethodConstructor
+)
+
+// These are all just type aliases across actor versions. In the future, that might change
+// and we might need to do something fancier.
+type SectorInfo = proof{{.latestVersion}}.SectorInfo
+type PoStProof = proof{{.latestVersion}}.PoStProof
+type FilterEstimate = smoothing0.FilterEstimate
+
+func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
+ return miner{{.latestVersion}}.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
+}
+
+{{range .versions}}
+ func FromV{{.}}FilterEstimate(v{{.}} smoothing{{.}}.FilterEstimate) FilterEstimate {
+ {{if (eq . 0)}}
+ return (FilterEstimate)(v{{.}}) //nolint:unconvert
+ {{else}}
+ return (FilterEstimate)(v{{.}})
+ {{end}}
+ }
+{{end}}
+
+type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error)
+
+var ActorStateLoaders = make(map[cid.Cid]ActorStateLoader)
+
+func RegisterActorState(code cid.Cid, loader ActorStateLoader) {
+ ActorStateLoaders[code] = loader
+}
+
+func Load(store adt.Store, act *types.Actor) (cbor.Marshaler, error) {
+ loader, found := ActorStateLoaders[act.Code]
+ if !found {
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+ }
+ return loader(store, act.Head)
+}
+
+func ActorNameByCode(c cid.Cid) string {
+ switch {
+ {{range .versions}}
+ case builtin{{.}}.IsBuiltinActor(c):
+ return builtin{{.}}.ActorNameByCode(c)
+ {{end}}
+ default:
+ return ""
+ }
+}
+
+func IsBuiltinActor(c cid.Cid) bool {
+ {{range .versions}}
+ if builtin{{.}}.IsBuiltinActor(c) {
+ return true
+ }
+ {{end}}
+ return false
+}
+
+func IsAccountActor(c cid.Cid) bool {
+ {{range .versions}}
+ if c == builtin{{.}}.AccountActorCodeID {
+ return true
+ }
+ {{end}}
+ return false
+}
+
+func IsStorageMinerActor(c cid.Cid) bool {
+ {{range .versions}}
+ if c == builtin{{.}}.StorageMinerActorCodeID {
+ return true
+ }
+ {{end}}
+ return false
+}
+
+func IsMultisigActor(c cid.Cid) bool {
+ {{range .versions}}
+ if c == builtin{{.}}.MultisigActorCodeID {
+ return true
+ }
+ {{end}}
+ return false
+}
+
+func IsPaymentChannelActor(c cid.Cid) bool {
+ {{range .versions}}
+ if c == builtin{{.}}.PaymentChannelActorCodeID {
+ return true
+ }
+ {{end}}
+ return false
+}
+
+func makeAddress(addr string) address.Address {
+ ret, err := address.NewFromString(addr)
+ if err != nil {
+ panic(err)
+ }
+
+ return ret
+}
diff --git a/chain/actors/builtin/cron/actor.go.template b/chain/actors/builtin/cron/actor.go.template
new file mode 100644
index 00000000000..d7380855632
--- /dev/null
+++ b/chain/actors/builtin/cron/actor.go.template
@@ -0,0 +1,42 @@
+package cron
+
+import (
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "golang.org/x/xerrors"
+ "github.com/ipfs/go-cid"
+{{range .versions}}
+ builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
+{{end}}
+)
+
+func MakeState(store adt.Store, av actors.Version) (State, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return make{{.}}(store)
+{{end}}
+}
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return builtin{{.}}.CronActorCodeID, nil
+{{end}}
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
+var (
+ Address = builtin{{.latestVersion}}.CronActorAddr
+ Methods = builtin{{.latestVersion}}.MethodsCron
+)
+
+
+type State interface {
+ GetState() interface{}
+}
diff --git a/chain/actors/builtin/cron/cron.go b/chain/actors/builtin/cron/cron.go
index 65bfd992f75..2275e747f36 100644
--- a/chain/actors/builtin/cron/cron.go
+++ b/chain/actors/builtin/cron/cron.go
@@ -1,10 +1,72 @@
package cron
import (
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
)
+func MakeState(store adt.Store, av actors.Version) (State, error) {
+ switch av {
+
+ case actors.Version0:
+ return make0(store)
+
+ case actors.Version2:
+ return make2(store)
+
+ case actors.Version3:
+ return make3(store)
+
+ case actors.Version4:
+ return make4(store)
+
+ case actors.Version5:
+ return make5(store)
+
+ }
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+
+ case actors.Version0:
+ return builtin0.CronActorCodeID, nil
+
+ case actors.Version2:
+ return builtin2.CronActorCodeID, nil
+
+ case actors.Version3:
+ return builtin3.CronActorCodeID, nil
+
+ case actors.Version4:
+ return builtin4.CronActorCodeID, nil
+
+ case actors.Version5:
+ return builtin5.CronActorCodeID, nil
+
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
var (
- Address = builtin2.CronActorAddr
- Methods = builtin2.MethodsCron
+ Address = builtin5.CronActorAddr
+ Methods = builtin5.MethodsCron
)
+
+type State interface {
+ GetState() interface{}
+}
diff --git a/chain/actors/builtin/cron/state.go.template b/chain/actors/builtin/cron/state.go.template
new file mode 100644
index 00000000000..99a06d7f806
--- /dev/null
+++ b/chain/actors/builtin/cron/state.go.template
@@ -0,0 +1,35 @@
+package cron
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ cron{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/cron"
+)
+
+var _ State = (*state{{.v}})(nil)
+
+func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
+ out := state{{.v}}{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make{{.v}}(store adt.Store) (State, error) {
+ out := state{{.v}}{store: store}
+ out.State = *cron{{.v}}.ConstructState(cron{{.v}}.BuiltInEntries())
+ return &out, nil
+}
+
+type state{{.v}} struct {
+ cron{{.v}}.State
+ store adt.Store
+}
+
+func (s *state{{.v}}) GetState() interface{} {
+ return &s.State
+}
\ No newline at end of file
diff --git a/chain/actors/builtin/cron/v0.go b/chain/actors/builtin/cron/v0.go
new file mode 100644
index 00000000000..6147b858c10
--- /dev/null
+++ b/chain/actors/builtin/cron/v0.go
@@ -0,0 +1,35 @@
+package cron
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ cron0 "github.com/filecoin-project/specs-actors/actors/builtin/cron"
+)
+
+var _ State = (*state0)(nil)
+
+func load0(store adt.Store, root cid.Cid) (State, error) {
+ out := state0{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make0(store adt.Store) (State, error) {
+ out := state0{store: store}
+ out.State = *cron0.ConstructState(cron0.BuiltInEntries())
+ return &out, nil
+}
+
+type state0 struct {
+ cron0.State
+ store adt.Store
+}
+
+func (s *state0) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/cron/v2.go b/chain/actors/builtin/cron/v2.go
new file mode 100644
index 00000000000..51ca179d9ce
--- /dev/null
+++ b/chain/actors/builtin/cron/v2.go
@@ -0,0 +1,35 @@
+package cron
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ cron2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/cron"
+)
+
+var _ State = (*state2)(nil)
+
+func load2(store adt.Store, root cid.Cid) (State, error) {
+ out := state2{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make2(store adt.Store) (State, error) {
+ out := state2{store: store}
+ out.State = *cron2.ConstructState(cron2.BuiltInEntries())
+ return &out, nil
+}
+
+type state2 struct {
+ cron2.State
+ store adt.Store
+}
+
+func (s *state2) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/cron/v3.go b/chain/actors/builtin/cron/v3.go
new file mode 100644
index 00000000000..ff74d511de5
--- /dev/null
+++ b/chain/actors/builtin/cron/v3.go
@@ -0,0 +1,35 @@
+package cron
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ cron3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/cron"
+)
+
+var _ State = (*state3)(nil)
+
+func load3(store adt.Store, root cid.Cid) (State, error) {
+ out := state3{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make3(store adt.Store) (State, error) {
+ out := state3{store: store}
+ out.State = *cron3.ConstructState(cron3.BuiltInEntries())
+ return &out, nil
+}
+
+type state3 struct {
+ cron3.State
+ store adt.Store
+}
+
+func (s *state3) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/cron/v4.go b/chain/actors/builtin/cron/v4.go
new file mode 100644
index 00000000000..1cff8cc2813
--- /dev/null
+++ b/chain/actors/builtin/cron/v4.go
@@ -0,0 +1,35 @@
+package cron
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ cron4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/cron"
+)
+
+var _ State = (*state4)(nil)
+
+func load4(store adt.Store, root cid.Cid) (State, error) {
+ out := state4{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make4(store adt.Store) (State, error) {
+ out := state4{store: store}
+ out.State = *cron4.ConstructState(cron4.BuiltInEntries())
+ return &out, nil
+}
+
+type state4 struct {
+ cron4.State
+ store adt.Store
+}
+
+func (s *state4) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/cron/v5.go b/chain/actors/builtin/cron/v5.go
new file mode 100644
index 00000000000..2bb00dc21da
--- /dev/null
+++ b/chain/actors/builtin/cron/v5.go
@@ -0,0 +1,35 @@
+package cron
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ cron5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/cron"
+)
+
+var _ State = (*state5)(nil)
+
+func load5(store adt.Store, root cid.Cid) (State, error) {
+ out := state5{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make5(store adt.Store) (State, error) {
+ out := state5{store: store}
+ out.State = *cron5.ConstructState(cron5.BuiltInEntries())
+ return &out, nil
+}
+
+type state5 struct {
+ cron5.State
+ store adt.Store
+}
+
+func (s *state5) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/init/actor.go.template b/chain/actors/builtin/init/actor.go.template
new file mode 100644
index 00000000000..f825eb9fa45
--- /dev/null
+++ b/chain/actors/builtin/init/actor.go.template
@@ -0,0 +1,89 @@
+package init
+
+import (
+ "github.com/filecoin-project/lotus/chain/actors"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+{{range .versions}}
+ builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
+{{end}}
+)
+
+func init() {
+{{range .versions}}
+ builtin.RegisterActorState(builtin{{.}}.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load{{.}}(store, root)
+ })
+{{end}}}
+
+var (
+ Address = builtin{{.latestVersion}}.InitActorAddr
+ Methods = builtin{{.latestVersion}}.MethodsInit
+)
+
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+{{range .versions}}
+ case builtin{{.}}.InitActorCodeID:
+ return load{{.}}(store, act.Head)
+{{end}}
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+func MakeState(store adt.Store, av actors.Version, networkName string) (State, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return make{{.}}(store, networkName)
+{{end}}
+}
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return builtin{{.}}.InitActorCodeID, nil
+{{end}}
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
+type State interface {
+ cbor.Marshaler
+
+ ResolveAddress(address address.Address) (address.Address, bool, error)
+ MapAddressToNewID(address address.Address) (address.Address, error)
+ NetworkName() (dtypes.NetworkName, error)
+
+ ForEachActor(func(id abi.ActorID, address address.Address) error) error
+
+ // Remove exists to support tooling that manipulates state for testing.
+ // It should not be used in production code, as init actor entries are
+ // immutable.
+ Remove(addrs ...address.Address) error
+
+ // Sets the network's name. This should only be used on upgrade/fork.
+ SetNetworkName(name string) error
+
+ // Sets the next ID for the init actor. This should only be used for testing.
+ SetNextID(id abi.ActorID) error
+
+ // Sets the address map for the init actor. This should only be used for testing.
+ SetAddressMap(mcid cid.Cid) error
+
+ AddressMap() (adt.Map, error)
+ GetState() interface{}
+}
diff --git a/chain/actors/builtin/init/diff.go b/chain/actors/builtin/init/diff.go
new file mode 100644
index 00000000000..5eb8f3c75b6
--- /dev/null
+++ b/chain/actors/builtin/init/diff.go
@@ -0,0 +1,152 @@
+package init
+
+import (
+ "bytes"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ typegen "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+)
+
+func DiffAddressMap(pre, cur State) (*AddressMapChanges, error) {
+ prem, err := pre.AddressMap()
+ if err != nil {
+ return nil, err
+ }
+
+ curm, err := cur.AddressMap()
+ if err != nil {
+ return nil, err
+ }
+
+ preRoot, err := prem.Root()
+ if err != nil {
+ return nil, err
+ }
+
+ curRoot, err := curm.Root()
+ if err != nil {
+ return nil, err
+ }
+
+ results := new(AddressMapChanges)
+ // no change.
+ if curRoot.Equals(preRoot) {
+ return results, nil
+ }
+
+ err = adt.DiffAdtMap(prem, curm, &addressMapDiffer{results, pre, cur})
+ if err != nil {
+ return nil, err
+ }
+
+ return results, nil
+}
+
+type addressMapDiffer struct {
+ Results *AddressMapChanges
+ pre, adter State
+}
+
+type AddressMapChanges struct {
+ Added []AddressPair
+ Modified []AddressChange
+ Removed []AddressPair
+}
+
+func (i *addressMapDiffer) AsKey(key string) (abi.Keyer, error) {
+ addr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return nil, err
+ }
+ return abi.AddrKey(addr), nil
+}
+
+func (i *addressMapDiffer) Add(key string, val *typegen.Deferred) error {
+ pkAddr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ id := new(typegen.CborInt)
+ if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return err
+ }
+ idAddr, err := address.NewIDAddress(uint64(*id))
+ if err != nil {
+ return err
+ }
+ i.Results.Added = append(i.Results.Added, AddressPair{
+ ID: idAddr,
+ PK: pkAddr,
+ })
+ return nil
+}
+
+func (i *addressMapDiffer) Modify(key string, from, to *typegen.Deferred) error {
+ pkAddr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+
+ fromID := new(typegen.CborInt)
+ if err := fromID.UnmarshalCBOR(bytes.NewReader(from.Raw)); err != nil {
+ return err
+ }
+ fromIDAddr, err := address.NewIDAddress(uint64(*fromID))
+ if err != nil {
+ return err
+ }
+
+ toID := new(typegen.CborInt)
+ if err := toID.UnmarshalCBOR(bytes.NewReader(to.Raw)); err != nil {
+ return err
+ }
+ toIDAddr, err := address.NewIDAddress(uint64(*toID))
+ if err != nil {
+ return err
+ }
+
+ i.Results.Modified = append(i.Results.Modified, AddressChange{
+ From: AddressPair{
+ ID: fromIDAddr,
+ PK: pkAddr,
+ },
+ To: AddressPair{
+ ID: toIDAddr,
+ PK: pkAddr,
+ },
+ })
+ return nil
+}
+
+func (i *addressMapDiffer) Remove(key string, val *typegen.Deferred) error {
+ pkAddr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ id := new(typegen.CborInt)
+ if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return err
+ }
+ idAddr, err := address.NewIDAddress(uint64(*id))
+ if err != nil {
+ return err
+ }
+ i.Results.Removed = append(i.Results.Removed, AddressPair{
+ ID: idAddr,
+ PK: pkAddr,
+ })
+ return nil
+}
+
+type AddressChange struct {
+ From AddressPair
+ To AddressPair
+}
+
+type AddressPair struct {
+ ID address.Address
+ PK address.Address
+}
diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go
index 60dbdf4fea4..e1bd6f3711c 100644
--- a/chain/actors/builtin/init/init.go
+++ b/chain/actors/builtin/init/init.go
@@ -1,6 +1,7 @@
package init
import (
+ "github.com/filecoin-project/lotus/chain/actors"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
@@ -14,33 +15,111 @@ import (
"github.com/filecoin-project/lotus/node/modules/dtypes"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
)
func init() {
+
builtin.RegisterActorState(builtin0.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load0(store, root)
})
+
builtin.RegisterActorState(builtin2.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load2(store, root)
})
+
+ builtin.RegisterActorState(builtin3.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load3(store, root)
+ })
+
+ builtin.RegisterActorState(builtin4.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load4(store, root)
+ })
+
+ builtin.RegisterActorState(builtin5.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load5(store, root)
+ })
}
var (
- Address = builtin2.InitActorAddr
- Methods = builtin2.MethodsInit
+ Address = builtin5.InitActorAddr
+ Methods = builtin5.MethodsInit
)
func Load(store adt.Store, act *types.Actor) (State, error) {
switch act.Code {
+
case builtin0.InitActorCodeID:
return load0(store, act.Head)
+
case builtin2.InitActorCodeID:
return load2(store, act.Head)
+
+ case builtin3.InitActorCodeID:
+ return load3(store, act.Head)
+
+ case builtin4.InitActorCodeID:
+ return load4(store, act.Head)
+
+ case builtin5.InitActorCodeID:
+ return load5(store, act.Head)
+
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
+func MakeState(store adt.Store, av actors.Version, networkName string) (State, error) {
+ switch av {
+
+ case actors.Version0:
+ return make0(store, networkName)
+
+ case actors.Version2:
+ return make2(store, networkName)
+
+ case actors.Version3:
+ return make3(store, networkName)
+
+ case actors.Version4:
+ return make4(store, networkName)
+
+ case actors.Version5:
+ return make5(store, networkName)
+
+ }
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+
+ case actors.Version0:
+ return builtin0.InitActorCodeID, nil
+
+ case actors.Version2:
+ return builtin2.InitActorCodeID, nil
+
+ case actors.Version3:
+ return builtin3.InitActorCodeID, nil
+
+ case actors.Version4:
+ return builtin4.InitActorCodeID, nil
+
+ case actors.Version5:
+ return builtin5.InitActorCodeID, nil
+
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
type State interface {
cbor.Marshaler
@@ -57,4 +136,13 @@ type State interface {
// Sets the network's name. This should only be used on upgrade/fork.
SetNetworkName(name string) error
+
+ // Sets the next ID for the init actor. This should only be used for testing.
+ SetNextID(id abi.ActorID) error
+
+ // Sets the address map for the init actor. This should only be used for testing.
+ SetAddressMap(mcid cid.Cid) error
+
+ AddressMap() (adt.Map, error)
+ GetState() interface{}
}
diff --git a/chain/actors/builtin/init/state.go.template b/chain/actors/builtin/init/state.go.template
new file mode 100644
index 00000000000..482ad4df526
--- /dev/null
+++ b/chain/actors/builtin/init/state.go.template
@@ -0,0 +1,123 @@
+package init
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+
+{{if (ge .v 3)}}
+ builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin"
+{{end}}
+
+ init{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/init"
+ adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt"
+)
+
+var _ State = (*state{{.v}})(nil)
+
+func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
+ out := state{{.v}}{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make{{.v}}(store adt.Store, networkName string) (State, error) {
+ out := state{{.v}}{store: store}
+ {{if (le .v 2)}}
+ mr, err := adt{{.v}}.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *init{{.v}}.ConstructState(mr, networkName)
+ {{else}}
+ s, err := init{{.v}}.ConstructState(store, networkName)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+ {{end}}
+ return &out, nil
+}
+
+type state{{.v}} struct {
+ init{{.v}}.State
+ store adt.Store
+}
+
+func (s *state{{.v}}) ResolveAddress(address address.Address) (address.Address, bool, error) {
+ return s.State.ResolveAddress(s.store, address)
+}
+
+func (s *state{{.v}}) MapAddressToNewID(address address.Address) (address.Address, error) {
+ return s.State.MapAddressToNewID(s.store, address)
+}
+
+func (s *state{{.v}}) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
+ addrs, err := adt{{.v}}.AsMap(s.store, s.State.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}})
+ if err != nil {
+ return err
+ }
+ var actorID cbg.CborInt
+ return addrs.ForEach(&actorID, func(key string) error {
+ addr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(abi.ActorID(actorID), addr)
+ })
+}
+
+func (s *state{{.v}}) NetworkName() (dtypes.NetworkName, error) {
+ return dtypes.NetworkName(s.State.NetworkName), nil
+}
+
+func (s *state{{.v}}) SetNetworkName(name string) error {
+ s.State.NetworkName = name
+ return nil
+}
+
+func (s *state{{.v}}) SetNextID(id abi.ActorID) error {
+ s.State.NextID = id
+ return nil
+}
+
+func (s *state{{.v}}) Remove(addrs ...address.Address) (err error) {
+ m, err := adt{{.v}}.AsMap(s.store, s.State.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}})
+ if err != nil {
+ return err
+ }
+ for _, addr := range addrs {
+ if err = m.Delete(abi.AddrKey(addr)); err != nil {
+ return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
+ }
+ }
+ amr, err := m.Root()
+ if err != nil {
+ return xerrors.Errorf("failed to get address map root: %w", err)
+ }
+ s.State.AddressMap = amr
+ return nil
+}
+
+func (s *state{{.v}}) SetAddressMap(mcid cid.Cid) error {
+ s.State.AddressMap = mcid
+ return nil
+}
+
+func (s *state{{.v}}) AddressMap() (adt.Map, error) {
+ return adt{{.v}}.AsMap(s.store, s.State.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}})
+}
+
+func (s *state{{.v}}) GetState() interface{} {
+ return &s.State
+}
\ No newline at end of file
diff --git a/chain/actors/builtin/init/v0.go b/chain/actors/builtin/init/v0.go
index ceb87f97083..ddd2dab94f2 100644
--- a/chain/actors/builtin/init/v0.go
+++ b/chain/actors/builtin/init/v0.go
@@ -25,6 +25,19 @@ func load0(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make0(store adt.Store, networkName string) (State, error) {
+ out := state0{store: store}
+
+ mr, err := adt0.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *init0.ConstructState(mr, networkName)
+
+ return &out, nil
+}
+
type state0 struct {
init0.State
store adt.Store
@@ -62,6 +75,11 @@ func (s *state0) SetNetworkName(name string) error {
return nil
}
+func (s *state0) SetNextID(id abi.ActorID) error {
+ s.State.NextID = id
+ return nil
+}
+
func (s *state0) Remove(addrs ...address.Address) (err error) {
m, err := adt0.AsMap(s.store, s.State.AddressMap)
if err != nil {
@@ -79,3 +97,16 @@ func (s *state0) Remove(addrs ...address.Address) (err error) {
s.State.AddressMap = amr
return nil
}
+
+func (s *state0) SetAddressMap(mcid cid.Cid) error {
+ s.State.AddressMap = mcid
+ return nil
+}
+
+func (s *state0) AddressMap() (adt.Map, error) {
+ return adt0.AsMap(s.store, s.State.AddressMap)
+}
+
+func (s *state0) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/init/v2.go b/chain/actors/builtin/init/v2.go
index 5aa0ddc1839..72e2d56a522 100644
--- a/chain/actors/builtin/init/v2.go
+++ b/chain/actors/builtin/init/v2.go
@@ -25,6 +25,19 @@ func load2(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make2(store adt.Store, networkName string) (State, error) {
+ out := state2{store: store}
+
+ mr, err := adt2.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *init2.ConstructState(mr, networkName)
+
+ return &out, nil
+}
+
type state2 struct {
init2.State
store adt.Store
@@ -62,6 +75,11 @@ func (s *state2) SetNetworkName(name string) error {
return nil
}
+func (s *state2) SetNextID(id abi.ActorID) error {
+ s.State.NextID = id
+ return nil
+}
+
func (s *state2) Remove(addrs ...address.Address) (err error) {
m, err := adt2.AsMap(s.store, s.State.AddressMap)
if err != nil {
@@ -79,3 +97,16 @@ func (s *state2) Remove(addrs ...address.Address) (err error) {
s.State.AddressMap = amr
return nil
}
+
+func (s *state2) SetAddressMap(mcid cid.Cid) error {
+ s.State.AddressMap = mcid
+ return nil
+}
+
+func (s *state2) AddressMap() (adt.Map, error) {
+ return adt2.AsMap(s.store, s.State.AddressMap)
+}
+
+func (s *state2) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/init/v3.go b/chain/actors/builtin/init/v3.go
new file mode 100644
index 00000000000..4609c94a372
--- /dev/null
+++ b/chain/actors/builtin/init/v3.go
@@ -0,0 +1,114 @@
+package init
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init"
+ adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
+)
+
+var _ State = (*state3)(nil)
+
+func load3(store adt.Store, root cid.Cid) (State, error) {
+ out := state3{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make3(store adt.Store, networkName string) (State, error) {
+ out := state3{store: store}
+
+ s, err := init3.ConstructState(store, networkName)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state3 struct {
+ init3.State
+ store adt.Store
+}
+
+func (s *state3) ResolveAddress(address address.Address) (address.Address, bool, error) {
+ return s.State.ResolveAddress(s.store, address)
+}
+
+func (s *state3) MapAddressToNewID(address address.Address) (address.Address, error) {
+ return s.State.MapAddressToNewID(s.store, address)
+}
+
+func (s *state3) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
+ addrs, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+ var actorID cbg.CborInt
+ return addrs.ForEach(&actorID, func(key string) error {
+ addr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(abi.ActorID(actorID), addr)
+ })
+}
+
+func (s *state3) NetworkName() (dtypes.NetworkName, error) {
+ return dtypes.NetworkName(s.State.NetworkName), nil
+}
+
+func (s *state3) SetNetworkName(name string) error {
+ s.State.NetworkName = name
+ return nil
+}
+
+func (s *state3) SetNextID(id abi.ActorID) error {
+ s.State.NextID = id
+ return nil
+}
+
+func (s *state3) Remove(addrs ...address.Address) (err error) {
+ m, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+ for _, addr := range addrs {
+ if err = m.Delete(abi.AddrKey(addr)); err != nil {
+ return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
+ }
+ }
+ amr, err := m.Root()
+ if err != nil {
+ return xerrors.Errorf("failed to get address map root: %w", err)
+ }
+ s.State.AddressMap = amr
+ return nil
+}
+
+func (s *state3) SetAddressMap(mcid cid.Cid) error {
+ s.State.AddressMap = mcid
+ return nil
+}
+
+func (s *state3) AddressMap() (adt.Map, error) {
+ return adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth)
+}
+
+func (s *state3) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/init/v4.go b/chain/actors/builtin/init/v4.go
new file mode 100644
index 00000000000..dc56d1f196c
--- /dev/null
+++ b/chain/actors/builtin/init/v4.go
@@ -0,0 +1,114 @@
+package init
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ init4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/init"
+ adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt"
+)
+
+var _ State = (*state4)(nil)
+
+func load4(store adt.Store, root cid.Cid) (State, error) {
+ out := state4{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make4(store adt.Store, networkName string) (State, error) {
+ out := state4{store: store}
+
+ s, err := init4.ConstructState(store, networkName)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state4 struct {
+ init4.State
+ store adt.Store
+}
+
+func (s *state4) ResolveAddress(address address.Address) (address.Address, bool, error) {
+ return s.State.ResolveAddress(s.store, address)
+}
+
+func (s *state4) MapAddressToNewID(address address.Address) (address.Address, error) {
+ return s.State.MapAddressToNewID(s.store, address)
+}
+
+func (s *state4) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
+ addrs, err := adt4.AsMap(s.store, s.State.AddressMap, builtin4.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+ var actorID cbg.CborInt
+ return addrs.ForEach(&actorID, func(key string) error {
+ addr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(abi.ActorID(actorID), addr)
+ })
+}
+
+func (s *state4) NetworkName() (dtypes.NetworkName, error) {
+ return dtypes.NetworkName(s.State.NetworkName), nil
+}
+
+func (s *state4) SetNetworkName(name string) error {
+ s.State.NetworkName = name
+ return nil
+}
+
+func (s *state4) SetNextID(id abi.ActorID) error {
+ s.State.NextID = id
+ return nil
+}
+
+func (s *state4) Remove(addrs ...address.Address) (err error) {
+ m, err := adt4.AsMap(s.store, s.State.AddressMap, builtin4.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+ for _, addr := range addrs {
+ if err = m.Delete(abi.AddrKey(addr)); err != nil {
+ return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
+ }
+ }
+ amr, err := m.Root()
+ if err != nil {
+ return xerrors.Errorf("failed to get address map root: %w", err)
+ }
+ s.State.AddressMap = amr
+ return nil
+}
+
+func (s *state4) SetAddressMap(mcid cid.Cid) error {
+ s.State.AddressMap = mcid
+ return nil
+}
+
+func (s *state4) AddressMap() (adt.Map, error) {
+ return adt4.AsMap(s.store, s.State.AddressMap, builtin4.DefaultHamtBitwidth)
+}
+
+func (s *state4) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/init/v5.go b/chain/actors/builtin/init/v5.go
new file mode 100644
index 00000000000..107366de536
--- /dev/null
+++ b/chain/actors/builtin/init/v5.go
@@ -0,0 +1,114 @@
+package init
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+
+ init5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/init"
+ adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
+)
+
+var _ State = (*state5)(nil)
+
+func load5(store adt.Store, root cid.Cid) (State, error) {
+ out := state5{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make5(store adt.Store, networkName string) (State, error) {
+ out := state5{store: store}
+
+ s, err := init5.ConstructState(store, networkName)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state5 struct {
+ init5.State
+ store adt.Store
+}
+
+func (s *state5) ResolveAddress(address address.Address) (address.Address, bool, error) {
+ return s.State.ResolveAddress(s.store, address)
+}
+
+func (s *state5) MapAddressToNewID(address address.Address) (address.Address, error) {
+ return s.State.MapAddressToNewID(s.store, address)
+}
+
+func (s *state5) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
+ addrs, err := adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+ var actorID cbg.CborInt
+ return addrs.ForEach(&actorID, func(key string) error {
+ addr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(abi.ActorID(actorID), addr)
+ })
+}
+
+func (s *state5) NetworkName() (dtypes.NetworkName, error) {
+ return dtypes.NetworkName(s.State.NetworkName), nil
+}
+
+func (s *state5) SetNetworkName(name string) error {
+ s.State.NetworkName = name
+ return nil
+}
+
+func (s *state5) SetNextID(id abi.ActorID) error {
+ s.State.NextID = id
+ return nil
+}
+
+func (s *state5) Remove(addrs ...address.Address) (err error) {
+ m, err := adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+ for _, addr := range addrs {
+ if err = m.Delete(abi.AddrKey(addr)); err != nil {
+ return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
+ }
+ }
+ amr, err := m.Root()
+ if err != nil {
+ return xerrors.Errorf("failed to get address map root: %w", err)
+ }
+ s.State.AddressMap = amr
+ return nil
+}
+
+func (s *state5) SetAddressMap(mcid cid.Cid) error {
+ s.State.AddressMap = mcid
+ return nil
+}
+
+func (s *state5) AddressMap() (adt.Map, error) {
+ return adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth)
+}
+
+func (s *state5) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/market/actor.go.template b/chain/actors/builtin/market/actor.go.template
new file mode 100644
index 00000000000..f78c84b8f92
--- /dev/null
+++ b/chain/actors/builtin/market/actor.go.template
@@ -0,0 +1,182 @@
+package market
+
+import (
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
+{{range .versions}}
+ builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
+{{end}}
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func init() {
+{{range .versions}}
+ builtin.RegisterActorState(builtin{{.}}.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load{{.}}(store, root)
+ })
+{{end}}}
+
+var (
+ Address = builtin{{.latestVersion}}.StorageMarketActorAddr
+ Methods = builtin{{.latestVersion}}.MethodsMarket
+)
+
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+{{range .versions}}
+ case builtin{{.}}.StorageMarketActorCodeID:
+ return load{{.}}(store, act.Head)
+{{end}}
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+func MakeState(store adt.Store, av actors.Version) (State, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return make{{.}}(store)
+{{end}}
+}
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return builtin{{.}}.StorageMarketActorCodeID, nil
+{{end}}
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
+type State interface {
+ cbor.Marshaler
+ BalancesChanged(State) (bool, error)
+ EscrowTable() (BalanceTable, error)
+ LockedTable() (BalanceTable, error)
+ TotalLocked() (abi.TokenAmount, error)
+ StatesChanged(State) (bool, error)
+ States() (DealStates, error)
+ ProposalsChanged(State) (bool, error)
+ Proposals() (DealProposals, error)
+ VerifyDealsForActivation(
+ minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
+ ) (weight, verifiedWeight abi.DealWeight, err error)
+ NextID() (abi.DealID, error)
+ GetState() interface{}
+}
+
+type BalanceTable interface {
+ ForEach(cb func(address.Address, abi.TokenAmount) error) error
+ Get(key address.Address) (abi.TokenAmount, error)
+}
+
+type DealStates interface {
+ ForEach(cb func(id abi.DealID, ds DealState) error) error
+ Get(id abi.DealID) (*DealState, bool, error)
+
+ array() adt.Array
+ decode(*cbg.Deferred) (*DealState, error)
+}
+
+type DealProposals interface {
+ ForEach(cb func(id abi.DealID, dp DealProposal) error) error
+ Get(id abi.DealID) (*DealProposal, bool, error)
+
+ array() adt.Array
+ decode(*cbg.Deferred) (*DealProposal, error)
+}
+
+type PublishStorageDealsParams = market0.PublishStorageDealsParams
+type PublishStorageDealsReturn = market0.PublishStorageDealsReturn
+type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams
+type WithdrawBalanceParams = market0.WithdrawBalanceParams
+
+type ClientDealProposal = market0.ClientDealProposal
+
+type DealState struct {
+ SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector
+ LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated
+ SlashEpoch abi.ChainEpoch // -1 if deal never slashed
+}
+
+type DealProposal struct {
+ PieceCID cid.Cid
+ PieceSize abi.PaddedPieceSize
+ VerifiedDeal bool
+ Client address.Address
+ Provider address.Address
+ Label string
+ StartEpoch abi.ChainEpoch
+ EndEpoch abi.ChainEpoch
+ StoragePricePerEpoch abi.TokenAmount
+ ProviderCollateral abi.TokenAmount
+ ClientCollateral abi.TokenAmount
+}
+
+type DealStateChanges struct {
+ Added []DealIDState
+ Modified []DealStateChange
+ Removed []DealIDState
+}
+
+type DealIDState struct {
+ ID abi.DealID
+ Deal DealState
+}
+
+// DealStateChange is a change in deal state from -> to
+type DealStateChange struct {
+ ID abi.DealID
+ From *DealState
+ To *DealState
+}
+
+type DealProposalChanges struct {
+ Added []ProposalIDState
+ Removed []ProposalIDState
+}
+
+type ProposalIDState struct {
+ ID abi.DealID
+ Proposal DealProposal
+}
+
+func EmptyDealState() *DealState {
+ return &DealState{
+ SectorStartEpoch: -1,
+ SlashEpoch: -1,
+ LastUpdatedEpoch: -1,
+ }
+}
+
+// returns the earned fees and pending fees for a given deal
+func (deal DealProposal) GetDealFees(height abi.ChainEpoch) (abi.TokenAmount, abi.TokenAmount) {
+ tf := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(deal.EndEpoch-deal.StartEpoch)))
+
+ ef := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(height-deal.StartEpoch)))
+ if ef.LessThan(big.Zero()) {
+ ef = big.Zero()
+ }
+
+ if ef.GreaterThan(tf) {
+ ef = tf
+ }
+
+ return ef, big.Sub(tf, ef)
+}
diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go
index 195ca40b930..026e35d4e2f 100644
--- a/chain/actors/builtin/market/market.go
+++ b/chain/actors/builtin/market/market.go
@@ -5,43 +5,124 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
- builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+
+ "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types"
)
func init() {
+
builtin.RegisterActorState(builtin0.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load0(store, root)
})
+
builtin.RegisterActorState(builtin2.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load2(store, root)
})
+
+ builtin.RegisterActorState(builtin3.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load3(store, root)
+ })
+
+ builtin.RegisterActorState(builtin4.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load4(store, root)
+ })
+
+ builtin.RegisterActorState(builtin5.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load5(store, root)
+ })
}
var (
- Address = builtin2.StorageMarketActorAddr
- Methods = builtin2.MethodsMarket
+ Address = builtin5.StorageMarketActorAddr
+ Methods = builtin5.MethodsMarket
)
-func Load(store adt.Store, act *types.Actor) (st State, err error) {
+func Load(store adt.Store, act *types.Actor) (State, error) {
switch act.Code {
+
case builtin0.StorageMarketActorCodeID:
return load0(store, act.Head)
+
case builtin2.StorageMarketActorCodeID:
return load2(store, act.Head)
+
+ case builtin3.StorageMarketActorCodeID:
+ return load3(store, act.Head)
+
+ case builtin4.StorageMarketActorCodeID:
+ return load4(store, act.Head)
+
+ case builtin5.StorageMarketActorCodeID:
+ return load5(store, act.Head)
+
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
+func MakeState(store adt.Store, av actors.Version) (State, error) {
+ switch av {
+
+ case actors.Version0:
+ return make0(store)
+
+ case actors.Version2:
+ return make2(store)
+
+ case actors.Version3:
+ return make3(store)
+
+ case actors.Version4:
+ return make4(store)
+
+ case actors.Version5:
+ return make5(store)
+
+ }
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+
+ case actors.Version0:
+ return builtin0.StorageMarketActorCodeID, nil
+
+ case actors.Version2:
+ return builtin2.StorageMarketActorCodeID, nil
+
+ case actors.Version3:
+ return builtin3.StorageMarketActorCodeID, nil
+
+ case actors.Version4:
+ return builtin4.StorageMarketActorCodeID, nil
+
+ case actors.Version5:
+ return builtin5.StorageMarketActorCodeID, nil
+
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
type State interface {
cbor.Marshaler
BalancesChanged(State) (bool, error)
@@ -55,6 +136,8 @@ type State interface {
VerifyDealsForActivation(
minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
) (weight, verifiedWeight abi.DealWeight, err error)
+ NextID() (abi.DealID, error)
+ GetState() interface{}
}
type BalanceTable interface {
@@ -81,6 +164,7 @@ type DealProposals interface {
type PublishStorageDealsParams = market0.PublishStorageDealsParams
type PublishStorageDealsReturn = market0.PublishStorageDealsReturn
type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams
+type WithdrawBalanceParams = market0.WithdrawBalanceParams
type ClientDealProposal = market0.ClientDealProposal
@@ -139,3 +223,19 @@ func EmptyDealState() *DealState {
LastUpdatedEpoch: -1,
}
}
+
+// returns the earned fees and pending fees for a given deal
+func (deal DealProposal) GetDealFees(height abi.ChainEpoch) (abi.TokenAmount, abi.TokenAmount) {
+ tf := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(deal.EndEpoch-deal.StartEpoch)))
+
+ ef := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(height-deal.StartEpoch)))
+ if ef.LessThan(big.Zero()) {
+ ef = big.Zero()
+ }
+
+ if ef.GreaterThan(tf) {
+ ef = tf
+ }
+
+ return ef, big.Sub(tf, ef)
+}
diff --git a/chain/actors/builtin/market/state.go.template b/chain/actors/builtin/market/state.go.template
new file mode 100644
index 00000000000..70b73114843
--- /dev/null
+++ b/chain/actors/builtin/market/state.go.template
@@ -0,0 +1,238 @@
+package market
+
+import (
+ "bytes"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/types"
+
+ market{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/market"
+ adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt"
+)
+
+var _ State = (*state{{.v}})(nil)
+
+func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
+ out := state{{.v}}{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make{{.v}}(store adt.Store) (State, error) {
+ out := state{{.v}}{store: store}
+ {{if (le .v 2)}}
+ ea, err := adt{{.v}}.MakeEmptyArray(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ em, err := adt{{.v}}.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *market{{.v}}.ConstructState(ea, em, em)
+ {{else}}
+ s, err := market{{.v}}.ConstructState(store)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+ {{end}}
+ return &out, nil
+}
+
+type state{{.v}} struct {
+ market{{.v}}.State
+ store adt.Store
+}
+
+func (s *state{{.v}}) TotalLocked() (abi.TokenAmount, error) {
+ fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
+ fml = types.BigAdd(fml, s.TotalClientStorageFee)
+ return fml, nil
+}
+
+func (s *state{{.v}}) BalancesChanged(otherState State) (bool, error) {
+ otherState{{.v}}, ok := otherState.(*state{{.v}})
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.EscrowTable.Equals(otherState{{.v}}.State.EscrowTable) || !s.State.LockedTable.Equals(otherState{{.v}}.State.LockedTable), nil
+}
+
+func (s *state{{.v}}) StatesChanged(otherState State) (bool, error) {
+ otherState{{.v}}, ok := otherState.(*state{{.v}})
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.States.Equals(otherState{{.v}}.State.States), nil
+}
+
+func (s *state{{.v}}) States() (DealStates, error) {
+ stateArray, err := adt{{.v}}.AsArray(s.store, s.State.States{{if (ge .v 3)}}, market{{.v}}.StatesAmtBitwidth{{end}})
+ if err != nil {
+ return nil, err
+ }
+ return &dealStates{{.v}}{stateArray}, nil
+}
+
+func (s *state{{.v}}) ProposalsChanged(otherState State) (bool, error) {
+ otherState{{.v}}, ok := otherState.(*state{{.v}})
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.Proposals.Equals(otherState{{.v}}.State.Proposals), nil
+}
+
+func (s *state{{.v}}) Proposals() (DealProposals, error) {
+ proposalArray, err := adt{{.v}}.AsArray(s.store, s.State.Proposals{{if (ge .v 3)}}, market{{.v}}.ProposalsAmtBitwidth{{end}})
+ if err != nil {
+ return nil, err
+ }
+ return &dealProposals{{.v}}{proposalArray}, nil
+}
+
+func (s *state{{.v}}) EscrowTable() (BalanceTable, error) {
+ bt, err := adt{{.v}}.AsBalanceTable(s.store, s.State.EscrowTable)
+ if err != nil {
+ return nil, err
+ }
+ return &balanceTable{{.v}}{bt}, nil
+}
+
+func (s *state{{.v}}) LockedTable() (BalanceTable, error) {
+ bt, err := adt{{.v}}.AsBalanceTable(s.store, s.State.LockedTable)
+ if err != nil {
+ return nil, err
+ }
+ return &balanceTable{{.v}}{bt}, nil
+}
+
+func (s *state{{.v}}) VerifyDealsForActivation(
+ minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
+) (weight, verifiedWeight abi.DealWeight, err error) {
+ w, vw{{if (ge .v 2)}}, _{{end}}, err := market{{.v}}.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
+ return w, vw, err
+}
+
+func (s *state{{.v}}) NextID() (abi.DealID, error) {
+ return s.State.NextID, nil
+}
+
+type balanceTable{{.v}} struct {
+ *adt{{.v}}.BalanceTable
+}
+
+func (bt *balanceTable{{.v}}) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
+ asMap := (*adt{{.v}}.Map)(bt.BalanceTable)
+ var ta abi.TokenAmount
+ return asMap.ForEach(&ta, func(key string) error {
+ a, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(a, ta)
+ })
+}
+
+type dealStates{{.v}} struct {
+ adt.Array
+}
+
+func (s *dealStates{{.v}}) Get(dealID abi.DealID) (*DealState, bool, error) {
+ var deal{{.v}} market{{.v}}.DealState
+ found, err := s.Array.Get(uint64(dealID), &deal{{.v}})
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+ deal := fromV{{.v}}DealState(deal{{.v}})
+ return &deal, true, nil
+}
+
+func (s *dealStates{{.v}}) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
+ var ds{{.v}} market{{.v}}.DealState
+ return s.Array.ForEach(&ds{{.v}}, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV{{.v}}DealState(ds{{.v}}))
+ })
+}
+
+func (s *dealStates{{.v}}) decode(val *cbg.Deferred) (*DealState, error) {
+ var ds{{.v}} market{{.v}}.DealState
+ if err := ds{{.v}}.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return nil, err
+ }
+ ds := fromV{{.v}}DealState(ds{{.v}})
+ return &ds, nil
+}
+
+func (s *dealStates{{.v}}) array() adt.Array {
+ return s.Array
+}
+
+func fromV{{.v}}DealState(v{{.v}} market{{.v}}.DealState) DealState {
+ return (DealState)(v{{.v}})
+}
+
+type dealProposals{{.v}} struct {
+ adt.Array
+}
+
+func (s *dealProposals{{.v}}) Get(dealID abi.DealID) (*DealProposal, bool, error) {
+ var proposal{{.v}} market{{.v}}.DealProposal
+ found, err := s.Array.Get(uint64(dealID), &proposal{{.v}})
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+ proposal := fromV{{.v}}DealProposal(proposal{{.v}})
+ return &proposal, true, nil
+}
+
+func (s *dealProposals{{.v}}) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
+ var dp{{.v}} market{{.v}}.DealProposal
+ return s.Array.ForEach(&dp{{.v}}, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV{{.v}}DealProposal(dp{{.v}}))
+ })
+}
+
+func (s *dealProposals{{.v}}) decode(val *cbg.Deferred) (*DealProposal, error) {
+ var dp{{.v}} market{{.v}}.DealProposal
+ if err := dp{{.v}}.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return nil, err
+ }
+ dp := fromV{{.v}}DealProposal(dp{{.v}})
+ return &dp, nil
+}
+
+func (s *dealProposals{{.v}}) array() adt.Array {
+ return s.Array
+}
+
+func fromV{{.v}}DealProposal(v{{.v}} market{{.v}}.DealProposal) DealProposal {
+ return (DealProposal)(v{{.v}})
+}
+
+func (s *state{{.v}}) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/market/v0.go b/chain/actors/builtin/market/v0.go
index 20d38b5f125..b3093b54b0f 100644
--- a/chain/actors/builtin/market/v0.go
+++ b/chain/actors/builtin/market/v0.go
@@ -26,6 +26,24 @@ func load0(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make0(store adt.Store) (State, error) {
+ out := state0{store: store}
+
+ ea, err := adt0.MakeEmptyArray(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ em, err := adt0.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *market0.ConstructState(ea, em, em)
+
+ return &out, nil
+}
+
type state0 struct {
market0.State
store adt.Store
@@ -102,7 +120,12 @@ func (s *state0) LockedTable() (BalanceTable, error) {
func (s *state0) VerifyDealsForActivation(
minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
) (weight, verifiedWeight abi.DealWeight, err error) {
- return market0.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
+ w, vw, err := market0.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
+ return w, vw, err
+}
+
+func (s *state0) NextID() (abi.DealID, error) {
+ return s.State.NextID, nil
}
type balanceTable0 struct {
@@ -202,3 +225,7 @@ func (s *dealProposals0) array() adt.Array {
func fromV0DealProposal(v0 market0.DealProposal) DealProposal {
return (DealProposal)(v0)
}
+
+func (s *state0) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/market/v2.go b/chain/actors/builtin/market/v2.go
index a5e5c7b45d4..fdedcce8547 100644
--- a/chain/actors/builtin/market/v2.go
+++ b/chain/actors/builtin/market/v2.go
@@ -26,6 +26,24 @@ func load2(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make2(store adt.Store) (State, error) {
+ out := state2{store: store}
+
+ ea, err := adt2.MakeEmptyArray(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ em, err := adt2.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *market2.ConstructState(ea, em, em)
+
+ return &out, nil
+}
+
type state2 struct {
market2.State
store adt.Store
@@ -106,6 +124,10 @@ func (s *state2) VerifyDealsForActivation(
return w, vw, err
}
+func (s *state2) NextID() (abi.DealID, error) {
+ return s.State.NextID, nil
+}
+
type balanceTable2 struct {
*adt2.BalanceTable
}
@@ -140,18 +162,18 @@ func (s *dealStates2) Get(dealID abi.DealID) (*DealState, bool, error) {
}
func (s *dealStates2) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
- var ds1 market2.DealState
- return s.Array.ForEach(&ds1, func(idx int64) error {
- return cb(abi.DealID(idx), fromV2DealState(ds1))
+ var ds2 market2.DealState
+ return s.Array.ForEach(&ds2, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV2DealState(ds2))
})
}
func (s *dealStates2) decode(val *cbg.Deferred) (*DealState, error) {
- var ds1 market2.DealState
- if err := ds1.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ var ds2 market2.DealState
+ if err := ds2.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err
}
- ds := fromV2DealState(ds1)
+ ds := fromV2DealState(ds2)
return &ds, nil
}
@@ -159,8 +181,8 @@ func (s *dealStates2) array() adt.Array {
return s.Array
}
-func fromV2DealState(v1 market2.DealState) DealState {
- return (DealState)(v1)
+func fromV2DealState(v2 market2.DealState) DealState {
+ return (DealState)(v2)
}
type dealProposals2 struct {
@@ -181,18 +203,18 @@ func (s *dealProposals2) Get(dealID abi.DealID) (*DealProposal, bool, error) {
}
func (s *dealProposals2) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
- var dp1 market2.DealProposal
- return s.Array.ForEach(&dp1, func(idx int64) error {
- return cb(abi.DealID(idx), fromV2DealProposal(dp1))
+ var dp2 market2.DealProposal
+ return s.Array.ForEach(&dp2, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV2DealProposal(dp2))
})
}
func (s *dealProposals2) decode(val *cbg.Deferred) (*DealProposal, error) {
- var dp1 market2.DealProposal
- if err := dp1.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ var dp2 market2.DealProposal
+ if err := dp2.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err
}
- dp := fromV2DealProposal(dp1)
+ dp := fromV2DealProposal(dp2)
return &dp, nil
}
@@ -200,6 +222,10 @@ func (s *dealProposals2) array() adt.Array {
return s.Array
}
-func fromV2DealProposal(v1 market2.DealProposal) DealProposal {
- return (DealProposal)(v1)
+func fromV2DealProposal(v2 market2.DealProposal) DealProposal {
+ return (DealProposal)(v2)
+}
+
+func (s *state2) GetState() interface{} {
+ return &s.State
}
diff --git a/chain/actors/builtin/market/v3.go b/chain/actors/builtin/market/v3.go
new file mode 100644
index 00000000000..53d26644380
--- /dev/null
+++ b/chain/actors/builtin/market/v3.go
@@ -0,0 +1,226 @@
+package market
+
+import (
+ "bytes"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/types"
+
+ market3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/market"
+ adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
+)
+
+var _ State = (*state3)(nil)
+
+func load3(store adt.Store, root cid.Cid) (State, error) {
+ out := state3{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make3(store adt.Store) (State, error) {
+ out := state3{store: store}
+
+ s, err := market3.ConstructState(store)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state3 struct {
+ market3.State
+ store adt.Store
+}
+
+func (s *state3) TotalLocked() (abi.TokenAmount, error) {
+ fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
+ fml = types.BigAdd(fml, s.TotalClientStorageFee)
+ return fml, nil
+}
+
+func (s *state3) BalancesChanged(otherState State) (bool, error) {
+ otherState3, ok := otherState.(*state3)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.EscrowTable.Equals(otherState3.State.EscrowTable) || !s.State.LockedTable.Equals(otherState3.State.LockedTable), nil
+}
+
+func (s *state3) StatesChanged(otherState State) (bool, error) {
+ otherState3, ok := otherState.(*state3)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.States.Equals(otherState3.State.States), nil
+}
+
+func (s *state3) States() (DealStates, error) {
+ stateArray, err := adt3.AsArray(s.store, s.State.States, market3.StatesAmtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+ return &dealStates3{stateArray}, nil
+}
+
+func (s *state3) ProposalsChanged(otherState State) (bool, error) {
+ otherState3, ok := otherState.(*state3)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.Proposals.Equals(otherState3.State.Proposals), nil
+}
+
+func (s *state3) Proposals() (DealProposals, error) {
+ proposalArray, err := adt3.AsArray(s.store, s.State.Proposals, market3.ProposalsAmtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+ return &dealProposals3{proposalArray}, nil
+}
+
+func (s *state3) EscrowTable() (BalanceTable, error) {
+ bt, err := adt3.AsBalanceTable(s.store, s.State.EscrowTable)
+ if err != nil {
+ return nil, err
+ }
+ return &balanceTable3{bt}, nil
+}
+
+func (s *state3) LockedTable() (BalanceTable, error) {
+ bt, err := adt3.AsBalanceTable(s.store, s.State.LockedTable)
+ if err != nil {
+ return nil, err
+ }
+ return &balanceTable3{bt}, nil
+}
+
+func (s *state3) VerifyDealsForActivation(
+ minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
+) (weight, verifiedWeight abi.DealWeight, err error) {
+ w, vw, _, err := market3.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
+ return w, vw, err
+}
+
+func (s *state3) NextID() (abi.DealID, error) {
+ return s.State.NextID, nil
+}
+
+type balanceTable3 struct {
+ *adt3.BalanceTable
+}
+
+func (bt *balanceTable3) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
+ asMap := (*adt3.Map)(bt.BalanceTable)
+ var ta abi.TokenAmount
+ return asMap.ForEach(&ta, func(key string) error {
+ a, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(a, ta)
+ })
+}
+
+type dealStates3 struct {
+ adt.Array
+}
+
+func (s *dealStates3) Get(dealID abi.DealID) (*DealState, bool, error) {
+ var deal3 market3.DealState
+ found, err := s.Array.Get(uint64(dealID), &deal3)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+ deal := fromV3DealState(deal3)
+ return &deal, true, nil
+}
+
+func (s *dealStates3) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
+ var ds3 market3.DealState
+ return s.Array.ForEach(&ds3, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV3DealState(ds3))
+ })
+}
+
+func (s *dealStates3) decode(val *cbg.Deferred) (*DealState, error) {
+ var ds3 market3.DealState
+ if err := ds3.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return nil, err
+ }
+ ds := fromV3DealState(ds3)
+ return &ds, nil
+}
+
+func (s *dealStates3) array() adt.Array {
+ return s.Array
+}
+
+func fromV3DealState(v3 market3.DealState) DealState {
+ return (DealState)(v3)
+}
+
+type dealProposals3 struct {
+ adt.Array
+}
+
+func (s *dealProposals3) Get(dealID abi.DealID) (*DealProposal, bool, error) {
+ var proposal3 market3.DealProposal
+ found, err := s.Array.Get(uint64(dealID), &proposal3)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+ proposal := fromV3DealProposal(proposal3)
+ return &proposal, true, nil
+}
+
+func (s *dealProposals3) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
+ var dp3 market3.DealProposal
+ return s.Array.ForEach(&dp3, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV3DealProposal(dp3))
+ })
+}
+
+func (s *dealProposals3) decode(val *cbg.Deferred) (*DealProposal, error) {
+ var dp3 market3.DealProposal
+ if err := dp3.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return nil, err
+ }
+ dp := fromV3DealProposal(dp3)
+ return &dp, nil
+}
+
+func (s *dealProposals3) array() adt.Array {
+ return s.Array
+}
+
+func fromV3DealProposal(v3 market3.DealProposal) DealProposal {
+ return (DealProposal)(v3)
+}
+
+func (s *state3) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/market/v4.go b/chain/actors/builtin/market/v4.go
new file mode 100644
index 00000000000..30aa2692057
--- /dev/null
+++ b/chain/actors/builtin/market/v4.go
@@ -0,0 +1,226 @@
+package market
+
+import (
+ "bytes"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/types"
+
+ market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market"
+ adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt"
+)
+
+var _ State = (*state4)(nil)
+
+func load4(store adt.Store, root cid.Cid) (State, error) {
+ out := state4{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make4(store adt.Store) (State, error) {
+ out := state4{store: store}
+
+ s, err := market4.ConstructState(store)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state4 struct {
+ market4.State
+ store adt.Store
+}
+
+func (s *state4) TotalLocked() (abi.TokenAmount, error) {
+ fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
+ fml = types.BigAdd(fml, s.TotalClientStorageFee)
+ return fml, nil
+}
+
+func (s *state4) BalancesChanged(otherState State) (bool, error) {
+ otherState4, ok := otherState.(*state4)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.EscrowTable.Equals(otherState4.State.EscrowTable) || !s.State.LockedTable.Equals(otherState4.State.LockedTable), nil
+}
+
+func (s *state4) StatesChanged(otherState State) (bool, error) {
+ otherState4, ok := otherState.(*state4)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.States.Equals(otherState4.State.States), nil
+}
+
+func (s *state4) States() (DealStates, error) {
+ stateArray, err := adt4.AsArray(s.store, s.State.States, market4.StatesAmtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+ return &dealStates4{stateArray}, nil
+}
+
+func (s *state4) ProposalsChanged(otherState State) (bool, error) {
+ otherState4, ok := otherState.(*state4)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.Proposals.Equals(otherState4.State.Proposals), nil
+}
+
+func (s *state4) Proposals() (DealProposals, error) {
+ proposalArray, err := adt4.AsArray(s.store, s.State.Proposals, market4.ProposalsAmtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+ return &dealProposals4{proposalArray}, nil
+}
+
+func (s *state4) EscrowTable() (BalanceTable, error) {
+ bt, err := adt4.AsBalanceTable(s.store, s.State.EscrowTable)
+ if err != nil {
+ return nil, err
+ }
+ return &balanceTable4{bt}, nil
+}
+
+func (s *state4) LockedTable() (BalanceTable, error) {
+ bt, err := adt4.AsBalanceTable(s.store, s.State.LockedTable)
+ if err != nil {
+ return nil, err
+ }
+ return &balanceTable4{bt}, nil
+}
+
+func (s *state4) VerifyDealsForActivation(
+ minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
+) (weight, verifiedWeight abi.DealWeight, err error) {
+ w, vw, _, err := market4.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
+ return w, vw, err
+}
+
+func (s *state4) NextID() (abi.DealID, error) {
+ return s.State.NextID, nil
+}
+
+type balanceTable4 struct {
+ *adt4.BalanceTable
+}
+
+func (bt *balanceTable4) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
+ asMap := (*adt4.Map)(bt.BalanceTable)
+ var ta abi.TokenAmount
+ return asMap.ForEach(&ta, func(key string) error {
+ a, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(a, ta)
+ })
+}
+
+type dealStates4 struct {
+ adt.Array
+}
+
+func (s *dealStates4) Get(dealID abi.DealID) (*DealState, bool, error) {
+ var deal4 market4.DealState
+ found, err := s.Array.Get(uint64(dealID), &deal4)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+ deal := fromV4DealState(deal4)
+ return &deal, true, nil
+}
+
+func (s *dealStates4) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
+ var ds4 market4.DealState
+ return s.Array.ForEach(&ds4, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV4DealState(ds4))
+ })
+}
+
+func (s *dealStates4) decode(val *cbg.Deferred) (*DealState, error) {
+ var ds4 market4.DealState
+ if err := ds4.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return nil, err
+ }
+ ds := fromV4DealState(ds4)
+ return &ds, nil
+}
+
+func (s *dealStates4) array() adt.Array {
+ return s.Array
+}
+
+func fromV4DealState(v4 market4.DealState) DealState {
+ return (DealState)(v4)
+}
+
+type dealProposals4 struct {
+ adt.Array
+}
+
+func (s *dealProposals4) Get(dealID abi.DealID) (*DealProposal, bool, error) {
+ var proposal4 market4.DealProposal
+ found, err := s.Array.Get(uint64(dealID), &proposal4)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+ proposal := fromV4DealProposal(proposal4)
+ return &proposal, true, nil
+}
+
+func (s *dealProposals4) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
+ var dp4 market4.DealProposal
+ return s.Array.ForEach(&dp4, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV4DealProposal(dp4))
+ })
+}
+
+func (s *dealProposals4) decode(val *cbg.Deferred) (*DealProposal, error) {
+ var dp4 market4.DealProposal
+ if err := dp4.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return nil, err
+ }
+ dp := fromV4DealProposal(dp4)
+ return &dp, nil
+}
+
+func (s *dealProposals4) array() adt.Array {
+ return s.Array
+}
+
+func fromV4DealProposal(v4 market4.DealProposal) DealProposal {
+ return (DealProposal)(v4)
+}
+
+func (s *state4) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/market/v5.go b/chain/actors/builtin/market/v5.go
new file mode 100644
index 00000000000..12378c76dc1
--- /dev/null
+++ b/chain/actors/builtin/market/v5.go
@@ -0,0 +1,226 @@
+package market
+
+import (
+ "bytes"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/types"
+
+ market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market"
+ adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
+)
+
+var _ State = (*state5)(nil)
+
+func load5(store adt.Store, root cid.Cid) (State, error) {
+ out := state5{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make5(store adt.Store) (State, error) {
+ out := state5{store: store}
+
+ s, err := market5.ConstructState(store)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state5 struct {
+ market5.State
+ store adt.Store
+}
+
+func (s *state5) TotalLocked() (abi.TokenAmount, error) {
+ fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
+ fml = types.BigAdd(fml, s.TotalClientStorageFee)
+ return fml, nil
+}
+
+func (s *state5) BalancesChanged(otherState State) (bool, error) {
+ otherState5, ok := otherState.(*state5)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.EscrowTable.Equals(otherState5.State.EscrowTable) || !s.State.LockedTable.Equals(otherState5.State.LockedTable), nil
+}
+
+func (s *state5) StatesChanged(otherState State) (bool, error) {
+ otherState5, ok := otherState.(*state5)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.States.Equals(otherState5.State.States), nil
+}
+
+func (s *state5) States() (DealStates, error) {
+ stateArray, err := adt5.AsArray(s.store, s.State.States, market5.StatesAmtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+ return &dealStates5{stateArray}, nil
+}
+
+func (s *state5) ProposalsChanged(otherState State) (bool, error) {
+ otherState5, ok := otherState.(*state5)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.Proposals.Equals(otherState5.State.Proposals), nil
+}
+
+func (s *state5) Proposals() (DealProposals, error) {
+ proposalArray, err := adt5.AsArray(s.store, s.State.Proposals, market5.ProposalsAmtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+ return &dealProposals5{proposalArray}, nil
+}
+
+func (s *state5) EscrowTable() (BalanceTable, error) {
+ bt, err := adt5.AsBalanceTable(s.store, s.State.EscrowTable)
+ if err != nil {
+ return nil, err
+ }
+ return &balanceTable5{bt}, nil
+}
+
+func (s *state5) LockedTable() (BalanceTable, error) {
+ bt, err := adt5.AsBalanceTable(s.store, s.State.LockedTable)
+ if err != nil {
+ return nil, err
+ }
+ return &balanceTable5{bt}, nil
+}
+
+func (s *state5) VerifyDealsForActivation(
+ minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
+) (weight, verifiedWeight abi.DealWeight, err error) {
+ w, vw, _, err := market5.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
+ return w, vw, err
+}
+
+func (s *state5) NextID() (abi.DealID, error) {
+ return s.State.NextID, nil
+}
+
+type balanceTable5 struct {
+ *adt5.BalanceTable
+}
+
+func (bt *balanceTable5) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
+ asMap := (*adt5.Map)(bt.BalanceTable)
+ var ta abi.TokenAmount
+ return asMap.ForEach(&ta, func(key string) error {
+ a, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(a, ta)
+ })
+}
+
+type dealStates5 struct {
+ adt.Array
+}
+
+func (s *dealStates5) Get(dealID abi.DealID) (*DealState, bool, error) {
+ var deal5 market5.DealState
+ found, err := s.Array.Get(uint64(dealID), &deal5)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+ deal := fromV5DealState(deal5)
+ return &deal, true, nil
+}
+
+func (s *dealStates5) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
+ var ds5 market5.DealState
+ return s.Array.ForEach(&ds5, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV5DealState(ds5))
+ })
+}
+
+func (s *dealStates5) decode(val *cbg.Deferred) (*DealState, error) {
+ var ds5 market5.DealState
+ if err := ds5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return nil, err
+ }
+ ds := fromV5DealState(ds5)
+ return &ds, nil
+}
+
+func (s *dealStates5) array() adt.Array {
+ return s.Array
+}
+
+func fromV5DealState(v5 market5.DealState) DealState {
+ return (DealState)(v5)
+}
+
+type dealProposals5 struct {
+ adt.Array
+}
+
+func (s *dealProposals5) Get(dealID abi.DealID) (*DealProposal, bool, error) {
+ var proposal5 market5.DealProposal
+ found, err := s.Array.Get(uint64(dealID), &proposal5)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+ proposal := fromV5DealProposal(proposal5)
+ return &proposal, true, nil
+}
+
+func (s *dealProposals5) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
+ var dp5 market5.DealProposal
+ return s.Array.ForEach(&dp5, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV5DealProposal(dp5))
+ })
+}
+
+func (s *dealProposals5) decode(val *cbg.Deferred) (*DealProposal, error) {
+ var dp5 market5.DealProposal
+ if err := dp5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return nil, err
+ }
+ dp := fromV5DealProposal(dp5)
+ return &dp, nil
+}
+
+func (s *dealProposals5) array() adt.Array {
+ return s.Array
+}
+
+func fromV5DealProposal(v5 market5.DealProposal) DealProposal {
+ return (DealProposal)(v5)
+}
+
+func (s *state5) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/miner/actor.go.template b/chain/actors/builtin/miner/actor.go.template
new file mode 100644
index 00000000000..12f418b3784
--- /dev/null
+++ b/chain/actors/builtin/miner/actor.go.template
@@ -0,0 +1,305 @@
+package miner
+
+import (
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p-core/peer"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/filecoin-project/go-state-types/dline"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
+ miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
+ miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
+{{range .versions}}
+ builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
+{{end}}
+)
+
+func init() {
+{{range .versions}}
+ builtin.RegisterActorState(builtin{{.}}.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load{{.}}(store, root)
+ })
+{{end}}
+}
+
+var Methods = builtin{{.latestVersion}}.MethodsMiner
+
+// Unchanged between v0, v2, v3, and v4 actors
+var WPoStProvingPeriod = miner0.WPoStProvingPeriod
+var WPoStPeriodDeadlines = miner0.WPoStPeriodDeadlines
+var WPoStChallengeWindow = miner0.WPoStChallengeWindow
+var WPoStChallengeLookback = miner0.WPoStChallengeLookback
+var FaultDeclarationCutoff = miner0.FaultDeclarationCutoff
+
+const MinSectorExpiration = miner0.MinSectorExpiration
+
+// Not used / checked in v0
+// TODO: Abstract over network versions
+var DeclarationsMax = miner2.DeclarationsMax
+var AddressedSectorsMax = miner2.AddressedSectorsMax
+
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+{{range .versions}}
+ case builtin{{.}}.StorageMinerActorCodeID:
+ return load{{.}}(store, act.Head)
+{{end}}
+}
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+func MakeState(store adt.Store, av actors.Version) (State, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return make{{.}}(store)
+{{end}}
+}
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return builtin{{.}}.StorageMinerActorCodeID, nil
+{{end}}
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
+type State interface {
+ cbor.Marshaler
+
+ // Total available balance to spend.
+ AvailableBalance(abi.TokenAmount) (abi.TokenAmount, error)
+ // Funds that will vest by the given epoch.
+ VestedFunds(abi.ChainEpoch) (abi.TokenAmount, error)
+ // Funds locked for various reasons.
+ LockedFunds() (LockedFunds, error)
+ FeeDebt() (abi.TokenAmount, error)
+
+ GetSector(abi.SectorNumber) (*SectorOnChainInfo, error)
+ FindSector(abi.SectorNumber) (*SectorLocation, error)
+ GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error)
+ GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error)
+ ForEachPrecommittedSector(func(SectorPreCommitOnChainInfo) error) error
+ LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error)
+ NumLiveSectors() (uint64, error)
+ IsAllocated(abi.SectorNumber) (bool, error)
+ // UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than
+ // count if there aren't enough).
+ UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
+ GetAllocatedSectors() (*bitfield.BitField, error)
+
+ // Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors
+ GetProvingPeriodStart() (abi.ChainEpoch, error)
+ // Testing only
+ EraseAllUnproven() error
+
+ LoadDeadline(idx uint64) (Deadline, error)
+ ForEachDeadline(cb func(idx uint64, dl Deadline) error) error
+ NumDeadlines() (uint64, error)
+ DeadlinesChanged(State) (bool, error)
+
+ Info() (MinerInfo, error)
+ MinerInfoChanged(State) (bool, error)
+
+ DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error)
+ DeadlineCronActive() (bool, error)
+
+ // Diff helpers. Used by Diff* functions internally.
+ sectors() (adt.Array, error)
+ decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error)
+ precommits() (adt.Map, error)
+ decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (SectorPreCommitOnChainInfo, error)
+ GetState() interface{}
+}
+
+type Deadline interface {
+ LoadPartition(idx uint64) (Partition, error)
+ ForEachPartition(cb func(idx uint64, part Partition) error) error
+ PartitionsPoSted() (bitfield.BitField, error)
+
+ PartitionsChanged(Deadline) (bool, error)
+ DisputableProofCount() (uint64, error)
+}
+
+type Partition interface {
+ AllSectors() (bitfield.BitField, error)
+ FaultySectors() (bitfield.BitField, error)
+ RecoveringSectors() (bitfield.BitField, error)
+ LiveSectors() (bitfield.BitField, error)
+ ActiveSectors() (bitfield.BitField, error)
+}
+
+type SectorOnChainInfo struct {
+ SectorNumber abi.SectorNumber
+ SealProof abi.RegisteredSealProof
+ SealedCID cid.Cid
+ DealIDs []abi.DealID
+ Activation abi.ChainEpoch
+ Expiration abi.ChainEpoch
+ DealWeight abi.DealWeight
+ VerifiedDealWeight abi.DealWeight
+ InitialPledge abi.TokenAmount
+ ExpectedDayReward abi.TokenAmount
+ ExpectedStoragePledge abi.TokenAmount
+}
+
+type SectorPreCommitInfo = miner0.SectorPreCommitInfo
+
+type SectorPreCommitOnChainInfo struct {
+ Info SectorPreCommitInfo
+ PreCommitDeposit abi.TokenAmount
+ PreCommitEpoch abi.ChainEpoch
+ DealWeight abi.DealWeight
+ VerifiedDealWeight abi.DealWeight
+}
+
+type PoStPartition = miner0.PoStPartition
+type RecoveryDeclaration = miner0.RecoveryDeclaration
+type FaultDeclaration = miner0.FaultDeclaration
+
+// Params
+type DeclareFaultsParams = miner0.DeclareFaultsParams
+type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams
+type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
+type ProveCommitSectorParams = miner0.ProveCommitSectorParams
+type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams
+type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams
+
+func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
+ // We added support for the new proofs in network version 7, and removed support for the old
+ // ones in network version 8.
+ if nver < network.Version7 {
+ switch proof {
+ case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1:
+ return abi.RegisteredSealProof_StackedDrg2KiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1:
+ return abi.RegisteredSealProof_StackedDrg8MiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1:
+ return abi.RegisteredSealProof_StackedDrg512MiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1:
+ return abi.RegisteredSealProof_StackedDrg32GiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1:
+ return abi.RegisteredSealProof_StackedDrg64GiBV1, nil
+ default:
+ return -1, xerrors.Errorf("unrecognized window post type: %d", proof)
+ }
+ }
+
+ switch proof {
+ case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1:
+ return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1:
+ return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1:
+ return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1:
+ return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1:
+ return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil
+ default:
+ return -1, xerrors.Errorf("unrecognized window post type: %d", proof)
+ }
+}
+
+func WinningPoStProofTypeFromWindowPoStProofType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredPoStProof, error) {
+ switch proof {
+ case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1:
+ return abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1:
+ return abi.RegisteredPoStProof_StackedDrgWinning8MiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1:
+ return abi.RegisteredPoStProof_StackedDrgWinning512MiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1:
+ return abi.RegisteredPoStProof_StackedDrgWinning32GiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1:
+ return abi.RegisteredPoStProof_StackedDrgWinning64GiBV1, nil
+ default:
+ return -1, xerrors.Errorf("unknown proof type %d", proof)
+ }
+}
+
+type MinerInfo struct {
+ Owner address.Address // Must be an ID-address.
+ Worker address.Address // Must be an ID-address.
+ NewWorker address.Address // Must be an ID-address.
+ ControlAddresses []address.Address // Must be an ID-addresses.
+ WorkerChangeEpoch abi.ChainEpoch
+ PeerId *peer.ID
+ Multiaddrs []abi.Multiaddrs
+ WindowPoStProofType abi.RegisteredPoStProof
+ SectorSize abi.SectorSize
+ WindowPoStPartitionSectors uint64
+ ConsensusFaultElapsed abi.ChainEpoch
+}
+
+func (mi MinerInfo) IsController(addr address.Address) bool {
+ if addr == mi.Owner || addr == mi.Worker {
+ return true
+ }
+
+ for _, ca := range mi.ControlAddresses {
+ if addr == ca {
+ return true
+ }
+ }
+
+ return false
+}
+
+type SectorExpiration struct {
+ OnTime abi.ChainEpoch
+
+ // non-zero if sector is faulty, epoch at which it will be permanently
+ // removed if it doesn't recover
+ Early abi.ChainEpoch
+}
+
+type SectorLocation struct {
+ Deadline uint64
+ Partition uint64
+}
+
+type SectorChanges struct {
+ Added []SectorOnChainInfo
+ Extended []SectorExtensions
+ Removed []SectorOnChainInfo
+}
+
+type SectorExtensions struct {
+ From SectorOnChainInfo
+ To SectorOnChainInfo
+}
+
+type PreCommitChanges struct {
+ Added []SectorPreCommitOnChainInfo
+ Removed []SectorPreCommitOnChainInfo
+}
+
+type LockedFunds struct {
+ VestingFunds abi.TokenAmount
+ InitialPledgeRequirement abi.TokenAmount
+ PreCommitDeposits abi.TokenAmount
+}
+
+func (lf LockedFunds) TotalLockedFunds() abi.TokenAmount {
+ return big.Add(lf.VestingFunds, big.Add(lf.InitialPledgeRequirement, lf.PreCommitDeposits))
+}
diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go
index 48236ffd0a4..fc1d60e718a 100644
--- a/chain/actors/builtin/miner/miner.go
+++ b/chain/actors/builtin/miner/miner.go
@@ -2,6 +2,8 @@ package miner
import (
"github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/filecoin-project/lotus/chain/actors"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
cbg "github.com/whyrusleeping/cbor-gen"
@@ -17,23 +19,49 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types"
- builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
+ miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
+ miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
)
func init() {
+
builtin.RegisterActorState(builtin0.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load0(store, root)
})
+
builtin.RegisterActorState(builtin2.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load2(store, root)
})
+
+ builtin.RegisterActorState(builtin3.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load3(store, root)
+ })
+
+ builtin.RegisterActorState(builtin4.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load4(store, root)
+ })
+
+ builtin.RegisterActorState(builtin5.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load5(store, root)
+ })
+
}
-var Methods = builtin2.MethodsMiner
+var Methods = builtin5.MethodsMiner
-// Unchanged between v0 and v2 actors
+// Unchanged between v0, v2, v3, v4, and v5 actors
var WPoStProvingPeriod = miner0.WPoStProvingPeriod
var WPoStPeriodDeadlines = miner0.WPoStPeriodDeadlines
var WPoStChallengeWindow = miner0.WPoStChallengeWindow
@@ -42,16 +70,78 @@ var FaultDeclarationCutoff = miner0.FaultDeclarationCutoff
const MinSectorExpiration = miner0.MinSectorExpiration
-func Load(store adt.Store, act *types.Actor) (st State, err error) {
+// Not used / checked in v0
+// TODO: Abstract over network versions
+var DeclarationsMax = miner2.DeclarationsMax
+var AddressedSectorsMax = miner2.AddressedSectorsMax
+
+func Load(store adt.Store, act *types.Actor) (State, error) {
switch act.Code {
+
case builtin0.StorageMinerActorCodeID:
return load0(store, act.Head)
+
case builtin2.StorageMinerActorCodeID:
return load2(store, act.Head)
+
+ case builtin3.StorageMinerActorCodeID:
+ return load3(store, act.Head)
+
+ case builtin4.StorageMinerActorCodeID:
+ return load4(store, act.Head)
+
+ case builtin5.StorageMinerActorCodeID:
+ return load5(store, act.Head)
+
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
+func MakeState(store adt.Store, av actors.Version) (State, error) {
+ switch av {
+
+ case actors.Version0:
+ return make0(store)
+
+ case actors.Version2:
+ return make2(store)
+
+ case actors.Version3:
+ return make3(store)
+
+ case actors.Version4:
+ return make4(store)
+
+ case actors.Version5:
+ return make5(store)
+
+ }
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+
+ case actors.Version0:
+ return builtin0.StorageMinerActorCodeID, nil
+
+ case actors.Version2:
+ return builtin2.StorageMinerActorCodeID, nil
+
+ case actors.Version3:
+ return builtin3.StorageMinerActorCodeID, nil
+
+ case actors.Version4:
+ return builtin4.StorageMinerActorCodeID, nil
+
+ case actors.Version5:
+ return builtin5.StorageMinerActorCodeID, nil
+
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
type State interface {
cbor.Marshaler
@@ -67,11 +157,20 @@ type State interface {
FindSector(abi.SectorNumber) (*SectorLocation, error)
GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error)
GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error)
+ ForEachPrecommittedSector(func(SectorPreCommitOnChainInfo) error) error
LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error)
NumLiveSectors() (uint64, error)
IsAllocated(abi.SectorNumber) (bool, error)
+ // UnallocatedSectorNumbers returns up to count unallocated sector numbers (or fewer than
+ // count if there aren't enough).
+ UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
GetAllocatedSectors() (*bitfield.BitField, error)
+ // Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors
+ GetProvingPeriodStart() (abi.ChainEpoch, error)
+ // Testing only
+ EraseAllUnproven() error
+
LoadDeadline(idx uint64) (Deadline, error)
ForEachDeadline(cb func(idx uint64, dl Deadline) error) error
NumDeadlines() (uint64, error)
@@ -81,20 +180,23 @@ type State interface {
MinerInfoChanged(State) (bool, error)
DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error)
+ DeadlineCronActive() (bool, error)
// Diff helpers. Used by Diff* functions internally.
sectors() (adt.Array, error)
decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error)
precommits() (adt.Map, error)
decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (SectorPreCommitOnChainInfo, error)
+ GetState() interface{}
}
type Deadline interface {
LoadPartition(idx uint64) (Partition, error)
ForEachPartition(cb func(idx uint64, part Partition) error) error
- PostSubmissions() (bitfield.BitField, error)
+ PartitionsPoSted() (bitfield.BitField, error)
PartitionsChanged(Deadline) (bool, error)
+ DisputableProofCount() (uint64, error)
}
type Partition interface {
@@ -138,6 +240,61 @@ type DeclareFaultsParams = miner0.DeclareFaultsParams
type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams
type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
type ProveCommitSectorParams = miner0.ProveCommitSectorParams
+type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams
+type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams
+
+func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
+ // We added support for the new proofs in network version 7, and removed support for the old
+ // ones in network version 8.
+ if nver < network.Version7 {
+ switch proof {
+ case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1:
+ return abi.RegisteredSealProof_StackedDrg2KiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1:
+ return abi.RegisteredSealProof_StackedDrg8MiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1:
+ return abi.RegisteredSealProof_StackedDrg512MiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1:
+ return abi.RegisteredSealProof_StackedDrg32GiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1:
+ return abi.RegisteredSealProof_StackedDrg64GiBV1, nil
+ default:
+ return -1, xerrors.Errorf("unrecognized window post type: %d", proof)
+ }
+ }
+
+ switch proof {
+ case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1:
+ return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1:
+ return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1:
+ return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1:
+ return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1:
+ return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil
+ default:
+ return -1, xerrors.Errorf("unrecognized window post type: %d", proof)
+ }
+}
+
+func WinningPoStProofTypeFromWindowPoStProofType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredPoStProof, error) {
+ switch proof {
+ case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1:
+ return abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1:
+ return abi.RegisteredPoStProof_StackedDrgWinning8MiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1:
+ return abi.RegisteredPoStProof_StackedDrgWinning512MiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1:
+ return abi.RegisteredPoStProof_StackedDrgWinning32GiBV1, nil
+ case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1:
+ return abi.RegisteredPoStProof_StackedDrgWinning64GiBV1, nil
+ default:
+ return -1, xerrors.Errorf("unknown proof type %d", proof)
+ }
+}
type MinerInfo struct {
Owner address.Address // Must be an ID-address.
@@ -147,7 +304,7 @@ type MinerInfo struct {
WorkerChangeEpoch abi.ChainEpoch
PeerId *peer.ID
Multiaddrs []abi.Multiaddrs
- SealProofType abi.RegisteredSealProof
+ WindowPoStProofType abi.RegisteredPoStProof
SectorSize abi.SectorSize
WindowPoStPartitionSectors uint64
ConsensusFaultElapsed abi.ChainEpoch
diff --git a/chain/actors/builtin/miner/state.go.template b/chain/actors/builtin/miner/state.go.template
new file mode 100644
index 00000000000..09c1202d95e
--- /dev/null
+++ b/chain/actors/builtin/miner/state.go.template
@@ -0,0 +1,585 @@
+package miner
+
+import (
+ "bytes"
+ "errors"
+{{if (le .v 1)}}
+ "github.com/filecoin-project/go-state-types/big"
+{{end}}
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ rle "github.com/filecoin-project/go-bitfield/rle"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p-core/peer"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+{{if (ge .v 3)}}
+ builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin"
+{{end}}
+ miner{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/miner"
+ adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt"
+)
+
+var _ State = (*state{{.v}})(nil)
+
+func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
+ out := state{{.v}}{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make{{.v}}(store adt.Store) (State, error) {
+ out := state{{.v}}{store: store}
+ out.State = miner{{.v}}.State{}
+ return &out, nil
+}
+
+type state{{.v}} struct {
+ miner{{.v}}.State
+ store adt.Store
+}
+
+type deadline{{.v}} struct {
+ miner{{.v}}.Deadline
+ store adt.Store
+}
+
+type partition{{.v}} struct {
+ miner{{.v}}.Partition
+ store adt.Store
+}
+
+func (s *state{{.v}}) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = xerrors.Errorf("failed to get available balance: %w", r)
+ available = abi.NewTokenAmount(0)
+ }
+ }()
+ // this panics if the miner doesn't have enough funds to cover its locked pledge
+ available{{if (ge .v 2)}}, err{{end}} = s.GetAvailableBalance(bal)
+ return available, err
+}
+
+func (s *state{{.v}}) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
+ return s.CheckVestedFunds(s.store, epoch)
+}
+
+func (s *state{{.v}}) LockedFunds() (LockedFunds, error) {
+ return LockedFunds{
+ VestingFunds: s.State.LockedFunds,
+ InitialPledgeRequirement: s.State.InitialPledge{{if (le .v 1)}}Requirement{{end}},
+ PreCommitDeposits: s.State.PreCommitDeposits,
+ }, nil
+}
+
+func (s *state{{.v}}) FeeDebt() (abi.TokenAmount, error) {
+ return {{if (ge .v 2)}}s.State.FeeDebt{{else}}big.Zero(){{end}}, nil
+}
+
+func (s *state{{.v}}) InitialPledge() (abi.TokenAmount, error) {
+ return s.State.InitialPledge{{if (le .v 1)}}Requirement{{end}}, nil
+}
+
+func (s *state{{.v}}) PreCommitDeposits() (abi.TokenAmount, error) {
+ return s.State.PreCommitDeposits, nil
+}
+
+func (s *state{{.v}}) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
+ info, ok, err := s.State.GetSector(s.store, num)
+ if !ok || err != nil {
+ return nil, err
+ }
+
+ ret := fromV{{.v}}SectorOnChainInfo(*info)
+ return &ret, nil
+}
+
+func (s *state{{.v}}) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
+ dlIdx, partIdx, err := s.State.FindSector(s.store, num)
+ if err != nil {
+ return nil, err
+ }
+ return &SectorLocation{
+ Deadline: dlIdx,
+ Partition: partIdx,
+ }, nil
+}
+
+func (s *state{{.v}}) NumLiveSectors() (uint64, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return 0, err
+ }
+ var total uint64
+ if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner{{.v}}.Deadline) error {
+ total += dl.LiveSectors
+ return nil
+ }); err != nil {
+ return 0, err
+ }
+ return total, nil
+}
+
+// GetSectorExpiration returns the effective expiration of the given sector.
+//
+// If the sector does not expire early, the Early expiration field is 0.
+func (s *state{{.v}}) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return nil, err
+ }
+ // NOTE: this can be optimized significantly.
+ // 1. If the sector is non-faulty, it will either expire on-time (can be
+ // learned from the sector info), or in the next quantized expiration
+ // epoch (i.e., the first element in the partition's expiration queue).
+ // 2. If it's faulty, it will expire early within the first 14 entries
+ // of the expiration queue.
+ stopErr := errors.New("stop")
+ out := SectorExpiration{}
+ err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner{{.v}}.Deadline) error {
+ partitions, err := dl.PartitionsArray(s.store)
+ if err != nil {
+ return err
+ }
+ quant := s.State.QuantSpecForDeadline(dlIdx)
+ var part miner{{.v}}.Partition
+ return partitions.ForEach(&part, func(partIdx int64) error {
+ if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if !found {
+ return nil
+ }
+ if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
+ return err
+ } else if found {
+ // already terminated
+ return stopErr
+ }
+
+ q, err := miner{{.v}}.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant{{if (ge .v 3)}}, miner{{.v}}.PartitionExpirationAmtBitwidth{{end}})
+ if err != nil {
+ return err
+ }
+ var exp miner{{.v}}.ExpirationSet
+ return q.ForEach(&exp, func(epoch int64) error {
+ if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if early {
+ out.Early = abi.ChainEpoch(epoch)
+ return nil
+ }
+ if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if onTime {
+ out.OnTime = abi.ChainEpoch(epoch)
+ return stopErr
+ }
+ return nil
+ })
+ })
+ })
+ if err == stopErr {
+ err = nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ if out.Early == 0 && out.OnTime == 0 {
+ return nil, xerrors.Errorf("failed to find sector %d", num)
+ }
+ return &out, nil
+}
+
+func (s *state{{.v}}) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
+ info, ok, err := s.State.GetPrecommittedSector(s.store, num)
+ if !ok || err != nil {
+ return nil, err
+ }
+
+ ret := fromV{{.v}}SectorPreCommitOnChainInfo(*info)
+
+ return &ret, nil
+}
+
+func (s *state{{.v}}) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
+{{if (ge .v 3) -}}
+ precommitted, err := adt{{.v}}.AsMap(s.store, s.State.PreCommittedSectors, builtin{{.v}}.DefaultHamtBitwidth)
+{{- else -}}
+ precommitted, err := adt{{.v}}.AsMap(s.store, s.State.PreCommittedSectors)
+{{- end}}
+ if err != nil {
+ return err
+ }
+
+ var info miner{{.v}}.SectorPreCommitOnChainInfo
+ if err := precommitted.ForEach(&info, func(_ string) error {
+ return cb(fromV{{.v}}SectorPreCommitOnChainInfo(info))
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *state{{.v}}) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
+ sectors, err := miner{{.v}}.LoadSectors(s.store, s.State.Sectors)
+ if err != nil {
+ return nil, err
+ }
+
+ // If no sector numbers are specified, load all.
+ if snos == nil {
+ infos := make([]*SectorOnChainInfo, 0, sectors.Length())
+ var info{{.v}} miner{{.v}}.SectorOnChainInfo
+ if err := sectors.ForEach(&info{{.v}}, func(_ int64) error {
+ info := fromV{{.v}}SectorOnChainInfo(info{{.v}})
+ infos = append(infos, &info)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return infos, nil
+ }
+
+ // Otherwise, load selected.
+ infos{{.v}}, err := sectors.Load(*snos)
+ if err != nil {
+ return nil, err
+ }
+ infos := make([]*SectorOnChainInfo, len(infos{{.v}}))
+ for i, info{{.v}} := range infos{{.v}} {
+ info := fromV{{.v}}SectorOnChainInfo(*info{{.v}})
+ infos[i] = &info
+ }
+ return infos, nil
+}
+
+func (s *state{{.v}}) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
+ var allocatedSectors bitfield.BitField
+ err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
+ return allocatedSectors, err
+}
+
+func (s *state{{.v}}) IsAllocated(num abi.SectorNumber) (bool, error) {
+ allocatedSectors, err := s.loadAllocatedSectorNumbers()
+ if err != nil {
+ return false, err
+ }
+
+ return allocatedSectors.IsSet(uint64(num))
+}
+
+func (s *state{{.v}}) GetProvingPeriodStart() (abi.ChainEpoch, error) {
+ return s.State.ProvingPeriodStart, nil
+}
+
+func (s *state{{.v}}) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
+ allocatedSectors, err := s.loadAllocatedSectorNumbers()
+ if err != nil {
+ return nil, err
+ }
+
+ allocatedRuns, err := allocatedSectors.RunIterator()
+ if err != nil {
+ return nil, err
+ }
+
+ unallocatedRuns, err := rle.Subtract(
+ &rle.RunSliceIterator{Runs: []rle.Run{ {Val: true, Len: abi.MaxSectorNumber} }},
+ allocatedRuns,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ iter, err := rle.BitsFromRuns(unallocatedRuns)
+ if err != nil {
+ return nil, err
+ }
+
+ sectors := make([]abi.SectorNumber, 0, count)
+ for iter.HasNext() && len(sectors) < count {
+ nextNo, err := iter.Next()
+ if err != nil {
+ return nil, err
+ }
+ sectors = append(sectors, abi.SectorNumber(nextNo))
+ }
+
+ return sectors, nil
+}
+
+func (s *state{{.v}}) GetAllocatedSectors() (*bitfield.BitField, error) {
+ var allocatedSectors bitfield.BitField
+ if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
+ return nil, err
+ }
+
+ return &allocatedSectors, nil
+}
+
+func (s *state{{.v}}) LoadDeadline(idx uint64) (Deadline, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return nil, err
+ }
+ dl, err := dls.LoadDeadline(s.store, idx)
+ if err != nil {
+ return nil, err
+ }
+ return &deadline{{.v}}{*dl, s.store}, nil
+}
+
+func (s *state{{.v}}) ForEachDeadline(cb func(uint64, Deadline) error) error {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return err
+ }
+ return dls.ForEach(s.store, func(i uint64, dl *miner{{.v}}.Deadline) error {
+ return cb(i, &deadline{{.v}}{*dl, s.store})
+ })
+}
+
+func (s *state{{.v}}) NumDeadlines() (uint64, error) {
+ return miner{{.v}}.WPoStPeriodDeadlines, nil
+}
+
+func (s *state{{.v}}) DeadlinesChanged(other State) (bool, error) {
+ other{{.v}}, ok := other.(*state{{.v}})
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+
+ return !s.State.Deadlines.Equals(other{{.v}}.Deadlines), nil
+}
+
+func (s *state{{.v}}) MinerInfoChanged(other State) (bool, error) {
+ other0, ok := other.(*state{{.v}})
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.Info.Equals(other0.State.Info), nil
+}
+
+func (s *state{{.v}}) Info() (MinerInfo, error) {
+ info, err := s.State.GetInfo(s.store)
+ if err != nil {
+ return MinerInfo{}, err
+ }
+
+ var pid *peer.ID
+ if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
+ pid = &peerID
+ }
+{{if (le .v 2)}}
+ wpp, err := info.SealProofType.RegisteredWindowPoStProof()
+ if err != nil {
+ return MinerInfo{}, err
+ }
+{{end}}
+ mi := MinerInfo{
+ Owner: info.Owner,
+ Worker: info.Worker,
+ ControlAddresses: info.ControlAddresses,
+
+ NewWorker: address.Undef,
+ WorkerChangeEpoch: -1,
+
+ PeerId: pid,
+ Multiaddrs: info.Multiaddrs,
+ WindowPoStProofType: {{if (ge .v 3)}}info.WindowPoStProofType{{else}}wpp{{end}},
+ SectorSize: info.SectorSize,
+ WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
+ ConsensusFaultElapsed: {{if (ge .v 2)}}info.ConsensusFaultElapsed{{else}}-1{{end}},
+ }
+
+ if info.PendingWorkerKey != nil {
+ mi.NewWorker = info.PendingWorkerKey.NewWorker
+ mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
+ }
+
+ return mi, nil
+}
+
+func (s *state{{.v}}) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
+ return s.State.{{if (ge .v 4)}}Recorded{{end}}DeadlineInfo(epoch), nil
+}
+
+func (s *state{{.v}}) DeadlineCronActive() (bool, error) {
+ return {{if (ge .v 4)}}s.State.DeadlineCronActive{{else}}true{{end}}, nil{{if (lt .v 4)}} // always active in this version{{end}}
+}
+
+func (s *state{{.v}}) sectors() (adt.Array, error) {
+ return adt{{.v}}.AsArray(s.store, s.Sectors{{if (ge .v 3)}}, miner{{.v}}.SectorsAmtBitwidth{{end}})
+}
+
+func (s *state{{.v}}) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
+ var si miner{{.v}}.SectorOnChainInfo
+ err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
+ if err != nil {
+ return SectorOnChainInfo{}, err
+ }
+
+ return fromV{{.v}}SectorOnChainInfo(si), nil
+}
+
+func (s *state{{.v}}) precommits() (adt.Map, error) {
+ return adt{{.v}}.AsMap(s.store, s.PreCommittedSectors{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}})
+}
+
+func (s *state{{.v}}) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
+ var sp miner{{.v}}.SectorPreCommitOnChainInfo
+ err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
+ if err != nil {
+ return SectorPreCommitOnChainInfo{}, err
+ }
+
+ return fromV{{.v}}SectorPreCommitOnChainInfo(sp), nil
+}
+
+func (s *state{{.v}}) EraseAllUnproven() error {
+ {{if (ge .v 2)}}
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return err
+ }
+
+ err = dls.ForEach(s.store, func(dindx uint64, dl *miner{{.v}}.Deadline) error {
+ ps, err := dl.PartitionsArray(s.store)
+ if err != nil {
+ return err
+ }
+
+ var part miner{{.v}}.Partition
+ err = ps.ForEach(&part, func(pindx int64) error {
+ _ = part.ActivateUnproven()
+ err = ps.Set(uint64(pindx), &part)
+ return nil
+ })
+
+ if err != nil {
+ return err
+ }
+
+ dl.Partitions, err = ps.Root()
+ if err != nil {
+ return err
+ }
+
+ return dls.UpdateDeadline(s.store, dindx, dl)
+ })
+
+ return s.State.SaveDeadlines(s.store, dls)
+ {{else}}
+ // field doesn't exist until v2
+ {{end}}
+ return nil
+}
+
+func (d *deadline{{.v}}) LoadPartition(idx uint64) (Partition, error) {
+ p, err := d.Deadline.LoadPartition(d.store, idx)
+ if err != nil {
+ return nil, err
+ }
+ return &partition{{.v}}{*p, d.store}, nil
+}
+
+func (d *deadline{{.v}}) ForEachPartition(cb func(uint64, Partition) error) error {
+ ps, err := d.Deadline.PartitionsArray(d.store)
+ if err != nil {
+ return err
+ }
+ var part miner{{.v}}.Partition
+ return ps.ForEach(&part, func(i int64) error {
+ return cb(uint64(i), &partition{{.v}}{part, d.store})
+ })
+}
+
+func (d *deadline{{.v}}) PartitionsChanged(other Deadline) (bool, error) {
+ other{{.v}}, ok := other.(*deadline{{.v}})
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+
+ return !d.Deadline.Partitions.Equals(other{{.v}}.Deadline.Partitions), nil
+}
+
+func (d *deadline{{.v}}) PartitionsPoSted() (bitfield.BitField, error) {
+ return d.Deadline.{{if (ge .v 3)}}PartitionsPoSted{{else}}PostSubmissions{{end}}, nil
+}
+
+func (d *deadline{{.v}}) DisputableProofCount() (uint64, error) {
+{{if (ge .v 3)}}
+ ops, err := d.OptimisticProofsSnapshotArray(d.store)
+ if err != nil {
+ return 0, err
+ }
+
+ return ops.Length(), nil
+{{else}}
+ // field doesn't exist until v3
+ return 0, nil
+{{end}}
+}
+
+func (p *partition{{.v}}) AllSectors() (bitfield.BitField, error) {
+ return p.Partition.Sectors, nil
+}
+
+func (p *partition{{.v}}) FaultySectors() (bitfield.BitField, error) {
+ return p.Partition.Faults, nil
+}
+
+func (p *partition{{.v}}) RecoveringSectors() (bitfield.BitField, error) {
+ return p.Partition.Recoveries, nil
+}
+
+func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorOnChainInfo {
+{{if (ge .v 2)}}
+ return SectorOnChainInfo{
+ SectorNumber: v{{.v}}.SectorNumber,
+ SealProof: v{{.v}}.SealProof,
+ SealedCID: v{{.v}}.SealedCID,
+ DealIDs: v{{.v}}.DealIDs,
+ Activation: v{{.v}}.Activation,
+ Expiration: v{{.v}}.Expiration,
+ DealWeight: v{{.v}}.DealWeight,
+ VerifiedDealWeight: v{{.v}}.VerifiedDealWeight,
+ InitialPledge: v{{.v}}.InitialPledge,
+ ExpectedDayReward: v{{.v}}.ExpectedDayReward,
+ ExpectedStoragePledge: v{{.v}}.ExpectedStoragePledge,
+ }
+{{else}}
+ return (SectorOnChainInfo)(v0)
+{{end}}
+}
+
+func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
+{{if (ge .v 2)}}
+ return SectorPreCommitOnChainInfo{
+ Info: (SectorPreCommitInfo)(v{{.v}}.Info),
+ PreCommitDeposit: v{{.v}}.PreCommitDeposit,
+ PreCommitEpoch: v{{.v}}.PreCommitEpoch,
+ DealWeight: v{{.v}}.DealWeight,
+ VerifiedDealWeight: v{{.v}}.VerifiedDealWeight,
+ }
+{{else}}
+ return (SectorPreCommitOnChainInfo)(v0)
+{{end}}
+}
+
+func (s *state{{.v}}) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/miner/utils.go b/chain/actors/builtin/miner/utils.go
index f9c6b3da332..2f24e845401 100644
--- a/chain/actors/builtin/miner/utils.go
+++ b/chain/actors/builtin/miner/utils.go
@@ -4,6 +4,8 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-bitfield"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/network"
)
func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) (bitfield.BitField, error) {
@@ -26,3 +28,42 @@ func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error))
return bitfield.MultiMerge(parts...)
}
+
+// SealProofTypeFromSectorSize returns preferred seal proof type for creating
+// new miner actors and new sectors
+func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.RegisteredSealProof, error) {
+ switch {
+ case nv < network.Version7:
+ switch ssize {
+ case 2 << 10:
+ return abi.RegisteredSealProof_StackedDrg2KiBV1, nil
+ case 8 << 20:
+ return abi.RegisteredSealProof_StackedDrg8MiBV1, nil
+ case 512 << 20:
+ return abi.RegisteredSealProof_StackedDrg512MiBV1, nil
+ case 32 << 30:
+ return abi.RegisteredSealProof_StackedDrg32GiBV1, nil
+ case 64 << 30:
+ return abi.RegisteredSealProof_StackedDrg64GiBV1, nil
+ default:
+ return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
+ }
+ case nv >= network.Version7:
+ switch ssize {
+ case 2 << 10:
+ return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil
+ case 8 << 20:
+ return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil
+ case 512 << 20:
+ return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil
+ case 32 << 30:
+ return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil
+ case 64 << 30:
+ return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil
+ default:
+ return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
+ }
+ }
+
+ return 0, xerrors.Errorf("unsupported network version")
+}
diff --git a/chain/actors/builtin/miner/v0.go b/chain/actors/builtin/miner/v0.go
index a34cd5b43bc..cd922645ea4 100644
--- a/chain/actors/builtin/miner/v0.go
+++ b/chain/actors/builtin/miner/v0.go
@@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
+ rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid"
@@ -32,6 +33,12 @@ func load0(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make0(store adt.Store) (State, error) {
+ out := state0{store: store}
+ out.State = miner0.State{}
+ return &out, nil
+}
+
type state0 struct {
miner0.State
store adt.Store
@@ -196,9 +203,26 @@ func (s *state0) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn
}
ret := fromV0SectorPreCommitOnChainInfo(*info)
+
return &ret, nil
}
+func (s *state0) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
+ precommitted, err := adt0.AsMap(s.store, s.State.PreCommittedSectors)
+ if err != nil {
+ return err
+ }
+
+ var info miner0.SectorPreCommitOnChainInfo
+ if err := precommitted.ForEach(&info, func(_ string) error {
+ return cb(fromV0SectorPreCommitOnChainInfo(info))
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner0.LoadSectors(s.store, s.State.Sectors)
if err != nil {
@@ -232,15 +256,61 @@ func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err
return infos, nil
}
-func (s *state0) IsAllocated(num abi.SectorNumber) (bool, error) {
+func (s *state0) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
- if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
+ err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
+ return allocatedSectors, err
+}
+
+func (s *state0) IsAllocated(num abi.SectorNumber) (bool, error) {
+ allocatedSectors, err := s.loadAllocatedSectorNumbers()
+ if err != nil {
return false, err
}
return allocatedSectors.IsSet(uint64(num))
}
+func (s *state0) GetProvingPeriodStart() (abi.ChainEpoch, error) {
+ return s.State.ProvingPeriodStart, nil
+}
+
+func (s *state0) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
+ allocatedSectors, err := s.loadAllocatedSectorNumbers()
+ if err != nil {
+ return nil, err
+ }
+
+ allocatedRuns, err := allocatedSectors.RunIterator()
+ if err != nil {
+ return nil, err
+ }
+
+ unallocatedRuns, err := rle.Subtract(
+ &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
+ allocatedRuns,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ iter, err := rle.BitsFromRuns(unallocatedRuns)
+ if err != nil {
+ return nil, err
+ }
+
+ sectors := make([]abi.SectorNumber, 0, count)
+ for iter.HasNext() && len(sectors) < count {
+ nextNo, err := iter.Next()
+ if err != nil {
+ return nil, err
+ }
+ sectors = append(sectors, abi.SectorNumber(nextNo))
+ }
+
+ return sectors, nil
+}
+
func (s *state0) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
@@ -306,6 +376,11 @@ func (s *state0) Info() (MinerInfo, error) {
pid = &peerID
}
+ wpp, err := info.SealProofType.RegisteredWindowPoStProof()
+ if err != nil {
+ return MinerInfo{}, err
+ }
+
mi := MinerInfo{
Owner: info.Owner,
Worker: info.Worker,
@@ -316,7 +391,7 @@ func (s *state0) Info() (MinerInfo, error) {
PeerId: pid,
Multiaddrs: info.Multiaddrs,
- SealProofType: info.SealProofType,
+ WindowPoStProofType: wpp,
SectorSize: info.SectorSize,
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
ConsensusFaultElapsed: -1,
@@ -334,6 +409,10 @@ func (s *state0) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
return s.State.DeadlineInfo(epoch), nil
}
+func (s *state0) DeadlineCronActive() (bool, error) {
+ return true, nil // always active in this version
+}
+
func (s *state0) sectors() (adt.Array, error) {
return adt0.AsArray(s.store, s.Sectors)
}
@@ -362,6 +441,13 @@ func (s *state0) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreC
return fromV0SectorPreCommitOnChainInfo(sp), nil
}
+func (s *state0) EraseAllUnproven() error {
+
+ // field doesn't exist until v2
+
+ return nil
+}
+
func (d *deadline0) LoadPartition(idx uint64) (Partition, error) {
p, err := d.Deadline.LoadPartition(d.store, idx)
if err != nil {
@@ -391,10 +477,17 @@ func (d *deadline0) PartitionsChanged(other Deadline) (bool, error) {
return !d.Deadline.Partitions.Equals(other0.Deadline.Partitions), nil
}
-func (d *deadline0) PostSubmissions() (bitfield.BitField, error) {
+func (d *deadline0) PartitionsPoSted() (bitfield.BitField, error) {
return d.Deadline.PostSubmissions, nil
}
+func (d *deadline0) DisputableProofCount() (uint64, error) {
+
+ // field doesn't exist until v3
+ return 0, nil
+
+}
+
func (p *partition0) AllSectors() (bitfield.BitField, error) {
return p.Partition.Sectors, nil
}
@@ -408,9 +501,17 @@ func (p *partition0) RecoveringSectors() (bitfield.BitField, error) {
}
func fromV0SectorOnChainInfo(v0 miner0.SectorOnChainInfo) SectorOnChainInfo {
+
return (SectorOnChainInfo)(v0)
+
}
func fromV0SectorPreCommitOnChainInfo(v0 miner0.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
+
return (SectorPreCommitOnChainInfo)(v0)
+
+}
+
+func (s *state0) GetState() interface{} {
+ return &s.State
}
diff --git a/chain/actors/builtin/miner/v2.go b/chain/actors/builtin/miner/v2.go
index 118ac87d65f..5de653fe4e2 100644
--- a/chain/actors/builtin/miner/v2.go
+++ b/chain/actors/builtin/miner/v2.go
@@ -6,6 +6,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
+ rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid"
@@ -30,6 +31,12 @@ func load2(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make2(store adt.Store) (State, error) {
+ out := state2{store: store}
+ out.State = miner2.State{}
+ return &out, nil
+}
+
type state2 struct {
miner2.State
store adt.Store
@@ -198,6 +205,22 @@ func (s *state2) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn
return &ret, nil
}
+func (s *state2) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
+ precommitted, err := adt2.AsMap(s.store, s.State.PreCommittedSectors)
+ if err != nil {
+ return err
+ }
+
+ var info miner2.SectorPreCommitOnChainInfo
+ if err := precommitted.ForEach(&info, func(_ string) error {
+ return cb(fromV2SectorPreCommitOnChainInfo(info))
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
func (s *state2) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner2.LoadSectors(s.store, s.State.Sectors)
if err != nil {
@@ -231,22 +254,68 @@ func (s *state2) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err
return infos, nil
}
-func (s *state2) GetAllocatedSectors() (*bitfield.BitField, error) {
+func (s *state2) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
- if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
+ err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
+ return allocatedSectors, err
+}
+
+func (s *state2) IsAllocated(num abi.SectorNumber) (bool, error) {
+ allocatedSectors, err := s.loadAllocatedSectorNumbers()
+ if err != nil {
+ return false, err
+ }
+
+ return allocatedSectors.IsSet(uint64(num))
+}
+
+func (s *state2) GetProvingPeriodStart() (abi.ChainEpoch, error) {
+ return s.State.ProvingPeriodStart, nil
+}
+
+func (s *state2) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
+ allocatedSectors, err := s.loadAllocatedSectorNumbers()
+ if err != nil {
return nil, err
}
- return &allocatedSectors, nil
+ allocatedRuns, err := allocatedSectors.RunIterator()
+ if err != nil {
+ return nil, err
+ }
+
+ unallocatedRuns, err := rle.Subtract(
+ &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
+ allocatedRuns,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ iter, err := rle.BitsFromRuns(unallocatedRuns)
+ if err != nil {
+ return nil, err
+ }
+
+ sectors := make([]abi.SectorNumber, 0, count)
+ for iter.HasNext() && len(sectors) < count {
+ nextNo, err := iter.Next()
+ if err != nil {
+ return nil, err
+ }
+ sectors = append(sectors, abi.SectorNumber(nextNo))
+ }
+
+ return sectors, nil
}
-func (s *state2) IsAllocated(num abi.SectorNumber) (bool, error) {
+func (s *state2) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
- return false, err
+ return nil, err
}
- return allocatedSectors.IsSet(uint64(num))
+ return &allocatedSectors, nil
}
func (s *state2) LoadDeadline(idx uint64) (Deadline, error) {
@@ -305,6 +374,11 @@ func (s *state2) Info() (MinerInfo, error) {
pid = &peerID
}
+ wpp, err := info.SealProofType.RegisteredWindowPoStProof()
+ if err != nil {
+ return MinerInfo{}, err
+ }
+
mi := MinerInfo{
Owner: info.Owner,
Worker: info.Worker,
@@ -315,7 +389,7 @@ func (s *state2) Info() (MinerInfo, error) {
PeerId: pid,
Multiaddrs: info.Multiaddrs,
- SealProofType: info.SealProofType,
+ WindowPoStProofType: wpp,
SectorSize: info.SectorSize,
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
ConsensusFaultElapsed: info.ConsensusFaultElapsed,
@@ -333,6 +407,10 @@ func (s *state2) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
return s.State.DeadlineInfo(epoch), nil
}
+func (s *state2) DeadlineCronActive() (bool, error) {
+ return true, nil // always active in this version
+}
+
func (s *state2) sectors() (adt.Array, error) {
return adt2.AsArray(s.store, s.Sectors)
}
@@ -361,6 +439,43 @@ func (s *state2) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreC
return fromV2SectorPreCommitOnChainInfo(sp), nil
}
+func (s *state2) EraseAllUnproven() error {
+
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return err
+ }
+
+ err = dls.ForEach(s.store, func(dindx uint64, dl *miner2.Deadline) error {
+ ps, err := dl.PartitionsArray(s.store)
+ if err != nil {
+ return err
+ }
+
+ var part miner2.Partition
+ err = ps.ForEach(&part, func(pindx int64) error {
+ _ = part.ActivateUnproven()
+ err = ps.Set(uint64(pindx), &part)
+			return err
+ })
+
+ if err != nil {
+ return err
+ }
+
+ dl.Partitions, err = ps.Root()
+ if err != nil {
+ return err
+ }
+
+ return dls.UpdateDeadline(s.store, dindx, dl)
+ })
+	if err != nil {
+		return err
+	}
+	return s.State.SaveDeadlines(s.store, dls)
+}
+
func (d *deadline2) LoadPartition(idx uint64) (Partition, error) {
p, err := d.Deadline.LoadPartition(d.store, idx)
if err != nil {
@@ -390,10 +505,17 @@ func (d *deadline2) PartitionsChanged(other Deadline) (bool, error) {
return !d.Deadline.Partitions.Equals(other2.Deadline.Partitions), nil
}
-func (d *deadline2) PostSubmissions() (bitfield.BitField, error) {
+func (d *deadline2) PartitionsPoSted() (bitfield.BitField, error) {
return d.Deadline.PostSubmissions, nil
}
+func (d *deadline2) DisputableProofCount() (uint64, error) {
+
+ // field doesn't exist until v3
+ return 0, nil
+
+}
+
func (p *partition2) AllSectors() (bitfield.BitField, error) {
return p.Partition.Sectors, nil
}
@@ -407,6 +529,7 @@ func (p *partition2) RecoveringSectors() (bitfield.BitField, error) {
}
func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo {
+
return SectorOnChainInfo{
SectorNumber: v2.SectorNumber,
SealProof: v2.SealProof,
@@ -420,9 +543,11 @@ func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo {
ExpectedDayReward: v2.ExpectedDayReward,
ExpectedStoragePledge: v2.ExpectedStoragePledge,
}
+
}
func fromV2SectorPreCommitOnChainInfo(v2 miner2.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
+
return SectorPreCommitOnChainInfo{
Info: (SectorPreCommitInfo)(v2.Info),
PreCommitDeposit: v2.PreCommitDeposit,
@@ -430,4 +555,9 @@ func fromV2SectorPreCommitOnChainInfo(v2 miner2.SectorPreCommitOnChainInfo) Sect
DealWeight: v2.DealWeight,
VerifiedDealWeight: v2.VerifiedDealWeight,
}
+
+}
+
+func (s *state2) GetState() interface{} {
+ return &s.State
}
diff --git a/chain/actors/builtin/miner/v3.go b/chain/actors/builtin/miner/v3.go
new file mode 100644
index 00000000000..1819428a6d3
--- /dev/null
+++ b/chain/actors/builtin/miner/v3.go
@@ -0,0 +1,564 @@
+package miner
+
+import (
+ "bytes"
+ "errors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ rle "github.com/filecoin-project/go-bitfield/rle"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p-core/peer"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
+ adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
+)
+
+var _ State = (*state3)(nil)
+
+func load3(store adt.Store, root cid.Cid) (State, error) {
+ out := state3{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make3(store adt.Store) (State, error) {
+ out := state3{store: store}
+ out.State = miner3.State{}
+ return &out, nil
+}
+
+type state3 struct {
+ miner3.State
+ store adt.Store
+}
+
+type deadline3 struct {
+ miner3.Deadline
+ store adt.Store
+}
+
+type partition3 struct {
+ miner3.Partition
+ store adt.Store
+}
+
+func (s *state3) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = xerrors.Errorf("failed to get available balance: %w", r)
+ available = abi.NewTokenAmount(0)
+ }
+ }()
+ // this panics if the miner doesnt have enough funds to cover their locked pledge
+ available, err = s.GetAvailableBalance(bal)
+ return available, err
+}
+
+func (s *state3) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
+ return s.CheckVestedFunds(s.store, epoch)
+}
+
+func (s *state3) LockedFunds() (LockedFunds, error) {
+ return LockedFunds{
+ VestingFunds: s.State.LockedFunds,
+ InitialPledgeRequirement: s.State.InitialPledge,
+ PreCommitDeposits: s.State.PreCommitDeposits,
+ }, nil
+}
+
+func (s *state3) FeeDebt() (abi.TokenAmount, error) {
+ return s.State.FeeDebt, nil
+}
+
+func (s *state3) InitialPledge() (abi.TokenAmount, error) {
+ return s.State.InitialPledge, nil
+}
+
+func (s *state3) PreCommitDeposits() (abi.TokenAmount, error) {
+ return s.State.PreCommitDeposits, nil
+}
+
+func (s *state3) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
+ info, ok, err := s.State.GetSector(s.store, num)
+ if !ok || err != nil {
+ return nil, err
+ }
+
+ ret := fromV3SectorOnChainInfo(*info)
+ return &ret, nil
+}
+
+func (s *state3) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
+ dlIdx, partIdx, err := s.State.FindSector(s.store, num)
+ if err != nil {
+ return nil, err
+ }
+ return &SectorLocation{
+ Deadline: dlIdx,
+ Partition: partIdx,
+ }, nil
+}
+
+func (s *state3) NumLiveSectors() (uint64, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return 0, err
+ }
+ var total uint64
+ if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error {
+ total += dl.LiveSectors
+ return nil
+ }); err != nil {
+ return 0, err
+ }
+ return total, nil
+}
+
+// GetSectorExpiration returns the effective expiration of the given sector.
+//
+// If the sector does not expire early, the Early expiration field is 0.
+func (s *state3) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return nil, err
+ }
+ // NOTE: this can be optimized significantly.
+ // 1. If the sector is non-faulty, it will either expire on-time (can be
+ // learned from the sector info), or in the next quantized expiration
+ // epoch (i.e., the first element in the partition's expiration queue.
+ // 2. If it's faulty, it will expire early within the first 14 entries
+ // of the expiration queue.
+ stopErr := errors.New("stop")
+ out := SectorExpiration{}
+ err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error {
+ partitions, err := dl.PartitionsArray(s.store)
+ if err != nil {
+ return err
+ }
+ quant := s.State.QuantSpecForDeadline(dlIdx)
+ var part miner3.Partition
+ return partitions.ForEach(&part, func(partIdx int64) error {
+ if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if !found {
+ return nil
+ }
+ if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
+ return err
+ } else if found {
+ // already terminated
+ return stopErr
+ }
+
+ q, err := miner3.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner3.PartitionExpirationAmtBitwidth)
+ if err != nil {
+ return err
+ }
+ var exp miner3.ExpirationSet
+ return q.ForEach(&exp, func(epoch int64) error {
+ if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if early {
+ out.Early = abi.ChainEpoch(epoch)
+ return nil
+ }
+ if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if onTime {
+ out.OnTime = abi.ChainEpoch(epoch)
+ return stopErr
+ }
+ return nil
+ })
+ })
+ })
+ if err == stopErr {
+ err = nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ if out.Early == 0 && out.OnTime == 0 {
+ return nil, xerrors.Errorf("failed to find sector %d", num)
+ }
+ return &out, nil
+}
+
+func (s *state3) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
+ info, ok, err := s.State.GetPrecommittedSector(s.store, num)
+ if !ok || err != nil {
+ return nil, err
+ }
+
+ ret := fromV3SectorPreCommitOnChainInfo(*info)
+
+ return &ret, nil
+}
+
+func (s *state3) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
+ precommitted, err := adt3.AsMap(s.store, s.State.PreCommittedSectors, builtin3.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+
+ var info miner3.SectorPreCommitOnChainInfo
+ if err := precommitted.ForEach(&info, func(_ string) error {
+ return cb(fromV3SectorPreCommitOnChainInfo(info))
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *state3) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
+ sectors, err := miner3.LoadSectors(s.store, s.State.Sectors)
+ if err != nil {
+ return nil, err
+ }
+
+ // If no sector numbers are specified, load all.
+ if snos == nil {
+ infos := make([]*SectorOnChainInfo, 0, sectors.Length())
+ var info3 miner3.SectorOnChainInfo
+ if err := sectors.ForEach(&info3, func(_ int64) error {
+ info := fromV3SectorOnChainInfo(info3)
+ infos = append(infos, &info)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return infos, nil
+ }
+
+ // Otherwise, load selected.
+ infos3, err := sectors.Load(*snos)
+ if err != nil {
+ return nil, err
+ }
+ infos := make([]*SectorOnChainInfo, len(infos3))
+ for i, info3 := range infos3 {
+ info := fromV3SectorOnChainInfo(*info3)
+ infos[i] = &info
+ }
+ return infos, nil
+}
+
+func (s *state3) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
+ var allocatedSectors bitfield.BitField
+ err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
+ return allocatedSectors, err
+}
+
+func (s *state3) IsAllocated(num abi.SectorNumber) (bool, error) {
+ allocatedSectors, err := s.loadAllocatedSectorNumbers()
+ if err != nil {
+ return false, err
+ }
+
+ return allocatedSectors.IsSet(uint64(num))
+}
+
+func (s *state3) GetProvingPeriodStart() (abi.ChainEpoch, error) {
+ return s.State.ProvingPeriodStart, nil
+}
+
+func (s *state3) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
+ allocatedSectors, err := s.loadAllocatedSectorNumbers()
+ if err != nil {
+ return nil, err
+ }
+
+ allocatedRuns, err := allocatedSectors.RunIterator()
+ if err != nil {
+ return nil, err
+ }
+
+ unallocatedRuns, err := rle.Subtract(
+ &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
+ allocatedRuns,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ iter, err := rle.BitsFromRuns(unallocatedRuns)
+ if err != nil {
+ return nil, err
+ }
+
+ sectors := make([]abi.SectorNumber, 0, count)
+ for iter.HasNext() && len(sectors) < count {
+ nextNo, err := iter.Next()
+ if err != nil {
+ return nil, err
+ }
+ sectors = append(sectors, abi.SectorNumber(nextNo))
+ }
+
+ return sectors, nil
+}
+
+func (s *state3) GetAllocatedSectors() (*bitfield.BitField, error) {
+ var allocatedSectors bitfield.BitField
+ if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
+ return nil, err
+ }
+
+ return &allocatedSectors, nil
+}
+
+func (s *state3) LoadDeadline(idx uint64) (Deadline, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return nil, err
+ }
+ dl, err := dls.LoadDeadline(s.store, idx)
+ if err != nil {
+ return nil, err
+ }
+ return &deadline3{*dl, s.store}, nil
+}
+
+func (s *state3) ForEachDeadline(cb func(uint64, Deadline) error) error {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return err
+ }
+ return dls.ForEach(s.store, func(i uint64, dl *miner3.Deadline) error {
+ return cb(i, &deadline3{*dl, s.store})
+ })
+}
+
+func (s *state3) NumDeadlines() (uint64, error) {
+ return miner3.WPoStPeriodDeadlines, nil
+}
+
+func (s *state3) DeadlinesChanged(other State) (bool, error) {
+ other3, ok := other.(*state3)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+
+ return !s.State.Deadlines.Equals(other3.Deadlines), nil
+}
+
+func (s *state3) MinerInfoChanged(other State) (bool, error) {
+	other3, ok := other.(*state3)
+	if !ok {
+		// treat an upgrade as a change, always
+		return true, nil
+	}
+	return !s.State.Info.Equals(other3.State.Info), nil
+}
+
+func (s *state3) Info() (MinerInfo, error) {
+ info, err := s.State.GetInfo(s.store)
+ if err != nil {
+ return MinerInfo{}, err
+ }
+
+ var pid *peer.ID
+ if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
+ pid = &peerID
+ }
+
+ mi := MinerInfo{
+ Owner: info.Owner,
+ Worker: info.Worker,
+ ControlAddresses: info.ControlAddresses,
+
+ NewWorker: address.Undef,
+ WorkerChangeEpoch: -1,
+
+ PeerId: pid,
+ Multiaddrs: info.Multiaddrs,
+ WindowPoStProofType: info.WindowPoStProofType,
+ SectorSize: info.SectorSize,
+ WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
+ ConsensusFaultElapsed: info.ConsensusFaultElapsed,
+ }
+
+ if info.PendingWorkerKey != nil {
+ mi.NewWorker = info.PendingWorkerKey.NewWorker
+ mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
+ }
+
+ return mi, nil
+}
+
+func (s *state3) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
+ return s.State.DeadlineInfo(epoch), nil
+}
+
+func (s *state3) DeadlineCronActive() (bool, error) {
+ return true, nil // always active in this version
+}
+
+func (s *state3) sectors() (adt.Array, error) {
+ return adt3.AsArray(s.store, s.Sectors, miner3.SectorsAmtBitwidth)
+}
+
+func (s *state3) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
+ var si miner3.SectorOnChainInfo
+ err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
+ if err != nil {
+ return SectorOnChainInfo{}, err
+ }
+
+ return fromV3SectorOnChainInfo(si), nil
+}
+
+func (s *state3) precommits() (adt.Map, error) {
+ return adt3.AsMap(s.store, s.PreCommittedSectors, builtin3.DefaultHamtBitwidth)
+}
+
+func (s *state3) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
+ var sp miner3.SectorPreCommitOnChainInfo
+ err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
+ if err != nil {
+ return SectorPreCommitOnChainInfo{}, err
+ }
+
+ return fromV3SectorPreCommitOnChainInfo(sp), nil
+}
+
+func (s *state3) EraseAllUnproven() error {
+
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return err
+ }
+
+ err = dls.ForEach(s.store, func(dindx uint64, dl *miner3.Deadline) error {
+ ps, err := dl.PartitionsArray(s.store)
+ if err != nil {
+ return err
+ }
+
+ var part miner3.Partition
+ err = ps.ForEach(&part, func(pindx int64) error {
+ _ = part.ActivateUnproven()
+ err = ps.Set(uint64(pindx), &part)
+			return err
+ })
+
+ if err != nil {
+ return err
+ }
+
+ dl.Partitions, err = ps.Root()
+ if err != nil {
+ return err
+ }
+
+ return dls.UpdateDeadline(s.store, dindx, dl)
+ })
+	if err != nil {
+		return err
+	}
+	return s.State.SaveDeadlines(s.store, dls)
+}
+
+func (d *deadline3) LoadPartition(idx uint64) (Partition, error) {
+ p, err := d.Deadline.LoadPartition(d.store, idx)
+ if err != nil {
+ return nil, err
+ }
+ return &partition3{*p, d.store}, nil
+}
+
+func (d *deadline3) ForEachPartition(cb func(uint64, Partition) error) error {
+ ps, err := d.Deadline.PartitionsArray(d.store)
+ if err != nil {
+ return err
+ }
+ var part miner3.Partition
+ return ps.ForEach(&part, func(i int64) error {
+ return cb(uint64(i), &partition3{part, d.store})
+ })
+}
+
+func (d *deadline3) PartitionsChanged(other Deadline) (bool, error) {
+ other3, ok := other.(*deadline3)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+
+ return !d.Deadline.Partitions.Equals(other3.Deadline.Partitions), nil
+}
+
+func (d *deadline3) PartitionsPoSted() (bitfield.BitField, error) {
+ return d.Deadline.PartitionsPoSted, nil
+}
+
+func (d *deadline3) DisputableProofCount() (uint64, error) {
+
+ ops, err := d.OptimisticProofsSnapshotArray(d.store)
+ if err != nil {
+ return 0, err
+ }
+
+ return ops.Length(), nil
+
+}
+
+func (p *partition3) AllSectors() (bitfield.BitField, error) {
+ return p.Partition.Sectors, nil
+}
+
+func (p *partition3) FaultySectors() (bitfield.BitField, error) {
+ return p.Partition.Faults, nil
+}
+
+func (p *partition3) RecoveringSectors() (bitfield.BitField, error) {
+ return p.Partition.Recoveries, nil
+}
+
+func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo {
+
+ return SectorOnChainInfo{
+ SectorNumber: v3.SectorNumber,
+ SealProof: v3.SealProof,
+ SealedCID: v3.SealedCID,
+ DealIDs: v3.DealIDs,
+ Activation: v3.Activation,
+ Expiration: v3.Expiration,
+ DealWeight: v3.DealWeight,
+ VerifiedDealWeight: v3.VerifiedDealWeight,
+ InitialPledge: v3.InitialPledge,
+ ExpectedDayReward: v3.ExpectedDayReward,
+ ExpectedStoragePledge: v3.ExpectedStoragePledge,
+ }
+
+}
+
+func fromV3SectorPreCommitOnChainInfo(v3 miner3.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
+
+ return SectorPreCommitOnChainInfo{
+ Info: (SectorPreCommitInfo)(v3.Info),
+ PreCommitDeposit: v3.PreCommitDeposit,
+ PreCommitEpoch: v3.PreCommitEpoch,
+ DealWeight: v3.DealWeight,
+ VerifiedDealWeight: v3.VerifiedDealWeight,
+ }
+
+}
+
+func (s *state3) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/miner/v4.go b/chain/actors/builtin/miner/v4.go
new file mode 100644
index 00000000000..5a3a75053c3
--- /dev/null
+++ b/chain/actors/builtin/miner/v4.go
@@ -0,0 +1,564 @@
+package miner
+
+import (
+ "bytes"
+ "errors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ rle "github.com/filecoin-project/go-bitfield/rle"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p-core/peer"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner"
+ adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt"
+)
+
+var _ State = (*state4)(nil)
+
+func load4(store adt.Store, root cid.Cid) (State, error) {
+ out := state4{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make4(store adt.Store) (State, error) {
+ out := state4{store: store}
+ out.State = miner4.State{}
+ return &out, nil
+}
+
+type state4 struct {
+ miner4.State
+ store adt.Store
+}
+
+type deadline4 struct {
+ miner4.Deadline
+ store adt.Store
+}
+
+type partition4 struct {
+ miner4.Partition
+ store adt.Store
+}
+
+func (s *state4) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = xerrors.Errorf("failed to get available balance: %w", r)
+ available = abi.NewTokenAmount(0)
+ }
+ }()
+ // this panics if the miner doesnt have enough funds to cover their locked pledge
+ available, err = s.GetAvailableBalance(bal)
+ return available, err
+}
+
+func (s *state4) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
+ return s.CheckVestedFunds(s.store, epoch)
+}
+
+func (s *state4) LockedFunds() (LockedFunds, error) {
+ return LockedFunds{
+ VestingFunds: s.State.LockedFunds,
+ InitialPledgeRequirement: s.State.InitialPledge,
+ PreCommitDeposits: s.State.PreCommitDeposits,
+ }, nil
+}
+
+func (s *state4) FeeDebt() (abi.TokenAmount, error) {
+ return s.State.FeeDebt, nil
+}
+
+func (s *state4) InitialPledge() (abi.TokenAmount, error) {
+ return s.State.InitialPledge, nil
+}
+
+func (s *state4) PreCommitDeposits() (abi.TokenAmount, error) {
+ return s.State.PreCommitDeposits, nil
+}
+
+func (s *state4) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
+ info, ok, err := s.State.GetSector(s.store, num)
+ if !ok || err != nil {
+ return nil, err
+ }
+
+ ret := fromV4SectorOnChainInfo(*info)
+ return &ret, nil
+}
+
+func (s *state4) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
+ dlIdx, partIdx, err := s.State.FindSector(s.store, num)
+ if err != nil {
+ return nil, err
+ }
+ return &SectorLocation{
+ Deadline: dlIdx,
+ Partition: partIdx,
+ }, nil
+}
+
+func (s *state4) NumLiveSectors() (uint64, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return 0, err
+ }
+ var total uint64
+ if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner4.Deadline) error {
+ total += dl.LiveSectors
+ return nil
+ }); err != nil {
+ return 0, err
+ }
+ return total, nil
+}
+
+// GetSectorExpiration returns the effective expiration of the given sector.
+//
+// If the sector does not expire early, the Early expiration field is 0.
+func (s *state4) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return nil, err
+ }
+ // NOTE: this can be optimized significantly.
+ // 1. If the sector is non-faulty, it will either expire on-time (can be
+ // learned from the sector info), or in the next quantized expiration
+ // epoch (i.e., the first element in the partition's expiration queue.
+ // 2. If it's faulty, it will expire early within the first 14 entries
+ // of the expiration queue.
+ stopErr := errors.New("stop")
+ out := SectorExpiration{}
+ err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner4.Deadline) error {
+ partitions, err := dl.PartitionsArray(s.store)
+ if err != nil {
+ return err
+ }
+ quant := s.State.QuantSpecForDeadline(dlIdx)
+ var part miner4.Partition
+ return partitions.ForEach(&part, func(partIdx int64) error {
+ if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if !found {
+ return nil
+ }
+ if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
+ return err
+ } else if found {
+ // already terminated
+ return stopErr
+ }
+
+ q, err := miner4.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner4.PartitionExpirationAmtBitwidth)
+ if err != nil {
+ return err
+ }
+ var exp miner4.ExpirationSet
+ return q.ForEach(&exp, func(epoch int64) error {
+ if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if early {
+ out.Early = abi.ChainEpoch(epoch)
+ return nil
+ }
+ if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if onTime {
+ out.OnTime = abi.ChainEpoch(epoch)
+ return stopErr
+ }
+ return nil
+ })
+ })
+ })
+ if err == stopErr {
+ err = nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ if out.Early == 0 && out.OnTime == 0 {
+ return nil, xerrors.Errorf("failed to find sector %d", num)
+ }
+ return &out, nil
+}
+
+func (s *state4) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
+ info, ok, err := s.State.GetPrecommittedSector(s.store, num)
+ if !ok || err != nil {
+ return nil, err
+ }
+
+ ret := fromV4SectorPreCommitOnChainInfo(*info)
+
+ return &ret, nil
+}
+
+func (s *state4) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
+ precommitted, err := adt4.AsMap(s.store, s.State.PreCommittedSectors, builtin4.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+
+ var info miner4.SectorPreCommitOnChainInfo
+ if err := precommitted.ForEach(&info, func(_ string) error {
+ return cb(fromV4SectorPreCommitOnChainInfo(info))
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *state4) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
+ sectors, err := miner4.LoadSectors(s.store, s.State.Sectors)
+ if err != nil {
+ return nil, err
+ }
+
+ // If no sector numbers are specified, load all.
+ if snos == nil {
+ infos := make([]*SectorOnChainInfo, 0, sectors.Length())
+ var info4 miner4.SectorOnChainInfo
+ if err := sectors.ForEach(&info4, func(_ int64) error {
+ info := fromV4SectorOnChainInfo(info4)
+ infos = append(infos, &info)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return infos, nil
+ }
+
+ // Otherwise, load selected.
+ infos4, err := sectors.Load(*snos)
+ if err != nil {
+ return nil, err
+ }
+ infos := make([]*SectorOnChainInfo, len(infos4))
+ for i, info4 := range infos4 {
+ info := fromV4SectorOnChainInfo(*info4)
+ infos[i] = &info
+ }
+ return infos, nil
+}
+
+func (s *state4) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
+ var allocatedSectors bitfield.BitField
+ err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
+ return allocatedSectors, err
+}
+
+func (s *state4) IsAllocated(num abi.SectorNumber) (bool, error) {
+ allocatedSectors, err := s.loadAllocatedSectorNumbers()
+ if err != nil {
+ return false, err
+ }
+
+ return allocatedSectors.IsSet(uint64(num))
+}
+
+func (s *state4) GetProvingPeriodStart() (abi.ChainEpoch, error) {
+ return s.State.ProvingPeriodStart, nil
+}
+
+func (s *state4) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
+ allocatedSectors, err := s.loadAllocatedSectorNumbers()
+ if err != nil {
+ return nil, err
+ }
+
+ allocatedRuns, err := allocatedSectors.RunIterator()
+ if err != nil {
+ return nil, err
+ }
+
+ unallocatedRuns, err := rle.Subtract(
+ &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
+ allocatedRuns,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ iter, err := rle.BitsFromRuns(unallocatedRuns)
+ if err != nil {
+ return nil, err
+ }
+
+ sectors := make([]abi.SectorNumber, 0, count)
+ for iter.HasNext() && len(sectors) < count {
+ nextNo, err := iter.Next()
+ if err != nil {
+ return nil, err
+ }
+ sectors = append(sectors, abi.SectorNumber(nextNo))
+ }
+
+ return sectors, nil
+}
+
+func (s *state4) GetAllocatedSectors() (*bitfield.BitField, error) {
+ var allocatedSectors bitfield.BitField
+ if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
+ return nil, err
+ }
+
+ return &allocatedSectors, nil
+}
+
+func (s *state4) LoadDeadline(idx uint64) (Deadline, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return nil, err
+ }
+ dl, err := dls.LoadDeadline(s.store, idx)
+ if err != nil {
+ return nil, err
+ }
+ return &deadline4{*dl, s.store}, nil
+}
+
+func (s *state4) ForEachDeadline(cb func(uint64, Deadline) error) error {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return err
+ }
+ return dls.ForEach(s.store, func(i uint64, dl *miner4.Deadline) error {
+ return cb(i, &deadline4{*dl, s.store})
+ })
+}
+
+func (s *state4) NumDeadlines() (uint64, error) {
+ return miner4.WPoStPeriodDeadlines, nil
+}
+
+func (s *state4) DeadlinesChanged(other State) (bool, error) {
+ other4, ok := other.(*state4)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+
+ return !s.State.Deadlines.Equals(other4.Deadlines), nil
+}
+
+func (s *state4) MinerInfoChanged(other State) (bool, error) {
+	// rename other0 -> other4 for consistency with DeadlinesChanged above
+	other4, ok := other.(*state4)
+	if !ok {
+		// treat an upgrade as a change, always
+		return true, nil
+	}
+	return !s.State.Info.Equals(other4.State.Info), nil
+}
+
+func (s *state4) Info() (MinerInfo, error) {
+ info, err := s.State.GetInfo(s.store)
+ if err != nil {
+ return MinerInfo{}, err
+ }
+
+ var pid *peer.ID
+ if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
+ pid = &peerID
+ }
+
+ mi := MinerInfo{
+ Owner: info.Owner,
+ Worker: info.Worker,
+ ControlAddresses: info.ControlAddresses,
+
+ NewWorker: address.Undef,
+ WorkerChangeEpoch: -1,
+
+ PeerId: pid,
+ Multiaddrs: info.Multiaddrs,
+ WindowPoStProofType: info.WindowPoStProofType,
+ SectorSize: info.SectorSize,
+ WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
+ ConsensusFaultElapsed: info.ConsensusFaultElapsed,
+ }
+
+ if info.PendingWorkerKey != nil {
+ mi.NewWorker = info.PendingWorkerKey.NewWorker
+ mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
+ }
+
+ return mi, nil
+}
+
+func (s *state4) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
+ return s.State.RecordedDeadlineInfo(epoch), nil
+}
+
+func (s *state4) DeadlineCronActive() (bool, error) {
+ return s.State.DeadlineCronActive, nil
+}
+
+func (s *state4) sectors() (adt.Array, error) {
+ return adt4.AsArray(s.store, s.Sectors, miner4.SectorsAmtBitwidth)
+}
+
+func (s *state4) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
+ var si miner4.SectorOnChainInfo
+ err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
+ if err != nil {
+ return SectorOnChainInfo{}, err
+ }
+
+ return fromV4SectorOnChainInfo(si), nil
+}
+
+func (s *state4) precommits() (adt.Map, error) {
+ return adt4.AsMap(s.store, s.PreCommittedSectors, builtin4.DefaultHamtBitwidth)
+}
+
+func (s *state4) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
+ var sp miner4.SectorPreCommitOnChainInfo
+ err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
+ if err != nil {
+ return SectorPreCommitOnChainInfo{}, err
+ }
+
+ return fromV4SectorPreCommitOnChainInfo(sp), nil
+}
+
+func (s *state4) EraseAllUnproven() error {
+
+	dls, err := s.State.LoadDeadlines(s.store)
+	if err != nil {
+		return err
+	}
+
+	err = dls.ForEach(s.store, func(dindx uint64, dl *miner4.Deadline) error {
+		ps, err := dl.PartitionsArray(s.store)
+		if err != nil {
+			return err
+		}
+
+		var part miner4.Partition
+		err = ps.ForEach(&part, func(pindx int64) error {
+			_ = part.ActivateUnproven()
+			// propagate Set errors instead of silently dropping them
+			return ps.Set(uint64(pindx), &part)
+		})
+
+		if err != nil {
+			return err
+		}
+
+		dl.Partitions, err = ps.Root()
+		if err != nil {
+			return err
+		}
+
+		return dls.UpdateDeadline(s.store, dindx, dl)
+	})
+	if err != nil {
+		return err
+	}
+	return s.State.SaveDeadlines(s.store, dls)
+}
+
+func (d *deadline4) LoadPartition(idx uint64) (Partition, error) {
+ p, err := d.Deadline.LoadPartition(d.store, idx)
+ if err != nil {
+ return nil, err
+ }
+ return &partition4{*p, d.store}, nil
+}
+
+func (d *deadline4) ForEachPartition(cb func(uint64, Partition) error) error {
+ ps, err := d.Deadline.PartitionsArray(d.store)
+ if err != nil {
+ return err
+ }
+ var part miner4.Partition
+ return ps.ForEach(&part, func(i int64) error {
+ return cb(uint64(i), &partition4{part, d.store})
+ })
+}
+
+func (d *deadline4) PartitionsChanged(other Deadline) (bool, error) {
+ other4, ok := other.(*deadline4)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+
+ return !d.Deadline.Partitions.Equals(other4.Deadline.Partitions), nil
+}
+
+func (d *deadline4) PartitionsPoSted() (bitfield.BitField, error) {
+ return d.Deadline.PartitionsPoSted, nil
+}
+
+func (d *deadline4) DisputableProofCount() (uint64, error) {
+
+ ops, err := d.OptimisticProofsSnapshotArray(d.store)
+ if err != nil {
+ return 0, err
+ }
+
+ return ops.Length(), nil
+
+}
+
+func (p *partition4) AllSectors() (bitfield.BitField, error) {
+ return p.Partition.Sectors, nil
+}
+
+func (p *partition4) FaultySectors() (bitfield.BitField, error) {
+ return p.Partition.Faults, nil
+}
+
+func (p *partition4) RecoveringSectors() (bitfield.BitField, error) {
+ return p.Partition.Recoveries, nil
+}
+
+func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo {
+
+ return SectorOnChainInfo{
+ SectorNumber: v4.SectorNumber,
+ SealProof: v4.SealProof,
+ SealedCID: v4.SealedCID,
+ DealIDs: v4.DealIDs,
+ Activation: v4.Activation,
+ Expiration: v4.Expiration,
+ DealWeight: v4.DealWeight,
+ VerifiedDealWeight: v4.VerifiedDealWeight,
+ InitialPledge: v4.InitialPledge,
+ ExpectedDayReward: v4.ExpectedDayReward,
+ ExpectedStoragePledge: v4.ExpectedStoragePledge,
+ }
+
+}
+
+func fromV4SectorPreCommitOnChainInfo(v4 miner4.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
+
+ return SectorPreCommitOnChainInfo{
+ Info: (SectorPreCommitInfo)(v4.Info),
+ PreCommitDeposit: v4.PreCommitDeposit,
+ PreCommitEpoch: v4.PreCommitEpoch,
+ DealWeight: v4.DealWeight,
+ VerifiedDealWeight: v4.VerifiedDealWeight,
+ }
+
+}
+
+func (s *state4) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/miner/v5.go b/chain/actors/builtin/miner/v5.go
new file mode 100644
index 00000000000..82e98c2ef06
--- /dev/null
+++ b/chain/actors/builtin/miner/v5.go
@@ -0,0 +1,564 @@
+package miner
+
+import (
+ "bytes"
+ "errors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ rle "github.com/filecoin-project/go-bitfield/rle"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p-core/peer"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+
+ miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
+ adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
+)
+
+var _ State = (*state5)(nil)
+
+func load5(store adt.Store, root cid.Cid) (State, error) {
+ out := state5{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make5(store adt.Store) (State, error) {
+ out := state5{store: store}
+ out.State = miner5.State{}
+ return &out, nil
+}
+
+type state5 struct {
+ miner5.State
+ store adt.Store
+}
+
+type deadline5 struct {
+ miner5.Deadline
+ store adt.Store
+}
+
+type partition5 struct {
+ miner5.Partition
+ store adt.Store
+}
+
+func (s *state5) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = xerrors.Errorf("failed to get available balance: %w", r)
+ available = abi.NewTokenAmount(0)
+ }
+ }()
+ // this panics if the miner doesnt have enough funds to cover their locked pledge
+ available, err = s.GetAvailableBalance(bal)
+ return available, err
+}
+
+func (s *state5) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
+ return s.CheckVestedFunds(s.store, epoch)
+}
+
+func (s *state5) LockedFunds() (LockedFunds, error) {
+ return LockedFunds{
+ VestingFunds: s.State.LockedFunds,
+ InitialPledgeRequirement: s.State.InitialPledge,
+ PreCommitDeposits: s.State.PreCommitDeposits,
+ }, nil
+}
+
+func (s *state5) FeeDebt() (abi.TokenAmount, error) {
+ return s.State.FeeDebt, nil
+}
+
+func (s *state5) InitialPledge() (abi.TokenAmount, error) {
+ return s.State.InitialPledge, nil
+}
+
+func (s *state5) PreCommitDeposits() (abi.TokenAmount, error) {
+ return s.State.PreCommitDeposits, nil
+}
+
+func (s *state5) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
+ info, ok, err := s.State.GetSector(s.store, num)
+ if !ok || err != nil {
+ return nil, err
+ }
+
+ ret := fromV5SectorOnChainInfo(*info)
+ return &ret, nil
+}
+
+func (s *state5) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
+ dlIdx, partIdx, err := s.State.FindSector(s.store, num)
+ if err != nil {
+ return nil, err
+ }
+ return &SectorLocation{
+ Deadline: dlIdx,
+ Partition: partIdx,
+ }, nil
+}
+
+func (s *state5) NumLiveSectors() (uint64, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return 0, err
+ }
+ var total uint64
+ if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error {
+ total += dl.LiveSectors
+ return nil
+ }); err != nil {
+ return 0, err
+ }
+ return total, nil
+}
+
+// GetSectorExpiration returns the effective expiration of the given sector.
+//
+// If the sector does not expire early, the Early expiration field is 0.
+func (s *state5) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return nil, err
+ }
+ // NOTE: this can be optimized significantly.
+ // 1. If the sector is non-faulty, it will either expire on-time (can be
+ // learned from the sector info), or in the next quantized expiration
+ // epoch (i.e., the first element in the partition's expiration queue.
+ // 2. If it's faulty, it will expire early within the first 14 entries
+ // of the expiration queue.
+ stopErr := errors.New("stop")
+ out := SectorExpiration{}
+ err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error {
+ partitions, err := dl.PartitionsArray(s.store)
+ if err != nil {
+ return err
+ }
+ quant := s.State.QuantSpecForDeadline(dlIdx)
+ var part miner5.Partition
+ return partitions.ForEach(&part, func(partIdx int64) error {
+ if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if !found {
+ return nil
+ }
+ if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
+ return err
+ } else if found {
+ // already terminated
+ return stopErr
+ }
+
+ q, err := miner5.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner5.PartitionExpirationAmtBitwidth)
+ if err != nil {
+ return err
+ }
+ var exp miner5.ExpirationSet
+ return q.ForEach(&exp, func(epoch int64) error {
+ if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if early {
+ out.Early = abi.ChainEpoch(epoch)
+ return nil
+ }
+ if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if onTime {
+ out.OnTime = abi.ChainEpoch(epoch)
+ return stopErr
+ }
+ return nil
+ })
+ })
+ })
+ if err == stopErr {
+ err = nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ if out.Early == 0 && out.OnTime == 0 {
+ return nil, xerrors.Errorf("failed to find sector %d", num)
+ }
+ return &out, nil
+}
+
+func (s *state5) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
+ info, ok, err := s.State.GetPrecommittedSector(s.store, num)
+ if !ok || err != nil {
+ return nil, err
+ }
+
+ ret := fromV5SectorPreCommitOnChainInfo(*info)
+
+ return &ret, nil
+}
+
+func (s *state5) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
+ precommitted, err := adt5.AsMap(s.store, s.State.PreCommittedSectors, builtin5.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+
+ var info miner5.SectorPreCommitOnChainInfo
+ if err := precommitted.ForEach(&info, func(_ string) error {
+ return cb(fromV5SectorPreCommitOnChainInfo(info))
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *state5) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
+ sectors, err := miner5.LoadSectors(s.store, s.State.Sectors)
+ if err != nil {
+ return nil, err
+ }
+
+ // If no sector numbers are specified, load all.
+ if snos == nil {
+ infos := make([]*SectorOnChainInfo, 0, sectors.Length())
+ var info5 miner5.SectorOnChainInfo
+ if err := sectors.ForEach(&info5, func(_ int64) error {
+ info := fromV5SectorOnChainInfo(info5)
+ infos = append(infos, &info)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return infos, nil
+ }
+
+ // Otherwise, load selected.
+ infos5, err := sectors.Load(*snos)
+ if err != nil {
+ return nil, err
+ }
+ infos := make([]*SectorOnChainInfo, len(infos5))
+ for i, info5 := range infos5 {
+ info := fromV5SectorOnChainInfo(*info5)
+ infos[i] = &info
+ }
+ return infos, nil
+}
+
+func (s *state5) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
+ var allocatedSectors bitfield.BitField
+ err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
+ return allocatedSectors, err
+}
+
+func (s *state5) IsAllocated(num abi.SectorNumber) (bool, error) {
+ allocatedSectors, err := s.loadAllocatedSectorNumbers()
+ if err != nil {
+ return false, err
+ }
+
+ return allocatedSectors.IsSet(uint64(num))
+}
+
+func (s *state5) GetProvingPeriodStart() (abi.ChainEpoch, error) {
+ return s.State.ProvingPeriodStart, nil
+}
+
+func (s *state5) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
+ allocatedSectors, err := s.loadAllocatedSectorNumbers()
+ if err != nil {
+ return nil, err
+ }
+
+ allocatedRuns, err := allocatedSectors.RunIterator()
+ if err != nil {
+ return nil, err
+ }
+
+ unallocatedRuns, err := rle.Subtract(
+ &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
+ allocatedRuns,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ iter, err := rle.BitsFromRuns(unallocatedRuns)
+ if err != nil {
+ return nil, err
+ }
+
+ sectors := make([]abi.SectorNumber, 0, count)
+ for iter.HasNext() && len(sectors) < count {
+ nextNo, err := iter.Next()
+ if err != nil {
+ return nil, err
+ }
+ sectors = append(sectors, abi.SectorNumber(nextNo))
+ }
+
+ return sectors, nil
+}
+
+func (s *state5) GetAllocatedSectors() (*bitfield.BitField, error) {
+ var allocatedSectors bitfield.BitField
+ if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
+ return nil, err
+ }
+
+ return &allocatedSectors, nil
+}
+
+func (s *state5) LoadDeadline(idx uint64) (Deadline, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return nil, err
+ }
+ dl, err := dls.LoadDeadline(s.store, idx)
+ if err != nil {
+ return nil, err
+ }
+ return &deadline5{*dl, s.store}, nil
+}
+
+func (s *state5) ForEachDeadline(cb func(uint64, Deadline) error) error {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return err
+ }
+ return dls.ForEach(s.store, func(i uint64, dl *miner5.Deadline) error {
+ return cb(i, &deadline5{*dl, s.store})
+ })
+}
+
+func (s *state5) NumDeadlines() (uint64, error) {
+ return miner5.WPoStPeriodDeadlines, nil
+}
+
+func (s *state5) DeadlinesChanged(other State) (bool, error) {
+ other5, ok := other.(*state5)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+
+ return !s.State.Deadlines.Equals(other5.Deadlines), nil
+}
+
+func (s *state5) MinerInfoChanged(other State) (bool, error) {
+	// rename other0 -> other5 for consistency with DeadlinesChanged above
+	other5, ok := other.(*state5)
+	if !ok {
+		// treat an upgrade as a change, always
+		return true, nil
+	}
+	return !s.State.Info.Equals(other5.State.Info), nil
+}
+
+func (s *state5) Info() (MinerInfo, error) {
+ info, err := s.State.GetInfo(s.store)
+ if err != nil {
+ return MinerInfo{}, err
+ }
+
+ var pid *peer.ID
+ if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
+ pid = &peerID
+ }
+
+ mi := MinerInfo{
+ Owner: info.Owner,
+ Worker: info.Worker,
+ ControlAddresses: info.ControlAddresses,
+
+ NewWorker: address.Undef,
+ WorkerChangeEpoch: -1,
+
+ PeerId: pid,
+ Multiaddrs: info.Multiaddrs,
+ WindowPoStProofType: info.WindowPoStProofType,
+ SectorSize: info.SectorSize,
+ WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
+ ConsensusFaultElapsed: info.ConsensusFaultElapsed,
+ }
+
+ if info.PendingWorkerKey != nil {
+ mi.NewWorker = info.PendingWorkerKey.NewWorker
+ mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
+ }
+
+ return mi, nil
+}
+
+func (s *state5) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
+ return s.State.RecordedDeadlineInfo(epoch), nil
+}
+
+func (s *state5) DeadlineCronActive() (bool, error) {
+ return s.State.DeadlineCronActive, nil
+}
+
+func (s *state5) sectors() (adt.Array, error) {
+ return adt5.AsArray(s.store, s.Sectors, miner5.SectorsAmtBitwidth)
+}
+
+func (s *state5) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
+ var si miner5.SectorOnChainInfo
+ err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
+ if err != nil {
+ return SectorOnChainInfo{}, err
+ }
+
+ return fromV5SectorOnChainInfo(si), nil
+}
+
+func (s *state5) precommits() (adt.Map, error) {
+ return adt5.AsMap(s.store, s.PreCommittedSectors, builtin5.DefaultHamtBitwidth)
+}
+
+func (s *state5) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
+ var sp miner5.SectorPreCommitOnChainInfo
+ err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
+ if err != nil {
+ return SectorPreCommitOnChainInfo{}, err
+ }
+
+ return fromV5SectorPreCommitOnChainInfo(sp), nil
+}
+
+func (s *state5) EraseAllUnproven() error {
+
+	dls, err := s.State.LoadDeadlines(s.store)
+	if err != nil {
+		return err
+	}
+
+	err = dls.ForEach(s.store, func(dindx uint64, dl *miner5.Deadline) error {
+		ps, err := dl.PartitionsArray(s.store)
+		if err != nil {
+			return err
+		}
+
+		var part miner5.Partition
+		err = ps.ForEach(&part, func(pindx int64) error {
+			_ = part.ActivateUnproven()
+			// propagate Set errors instead of silently dropping them
+			return ps.Set(uint64(pindx), &part)
+		})
+
+		if err != nil {
+			return err
+		}
+
+		dl.Partitions, err = ps.Root()
+		if err != nil {
+			return err
+		}
+
+		return dls.UpdateDeadline(s.store, dindx, dl)
+	})
+	if err != nil {
+		return err
+	}
+	return s.State.SaveDeadlines(s.store, dls)
+}
+
+func (d *deadline5) LoadPartition(idx uint64) (Partition, error) {
+ p, err := d.Deadline.LoadPartition(d.store, idx)
+ if err != nil {
+ return nil, err
+ }
+ return &partition5{*p, d.store}, nil
+}
+
+func (d *deadline5) ForEachPartition(cb func(uint64, Partition) error) error {
+ ps, err := d.Deadline.PartitionsArray(d.store)
+ if err != nil {
+ return err
+ }
+ var part miner5.Partition
+ return ps.ForEach(&part, func(i int64) error {
+ return cb(uint64(i), &partition5{part, d.store})
+ })
+}
+
+func (d *deadline5) PartitionsChanged(other Deadline) (bool, error) {
+ other5, ok := other.(*deadline5)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+
+ return !d.Deadline.Partitions.Equals(other5.Deadline.Partitions), nil
+}
+
+func (d *deadline5) PartitionsPoSted() (bitfield.BitField, error) {
+ return d.Deadline.PartitionsPoSted, nil
+}
+
+func (d *deadline5) DisputableProofCount() (uint64, error) {
+
+ ops, err := d.OptimisticProofsSnapshotArray(d.store)
+ if err != nil {
+ return 0, err
+ }
+
+ return ops.Length(), nil
+
+}
+
+func (p *partition5) AllSectors() (bitfield.BitField, error) {
+ return p.Partition.Sectors, nil
+}
+
+func (p *partition5) FaultySectors() (bitfield.BitField, error) {
+ return p.Partition.Faults, nil
+}
+
+func (p *partition5) RecoveringSectors() (bitfield.BitField, error) {
+ return p.Partition.Recoveries, nil
+}
+
+func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo {
+
+ return SectorOnChainInfo{
+ SectorNumber: v5.SectorNumber,
+ SealProof: v5.SealProof,
+ SealedCID: v5.SealedCID,
+ DealIDs: v5.DealIDs,
+ Activation: v5.Activation,
+ Expiration: v5.Expiration,
+ DealWeight: v5.DealWeight,
+ VerifiedDealWeight: v5.VerifiedDealWeight,
+ InitialPledge: v5.InitialPledge,
+ ExpectedDayReward: v5.ExpectedDayReward,
+ ExpectedStoragePledge: v5.ExpectedStoragePledge,
+ }
+
+}
+
+func fromV5SectorPreCommitOnChainInfo(v5 miner5.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
+
+ return SectorPreCommitOnChainInfo{
+ Info: (SectorPreCommitInfo)(v5.Info),
+ PreCommitDeposit: v5.PreCommitDeposit,
+ PreCommitEpoch: v5.PreCommitEpoch,
+ DealWeight: v5.DealWeight,
+ VerifiedDealWeight: v5.VerifiedDealWeight,
+ }
+
+}
+
+func (s *state5) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/multisig/actor.go.template b/chain/actors/builtin/multisig/actor.go.template
new file mode 100644
index 00000000000..b899815a668
--- /dev/null
+++ b/chain/actors/builtin/multisig/actor.go.template
@@ -0,0 +1,141 @@
+package multisig
+
+import (
+ "fmt"
+
+ "github.com/minio/blake2b-simd"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/ipfs/go-cid"
+
+ msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+ msig{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/multisig"
+{{range .versions}}
+ builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
+{{end}}
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func init() {
+{{range .versions}}
+ builtin.RegisterActorState(builtin{{.}}.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load{{.}}(store, root)
+ })
+{{end}}}
+
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+{{range .versions}}
+ case builtin{{.}}.MultisigActorCodeID:
+ return load{{.}}(store, act.Head)
+{{end}}
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+func MakeState(store adt.Store, av actors.Version, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return make{{.}}(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
+{{end}}
+}
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return builtin{{.}}.MultisigActorCodeID, nil
+{{end}}
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
+type State interface {
+ cbor.Marshaler
+
+ LockedBalance(epoch abi.ChainEpoch) (abi.TokenAmount, error)
+ StartEpoch() (abi.ChainEpoch, error)
+ UnlockDuration() (abi.ChainEpoch, error)
+ InitialBalance() (abi.TokenAmount, error)
+ Threshold() (uint64, error)
+ Signers() ([]address.Address, error)
+
+ ForEachPendingTxn(func(id int64, txn Transaction) error) error
+ PendingTxnChanged(State) (bool, error)
+
+ transactions() (adt.Map, error)
+ decodeTransaction(val *cbg.Deferred) (Transaction, error)
+ GetState() interface{}
+}
+
+type Transaction = msig0.Transaction
+
+var Methods = builtin{{.latestVersion}}.MethodsMultisig
+
+func Message(version actors.Version, from address.Address) MessageBuilder {
+ switch version {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return message{{.}}{{"{"}}{{if (ge . 2)}}message0{from}{{else}}from{{end}}}
+{{end}} default:
+ panic(fmt.Sprintf("unsupported actors version: %d", version))
+ }
+}
+
+type MessageBuilder interface {
+ // Create a new multisig with the specified parameters.
+ Create(signers []address.Address, threshold uint64,
+ vestingStart, vestingDuration abi.ChainEpoch,
+ initialAmount abi.TokenAmount) (*types.Message, error)
+
+ // Propose a transaction to the given multisig.
+ Propose(msig, target address.Address, amt abi.TokenAmount,
+ method abi.MethodNum, params []byte) (*types.Message, error)
+
+ // Approve a multisig transaction. The "hash" is optional.
+ Approve(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error)
+
+ // Cancel a multisig transaction. The "hash" is optional.
+ Cancel(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error)
+}
+
+// this type is the same between v0 and v2
+type ProposalHashData = msig{{.latestVersion}}.ProposalHashData
+type ProposeReturn = msig{{.latestVersion}}.ProposeReturn
+type ProposeParams = msig{{.latestVersion}}.ProposeParams
+type ApproveReturn = msig{{.latestVersion}}.ApproveReturn
+
+func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
+ params := msig{{.latestVersion}}.TxnIDParams{ID: msig{{.latestVersion}}.TxnID(id)}
+ if data != nil {
+ if data.Requester.Protocol() != address.ID {
+ return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester)
+ }
+ if data.Value.Sign() == -1 {
+ return nil, xerrors.Errorf("proposal value must be non-negative, was %s", data.Value)
+ }
+ if data.To == address.Undef {
+ return nil, xerrors.Errorf("proposed destination address must be set")
+ }
+ pser, err := data.Serialize()
+ if err != nil {
+ return nil, err
+ }
+ hash := blake2b.Sum256(pser)
+ params.ProposalHash = hash[:]
+ }
+
+ return actors.SerializeParams(¶ms)
+}
diff --git a/chain/actors/builtin/multisig/diff.go b/chain/actors/builtin/multisig/diff.go
new file mode 100644
index 00000000000..680d0870ab1
--- /dev/null
+++ b/chain/actors/builtin/multisig/diff.go
@@ -0,0 +1,134 @@
+package multisig
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+)
+
+type PendingTransactionChanges struct {
+ Added []TransactionChange
+ Modified []TransactionModification
+ Removed []TransactionChange
+}
+
+type TransactionChange struct {
+ TxID int64
+ Tx Transaction
+}
+
+type TransactionModification struct {
+ TxID int64
+ From Transaction
+ To Transaction
+}
+
+func DiffPendingTransactions(pre, cur State) (*PendingTransactionChanges, error) {
+ results := new(PendingTransactionChanges)
+ if changed, err := pre.PendingTxnChanged(cur); err != nil {
+ return nil, err
+ } else if !changed { // if nothing has changed then return an empty result and bail.
+ return results, nil
+ }
+
+ pret, err := pre.transactions()
+ if err != nil {
+ return nil, err
+ }
+
+ curt, err := cur.transactions()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := adt.DiffAdtMap(pret, curt, &transactionDiffer{results, pre, cur}); err != nil {
+ return nil, err
+ }
+ return results, nil
+}
+
+type transactionDiffer struct {
+ Results *PendingTransactionChanges
+ pre, after State
+}
+
+func (t *transactionDiffer) AsKey(key string) (abi.Keyer, error) {
+ txID, err := abi.ParseIntKey(key)
+ if err != nil {
+ return nil, err
+ }
+ return abi.IntKey(txID), nil
+}
+
+func (t *transactionDiffer) Add(key string, val *cbg.Deferred) error {
+ txID, err := abi.ParseIntKey(key)
+ if err != nil {
+ return err
+ }
+ tx, err := t.after.decodeTransaction(val)
+ if err != nil {
+ return err
+ }
+ t.Results.Added = append(t.Results.Added, TransactionChange{
+ TxID: txID,
+ Tx: tx,
+ })
+ return nil
+}
+
+func (t *transactionDiffer) Modify(key string, from, to *cbg.Deferred) error {
+ txID, err := abi.ParseIntKey(key)
+ if err != nil {
+ return err
+ }
+
+ txFrom, err := t.pre.decodeTransaction(from)
+ if err != nil {
+ return err
+ }
+
+ txTo, err := t.after.decodeTransaction(to)
+ if err != nil {
+ return err
+ }
+
+ if approvalsChanged(txFrom.Approved, txTo.Approved) {
+ t.Results.Modified = append(t.Results.Modified, TransactionModification{
+ TxID: txID,
+ From: txFrom,
+ To: txTo,
+ })
+ }
+
+ return nil
+}
+
+func approvalsChanged(from, to []address.Address) bool {
+ if len(from) != len(to) {
+ return true
+ }
+ for idx := range from {
+ if from[idx] != to[idx] {
+ return true
+ }
+ }
+ return false
+}
+
+func (t *transactionDiffer) Remove(key string, val *cbg.Deferred) error {
+ txID, err := abi.ParseIntKey(key)
+ if err != nil {
+ return err
+ }
+ tx, err := t.pre.decodeTransaction(val)
+ if err != nil {
+ return err
+ }
+ t.Results.Removed = append(t.Results.Removed, TransactionChange{
+ TxID: txID,
+ Tx: tx,
+ })
+ return nil
+}
diff --git a/chain/actors/builtin/multisig/message.go b/chain/actors/builtin/multisig/message.go
deleted file mode 100644
index 3d2c66e6b02..00000000000
--- a/chain/actors/builtin/multisig/message.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package multisig
-
-import (
- "fmt"
-
- "github.com/minio/blake2b-simd"
- "golang.org/x/xerrors"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-state-types/abi"
-
- builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
- multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
-
- "github.com/filecoin-project/lotus/chain/actors"
- "github.com/filecoin-project/lotus/chain/types"
-)
-
-var Methods = builtin2.MethodsMultisig
-
-func Message(version actors.Version, from address.Address) MessageBuilder {
- switch version {
- case actors.Version0:
- return message0{from}
- case actors.Version2:
- return message2{message0{from}}
- default:
- panic(fmt.Sprintf("unsupported actors version: %d", version))
- }
-}
-
-type MessageBuilder interface {
- // Create a new multisig with the specified parameters.
- Create(signers []address.Address, threshold uint64,
- vestingStart, vestingDuration abi.ChainEpoch,
- initialAmount abi.TokenAmount) (*types.Message, error)
-
- // Propose a transaction to the given multisig.
- Propose(msig, target address.Address, amt abi.TokenAmount,
- method abi.MethodNum, params []byte) (*types.Message, error)
-
- // Approve a multisig transaction. The "hash" is optional.
- Approve(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error)
-
- // Cancel a multisig transaction. The "hash" is optional.
- Cancel(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error)
-}
-
-// this type is the same between v0 and v2
-type ProposalHashData = multisig2.ProposalHashData
-type ProposeReturn = multisig2.ProposeReturn
-
-func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
- params := multisig2.TxnIDParams{ID: multisig2.TxnID(id)}
- if data != nil {
- if data.Requester.Protocol() != address.ID {
- return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester)
- }
- if data.Value.Sign() == -1 {
- return nil, xerrors.Errorf("proposal value must be non-negative, was %s", data.Value)
- }
- if data.To == address.Undef {
- return nil, xerrors.Errorf("proposed destination address must be set")
- }
- pser, err := data.Serialize()
- if err != nil {
- return nil, err
- }
- hash := blake2b.Sum256(pser)
- params.ProposalHash = hash[:]
- }
-
- return actors.SerializeParams(¶ms)
-}
diff --git a/chain/actors/builtin/multisig/message.go.template b/chain/actors/builtin/multisig/message.go.template
new file mode 100644
index 00000000000..6bff8983ab0
--- /dev/null
+++ b/chain/actors/builtin/multisig/message.go.template
@@ -0,0 +1,146 @@
+package multisig
+
+import (
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin"
+ init{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/init"
+ multisig{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/multisig"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type message{{.v}} struct{ {{if (ge .v 2)}}message0{{else}}from address.Address{{end}} }
+
+func (m message{{.v}}) Create(
+ signers []address.Address, threshold uint64,
+ unlockStart, unlockDuration abi.ChainEpoch,
+ initialAmount abi.TokenAmount,
+) (*types.Message, error) {
+
+ lenAddrs := uint64(len(signers))
+
+ if lenAddrs < threshold {
+ return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
+ }
+
+ if threshold == 0 {
+ threshold = lenAddrs
+ }
+
+ if m.from == address.Undef {
+ return nil, xerrors.Errorf("must provide source address")
+ }
+{{if (le .v 1)}}
+ if unlockStart != 0 {
+ return nil, xerrors.Errorf("actors v0 does not support a non-zero vesting start time")
+ }
+{{end}}
+ // Set up constructor parameters for multisig
+ msigParams := &multisig{{.v}}.ConstructorParams{
+ Signers: signers,
+ NumApprovalsThreshold: threshold,
+ UnlockDuration: unlockDuration,{{if (ge .v 2)}}
+ StartEpoch: unlockStart,{{end}}
+ }
+
+ enc, actErr := actors.SerializeParams(msigParams)
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ // new actors are created by invoking 'exec' on the init actor with the constructor params
+ execParams := &init{{.v}}.ExecParams{
+ CodeCID: builtin{{.v}}.MultisigActorCodeID,
+ ConstructorParams: enc,
+ }
+
+ enc, actErr = actors.SerializeParams(execParams)
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ return &types.Message{
+ To: init_.Address,
+ From: m.from,
+ Method: builtin{{.v}}.MethodsInit.Exec,
+ Params: enc,
+ Value: initialAmount,
+ }, nil
+}
+
+{{if (le .v 1)}}
+
+func (m message0) Propose(msig, to address.Address, amt abi.TokenAmount,
+ method abi.MethodNum, params []byte) (*types.Message, error) {
+
+ if msig == address.Undef {
+ return nil, xerrors.Errorf("must provide a multisig address for proposal")
+ }
+
+ if to == address.Undef {
+ return nil, xerrors.Errorf("must provide a target address for proposal")
+ }
+
+ if amt.Sign() == -1 {
+ return nil, xerrors.Errorf("must provide a non-negative amount for proposed send")
+ }
+
+ if m.from == address.Undef {
+ return nil, xerrors.Errorf("must provide source address")
+ }
+
+ enc, actErr := actors.SerializeParams(&multisig0.ProposeParams{
+ To: to,
+ Value: amt,
+ Method: method,
+ Params: params,
+ })
+ if actErr != nil {
+ return nil, xerrors.Errorf("failed to serialize parameters: %w", actErr)
+ }
+
+ return &types.Message{
+ To: msig,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin0.MethodsMultisig.Propose,
+ Params: enc,
+ }, nil
+}
+
+func (m message0) Approve(msig address.Address, txID uint64, hashData *ProposalHashData) (*types.Message, error) {
+ enc, err := txnParams(txID, hashData)
+ if err != nil {
+ return nil, err
+ }
+
+ return &types.Message{
+ To: msig,
+ From: m.from,
+ Value: types.NewInt(0),
+ Method: builtin0.MethodsMultisig.Approve,
+ Params: enc,
+ }, nil
+}
+
+func (m message0) Cancel(msig address.Address, txID uint64, hashData *ProposalHashData) (*types.Message, error) {
+ enc, err := txnParams(txID, hashData)
+ if err != nil {
+ return nil, err
+ }
+
+ return &types.Message{
+ To: msig,
+ From: m.from,
+ Value: types.NewInt(0),
+ Method: builtin0.MethodsMultisig.Cancel,
+ Params: enc,
+ }, nil
+}
+{{end}}
diff --git a/chain/actors/builtin/multisig/message3.go b/chain/actors/builtin/multisig/message3.go
new file mode 100644
index 00000000000..f5f6d8cdfba
--- /dev/null
+++ b/chain/actors/builtin/multisig/message3.go
@@ -0,0 +1,71 @@
+package multisig
+
+import (
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+ init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init"
+ multisig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type message3 struct{ message0 }
+
+func (m message3) Create(
+ signers []address.Address, threshold uint64,
+ unlockStart, unlockDuration abi.ChainEpoch,
+ initialAmount abi.TokenAmount,
+) (*types.Message, error) {
+
+ lenAddrs := uint64(len(signers))
+
+ if lenAddrs < threshold {
+ return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
+ }
+
+ if threshold == 0 {
+ threshold = lenAddrs
+ }
+
+ if m.from == address.Undef {
+ return nil, xerrors.Errorf("must provide source address")
+ }
+
+ // Set up constructor parameters for multisig
+ msigParams := &multisig3.ConstructorParams{
+ Signers: signers,
+ NumApprovalsThreshold: threshold,
+ UnlockDuration: unlockDuration,
+ StartEpoch: unlockStart,
+ }
+
+ enc, actErr := actors.SerializeParams(msigParams)
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ // new actors are created by invoking 'exec' on the init actor with the constructor params
+ execParams := &init3.ExecParams{
+ CodeCID: builtin3.MultisigActorCodeID,
+ ConstructorParams: enc,
+ }
+
+ enc, actErr = actors.SerializeParams(execParams)
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ return &types.Message{
+ To: init_.Address,
+ From: m.from,
+ Method: builtin3.MethodsInit.Exec,
+ Params: enc,
+ Value: initialAmount,
+ }, nil
+}
diff --git a/chain/actors/builtin/multisig/message4.go b/chain/actors/builtin/multisig/message4.go
new file mode 100644
index 00000000000..90885aa0715
--- /dev/null
+++ b/chain/actors/builtin/multisig/message4.go
@@ -0,0 +1,71 @@
+package multisig
+
+import (
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+ init4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/init"
+ multisig4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/multisig"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type message4 struct{ message0 }
+
+func (m message4) Create(
+ signers []address.Address, threshold uint64,
+ unlockStart, unlockDuration abi.ChainEpoch,
+ initialAmount abi.TokenAmount,
+) (*types.Message, error) {
+
+ lenAddrs := uint64(len(signers))
+
+ if lenAddrs < threshold {
+ return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
+ }
+
+ if threshold == 0 {
+ threshold = lenAddrs
+ }
+
+ if m.from == address.Undef {
+ return nil, xerrors.Errorf("must provide source address")
+ }
+
+ // Set up constructor parameters for multisig
+ msigParams := &multisig4.ConstructorParams{
+ Signers: signers,
+ NumApprovalsThreshold: threshold,
+ UnlockDuration: unlockDuration,
+ StartEpoch: unlockStart,
+ }
+
+ enc, actErr := actors.SerializeParams(msigParams)
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ // new actors are created by invoking 'exec' on the init actor with the constructor params
+ execParams := &init4.ExecParams{
+ CodeCID: builtin4.MultisigActorCodeID,
+ ConstructorParams: enc,
+ }
+
+ enc, actErr = actors.SerializeParams(execParams)
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ return &types.Message{
+ To: init_.Address,
+ From: m.from,
+ Method: builtin4.MethodsInit.Exec,
+ Params: enc,
+ Value: initialAmount,
+ }, nil
+}
diff --git a/chain/actors/builtin/multisig/message5.go b/chain/actors/builtin/multisig/message5.go
new file mode 100644
index 00000000000..9a8110f2cd5
--- /dev/null
+++ b/chain/actors/builtin/multisig/message5.go
@@ -0,0 +1,71 @@
+package multisig
+
+import (
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+ init5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/init"
+ multisig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type message5 struct{ message0 }
+
+func (m message5) Create(
+ signers []address.Address, threshold uint64,
+ unlockStart, unlockDuration abi.ChainEpoch,
+ initialAmount abi.TokenAmount,
+) (*types.Message, error) {
+
+ lenAddrs := uint64(len(signers))
+
+ if lenAddrs < threshold {
+ return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
+ }
+
+ if threshold == 0 {
+ threshold = lenAddrs
+ }
+
+ if m.from == address.Undef {
+ return nil, xerrors.Errorf("must provide source address")
+ }
+
+ // Set up constructor parameters for multisig
+ msigParams := &multisig5.ConstructorParams{
+ Signers: signers,
+ NumApprovalsThreshold: threshold,
+ UnlockDuration: unlockDuration,
+ StartEpoch: unlockStart,
+ }
+
+ enc, actErr := actors.SerializeParams(msigParams)
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ // new actors are created by invoking 'exec' on the init actor with the constructor params
+ execParams := &init5.ExecParams{
+ CodeCID: builtin5.MultisigActorCodeID,
+ ConstructorParams: enc,
+ }
+
+ enc, actErr = actors.SerializeParams(execParams)
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ return &types.Message{
+ To: init_.Address,
+ From: m.from,
+ Method: builtin5.MethodsInit.Exec,
+ Params: enc,
+ Value: initialAmount,
+ }, nil
+}
diff --git a/chain/actors/builtin/multisig/multisig.go b/chain/actors/builtin/multisig/multisig.go
new file mode 100644
index 00000000000..c950ced908e
--- /dev/null
+++ b/chain/actors/builtin/multisig/multisig.go
@@ -0,0 +1,212 @@
+package multisig
+
+import (
+ "fmt"
+
+ "github.com/minio/blake2b-simd"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/ipfs/go-cid"
+
+ msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+ msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func init() {
+
+ builtin.RegisterActorState(builtin0.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load0(store, root)
+ })
+
+ builtin.RegisterActorState(builtin2.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load2(store, root)
+ })
+
+ builtin.RegisterActorState(builtin3.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load3(store, root)
+ })
+
+ builtin.RegisterActorState(builtin4.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load4(store, root)
+ })
+
+ builtin.RegisterActorState(builtin5.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load5(store, root)
+ })
+}
+
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+
+ case builtin0.MultisigActorCodeID:
+ return load0(store, act.Head)
+
+ case builtin2.MultisigActorCodeID:
+ return load2(store, act.Head)
+
+ case builtin3.MultisigActorCodeID:
+ return load3(store, act.Head)
+
+ case builtin4.MultisigActorCodeID:
+ return load4(store, act.Head)
+
+ case builtin5.MultisigActorCodeID:
+ return load5(store, act.Head)
+
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+func MakeState(store adt.Store, av actors.Version, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
+ switch av {
+
+ case actors.Version0:
+ return make0(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
+
+ case actors.Version2:
+ return make2(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
+
+ case actors.Version3:
+ return make3(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
+
+ case actors.Version4:
+ return make4(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
+
+ case actors.Version5:
+ return make5(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
+
+ }
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+
+ case actors.Version0:
+ return builtin0.MultisigActorCodeID, nil
+
+ case actors.Version2:
+ return builtin2.MultisigActorCodeID, nil
+
+ case actors.Version3:
+ return builtin3.MultisigActorCodeID, nil
+
+ case actors.Version4:
+ return builtin4.MultisigActorCodeID, nil
+
+ case actors.Version5:
+ return builtin5.MultisigActorCodeID, nil
+
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
+type State interface {
+ cbor.Marshaler
+
+ LockedBalance(epoch abi.ChainEpoch) (abi.TokenAmount, error)
+ StartEpoch() (abi.ChainEpoch, error)
+ UnlockDuration() (abi.ChainEpoch, error)
+ InitialBalance() (abi.TokenAmount, error)
+ Threshold() (uint64, error)
+ Signers() ([]address.Address, error)
+
+ ForEachPendingTxn(func(id int64, txn Transaction) error) error
+ PendingTxnChanged(State) (bool, error)
+
+ transactions() (adt.Map, error)
+ decodeTransaction(val *cbg.Deferred) (Transaction, error)
+ GetState() interface{}
+}
+
+type Transaction = msig0.Transaction
+
+var Methods = builtin5.MethodsMultisig
+
+func Message(version actors.Version, from address.Address) MessageBuilder {
+ switch version {
+
+ case actors.Version0:
+ return message0{from}
+
+ case actors.Version2:
+ return message2{message0{from}}
+
+ case actors.Version3:
+ return message3{message0{from}}
+
+ case actors.Version4:
+ return message4{message0{from}}
+
+ case actors.Version5:
+ return message5{message0{from}}
+ default:
+ panic(fmt.Sprintf("unsupported actors version: %d", version))
+ }
+}
+
+type MessageBuilder interface {
+ // Create a new multisig with the specified parameters.
+ Create(signers []address.Address, threshold uint64,
+ vestingStart, vestingDuration abi.ChainEpoch,
+ initialAmount abi.TokenAmount) (*types.Message, error)
+
+ // Propose a transaction to the given multisig.
+ Propose(msig, target address.Address, amt abi.TokenAmount,
+ method abi.MethodNum, params []byte) (*types.Message, error)
+
+ // Approve a multisig transaction. The "hash" is optional.
+ Approve(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error)
+
+ // Cancel a multisig transaction. The "hash" is optional.
+ Cancel(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error)
+}
+
+// these types are unchanged between v0 and v5, so we alias the v5 definitions
+type ProposalHashData = msig5.ProposalHashData
+type ProposeReturn = msig5.ProposeReturn
+type ProposeParams = msig5.ProposeParams
+type ApproveReturn = msig5.ApproveReturn
+
+func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
+ params := msig5.TxnIDParams{ID: msig5.TxnID(id)}
+ if data != nil {
+ if data.Requester.Protocol() != address.ID {
+ return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester)
+ }
+ if data.Value.Sign() == -1 {
+ return nil, xerrors.Errorf("proposal value must be non-negative, was %s", data.Value)
+ }
+ if data.To == address.Undef {
+ return nil, xerrors.Errorf("proposed destination address must be set")
+ }
+ pser, err := data.Serialize()
+ if err != nil {
+ return nil, err
+ }
+ hash := blake2b.Sum256(pser)
+ params.ProposalHash = hash[:]
+ }
+
+ return actors.SerializeParams(¶ms)
+}
diff --git a/chain/actors/builtin/multisig/state.go b/chain/actors/builtin/multisig/state.go
deleted file mode 100644
index 89a7eedade7..00000000000
--- a/chain/actors/builtin/multisig/state.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package multisig
-
-import (
- "golang.org/x/xerrors"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/cbor"
- "github.com/ipfs/go-cid"
-
- builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
- msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
- builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
-
- "github.com/filecoin-project/lotus/chain/actors/adt"
- "github.com/filecoin-project/lotus/chain/actors/builtin"
- "github.com/filecoin-project/lotus/chain/types"
-)
-
-func init() {
- builtin.RegisterActorState(builtin0.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
- return load0(store, root)
- })
- builtin.RegisterActorState(builtin2.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
- return load2(store, root)
- })
-}
-
-func Load(store adt.Store, act *types.Actor) (State, error) {
- switch act.Code {
- case builtin0.MultisigActorCodeID:
- return load0(store, act.Head)
- case builtin2.MultisigActorCodeID:
- return load2(store, act.Head)
- }
- return nil, xerrors.Errorf("unknown actor code %s", act.Code)
-}
-
-type State interface {
- cbor.Marshaler
-
- LockedBalance(epoch abi.ChainEpoch) (abi.TokenAmount, error)
- StartEpoch() (abi.ChainEpoch, error)
- UnlockDuration() (abi.ChainEpoch, error)
- InitialBalance() (abi.TokenAmount, error)
- Threshold() (uint64, error)
- Signers() ([]address.Address, error)
-
- ForEachPendingTxn(func(id int64, txn Transaction) error) error
-}
-
-type Transaction = msig0.Transaction
diff --git a/chain/actors/builtin/multisig/state.go.template b/chain/actors/builtin/multisig/state.go.template
new file mode 100644
index 00000000000..6c0130c0998
--- /dev/null
+++ b/chain/actors/builtin/multisig/state.go.template
@@ -0,0 +1,127 @@
+package multisig
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+{{if (ge .v 3)}}
+ builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin"
+{{end}}
+ msig{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/multisig"
+)
+
+var _ State = (*state{{.v}})(nil)
+
+func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
+ out := state{{.v}}{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make{{.v}}(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
+ out := state{{.v}}{store: store}
+ out.State = msig{{.v}}.State{}
+ out.State.Signers = signers
+ out.State.NumApprovalsThreshold = threshold
+ out.State.StartEpoch = startEpoch
+ out.State.UnlockDuration = unlockDuration
+ out.State.InitialBalance = initialBalance
+ {{if (le .v 2)}}
+ em, err := adt{{.v}}.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State.PendingTxns = em
+ {{else}}
+ em, err := adt{{.v}}.StoreEmptyMap(store, builtin{{.v}}.DefaultHamtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State.PendingTxns = em
+ {{end}}
+ return &out, nil
+}
+
+type state{{.v}} struct {
+ msig{{.v}}.State
+ store adt.Store
+}
+
+func (s *state{{.v}}) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
+ return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
+}
+
+func (s *state{{.v}}) StartEpoch() (abi.ChainEpoch, error) {
+ return s.State.StartEpoch, nil
+}
+
+func (s *state{{.v}}) UnlockDuration() (abi.ChainEpoch, error) {
+ return s.State.UnlockDuration, nil
+}
+
+func (s *state{{.v}}) InitialBalance() (abi.TokenAmount, error) {
+ return s.State.InitialBalance, nil
+}
+
+func (s *state{{.v}}) Threshold() (uint64, error) {
+ return s.State.NumApprovalsThreshold, nil
+}
+
+func (s *state{{.v}}) Signers() ([]address.Address, error) {
+ return s.State.Signers, nil
+}
+
+func (s *state{{.v}}) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
+ arr, err := adt{{.v}}.AsMap(s.store, s.State.PendingTxns{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}})
+ if err != nil {
+ return err
+ }
+ var out msig{{.v}}.Transaction
+ return arr.ForEach(&out, func(key string) error {
+ txid, n := binary.Varint([]byte(key))
+ if n <= 0 {
+ return xerrors.Errorf("invalid pending transaction key: %v", key)
+ }
+ return cb(txid, (Transaction)(out)) //nolint:unconvert
+ })
+}
+
+func (s *state{{.v}}) PendingTxnChanged(other State) (bool, error) {
+ other{{.v}}, ok := other.(*state{{.v}})
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.PendingTxns.Equals(other{{.v}}.PendingTxns), nil
+}
+
+func (s *state{{.v}}) transactions() (adt.Map, error) {
+ return adt{{.v}}.AsMap(s.store, s.PendingTxns{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}})
+}
+
+func (s *state{{.v}}) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
+ var tx msig{{.v}}.Transaction
+ if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return Transaction{}, err
+ }
+ return tx, nil
+}
+
+func (s *state{{.v}}) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/multisig/state0.go b/chain/actors/builtin/multisig/v0.go
similarity index 56%
rename from chain/actors/builtin/multisig/state0.go
rename to chain/actors/builtin/multisig/v0.go
index c934343e702..973ac920904 100644
--- a/chain/actors/builtin/multisig/state0.go
+++ b/chain/actors/builtin/multisig/v0.go
@@ -1,17 +1,20 @@
package multisig
import (
+ "bytes"
"encoding/binary"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/actors/adt"
msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
- adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
)
var _ State = (*state0)(nil)
@@ -25,6 +28,25 @@ func load0(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make0(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
+ out := state0{store: store}
+ out.State = msig0.State{}
+ out.State.Signers = signers
+ out.State.NumApprovalsThreshold = threshold
+ out.State.StartEpoch = startEpoch
+ out.State.UnlockDuration = unlockDuration
+ out.State.InitialBalance = initialBalance
+
+ em, err := adt0.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State.PendingTxns = em
+
+ return &out, nil
+}
+
type state0 struct {
msig0.State
store adt.Store
@@ -65,6 +87,31 @@ func (s *state0) ForEachPendingTxn(cb func(id int64, txn Transaction) error) err
if n <= 0 {
return xerrors.Errorf("invalid pending transaction key: %v", key)
}
- return cb(txid, (Transaction)(out))
+ return cb(txid, (Transaction)(out)) //nolint:unconvert
})
}
+
+func (s *state0) PendingTxnChanged(other State) (bool, error) {
+ other0, ok := other.(*state0)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.PendingTxns.Equals(other0.PendingTxns), nil
+}
+
+func (s *state0) transactions() (adt.Map, error) {
+ return adt0.AsMap(s.store, s.PendingTxns)
+}
+
+func (s *state0) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
+ var tx msig0.Transaction
+ if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return Transaction{}, err
+ }
+ return tx, nil
+}
+
+func (s *state0) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/multisig/state2.go b/chain/actors/builtin/multisig/v2.go
similarity index 56%
rename from chain/actors/builtin/multisig/state2.go
rename to chain/actors/builtin/multisig/v2.go
index a78b07d551f..5b830e69530 100644
--- a/chain/actors/builtin/multisig/state2.go
+++ b/chain/actors/builtin/multisig/v2.go
@@ -1,17 +1,20 @@
package multisig
import (
+ "bytes"
"encoding/binary"
+ adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
+
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/actors/adt"
msig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
- adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
)
var _ State = (*state2)(nil)
@@ -25,6 +28,25 @@ func load2(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make2(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
+ out := state2{store: store}
+ out.State = msig2.State{}
+ out.State.Signers = signers
+ out.State.NumApprovalsThreshold = threshold
+ out.State.StartEpoch = startEpoch
+ out.State.UnlockDuration = unlockDuration
+ out.State.InitialBalance = initialBalance
+
+ em, err := adt2.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State.PendingTxns = em
+
+ return &out, nil
+}
+
type state2 struct {
msig2.State
store adt.Store
@@ -65,6 +87,31 @@ func (s *state2) ForEachPendingTxn(cb func(id int64, txn Transaction) error) err
if n <= 0 {
return xerrors.Errorf("invalid pending transaction key: %v", key)
}
- return cb(txid, (Transaction)(out))
+ return cb(txid, (Transaction)(out)) //nolint:unconvert
})
}
+
+func (s *state2) PendingTxnChanged(other State) (bool, error) {
+ other2, ok := other.(*state2)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.PendingTxns.Equals(other2.PendingTxns), nil
+}
+
+func (s *state2) transactions() (adt.Map, error) {
+ return adt2.AsMap(s.store, s.PendingTxns)
+}
+
+func (s *state2) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
+ var tx msig2.Transaction
+ if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return Transaction{}, err
+ }
+ return tx, nil
+}
+
+func (s *state2) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/multisig/v3.go b/chain/actors/builtin/multisig/v3.go
new file mode 100644
index 00000000000..c4a2791b705
--- /dev/null
+++ b/chain/actors/builtin/multisig/v3.go
@@ -0,0 +1,119 @@
+package multisig
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ msig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig"
+)
+
+var _ State = (*state3)(nil)
+
+func load3(store adt.Store, root cid.Cid) (State, error) {
+ out := state3{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make3(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
+ out := state3{store: store}
+ out.State = msig3.State{}
+ out.State.Signers = signers
+ out.State.NumApprovalsThreshold = threshold
+ out.State.StartEpoch = startEpoch
+ out.State.UnlockDuration = unlockDuration
+ out.State.InitialBalance = initialBalance
+
+ em, err := adt3.StoreEmptyMap(store, builtin3.DefaultHamtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State.PendingTxns = em
+
+ return &out, nil
+}
+
+type state3 struct {
+ msig3.State
+ store adt.Store
+}
+
+func (s *state3) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
+ return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
+}
+
+func (s *state3) StartEpoch() (abi.ChainEpoch, error) {
+ return s.State.StartEpoch, nil
+}
+
+func (s *state3) UnlockDuration() (abi.ChainEpoch, error) {
+ return s.State.UnlockDuration, nil
+}
+
+func (s *state3) InitialBalance() (abi.TokenAmount, error) {
+ return s.State.InitialBalance, nil
+}
+
+func (s *state3) Threshold() (uint64, error) {
+ return s.State.NumApprovalsThreshold, nil
+}
+
+func (s *state3) Signers() ([]address.Address, error) {
+ return s.State.Signers, nil
+}
+
+func (s *state3) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
+ arr, err := adt3.AsMap(s.store, s.State.PendingTxns, builtin3.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+ var out msig3.Transaction
+ return arr.ForEach(&out, func(key string) error {
+ txid, n := binary.Varint([]byte(key))
+ if n <= 0 {
+ return xerrors.Errorf("invalid pending transaction key: %v", key)
+ }
+ return cb(txid, (Transaction)(out)) //nolint:unconvert
+ })
+}
+
+func (s *state3) PendingTxnChanged(other State) (bool, error) {
+ other3, ok := other.(*state3)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.PendingTxns.Equals(other3.PendingTxns), nil
+}
+
+func (s *state3) transactions() (adt.Map, error) {
+ return adt3.AsMap(s.store, s.PendingTxns, builtin3.DefaultHamtBitwidth)
+}
+
+func (s *state3) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
+ var tx msig3.Transaction
+ if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return Transaction{}, err
+ }
+ return tx, nil
+}
+
+func (s *state3) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/multisig/v4.go b/chain/actors/builtin/multisig/v4.go
new file mode 100644
index 00000000000..a35a890f870
--- /dev/null
+++ b/chain/actors/builtin/multisig/v4.go
@@ -0,0 +1,119 @@
+package multisig
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ msig4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/multisig"
+)
+
+var _ State = (*state4)(nil)
+
+func load4(store adt.Store, root cid.Cid) (State, error) {
+ out := state4{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make4(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
+ out := state4{store: store}
+ out.State = msig4.State{}
+ out.State.Signers = signers
+ out.State.NumApprovalsThreshold = threshold
+ out.State.StartEpoch = startEpoch
+ out.State.UnlockDuration = unlockDuration
+ out.State.InitialBalance = initialBalance
+
+ em, err := adt4.StoreEmptyMap(store, builtin4.DefaultHamtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State.PendingTxns = em
+
+ return &out, nil
+}
+
+type state4 struct {
+ msig4.State
+ store adt.Store
+}
+
+func (s *state4) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
+ return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
+}
+
+func (s *state4) StartEpoch() (abi.ChainEpoch, error) {
+ return s.State.StartEpoch, nil
+}
+
+func (s *state4) UnlockDuration() (abi.ChainEpoch, error) {
+ return s.State.UnlockDuration, nil
+}
+
+func (s *state4) InitialBalance() (abi.TokenAmount, error) {
+ return s.State.InitialBalance, nil
+}
+
+func (s *state4) Threshold() (uint64, error) {
+ return s.State.NumApprovalsThreshold, nil
+}
+
+func (s *state4) Signers() ([]address.Address, error) {
+ return s.State.Signers, nil
+}
+
+func (s *state4) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
+ arr, err := adt4.AsMap(s.store, s.State.PendingTxns, builtin4.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+ var out msig4.Transaction
+ return arr.ForEach(&out, func(key string) error {
+ txid, n := binary.Varint([]byte(key))
+ if n <= 0 {
+ return xerrors.Errorf("invalid pending transaction key: %v", key)
+ }
+ return cb(txid, (Transaction)(out)) //nolint:unconvert
+ })
+}
+
+func (s *state4) PendingTxnChanged(other State) (bool, error) {
+ other4, ok := other.(*state4)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.PendingTxns.Equals(other4.PendingTxns), nil
+}
+
+func (s *state4) transactions() (adt.Map, error) {
+ return adt4.AsMap(s.store, s.PendingTxns, builtin4.DefaultHamtBitwidth)
+}
+
+func (s *state4) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
+ var tx msig4.Transaction
+ if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return Transaction{}, err
+ }
+ return tx, nil
+}
+
+func (s *state4) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/multisig/v5.go b/chain/actors/builtin/multisig/v5.go
new file mode 100644
index 00000000000..4ad9aea941a
--- /dev/null
+++ b/chain/actors/builtin/multisig/v5.go
@@ -0,0 +1,119 @@
+package multisig
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+
+ msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig"
+)
+
+var _ State = (*state5)(nil)
+
+func load5(store adt.Store, root cid.Cid) (State, error) {
+ out := state5{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make5(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
+ out := state5{store: store}
+ out.State = msig5.State{}
+ out.State.Signers = signers
+ out.State.NumApprovalsThreshold = threshold
+ out.State.StartEpoch = startEpoch
+ out.State.UnlockDuration = unlockDuration
+ out.State.InitialBalance = initialBalance
+
+ em, err := adt5.StoreEmptyMap(store, builtin5.DefaultHamtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State.PendingTxns = em
+
+ return &out, nil
+}
+
+type state5 struct {
+ msig5.State
+ store adt.Store
+}
+
+func (s *state5) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
+ return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
+}
+
+func (s *state5) StartEpoch() (abi.ChainEpoch, error) {
+ return s.State.StartEpoch, nil
+}
+
+func (s *state5) UnlockDuration() (abi.ChainEpoch, error) {
+ return s.State.UnlockDuration, nil
+}
+
+func (s *state5) InitialBalance() (abi.TokenAmount, error) {
+ return s.State.InitialBalance, nil
+}
+
+func (s *state5) Threshold() (uint64, error) {
+ return s.State.NumApprovalsThreshold, nil
+}
+
+func (s *state5) Signers() ([]address.Address, error) {
+ return s.State.Signers, nil
+}
+
+func (s *state5) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
+ arr, err := adt5.AsMap(s.store, s.State.PendingTxns, builtin5.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+ var out msig5.Transaction
+ return arr.ForEach(&out, func(key string) error {
+ txid, n := binary.Varint([]byte(key))
+ if n <= 0 {
+ return xerrors.Errorf("invalid pending transaction key: %v", key)
+ }
+ return cb(txid, (Transaction)(out)) //nolint:unconvert
+ })
+}
+
+func (s *state5) PendingTxnChanged(other State) (bool, error) {
+ other5, ok := other.(*state5)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.PendingTxns.Equals(other5.PendingTxns), nil
+}
+
+func (s *state5) transactions() (adt.Map, error) {
+ return adt5.AsMap(s.store, s.PendingTxns, builtin5.DefaultHamtBitwidth)
+}
+
+func (s *state5) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
+ var tx msig5.Transaction
+ if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return Transaction{}, err
+ }
+ return tx, nil
+}
+
+func (s *state5) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/paych/state.go b/chain/actors/builtin/paych/actor.go.template
similarity index 55%
rename from chain/actors/builtin/paych/state.go
rename to chain/actors/builtin/paych/actor.go.template
index 20c7a74b734..7699e76b631 100644
--- a/chain/actors/builtin/paych/state.go
+++ b/chain/actors/builtin/paych/actor.go.template
@@ -2,6 +2,7 @@ package paych
import (
"encoding/base64"
+ "fmt"
"golang.org/x/xerrors"
@@ -12,35 +13,56 @@ import (
"github.com/ipfs/go-cid"
ipldcbor "github.com/ipfs/go-ipld-cbor"
- builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych"
- builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+{{range .versions}}
+ builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
+{{end}}
+ "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types"
)
func init() {
- builtin.RegisterActorState(builtin0.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
- return load0(store, root)
+{{range .versions}}
+ builtin.RegisterActorState(builtin{{.}}.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load{{.}}(store, root)
})
- builtin.RegisterActorState(builtin2.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
- return load2(store, root)
- })
-}
+{{end}}}
// Load returns an abstract copy of payment channel state, irregardless of actor version
func Load(store adt.Store, act *types.Actor) (State, error) {
switch act.Code {
- case builtin0.PaymentChannelActorCodeID:
- return load0(store, act.Head)
- case builtin2.PaymentChannelActorCodeID:
- return load2(store, act.Head)
+{{range .versions}}
+ case builtin{{.}}.PaymentChannelActorCodeID:
+ return load{{.}}(store, act.Head)
+{{end}}
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
+func MakeState(store adt.Store, av actors.Version) (State, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return make{{.}}(store)
+{{end}}
+}
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return builtin{{.}}.PaymentChannelActorCodeID, nil
+{{end}}
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
// State is an abstract version of payment channel state that works across
// versions
type State interface {
@@ -61,6 +83,8 @@ type State interface {
// Iterate lane states
ForEachLaneState(cb func(idx uint64, dl LaneState) error) error
+
+ GetState() interface{}
}
// LaneState is an abstract copy of the state of a single lane
@@ -86,3 +110,23 @@ func DecodeSignedVoucher(s string) (*SignedVoucher, error) {
return &sv, nil
}
+
+var Methods = builtin{{.latestVersion}}.MethodsPaych
+
+func Message(version actors.Version, from address.Address) MessageBuilder {
+ switch version {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return message{{.}}{from}
+{{end}}
+ default:
+ panic(fmt.Sprintf("unsupported actors version: %d", version))
+ }
+}
+
+type MessageBuilder interface {
+ Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error)
+ Update(paych address.Address, voucher *SignedVoucher, secret []byte) (*types.Message, error)
+ Settle(paych address.Address) (*types.Message, error)
+ Collect(paych address.Address) (*types.Message, error)
+}
diff --git a/chain/actors/builtin/paych/message.go b/chain/actors/builtin/paych/message.go
deleted file mode 100644
index 5709d4b23d8..00000000000
--- a/chain/actors/builtin/paych/message.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package paych
-
-import (
- "fmt"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/lotus/chain/actors"
- "github.com/filecoin-project/lotus/chain/types"
-
- builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
-)
-
-var Methods = builtin2.MethodsPaych
-
-func Message(version actors.Version, from address.Address) MessageBuilder {
- switch version {
- case actors.Version0:
- return message0{from}
- case actors.Version2:
- return message2{from}
- default:
- panic(fmt.Sprintf("unsupported actors version: %d", version))
- }
-}
-
-type MessageBuilder interface {
- Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error)
- Update(paych address.Address, voucher *SignedVoucher, secret []byte) (*types.Message, error)
- Settle(paych address.Address) (*types.Message, error)
- Collect(paych address.Address) (*types.Message, error)
-}
diff --git a/chain/actors/builtin/paych/message.go.template b/chain/actors/builtin/paych/message.go.template
new file mode 100644
index 00000000000..4a5ea2331e5
--- /dev/null
+++ b/chain/actors/builtin/paych/message.go.template
@@ -0,0 +1,74 @@
+package paych
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin"
+ init{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/init"
+ paych{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/paych"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type message{{.v}} struct{ from address.Address }
+
+func (m message{{.v}}) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) {
+ params, aerr := actors.SerializeParams(&paych{{.v}}.ConstructorParams{From: m.from, To: to})
+ if aerr != nil {
+ return nil, aerr
+ }
+ enc, aerr := actors.SerializeParams(&init{{.v}}.ExecParams{
+ CodeCID: builtin{{.v}}.PaymentChannelActorCodeID,
+ ConstructorParams: params,
+ })
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ return &types.Message{
+ To: init_.Address,
+ From: m.from,
+ Value: initialAmount,
+ Method: builtin{{.v}}.MethodsInit.Exec,
+ Params: enc,
+ }, nil
+}
+
+func (m message{{.v}}) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
+ params, aerr := actors.SerializeParams(&paych{{.v}}.UpdateChannelStateParams{
+ Sv: *sv,
+ Secret: secret,
+ })
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin{{.v}}.MethodsPaych.UpdateChannelState,
+ Params: params,
+ }, nil
+}
+
+func (m message{{.v}}) Settle(paych address.Address) (*types.Message, error) {
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin{{.v}}.MethodsPaych.Settle,
+ }, nil
+}
+
+func (m message{{.v}}) Collect(paych address.Address) (*types.Message, error) {
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin{{.v}}.MethodsPaych.Collect,
+ }, nil
+}
diff --git a/chain/actors/builtin/paych/message3.go b/chain/actors/builtin/paych/message3.go
new file mode 100644
index 00000000000..50503a1409a
--- /dev/null
+++ b/chain/actors/builtin/paych/message3.go
@@ -0,0 +1,74 @@
+package paych
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+ init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init"
+ paych3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/paych"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type message3 struct{ from address.Address }
+
+func (m message3) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) {
+ params, aerr := actors.SerializeParams(&paych3.ConstructorParams{From: m.from, To: to})
+ if aerr != nil {
+ return nil, aerr
+ }
+ enc, aerr := actors.SerializeParams(&init3.ExecParams{
+ CodeCID: builtin3.PaymentChannelActorCodeID,
+ ConstructorParams: params,
+ })
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ return &types.Message{
+ To: init_.Address,
+ From: m.from,
+ Value: initialAmount,
+ Method: builtin3.MethodsInit.Exec,
+ Params: enc,
+ }, nil
+}
+
+func (m message3) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
+ params, aerr := actors.SerializeParams(&paych3.UpdateChannelStateParams{
+ Sv: *sv,
+ Secret: secret,
+ })
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin3.MethodsPaych.UpdateChannelState,
+ Params: params,
+ }, nil
+}
+
+func (m message3) Settle(paych address.Address) (*types.Message, error) {
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin3.MethodsPaych.Settle,
+ }, nil
+}
+
+func (m message3) Collect(paych address.Address) (*types.Message, error) {
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin3.MethodsPaych.Collect,
+ }, nil
+}
diff --git a/chain/actors/builtin/paych/message4.go b/chain/actors/builtin/paych/message4.go
new file mode 100644
index 00000000000..b2c6b612e38
--- /dev/null
+++ b/chain/actors/builtin/paych/message4.go
@@ -0,0 +1,74 @@
+package paych
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+ init4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/init"
+ paych4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/paych"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type message4 struct{ from address.Address }
+
+func (m message4) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) {
+ params, aerr := actors.SerializeParams(&paych4.ConstructorParams{From: m.from, To: to})
+ if aerr != nil {
+ return nil, aerr
+ }
+ enc, aerr := actors.SerializeParams(&init4.ExecParams{
+ CodeCID: builtin4.PaymentChannelActorCodeID,
+ ConstructorParams: params,
+ })
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ return &types.Message{
+ To: init_.Address,
+ From: m.from,
+ Value: initialAmount,
+ Method: builtin4.MethodsInit.Exec,
+ Params: enc,
+ }, nil
+}
+
+func (m message4) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
+ params, aerr := actors.SerializeParams(&paych4.UpdateChannelStateParams{
+ Sv: *sv,
+ Secret: secret,
+ })
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin4.MethodsPaych.UpdateChannelState,
+ Params: params,
+ }, nil
+}
+
+func (m message4) Settle(paych address.Address) (*types.Message, error) {
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin4.MethodsPaych.Settle,
+ }, nil
+}
+
+func (m message4) Collect(paych address.Address) (*types.Message, error) {
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin4.MethodsPaych.Collect,
+ }, nil
+}
diff --git a/chain/actors/builtin/paych/message5.go b/chain/actors/builtin/paych/message5.go
new file mode 100644
index 00000000000..37a2b6f04af
--- /dev/null
+++ b/chain/actors/builtin/paych/message5.go
@@ -0,0 +1,74 @@
+package paych
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+ init5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/init"
+ paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type message5 struct{ from address.Address }
+
+func (m message5) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) {
+ params, aerr := actors.SerializeParams(&paych5.ConstructorParams{From: m.from, To: to})
+ if aerr != nil {
+ return nil, aerr
+ }
+ enc, aerr := actors.SerializeParams(&init5.ExecParams{
+ CodeCID: builtin5.PaymentChannelActorCodeID,
+ ConstructorParams: params,
+ })
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ return &types.Message{
+ To: init_.Address,
+ From: m.from,
+ Value: initialAmount,
+ Method: builtin5.MethodsInit.Exec,
+ Params: enc,
+ }, nil
+}
+
+func (m message5) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
+ params, aerr := actors.SerializeParams(&paych5.UpdateChannelStateParams{
+ Sv: *sv,
+ Secret: secret,
+ })
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin5.MethodsPaych.UpdateChannelState,
+ Params: params,
+ }, nil
+}
+
+func (m message5) Settle(paych address.Address) (*types.Message, error) {
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin5.MethodsPaych.Settle,
+ }, nil
+}
+
+func (m message5) Collect(paych address.Address) (*types.Message, error) {
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin5.MethodsPaych.Collect,
+ }, nil
+}
diff --git a/chain/actors/builtin/paych/mock/mock.go b/chain/actors/builtin/paych/mock/mock.go
index 3b82511ffa0..1ecfa113070 100644
--- a/chain/actors/builtin/paych/mock/mock.go
+++ b/chain/actors/builtin/paych/mock/mock.go
@@ -17,6 +17,10 @@ type mockState struct {
lanes map[uint64]paych.LaneState
}
+func (ms *mockState) GetState() interface{} {
+ panic("implement me")
+}
+
type mockLaneState struct {
redeemed big.Int
nonce uint64
diff --git a/chain/actors/builtin/paych/paych.go b/chain/actors/builtin/paych/paych.go
new file mode 100644
index 00000000000..d87f70f0c2a
--- /dev/null
+++ b/chain/actors/builtin/paych/paych.go
@@ -0,0 +1,203 @@
+package paych
+
+import (
+ "encoding/base64"
+ "fmt"
+
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ big "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/cbor"
+ "github.com/ipfs/go-cid"
+ ipldcbor "github.com/ipfs/go-ipld-cbor"
+
+ paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func init() {
+
+ builtin.RegisterActorState(builtin0.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load0(store, root)
+ })
+
+ builtin.RegisterActorState(builtin2.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load2(store, root)
+ })
+
+ builtin.RegisterActorState(builtin3.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load3(store, root)
+ })
+
+ builtin.RegisterActorState(builtin4.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load4(store, root)
+ })
+
+ builtin.RegisterActorState(builtin5.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load5(store, root)
+ })
+}
+
+// Load returns an abstract copy of payment channel state, regardless of actor version
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+
+ case builtin0.PaymentChannelActorCodeID:
+ return load0(store, act.Head)
+
+ case builtin2.PaymentChannelActorCodeID:
+ return load2(store, act.Head)
+
+ case builtin3.PaymentChannelActorCodeID:
+ return load3(store, act.Head)
+
+ case builtin4.PaymentChannelActorCodeID:
+ return load4(store, act.Head)
+
+ case builtin5.PaymentChannelActorCodeID:
+ return load5(store, act.Head)
+
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+func MakeState(store adt.Store, av actors.Version) (State, error) {
+ switch av {
+
+ case actors.Version0:
+ return make0(store)
+
+ case actors.Version2:
+ return make2(store)
+
+ case actors.Version3:
+ return make3(store)
+
+ case actors.Version4:
+ return make4(store)
+
+ case actors.Version5:
+ return make5(store)
+
+ }
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+
+ case actors.Version0:
+ return builtin0.PaymentChannelActorCodeID, nil
+
+ case actors.Version2:
+ return builtin2.PaymentChannelActorCodeID, nil
+
+ case actors.Version3:
+ return builtin3.PaymentChannelActorCodeID, nil
+
+ case actors.Version4:
+ return builtin4.PaymentChannelActorCodeID, nil
+
+ case actors.Version5:
+ return builtin5.PaymentChannelActorCodeID, nil
+
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
+// State is an abstract version of payment channel state that works across
+// versions
+type State interface {
+ cbor.Marshaler
+ // Channel owner, who has funded the actor
+ From() (address.Address, error)
+ // Recipient of payouts from channel
+ To() (address.Address, error)
+
+ // Height at which the channel can be `Collected`
+ SettlingAt() (abi.ChainEpoch, error)
+
+ // Amount successfully redeemed through the payment channel, paid out on `Collect()`
+ ToSend() (abi.TokenAmount, error)
+
+ // Get total number of lanes
+ LaneCount() (uint64, error)
+
+ // Iterate lane states
+ ForEachLaneState(cb func(idx uint64, dl LaneState) error) error
+
+ GetState() interface{}
+}
+
+// LaneState is an abstract copy of the state of a single lane
+type LaneState interface {
+ Redeemed() (big.Int, error)
+ Nonce() (uint64, error)
+}
+
+type SignedVoucher = paych0.SignedVoucher
+type ModVerifyParams = paych0.ModVerifyParams
+
+// DecodeSignedVoucher decodes base64 encoded signed voucher.
+func DecodeSignedVoucher(s string) (*SignedVoucher, error) {
+ data, err := base64.RawURLEncoding.DecodeString(s)
+ if err != nil {
+ return nil, err
+ }
+
+ var sv SignedVoucher
+ if err := ipldcbor.DecodeInto(data, &sv); err != nil {
+ return nil, err
+ }
+
+ return &sv, nil
+}
+
+var Methods = builtin5.MethodsPaych
+
+func Message(version actors.Version, from address.Address) MessageBuilder {
+ switch version {
+
+ case actors.Version0:
+ return message0{from}
+
+ case actors.Version2:
+ return message2{from}
+
+ case actors.Version3:
+ return message3{from}
+
+ case actors.Version4:
+ return message4{from}
+
+ case actors.Version5:
+ return message5{from}
+
+ default:
+ panic(fmt.Sprintf("unsupported actors version: %d", version))
+ }
+}
+
+type MessageBuilder interface {
+ Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error)
+ Update(paych address.Address, voucher *SignedVoucher, secret []byte) (*types.Message, error)
+ Settle(paych address.Address) (*types.Message, error)
+ Collect(paych address.Address) (*types.Message, error)
+}
diff --git a/chain/actors/builtin/paych/state.go.template b/chain/actors/builtin/paych/state.go.template
new file mode 100644
index 00000000000..3e41f5be5f6
--- /dev/null
+++ b/chain/actors/builtin/paych/state.go.template
@@ -0,0 +1,114 @@
+package paych
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ paych{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/paych"
+ adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt"
+)
+
+var _ State = (*state{{.v}})(nil)
+
+func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
+ out := state{{.v}}{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make{{.v}}(store adt.Store) (State, error) {
+ out := state{{.v}}{store: store}
+ out.State = paych{{.v}}.State{}
+ return &out, nil
+}
+
+type state{{.v}} struct {
+ paych{{.v}}.State
+ store adt.Store
+ lsAmt *adt{{.v}}.Array
+}
+
+// Channel owner, who has funded the actor
+func (s *state{{.v}}) From() (address.Address, error) {
+ return s.State.From, nil
+}
+
+// Recipient of payouts from channel
+func (s *state{{.v}}) To() (address.Address, error) {
+ return s.State.To, nil
+}
+
+// Height at which the channel can be `Collected`
+func (s *state{{.v}}) SettlingAt() (abi.ChainEpoch, error) {
+ return s.State.SettlingAt, nil
+}
+
+// Amount successfully redeemed through the payment channel, paid out on `Collect()`
+func (s *state{{.v}}) ToSend() (abi.TokenAmount, error) {
+ return s.State.ToSend, nil
+}
+
+func (s *state{{.v}}) getOrLoadLsAmt() (*adt{{.v}}.Array, error) {
+ if s.lsAmt != nil {
+ return s.lsAmt, nil
+ }
+
+ // Get the lane state from the chain
+ lsamt, err := adt{{.v}}.AsArray(s.store, s.State.LaneStates{{if (ge .v 3)}}, paych{{.v}}.LaneStatesAmtBitwidth{{end}})
+ if err != nil {
+ return nil, err
+ }
+
+ s.lsAmt = lsamt
+ return lsamt, nil
+}
+
+// Get total number of lanes
+func (s *state{{.v}}) LaneCount() (uint64, error) {
+ lsamt, err := s.getOrLoadLsAmt()
+ if err != nil {
+ return 0, err
+ }
+ return lsamt.Length(), nil
+}
+
+func (s *state{{.v}}) GetState() interface{} {
+ return &s.State
+}
+
+// Iterate lane states
+func (s *state{{.v}}) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
+ // Get the lane state from the chain
+ lsamt, err := s.getOrLoadLsAmt()
+ if err != nil {
+ return err
+ }
+
+ // Note: we use a map instead of an array to store laneStates because the
+ // client sets the lane ID (the index) and potentially they could use a
+ // very large index.
+ var ls paych{{.v}}.LaneState
+ return lsamt.ForEach(&ls, func(i int64) error {
+ return cb(uint64(i), &laneState{{.v}}{ls})
+ })
+}
+
+type laneState{{.v}} struct {
+ paych{{.v}}.LaneState
+}
+
+func (ls *laneState{{.v}}) Redeemed() (big.Int, error) {
+ return ls.LaneState.Redeemed, nil
+}
+
+func (ls *laneState{{.v}}) Nonce() (uint64, error) {
+ return ls.LaneState.Nonce, nil
+}
diff --git a/chain/actors/builtin/paych/state0.go b/chain/actors/builtin/paych/v0.go
similarity index 92%
rename from chain/actors/builtin/paych/state0.go
rename to chain/actors/builtin/paych/v0.go
index 8e0e3434e07..e9bc30e3d18 100644
--- a/chain/actors/builtin/paych/state0.go
+++ b/chain/actors/builtin/paych/v0.go
@@ -24,6 +24,12 @@ func load0(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make0(store adt.Store) (State, error) {
+ out := state0{store: store}
+ out.State = paych0.State{}
+ return &out, nil
+}
+
type state0 struct {
paych0.State
store adt.Store
@@ -74,6 +80,10 @@ func (s *state0) LaneCount() (uint64, error) {
return lsamt.Length(), nil
}
+func (s *state0) GetState() interface{} {
+ return &s.State
+}
+
// Iterate lane states
func (s *state0) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
// Get the lane state from the chain
diff --git a/chain/actors/builtin/paych/state2.go b/chain/actors/builtin/paych/v2.go
similarity index 92%
rename from chain/actors/builtin/paych/state2.go
rename to chain/actors/builtin/paych/v2.go
index fbf4b9fde3b..400305e2fb0 100644
--- a/chain/actors/builtin/paych/state2.go
+++ b/chain/actors/builtin/paych/v2.go
@@ -24,6 +24,12 @@ func load2(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make2(store adt.Store) (State, error) {
+ out := state2{store: store}
+ out.State = paych2.State{}
+ return &out, nil
+}
+
type state2 struct {
paych2.State
store adt.Store
@@ -74,6 +80,10 @@ func (s *state2) LaneCount() (uint64, error) {
return lsamt.Length(), nil
}
+func (s *state2) GetState() interface{} {
+ return &s.State
+}
+
// Iterate lane states
func (s *state2) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
// Get the lane state from the chain
diff --git a/chain/actors/builtin/paych/v3.go b/chain/actors/builtin/paych/v3.go
new file mode 100644
index 00000000000..1d7c2f94b06
--- /dev/null
+++ b/chain/actors/builtin/paych/v3.go
@@ -0,0 +1,114 @@
+package paych
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ paych3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/paych"
+ adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
+)
+
+var _ State = (*state3)(nil)
+
+func load3(store adt.Store, root cid.Cid) (State, error) {
+ out := state3{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make3(store adt.Store) (State, error) {
+ out := state3{store: store}
+ out.State = paych3.State{}
+ return &out, nil
+}
+
+type state3 struct {
+ paych3.State
+ store adt.Store
+ lsAmt *adt3.Array
+}
+
+// Channel owner, who has funded the actor
+func (s *state3) From() (address.Address, error) {
+ return s.State.From, nil
+}
+
+// Recipient of payouts from channel
+func (s *state3) To() (address.Address, error) {
+ return s.State.To, nil
+}
+
+// Height at which the channel can be `Collected`
+func (s *state3) SettlingAt() (abi.ChainEpoch, error) {
+ return s.State.SettlingAt, nil
+}
+
+// Amount successfully redeemed through the payment channel, paid out on `Collect()`
+func (s *state3) ToSend() (abi.TokenAmount, error) {
+ return s.State.ToSend, nil
+}
+
+func (s *state3) getOrLoadLsAmt() (*adt3.Array, error) {
+ if s.lsAmt != nil {
+ return s.lsAmt, nil
+ }
+
+ // Get the lane state from the chain
+ lsamt, err := adt3.AsArray(s.store, s.State.LaneStates, paych3.LaneStatesAmtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+
+ s.lsAmt = lsamt
+ return lsamt, nil
+}
+
+// Get total number of lanes
+func (s *state3) LaneCount() (uint64, error) {
+ lsamt, err := s.getOrLoadLsAmt()
+ if err != nil {
+ return 0, err
+ }
+ return lsamt.Length(), nil
+}
+
+func (s *state3) GetState() interface{} {
+ return &s.State
+}
+
+// Iterate lane states
+func (s *state3) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
+ // Get the lane state from the chain
+ lsamt, err := s.getOrLoadLsAmt()
+ if err != nil {
+ return err
+ }
+
+	// Note: lane states are keyed by client-chosen lane IDs, which may be
+	// sparse and arbitrarily large, so they are stored in a sparse AMT
+	// rather than a dense array.
+ var ls paych3.LaneState
+ return lsamt.ForEach(&ls, func(i int64) error {
+ return cb(uint64(i), &laneState3{ls})
+ })
+}
+
+type laneState3 struct {
+ paych3.LaneState
+}
+
+func (ls *laneState3) Redeemed() (big.Int, error) {
+ return ls.LaneState.Redeemed, nil
+}
+
+func (ls *laneState3) Nonce() (uint64, error) {
+ return ls.LaneState.Nonce, nil
+}
diff --git a/chain/actors/builtin/paych/v4.go b/chain/actors/builtin/paych/v4.go
new file mode 100644
index 00000000000..b7d1e52a5b8
--- /dev/null
+++ b/chain/actors/builtin/paych/v4.go
@@ -0,0 +1,114 @@
+package paych
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ paych4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/paych"
+ adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt"
+)
+
+var _ State = (*state4)(nil)
+
+func load4(store adt.Store, root cid.Cid) (State, error) {
+ out := state4{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make4(store adt.Store) (State, error) {
+ out := state4{store: store}
+ out.State = paych4.State{}
+ return &out, nil
+}
+
+type state4 struct {
+ paych4.State
+ store adt.Store
+ lsAmt *adt4.Array
+}
+
+// Channel owner, who has funded the actor
+func (s *state4) From() (address.Address, error) {
+ return s.State.From, nil
+}
+
+// Recipient of payouts from channel
+func (s *state4) To() (address.Address, error) {
+ return s.State.To, nil
+}
+
+// Height at which the channel can be `Collected`
+func (s *state4) SettlingAt() (abi.ChainEpoch, error) {
+ return s.State.SettlingAt, nil
+}
+
+// Amount successfully redeemed through the payment channel, paid out on `Collect()`
+func (s *state4) ToSend() (abi.TokenAmount, error) {
+ return s.State.ToSend, nil
+}
+
+func (s *state4) getOrLoadLsAmt() (*adt4.Array, error) {
+ if s.lsAmt != nil {
+ return s.lsAmt, nil
+ }
+
+ // Get the lane state from the chain
+ lsamt, err := adt4.AsArray(s.store, s.State.LaneStates, paych4.LaneStatesAmtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+
+ s.lsAmt = lsamt
+ return lsamt, nil
+}
+
+// Get total number of lanes
+func (s *state4) LaneCount() (uint64, error) {
+ lsamt, err := s.getOrLoadLsAmt()
+ if err != nil {
+ return 0, err
+ }
+ return lsamt.Length(), nil
+}
+
+func (s *state4) GetState() interface{} {
+ return &s.State
+}
+
+// Iterate lane states
+func (s *state4) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
+ // Get the lane state from the chain
+ lsamt, err := s.getOrLoadLsAmt()
+ if err != nil {
+ return err
+ }
+
+	// Note: lane states are keyed by client-chosen lane IDs, which may be
+	// sparse and arbitrarily large, so they are stored in a sparse AMT
+	// rather than a dense array.
+ var ls paych4.LaneState
+ return lsamt.ForEach(&ls, func(i int64) error {
+ return cb(uint64(i), &laneState4{ls})
+ })
+}
+
+type laneState4 struct {
+ paych4.LaneState
+}
+
+func (ls *laneState4) Redeemed() (big.Int, error) {
+ return ls.LaneState.Redeemed, nil
+}
+
+func (ls *laneState4) Nonce() (uint64, error) {
+ return ls.LaneState.Nonce, nil
+}
diff --git a/chain/actors/builtin/paych/v5.go b/chain/actors/builtin/paych/v5.go
new file mode 100644
index 00000000000..b331a1500bf
--- /dev/null
+++ b/chain/actors/builtin/paych/v5.go
@@ -0,0 +1,114 @@
+package paych
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych"
+ adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
+)
+
+var _ State = (*state5)(nil)
+
+func load5(store adt.Store, root cid.Cid) (State, error) {
+ out := state5{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make5(store adt.Store) (State, error) {
+ out := state5{store: store}
+ out.State = paych5.State{}
+ return &out, nil
+}
+
+type state5 struct {
+ paych5.State
+ store adt.Store
+ lsAmt *adt5.Array
+}
+
+// Channel owner, who has funded the actor
+func (s *state5) From() (address.Address, error) {
+ return s.State.From, nil
+}
+
+// Recipient of payouts from channel
+func (s *state5) To() (address.Address, error) {
+ return s.State.To, nil
+}
+
+// Height at which the channel can be `Collected`
+func (s *state5) SettlingAt() (abi.ChainEpoch, error) {
+ return s.State.SettlingAt, nil
+}
+
+// Amount successfully redeemed through the payment channel, paid out on `Collect()`
+func (s *state5) ToSend() (abi.TokenAmount, error) {
+ return s.State.ToSend, nil
+}
+
+func (s *state5) getOrLoadLsAmt() (*adt5.Array, error) {
+ if s.lsAmt != nil {
+ return s.lsAmt, nil
+ }
+
+ // Get the lane state from the chain
+ lsamt, err := adt5.AsArray(s.store, s.State.LaneStates, paych5.LaneStatesAmtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+
+ s.lsAmt = lsamt
+ return lsamt, nil
+}
+
+// Get total number of lanes
+func (s *state5) LaneCount() (uint64, error) {
+ lsamt, err := s.getOrLoadLsAmt()
+ if err != nil {
+ return 0, err
+ }
+ return lsamt.Length(), nil
+}
+
+func (s *state5) GetState() interface{} {
+ return &s.State
+}
+
+// Iterate lane states
+func (s *state5) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
+ // Get the lane state from the chain
+ lsamt, err := s.getOrLoadLsAmt()
+ if err != nil {
+ return err
+ }
+
+	// Note: lane states are keyed by client-chosen lane IDs, which may be
+	// sparse and arbitrarily large, so they are stored in a sparse AMT
+	// rather than a dense array.
+ var ls paych5.LaneState
+ return lsamt.ForEach(&ls, func(i int64) error {
+ return cb(uint64(i), &laneState5{ls})
+ })
+}
+
+type laneState5 struct {
+ paych5.LaneState
+}
+
+func (ls *laneState5) Redeemed() (big.Int, error) {
+ return ls.LaneState.Redeemed, nil
+}
+
+func (ls *laneState5) Nonce() (uint64, error) {
+ return ls.LaneState.Nonce, nil
+}
diff --git a/chain/actors/builtin/power/actor.go.template b/chain/actors/builtin/power/actor.go.template
new file mode 100644
index 00000000000..fe11fc16069
--- /dev/null
+++ b/chain/actors/builtin/power/actor.go.template
@@ -0,0 +1,107 @@
+package power
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/cbor"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+{{range .versions}}
+ builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
+{{end}}
+)
+
+func init() {
+{{range .versions}}
+ builtin.RegisterActorState(builtin{{.}}.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load{{.}}(store, root)
+ })
+{{end}}}
+
+var (
+ Address = builtin{{.latestVersion}}.StoragePowerActorAddr
+ Methods = builtin{{.latestVersion}}.MethodsPower
+)
+
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+{{range .versions}}
+ case builtin{{.}}.StoragePowerActorCodeID:
+ return load{{.}}(store, act.Head)
+{{end}}
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+func MakeState(store adt.Store, av actors.Version) (State, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return make{{.}}(store)
+{{end}}
+}
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return builtin{{.}}.StoragePowerActorCodeID, nil
+{{end}}
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
+type State interface {
+ cbor.Marshaler
+
+ TotalLocked() (abi.TokenAmount, error)
+ TotalPower() (Claim, error)
+ TotalCommitted() (Claim, error)
+ TotalPowerSmoothed() (builtin.FilterEstimate, error)
+ GetState() interface{}
+
+ // MinerCounts returns the number of miners. Participating is the number
+ // with power above the minimum miner threshold.
+ MinerCounts() (participating, total uint64, err error)
+ MinerPower(address.Address) (Claim, bool, error)
+ MinerNominalPowerMeetsConsensusMinimum(address.Address) (bool, error)
+ ListAllMiners() ([]address.Address, error)
+ ForEachClaim(func(miner address.Address, claim Claim) error) error
+ ClaimsChanged(State) (bool, error)
+
+ // Testing or genesis setup only
+ SetTotalQualityAdjPower(abi.StoragePower) error
+ SetTotalRawBytePower(abi.StoragePower) error
+ SetThisEpochQualityAdjPower(abi.StoragePower) error
+ SetThisEpochRawBytePower(abi.StoragePower) error
+
+ // Diff helpers. Used by Diff* functions internally.
+ claims() (adt.Map, error)
+ decodeClaim(*cbg.Deferred) (Claim, error)
+}
+
+type Claim struct {
+ // Sum of raw byte power for a miner's sectors.
+ RawBytePower abi.StoragePower
+
+ // Sum of quality adjusted power for a miner's sectors.
+ QualityAdjPower abi.StoragePower
+}
+
+func AddClaims(a Claim, b Claim) Claim {
+ return Claim{
+ RawBytePower: big.Add(a.RawBytePower, b.RawBytePower),
+ QualityAdjPower: big.Add(a.QualityAdjPower, b.QualityAdjPower),
+ }
+}
diff --git a/chain/actors/builtin/power/diff.go b/chain/actors/builtin/power/diff.go
new file mode 100644
index 00000000000..3daa7056956
--- /dev/null
+++ b/chain/actors/builtin/power/diff.go
@@ -0,0 +1,117 @@
+package power
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+)
+
+type ClaimChanges struct {
+ Added []ClaimInfo
+ Modified []ClaimModification
+ Removed []ClaimInfo
+}
+
+type ClaimModification struct {
+ Miner address.Address
+ From Claim
+ To Claim
+}
+
+type ClaimInfo struct {
+ Miner address.Address
+ Claim Claim
+}
+
+func DiffClaims(pre, cur State) (*ClaimChanges, error) {
+ results := new(ClaimChanges)
+
+ prec, err := pre.claims()
+ if err != nil {
+ return nil, err
+ }
+
+ curc, err := cur.claims()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := adt.DiffAdtMap(prec, curc, &claimDiffer{results, pre, cur}); err != nil {
+ return nil, err
+ }
+
+ return results, nil
+}
+
+type claimDiffer struct {
+ Results *ClaimChanges
+ pre, after State
+}
+
+func (c *claimDiffer) AsKey(key string) (abi.Keyer, error) {
+ addr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return nil, err
+ }
+ return abi.AddrKey(addr), nil
+}
+
+func (c *claimDiffer) Add(key string, val *cbg.Deferred) error {
+ ci, err := c.after.decodeClaim(val)
+ if err != nil {
+ return err
+ }
+ addr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ c.Results.Added = append(c.Results.Added, ClaimInfo{
+ Miner: addr,
+ Claim: ci,
+ })
+ return nil
+}
+
+func (c *claimDiffer) Modify(key string, from, to *cbg.Deferred) error {
+ ciFrom, err := c.pre.decodeClaim(from)
+ if err != nil {
+ return err
+ }
+
+ ciTo, err := c.after.decodeClaim(to)
+ if err != nil {
+ return err
+ }
+
+ addr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+
+ if ciFrom != ciTo {
+ c.Results.Modified = append(c.Results.Modified, ClaimModification{
+ Miner: addr,
+ From: ciFrom,
+ To: ciTo,
+ })
+ }
+ return nil
+}
+
+func (c *claimDiffer) Remove(key string, val *cbg.Deferred) error {
+ ci, err := c.after.decodeClaim(val)
+ if err != nil {
+ return err
+ }
+ addr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ c.Results.Removed = append(c.Results.Removed, ClaimInfo{
+ Miner: addr,
+ Claim: ci,
+ })
+ return nil
+}
diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go
index f941ce93e17..5b4aa1b04ff 100644
--- a/chain/actors/builtin/power/power.go
+++ b/chain/actors/builtin/power/power.go
@@ -3,7 +3,9 @@ package power
import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/chain/actors"
"github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
@@ -14,33 +16,111 @@ import (
"github.com/filecoin-project/lotus/chain/types"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
)
func init() {
+
builtin.RegisterActorState(builtin0.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load0(store, root)
})
+
builtin.RegisterActorState(builtin2.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load2(store, root)
})
+
+ builtin.RegisterActorState(builtin3.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load3(store, root)
+ })
+
+ builtin.RegisterActorState(builtin4.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load4(store, root)
+ })
+
+ builtin.RegisterActorState(builtin5.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load5(store, root)
+ })
}
var (
- Address = builtin2.StoragePowerActorAddr
- Methods = builtin2.MethodsPower
+ Address = builtin5.StoragePowerActorAddr
+ Methods = builtin5.MethodsPower
)
-func Load(store adt.Store, act *types.Actor) (st State, err error) {
+func Load(store adt.Store, act *types.Actor) (State, error) {
switch act.Code {
+
case builtin0.StoragePowerActorCodeID:
return load0(store, act.Head)
+
case builtin2.StoragePowerActorCodeID:
return load2(store, act.Head)
+
+ case builtin3.StoragePowerActorCodeID:
+ return load3(store, act.Head)
+
+ case builtin4.StoragePowerActorCodeID:
+ return load4(store, act.Head)
+
+ case builtin5.StoragePowerActorCodeID:
+ return load5(store, act.Head)
+
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
+func MakeState(store adt.Store, av actors.Version) (State, error) {
+ switch av {
+
+ case actors.Version0:
+ return make0(store)
+
+ case actors.Version2:
+ return make2(store)
+
+ case actors.Version3:
+ return make3(store)
+
+ case actors.Version4:
+ return make4(store)
+
+ case actors.Version5:
+ return make5(store)
+
+ }
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+
+ case actors.Version0:
+ return builtin0.StoragePowerActorCodeID, nil
+
+ case actors.Version2:
+ return builtin2.StoragePowerActorCodeID, nil
+
+ case actors.Version3:
+ return builtin3.StoragePowerActorCodeID, nil
+
+ case actors.Version4:
+ return builtin4.StoragePowerActorCodeID, nil
+
+ case actors.Version5:
+ return builtin5.StoragePowerActorCodeID, nil
+
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
type State interface {
cbor.Marshaler
@@ -48,6 +128,7 @@ type State interface {
TotalPower() (Claim, error)
TotalCommitted() (Claim, error)
TotalPowerSmoothed() (builtin.FilterEstimate, error)
+ GetState() interface{}
// MinerCounts returns the number of miners. Participating is the number
// with power above the minimum miner threshold.
@@ -56,6 +137,17 @@ type State interface {
MinerNominalPowerMeetsConsensusMinimum(address.Address) (bool, error)
ListAllMiners() ([]address.Address, error)
ForEachClaim(func(miner address.Address, claim Claim) error) error
+ ClaimsChanged(State) (bool, error)
+
+ // Testing or genesis setup only
+ SetTotalQualityAdjPower(abi.StoragePower) error
+ SetTotalRawBytePower(abi.StoragePower) error
+ SetThisEpochQualityAdjPower(abi.StoragePower) error
+ SetThisEpochRawBytePower(abi.StoragePower) error
+
+ // Diff helpers. Used by Diff* functions internally.
+ claims() (adt.Map, error)
+ decodeClaim(*cbg.Deferred) (Claim, error)
}
type Claim struct {
diff --git a/chain/actors/builtin/power/state.go.template b/chain/actors/builtin/power/state.go.template
new file mode 100644
index 00000000000..fcdc5c35046
--- /dev/null
+++ b/chain/actors/builtin/power/state.go.template
@@ -0,0 +1,201 @@
+package power
+
+import (
+ "bytes"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+{{if (ge .v 3)}}
+ builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin"
+{{end}}
+ power{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/power"
+ adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt"
+)
+
+var _ State = (*state{{.v}})(nil)
+
+func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
+ out := state{{.v}}{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make{{.v}}(store adt.Store) (State, error) {
+ out := state{{.v}}{store: store}
+ {{if (le .v 2)}}
+ em, err := adt{{.v}}.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ emm, err := adt{{.v}}.MakeEmptyMultimap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *power{{.v}}.ConstructState(em, emm)
+ {{else}}
+ s, err := power{{.v}}.ConstructState(store)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+ {{end}}
+
+ return &out, nil
+}
+
+type state{{.v}} struct {
+ power{{.v}}.State
+ store adt.Store
+}
+
+func (s *state{{.v}}) TotalLocked() (abi.TokenAmount, error) {
+ return s.TotalPledgeCollateral, nil
+}
+
+func (s *state{{.v}}) TotalPower() (Claim, error) {
+ return Claim{
+ RawBytePower: s.TotalRawBytePower,
+ QualityAdjPower: s.TotalQualityAdjPower,
+ }, nil
+}
+
+// Committed power to the network. Includes miners below the minimum threshold.
+func (s *state{{.v}}) TotalCommitted() (Claim, error) {
+ return Claim{
+ RawBytePower: s.TotalBytesCommitted,
+ QualityAdjPower: s.TotalQABytesCommitted,
+ }, nil
+}
+
+func (s *state{{.v}}) MinerPower(addr address.Address) (Claim, bool, error) {
+ claims, err := s.claims()
+ if err != nil {
+ return Claim{}, false, err
+ }
+ var claim power{{.v}}.Claim
+ ok, err := claims.Get(abi.AddrKey(addr), &claim)
+ if err != nil {
+ return Claim{}, false, err
+ }
+ return Claim{
+ RawBytePower: claim.RawBytePower,
+ QualityAdjPower: claim.QualityAdjPower,
+ }, ok, nil
+}
+
+func (s *state{{.v}}) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
+ return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
+}
+
+func (s *state{{.v}}) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
+ return builtin.FromV{{.v}}FilterEstimate({{if (le .v 1)}}*{{end}}s.State.ThisEpochQAPowerSmoothed), nil
+}
+
+func (s *state{{.v}}) MinerCounts() (uint64, uint64, error) {
+ return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
+}
+
+func (s *state{{.v}}) ListAllMiners() ([]address.Address, error) {
+ claims, err := s.claims()
+ if err != nil {
+ return nil, err
+ }
+
+ var miners []address.Address
+ err = claims.ForEach(nil, func(k string) error {
+ a, err := address.NewFromBytes([]byte(k))
+ if err != nil {
+ return err
+ }
+ miners = append(miners, a)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return miners, nil
+}
+
+func (s *state{{.v}}) ForEachClaim(cb func(miner address.Address, claim Claim) error) error {
+ claims, err := s.claims()
+ if err != nil {
+ return err
+ }
+
+ var claim power{{.v}}.Claim
+ return claims.ForEach(&claim, func(k string) error {
+ a, err := address.NewFromBytes([]byte(k))
+ if err != nil {
+ return err
+ }
+ return cb(a, Claim{
+ RawBytePower: claim.RawBytePower,
+ QualityAdjPower: claim.QualityAdjPower,
+ })
+ })
+}
+
+func (s *state{{.v}}) ClaimsChanged(other State) (bool, error) {
+ other{{.v}}, ok := other.(*state{{.v}})
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.Claims.Equals(other{{.v}}.State.Claims), nil
+}
+
+func (s *state{{.v}}) SetTotalQualityAdjPower(p abi.StoragePower) error {
+ s.State.TotalQualityAdjPower = p
+ return nil
+}
+
+func (s *state{{.v}}) SetTotalRawBytePower(p abi.StoragePower) error {
+ s.State.TotalRawBytePower = p
+ return nil
+}
+
+func (s *state{{.v}}) SetThisEpochQualityAdjPower(p abi.StoragePower) error {
+ s.State.ThisEpochQualityAdjPower = p
+ return nil
+}
+
+func (s *state{{.v}}) SetThisEpochRawBytePower(p abi.StoragePower) error {
+ s.State.ThisEpochRawBytePower = p
+ return nil
+}
+
+func (s *state{{.v}}) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state{{.v}}) claims() (adt.Map, error) {
+ return adt{{.v}}.AsMap(s.store, s.Claims{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}})
+}
+
+func (s *state{{.v}}) decodeClaim(val *cbg.Deferred) (Claim, error) {
+ var ci power{{.v}}.Claim
+ if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return Claim{}, err
+ }
+ return fromV{{.v}}Claim(ci), nil
+}
+
+func fromV{{.v}}Claim(v{{.v}} power{{.v}}.Claim) Claim {
+ return Claim{
+ RawBytePower: v{{.v}}.RawBytePower,
+ QualityAdjPower: v{{.v}}.QualityAdjPower,
+ }
+}
diff --git a/chain/actors/builtin/power/v0.go b/chain/actors/builtin/power/v0.go
index 3f9a657777f..465d16c5c35 100644
--- a/chain/actors/builtin/power/v0.go
+++ b/chain/actors/builtin/power/v0.go
@@ -1,9 +1,12 @@
package power
import (
+ "bytes"
+
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
@@ -23,6 +26,24 @@ func load0(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make0(store adt.Store) (State, error) {
+ out := state0{store: store}
+
+ em, err := adt0.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ emm, err := adt0.MakeEmptyMultimap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *power0.ConstructState(em, emm)
+
+ return &out, nil
+}
+
type state0 struct {
power0.State
store adt.Store
@@ -48,7 +69,7 @@ func (s *state0) TotalCommitted() (Claim, error) {
}
func (s *state0) MinerPower(addr address.Address) (Claim, bool, error) {
- claims, err := adt0.AsMap(s.store, s.Claims)
+ claims, err := s.claims()
if err != nil {
return Claim{}, false, err
}
@@ -76,7 +97,7 @@ func (s *state0) MinerCounts() (uint64, uint64, error) {
}
func (s *state0) ListAllMiners() ([]address.Address, error) {
- claims, err := adt0.AsMap(s.store, s.Claims)
+ claims, err := s.claims()
if err != nil {
return nil, err
}
@@ -98,7 +119,7 @@ func (s *state0) ListAllMiners() ([]address.Address, error) {
}
func (s *state0) ForEachClaim(cb func(miner address.Address, claim Claim) error) error {
- claims, err := adt0.AsMap(s.store, s.Claims)
+ claims, err := s.claims()
if err != nil {
return err
}
@@ -115,3 +136,55 @@ func (s *state0) ForEachClaim(cb func(miner address.Address, claim Claim) error)
})
})
}
+
+func (s *state0) ClaimsChanged(other State) (bool, error) {
+ other0, ok := other.(*state0)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.Claims.Equals(other0.State.Claims), nil
+}
+
+func (s *state0) SetTotalQualityAdjPower(p abi.StoragePower) error {
+ s.State.TotalQualityAdjPower = p
+ return nil
+}
+
+func (s *state0) SetTotalRawBytePower(p abi.StoragePower) error {
+ s.State.TotalRawBytePower = p
+ return nil
+}
+
+func (s *state0) SetThisEpochQualityAdjPower(p abi.StoragePower) error {
+ s.State.ThisEpochQualityAdjPower = p
+ return nil
+}
+
+func (s *state0) SetThisEpochRawBytePower(p abi.StoragePower) error {
+ s.State.ThisEpochRawBytePower = p
+ return nil
+}
+
+func (s *state0) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state0) claims() (adt.Map, error) {
+ return adt0.AsMap(s.store, s.Claims)
+}
+
+func (s *state0) decodeClaim(val *cbg.Deferred) (Claim, error) {
+ var ci power0.Claim
+ if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return Claim{}, err
+ }
+ return fromV0Claim(ci), nil
+}
+
+func fromV0Claim(v0 power0.Claim) Claim {
+ return Claim{
+ RawBytePower: v0.RawBytePower,
+ QualityAdjPower: v0.QualityAdjPower,
+ }
+}
diff --git a/chain/actors/builtin/power/v2.go b/chain/actors/builtin/power/v2.go
index 0c15f066928..606534cef26 100644
--- a/chain/actors/builtin/power/v2.go
+++ b/chain/actors/builtin/power/v2.go
@@ -1,9 +1,12 @@
package power
import (
+ "bytes"
+
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
@@ -23,6 +26,24 @@ func load2(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make2(store adt.Store) (State, error) {
+ out := state2{store: store}
+
+ em, err := adt2.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ emm, err := adt2.MakeEmptyMultimap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *power2.ConstructState(em, emm)
+
+ return &out, nil
+}
+
type state2 struct {
power2.State
store adt.Store
@@ -48,7 +69,7 @@ func (s *state2) TotalCommitted() (Claim, error) {
}
func (s *state2) MinerPower(addr address.Address) (Claim, bool, error) {
- claims, err := adt2.AsMap(s.store, s.Claims)
+ claims, err := s.claims()
if err != nil {
return Claim{}, false, err
}
@@ -76,7 +97,7 @@ func (s *state2) MinerCounts() (uint64, uint64, error) {
}
func (s *state2) ListAllMiners() ([]address.Address, error) {
- claims, err := adt2.AsMap(s.store, s.Claims)
+ claims, err := s.claims()
if err != nil {
return nil, err
}
@@ -98,7 +119,7 @@ func (s *state2) ListAllMiners() ([]address.Address, error) {
}
func (s *state2) ForEachClaim(cb func(miner address.Address, claim Claim) error) error {
- claims, err := adt2.AsMap(s.store, s.Claims)
+ claims, err := s.claims()
if err != nil {
return err
}
@@ -115,3 +136,55 @@ func (s *state2) ForEachClaim(cb func(miner address.Address, claim Claim) error)
})
})
}
+
+func (s *state2) ClaimsChanged(other State) (bool, error) {
+ other2, ok := other.(*state2)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.Claims.Equals(other2.State.Claims), nil
+}
+
+func (s *state2) SetTotalQualityAdjPower(p abi.StoragePower) error {
+ s.State.TotalQualityAdjPower = p
+ return nil
+}
+
+func (s *state2) SetTotalRawBytePower(p abi.StoragePower) error {
+ s.State.TotalRawBytePower = p
+ return nil
+}
+
+func (s *state2) SetThisEpochQualityAdjPower(p abi.StoragePower) error {
+ s.State.ThisEpochQualityAdjPower = p
+ return nil
+}
+
+func (s *state2) SetThisEpochRawBytePower(p abi.StoragePower) error {
+ s.State.ThisEpochRawBytePower = p
+ return nil
+}
+
+func (s *state2) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state2) claims() (adt.Map, error) {
+ return adt2.AsMap(s.store, s.Claims)
+}
+
+func (s *state2) decodeClaim(val *cbg.Deferred) (Claim, error) {
+ var ci power2.Claim
+ if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return Claim{}, err
+ }
+ return fromV2Claim(ci), nil
+}
+
+func fromV2Claim(v2 power2.Claim) Claim {
+ return Claim{
+ RawBytePower: v2.RawBytePower,
+ QualityAdjPower: v2.QualityAdjPower,
+ }
+}
diff --git a/chain/actors/builtin/power/v3.go b/chain/actors/builtin/power/v3.go
new file mode 100644
index 00000000000..3dec3c63ef6
--- /dev/null
+++ b/chain/actors/builtin/power/v3.go
@@ -0,0 +1,187 @@
+package power
+
+import (
+ "bytes"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ power3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/power"
+ adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
+)
+
+var _ State = (*state3)(nil)
+
+func load3(store adt.Store, root cid.Cid) (State, error) {
+ out := state3{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make3(store adt.Store) (State, error) {
+ out := state3{store: store}
+
+ s, err := power3.ConstructState(store)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state3 struct {
+ power3.State
+ store adt.Store
+}
+
+func (s *state3) TotalLocked() (abi.TokenAmount, error) {
+ return s.TotalPledgeCollateral, nil
+}
+
+func (s *state3) TotalPower() (Claim, error) {
+ return Claim{
+ RawBytePower: s.TotalRawBytePower,
+ QualityAdjPower: s.TotalQualityAdjPower,
+ }, nil
+}
+
+// Committed power to the network. Includes miners below the minimum threshold.
+func (s *state3) TotalCommitted() (Claim, error) {
+ return Claim{
+ RawBytePower: s.TotalBytesCommitted,
+ QualityAdjPower: s.TotalQABytesCommitted,
+ }, nil
+}
+
+func (s *state3) MinerPower(addr address.Address) (Claim, bool, error) {
+ claims, err := s.claims()
+ if err != nil {
+ return Claim{}, false, err
+ }
+ var claim power3.Claim
+ ok, err := claims.Get(abi.AddrKey(addr), &claim)
+ if err != nil {
+ return Claim{}, false, err
+ }
+ return Claim{
+ RawBytePower: claim.RawBytePower,
+ QualityAdjPower: claim.QualityAdjPower,
+ }, ok, nil
+}
+
+func (s *state3) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
+ return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
+}
+
+func (s *state3) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
+ return builtin.FromV3FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil
+}
+
+func (s *state3) MinerCounts() (uint64, uint64, error) {
+ return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
+}
+
+func (s *state3) ListAllMiners() ([]address.Address, error) {
+ claims, err := s.claims()
+ if err != nil {
+ return nil, err
+ }
+
+ var miners []address.Address
+ err = claims.ForEach(nil, func(k string) error {
+ a, err := address.NewFromBytes([]byte(k))
+ if err != nil {
+ return err
+ }
+ miners = append(miners, a)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return miners, nil
+}
+
+func (s *state3) ForEachClaim(cb func(miner address.Address, claim Claim) error) error {
+ claims, err := s.claims()
+ if err != nil {
+ return err
+ }
+
+ var claim power3.Claim
+ return claims.ForEach(&claim, func(k string) error {
+ a, err := address.NewFromBytes([]byte(k))
+ if err != nil {
+ return err
+ }
+ return cb(a, Claim{
+ RawBytePower: claim.RawBytePower,
+ QualityAdjPower: claim.QualityAdjPower,
+ })
+ })
+}
+
+func (s *state3) ClaimsChanged(other State) (bool, error) {
+ other3, ok := other.(*state3)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.Claims.Equals(other3.State.Claims), nil
+}
+
+func (s *state3) SetTotalQualityAdjPower(p abi.StoragePower) error {
+ s.State.TotalQualityAdjPower = p
+ return nil
+}
+
+func (s *state3) SetTotalRawBytePower(p abi.StoragePower) error {
+ s.State.TotalRawBytePower = p
+ return nil
+}
+
+func (s *state3) SetThisEpochQualityAdjPower(p abi.StoragePower) error {
+ s.State.ThisEpochQualityAdjPower = p
+ return nil
+}
+
+func (s *state3) SetThisEpochRawBytePower(p abi.StoragePower) error {
+ s.State.ThisEpochRawBytePower = p
+ return nil
+}
+
+func (s *state3) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state3) claims() (adt.Map, error) {
+ return adt3.AsMap(s.store, s.Claims, builtin3.DefaultHamtBitwidth)
+}
+
+func (s *state3) decodeClaim(val *cbg.Deferred) (Claim, error) {
+ var ci power3.Claim
+ if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return Claim{}, err
+ }
+ return fromV3Claim(ci), nil
+}
+
+func fromV3Claim(v3 power3.Claim) Claim {
+ return Claim{
+ RawBytePower: v3.RawBytePower,
+ QualityAdjPower: v3.QualityAdjPower,
+ }
+}
diff --git a/chain/actors/builtin/power/v4.go b/chain/actors/builtin/power/v4.go
new file mode 100644
index 00000000000..b73eedf5a82
--- /dev/null
+++ b/chain/actors/builtin/power/v4.go
@@ -0,0 +1,187 @@
+package power
+
+import (
+ "bytes"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ power4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/power"
+ adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt"
+)
+
+var _ State = (*state4)(nil)
+
+func load4(store adt.Store, root cid.Cid) (State, error) {
+ out := state4{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make4(store adt.Store) (State, error) {
+ out := state4{store: store}
+
+ s, err := power4.ConstructState(store)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state4 struct {
+ power4.State
+ store adt.Store
+}
+
+func (s *state4) TotalLocked() (abi.TokenAmount, error) {
+ return s.TotalPledgeCollateral, nil
+}
+
+func (s *state4) TotalPower() (Claim, error) {
+ return Claim{
+ RawBytePower: s.TotalRawBytePower,
+ QualityAdjPower: s.TotalQualityAdjPower,
+ }, nil
+}
+
+// Committed power to the network. Includes miners below the minimum threshold.
+func (s *state4) TotalCommitted() (Claim, error) {
+ return Claim{
+ RawBytePower: s.TotalBytesCommitted,
+ QualityAdjPower: s.TotalQABytesCommitted,
+ }, nil
+}
+
+func (s *state4) MinerPower(addr address.Address) (Claim, bool, error) {
+ claims, err := s.claims()
+ if err != nil {
+ return Claim{}, false, err
+ }
+ var claim power4.Claim
+ ok, err := claims.Get(abi.AddrKey(addr), &claim)
+ if err != nil {
+ return Claim{}, false, err
+ }
+ return Claim{
+ RawBytePower: claim.RawBytePower,
+ QualityAdjPower: claim.QualityAdjPower,
+ }, ok, nil
+}
+
+func (s *state4) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
+ return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
+}
+
+func (s *state4) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
+ return builtin.FromV4FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil
+}
+
+func (s *state4) MinerCounts() (uint64, uint64, error) {
+ return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
+}
+
+func (s *state4) ListAllMiners() ([]address.Address, error) {
+ claims, err := s.claims()
+ if err != nil {
+ return nil, err
+ }
+
+ var miners []address.Address
+ err = claims.ForEach(nil, func(k string) error {
+ a, err := address.NewFromBytes([]byte(k))
+ if err != nil {
+ return err
+ }
+ miners = append(miners, a)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return miners, nil
+}
+
+func (s *state4) ForEachClaim(cb func(miner address.Address, claim Claim) error) error {
+ claims, err := s.claims()
+ if err != nil {
+ return err
+ }
+
+ var claim power4.Claim
+ return claims.ForEach(&claim, func(k string) error {
+ a, err := address.NewFromBytes([]byte(k))
+ if err != nil {
+ return err
+ }
+ return cb(a, Claim{
+ RawBytePower: claim.RawBytePower,
+ QualityAdjPower: claim.QualityAdjPower,
+ })
+ })
+}
+
+func (s *state4) ClaimsChanged(other State) (bool, error) {
+ other4, ok := other.(*state4)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.Claims.Equals(other4.State.Claims), nil
+}
+
+func (s *state4) SetTotalQualityAdjPower(p abi.StoragePower) error {
+ s.State.TotalQualityAdjPower = p
+ return nil
+}
+
+func (s *state4) SetTotalRawBytePower(p abi.StoragePower) error {
+ s.State.TotalRawBytePower = p
+ return nil
+}
+
+func (s *state4) SetThisEpochQualityAdjPower(p abi.StoragePower) error {
+ s.State.ThisEpochQualityAdjPower = p
+ return nil
+}
+
+func (s *state4) SetThisEpochRawBytePower(p abi.StoragePower) error {
+ s.State.ThisEpochRawBytePower = p
+ return nil
+}
+
+func (s *state4) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state4) claims() (adt.Map, error) {
+ return adt4.AsMap(s.store, s.Claims, builtin4.DefaultHamtBitwidth)
+}
+
+func (s *state4) decodeClaim(val *cbg.Deferred) (Claim, error) {
+ var ci power4.Claim
+ if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return Claim{}, err
+ }
+ return fromV4Claim(ci), nil
+}
+
+func fromV4Claim(v4 power4.Claim) Claim {
+ return Claim{
+ RawBytePower: v4.RawBytePower,
+ QualityAdjPower: v4.QualityAdjPower,
+ }
+}
diff --git a/chain/actors/builtin/power/v5.go b/chain/actors/builtin/power/v5.go
new file mode 100644
index 00000000000..84b23a5777f
--- /dev/null
+++ b/chain/actors/builtin/power/v5.go
@@ -0,0 +1,187 @@
+package power
+
+import (
+ "bytes"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+
+ power5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/power"
+ adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
+)
+
+var _ State = (*state5)(nil)
+
+func load5(store adt.Store, root cid.Cid) (State, error) {
+ out := state5{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make5(store adt.Store) (State, error) {
+ out := state5{store: store}
+
+ s, err := power5.ConstructState(store)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state5 struct {
+ power5.State
+ store adt.Store
+}
+
+func (s *state5) TotalLocked() (abi.TokenAmount, error) {
+ return s.TotalPledgeCollateral, nil
+}
+
+func (s *state5) TotalPower() (Claim, error) {
+ return Claim{
+ RawBytePower: s.TotalRawBytePower,
+ QualityAdjPower: s.TotalQualityAdjPower,
+ }, nil
+}
+
+// Committed power to the network. Includes miners below the minimum threshold.
+func (s *state5) TotalCommitted() (Claim, error) {
+ return Claim{
+ RawBytePower: s.TotalBytesCommitted,
+ QualityAdjPower: s.TotalQABytesCommitted,
+ }, nil
+}
+
+func (s *state5) MinerPower(addr address.Address) (Claim, bool, error) {
+ claims, err := s.claims()
+ if err != nil {
+ return Claim{}, false, err
+ }
+ var claim power5.Claim
+ ok, err := claims.Get(abi.AddrKey(addr), &claim)
+ if err != nil {
+ return Claim{}, false, err
+ }
+ return Claim{
+ RawBytePower: claim.RawBytePower,
+ QualityAdjPower: claim.QualityAdjPower,
+ }, ok, nil
+}
+
+func (s *state5) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
+ return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
+}
+
+func (s *state5) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
+ return builtin.FromV5FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil
+}
+
+func (s *state5) MinerCounts() (uint64, uint64, error) {
+ return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
+}
+
+func (s *state5) ListAllMiners() ([]address.Address, error) {
+ claims, err := s.claims()
+ if err != nil {
+ return nil, err
+ }
+
+ var miners []address.Address
+ err = claims.ForEach(nil, func(k string) error {
+ a, err := address.NewFromBytes([]byte(k))
+ if err != nil {
+ return err
+ }
+ miners = append(miners, a)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return miners, nil
+}
+
+func (s *state5) ForEachClaim(cb func(miner address.Address, claim Claim) error) error {
+ claims, err := s.claims()
+ if err != nil {
+ return err
+ }
+
+ var claim power5.Claim
+ return claims.ForEach(&claim, func(k string) error {
+ a, err := address.NewFromBytes([]byte(k))
+ if err != nil {
+ return err
+ }
+ return cb(a, Claim{
+ RawBytePower: claim.RawBytePower,
+ QualityAdjPower: claim.QualityAdjPower,
+ })
+ })
+}
+
+func (s *state5) ClaimsChanged(other State) (bool, error) {
+ other5, ok := other.(*state5)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.Claims.Equals(other5.State.Claims), nil
+}
+
+func (s *state5) SetTotalQualityAdjPower(p abi.StoragePower) error {
+ s.State.TotalQualityAdjPower = p
+ return nil
+}
+
+func (s *state5) SetTotalRawBytePower(p abi.StoragePower) error {
+ s.State.TotalRawBytePower = p
+ return nil
+}
+
+func (s *state5) SetThisEpochQualityAdjPower(p abi.StoragePower) error {
+ s.State.ThisEpochQualityAdjPower = p
+ return nil
+}
+
+func (s *state5) SetThisEpochRawBytePower(p abi.StoragePower) error {
+ s.State.ThisEpochRawBytePower = p
+ return nil
+}
+
+func (s *state5) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state5) claims() (adt.Map, error) {
+ return adt5.AsMap(s.store, s.Claims, builtin5.DefaultHamtBitwidth)
+}
+
+func (s *state5) decodeClaim(val *cbg.Deferred) (Claim, error) {
+ var ci power5.Claim
+ if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return Claim{}, err
+ }
+ return fromV5Claim(ci), nil
+}
+
+func fromV5Claim(v5 power5.Claim) Claim {
+ return Claim{
+ RawBytePower: v5.RawBytePower,
+ QualityAdjPower: v5.QualityAdjPower,
+ }
+}
diff --git a/chain/actors/builtin/reward/actor.go.template b/chain/actors/builtin/reward/actor.go.template
new file mode 100644
index 00000000000..89cdddaeceb
--- /dev/null
+++ b/chain/actors/builtin/reward/actor.go.template
@@ -0,0 +1,83 @@
+package reward
+
+import (
+ "github.com/filecoin-project/go-state-types/abi"
+ reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward"
+ "github.com/ipfs/go-cid"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-state-types/cbor"
+{{range .versions}}
+ builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
+{{end}}
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func init() {
+{{range .versions}}
+ builtin.RegisterActorState(builtin{{.}}.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load{{.}}(store, root)
+ })
+{{end}}}
+
+var (
+ Address = builtin{{.latestVersion}}.RewardActorAddr
+ Methods = builtin{{.latestVersion}}.MethodsReward
+)
+
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+{{range .versions}}
+ case builtin{{.}}.RewardActorCodeID:
+ return load{{.}}(store, act.Head)
+{{end}}
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+func MakeState(store adt.Store, av actors.Version, currRealizedPower abi.StoragePower) (State, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return make{{.}}(store, currRealizedPower)
+{{end}}
+}
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return builtin{{.}}.RewardActorCodeID, nil
+{{end}}
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
+type State interface {
+ cbor.Marshaler
+
+ ThisEpochBaselinePower() (abi.StoragePower, error)
+ ThisEpochReward() (abi.StoragePower, error)
+ ThisEpochRewardSmoothed() (builtin.FilterEstimate, error)
+
+ EffectiveBaselinePower() (abi.StoragePower, error)
+ EffectiveNetworkTime() (abi.ChainEpoch, error)
+
+ TotalStoragePowerReward() (abi.TokenAmount, error)
+
+ CumsumBaseline() (abi.StoragePower, error)
+ CumsumRealized() (abi.StoragePower, error)
+
+ InitialPledgeForPower(abi.StoragePower, abi.TokenAmount, *builtin.FilterEstimate, abi.TokenAmount) (abi.TokenAmount, error)
+ PreCommitDepositForPower(builtin.FilterEstimate, abi.StoragePower) (abi.TokenAmount, error)
+ GetState() interface{}
+}
+
+type AwardBlockRewardParams = reward0.AwardBlockRewardParams
diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go
index 952ca270b9b..ebec85517fb 100644
--- a/chain/actors/builtin/reward/reward.go
+++ b/chain/actors/builtin/reward/reward.go
@@ -2,43 +2,123 @@ package reward
import (
"github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/actors"
reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/cbor"
+
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types"
)
func init() {
+
builtin.RegisterActorState(builtin0.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load0(store, root)
})
+
builtin.RegisterActorState(builtin2.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load2(store, root)
})
+
+ builtin.RegisterActorState(builtin3.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load3(store, root)
+ })
+
+ builtin.RegisterActorState(builtin4.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load4(store, root)
+ })
+
+ builtin.RegisterActorState(builtin5.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load5(store, root)
+ })
}
var (
- Address = builtin2.RewardActorAddr
- Methods = builtin2.MethodsReward
+ Address = builtin5.RewardActorAddr
+ Methods = builtin5.MethodsReward
)
-func Load(store adt.Store, act *types.Actor) (st State, err error) {
+func Load(store adt.Store, act *types.Actor) (State, error) {
switch act.Code {
+
case builtin0.RewardActorCodeID:
return load0(store, act.Head)
+
case builtin2.RewardActorCodeID:
return load2(store, act.Head)
+
+ case builtin3.RewardActorCodeID:
+ return load3(store, act.Head)
+
+ case builtin4.RewardActorCodeID:
+ return load4(store, act.Head)
+
+ case builtin5.RewardActorCodeID:
+ return load5(store, act.Head)
+
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
+func MakeState(store adt.Store, av actors.Version, currRealizedPower abi.StoragePower) (State, error) {
+ switch av {
+
+ case actors.Version0:
+ return make0(store, currRealizedPower)
+
+ case actors.Version2:
+ return make2(store, currRealizedPower)
+
+ case actors.Version3:
+ return make3(store, currRealizedPower)
+
+ case actors.Version4:
+ return make4(store, currRealizedPower)
+
+ case actors.Version5:
+ return make5(store, currRealizedPower)
+
+ }
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+
+ case actors.Version0:
+ return builtin0.RewardActorCodeID, nil
+
+ case actors.Version2:
+ return builtin2.RewardActorCodeID, nil
+
+ case actors.Version3:
+ return builtin3.RewardActorCodeID, nil
+
+ case actors.Version4:
+ return builtin4.RewardActorCodeID, nil
+
+ case actors.Version5:
+ return builtin5.RewardActorCodeID, nil
+
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
type State interface {
cbor.Marshaler
@@ -56,6 +136,7 @@ type State interface {
InitialPledgeForPower(abi.StoragePower, abi.TokenAmount, *builtin.FilterEstimate, abi.TokenAmount) (abi.TokenAmount, error)
PreCommitDepositForPower(builtin.FilterEstimate, abi.StoragePower) (abi.TokenAmount, error)
+ GetState() interface{}
}
type AwardBlockRewardParams = reward0.AwardBlockRewardParams
diff --git a/chain/actors/builtin/reward/state.go.template b/chain/actors/builtin/reward/state.go.template
new file mode 100644
index 00000000000..2bc271cbbfa
--- /dev/null
+++ b/chain/actors/builtin/reward/state.go.template
@@ -0,0 +1,113 @@
+package reward
+
+import (
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ miner{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/miner"
+ reward{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/reward"
+ smoothing{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/smoothing"
+)
+
+var _ State = (*state{{.v}})(nil)
+
+func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
+ out := state{{.v}}{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make{{.v}}(store adt.Store, currRealizedPower abi.StoragePower) (State, error) {
+ out := state{{.v}}{store: store}
+ out.State = *reward{{.v}}.ConstructState(currRealizedPower)
+ return &out, nil
+}
+
+type state{{.v}} struct {
+ reward{{.v}}.State
+ store adt.Store
+}
+
+func (s *state{{.v}}) ThisEpochReward() (abi.TokenAmount, error) {
+ return s.State.ThisEpochReward, nil
+}
+
+func (s *state{{.v}}) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
+{{if (ge .v 2)}}
+ return builtin.FilterEstimate{
+ PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate,
+ VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate,
+ }, nil
+{{else}}
+ return builtin.FromV0FilterEstimate(*s.State.ThisEpochRewardSmoothed), nil
+{{end}}
+}
+
+func (s *state{{.v}}) ThisEpochBaselinePower() (abi.StoragePower, error) {
+ return s.State.ThisEpochBaselinePower, nil
+}
+
+func (s *state{{.v}}) TotalStoragePowerReward() (abi.TokenAmount, error) {
+ return s.State.{{if (ge .v 2)}}TotalStoragePowerReward{{else}}TotalMined{{end}}, nil
+}
+
+func (s *state{{.v}}) EffectiveBaselinePower() (abi.StoragePower, error) {
+ return s.State.EffectiveBaselinePower, nil
+}
+
+func (s *state{{.v}}) EffectiveNetworkTime() (abi.ChainEpoch, error) {
+ return s.State.EffectiveNetworkTime, nil
+}
+
+func (s *state{{.v}}) CumsumBaseline() (reward{{.v}}.Spacetime, error) {
+ return s.State.CumsumBaseline, nil
+}
+
+func (s *state{{.v}}) CumsumRealized() (reward{{.v}}.Spacetime, error) {
+ return s.State.CumsumRealized, nil
+}
+{{if (ge .v 2)}}
+func (s *state{{.v}}) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
+ return miner{{.v}}.InitialPledgeForPower(
+ qaPower,
+ s.State.ThisEpochBaselinePower,
+ s.State.ThisEpochRewardSmoothed,
+ smoothing{{.v}}.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ circSupply,
+ ), nil
+}
+{{else}}
+func (s *state0) InitialPledgeForPower(sectorWeight abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
+ return miner0.InitialPledgeForPower(
+ sectorWeight,
+ s.State.ThisEpochBaselinePower,
+ networkTotalPledge,
+ s.State.ThisEpochRewardSmoothed,
+ &smoothing0.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ circSupply), nil
+}
+{{end}}
+func (s *state{{.v}}) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
+ return miner{{.v}}.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
+ {{if (le .v 0)}}&{{end}}smoothing{{.v}}.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ sectorWeight), nil
+}
+
+func (s *state{{.v}}) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/reward/v0.go b/chain/actors/builtin/reward/v0.go
index 6a6e6d12e9d..cd098c151e8 100644
--- a/chain/actors/builtin/reward/v0.go
+++ b/chain/actors/builtin/reward/v0.go
@@ -23,17 +23,25 @@ func load0(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make0(store adt.Store, currRealizedPower abi.StoragePower) (State, error) {
+ out := state0{store: store}
+ out.State = *reward0.ConstructState(currRealizedPower)
+ return &out, nil
+}
+
type state0 struct {
reward0.State
store adt.Store
}
-func (s *state0) ThisEpochReward() (abi.StoragePower, error) {
+func (s *state0) ThisEpochReward() (abi.TokenAmount, error) {
return s.State.ThisEpochReward, nil
}
func (s *state0) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
+
return builtin.FromV0FilterEstimate(*s.State.ThisEpochRewardSmoothed), nil
+
}
func (s *state0) ThisEpochBaselinePower() (abi.StoragePower, error) {
@@ -52,11 +60,11 @@ func (s *state0) EffectiveNetworkTime() (abi.ChainEpoch, error) {
return s.State.EffectiveNetworkTime, nil
}
-func (s *state0) CumsumBaseline() (abi.StoragePower, error) {
+func (s *state0) CumsumBaseline() (reward0.Spacetime, error) {
return s.State.CumsumBaseline, nil
}
-func (s *state0) CumsumRealized() (abi.StoragePower, error) {
+func (s *state0) CumsumRealized() (reward0.Spacetime, error) {
return s.State.CumsumRealized, nil
}
@@ -81,3 +89,7 @@ func (s *state0) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate,
},
sectorWeight), nil
}
+
+func (s *state0) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/reward/v2.go b/chain/actors/builtin/reward/v2.go
index b7cb4910278..08e9a7bc39a 100644
--- a/chain/actors/builtin/reward/v2.go
+++ b/chain/actors/builtin/reward/v2.go
@@ -23,20 +23,28 @@ func load2(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make2(store adt.Store, currRealizedPower abi.StoragePower) (State, error) {
+ out := state2{store: store}
+ out.State = *reward2.ConstructState(currRealizedPower)
+ return &out, nil
+}
+
type state2 struct {
reward2.State
store adt.Store
}
-func (s *state2) ThisEpochReward() (abi.StoragePower, error) {
+func (s *state2) ThisEpochReward() (abi.TokenAmount, error) {
return s.State.ThisEpochReward, nil
}
func (s *state2) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
+
return builtin.FilterEstimate{
PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate,
VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate,
}, nil
+
}
func (s *state2) ThisEpochBaselinePower() (abi.StoragePower, error) {
@@ -55,11 +63,11 @@ func (s *state2) EffectiveNetworkTime() (abi.ChainEpoch, error) {
return s.State.EffectiveNetworkTime, nil
}
-func (s *state2) CumsumBaseline() (abi.StoragePower, error) {
+func (s *state2) CumsumBaseline() (reward2.Spacetime, error) {
return s.State.CumsumBaseline, nil
}
-func (s *state2) CumsumRealized() (abi.StoragePower, error) {
+func (s *state2) CumsumRealized() (reward2.Spacetime, error) {
return s.State.CumsumRealized, nil
}
@@ -84,3 +92,7 @@ func (s *state2) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate,
},
sectorWeight), nil
}
+
+func (s *state2) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/reward/v3.go b/chain/actors/builtin/reward/v3.go
new file mode 100644
index 00000000000..fd9fa56e27e
--- /dev/null
+++ b/chain/actors/builtin/reward/v3.go
@@ -0,0 +1,98 @@
+package reward
+
+import (
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
+ reward3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/reward"
+ smoothing3 "github.com/filecoin-project/specs-actors/v3/actors/util/smoothing"
+)
+
+var _ State = (*state3)(nil)
+
+func load3(store adt.Store, root cid.Cid) (State, error) {
+ out := state3{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make3(store adt.Store, currRealizedPower abi.StoragePower) (State, error) {
+ out := state3{store: store}
+ out.State = *reward3.ConstructState(currRealizedPower)
+ return &out, nil
+}
+
+type state3 struct {
+ reward3.State
+ store adt.Store
+}
+
+func (s *state3) ThisEpochReward() (abi.TokenAmount, error) {
+ return s.State.ThisEpochReward, nil
+}
+
+func (s *state3) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
+
+ return builtin.FilterEstimate{
+ PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate,
+ VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate,
+ }, nil
+
+}
+
+func (s *state3) ThisEpochBaselinePower() (abi.StoragePower, error) {
+ return s.State.ThisEpochBaselinePower, nil
+}
+
+func (s *state3) TotalStoragePowerReward() (abi.TokenAmount, error) {
+ return s.State.TotalStoragePowerReward, nil
+}
+
+func (s *state3) EffectiveBaselinePower() (abi.StoragePower, error) {
+ return s.State.EffectiveBaselinePower, nil
+}
+
+func (s *state3) EffectiveNetworkTime() (abi.ChainEpoch, error) {
+ return s.State.EffectiveNetworkTime, nil
+}
+
+func (s *state3) CumsumBaseline() (reward3.Spacetime, error) {
+ return s.State.CumsumBaseline, nil
+}
+
+func (s *state3) CumsumRealized() (reward3.Spacetime, error) {
+ return s.State.CumsumRealized, nil
+}
+
+func (s *state3) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
+ return miner3.InitialPledgeForPower(
+ qaPower,
+ s.State.ThisEpochBaselinePower,
+ s.State.ThisEpochRewardSmoothed,
+ smoothing3.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ circSupply,
+ ), nil
+}
+
+func (s *state3) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
+ return miner3.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
+ smoothing3.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ sectorWeight), nil
+}
+
+func (s *state3) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/reward/v4.go b/chain/actors/builtin/reward/v4.go
new file mode 100644
index 00000000000..310ca04e8df
--- /dev/null
+++ b/chain/actors/builtin/reward/v4.go
@@ -0,0 +1,98 @@
+package reward
+
+import (
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner"
+ reward4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/reward"
+ smoothing4 "github.com/filecoin-project/specs-actors/v4/actors/util/smoothing"
+)
+
+var _ State = (*state4)(nil)
+
+func load4(store adt.Store, root cid.Cid) (State, error) {
+ out := state4{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make4(store adt.Store, currRealizedPower abi.StoragePower) (State, error) {
+ out := state4{store: store}
+ out.State = *reward4.ConstructState(currRealizedPower)
+ return &out, nil
+}
+
+type state4 struct {
+ reward4.State
+ store adt.Store
+}
+
+func (s *state4) ThisEpochReward() (abi.TokenAmount, error) {
+ return s.State.ThisEpochReward, nil
+}
+
+func (s *state4) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
+
+ return builtin.FilterEstimate{
+ PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate,
+ VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate,
+ }, nil
+
+}
+
+func (s *state4) ThisEpochBaselinePower() (abi.StoragePower, error) {
+ return s.State.ThisEpochBaselinePower, nil
+}
+
+func (s *state4) TotalStoragePowerReward() (abi.TokenAmount, error) {
+ return s.State.TotalStoragePowerReward, nil
+}
+
+func (s *state4) EffectiveBaselinePower() (abi.StoragePower, error) {
+ return s.State.EffectiveBaselinePower, nil
+}
+
+func (s *state4) EffectiveNetworkTime() (abi.ChainEpoch, error) {
+ return s.State.EffectiveNetworkTime, nil
+}
+
+func (s *state4) CumsumBaseline() (reward4.Spacetime, error) {
+ return s.State.CumsumBaseline, nil
+}
+
+func (s *state4) CumsumRealized() (reward4.Spacetime, error) {
+ return s.State.CumsumRealized, nil
+}
+
+func (s *state4) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
+ return miner4.InitialPledgeForPower(
+ qaPower,
+ s.State.ThisEpochBaselinePower,
+ s.State.ThisEpochRewardSmoothed,
+ smoothing4.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ circSupply,
+ ), nil
+}
+
+func (s *state4) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
+ return miner4.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
+ smoothing4.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ sectorWeight), nil
+}
+
+func (s *state4) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/reward/v5.go b/chain/actors/builtin/reward/v5.go
new file mode 100644
index 00000000000..7200f7d11af
--- /dev/null
+++ b/chain/actors/builtin/reward/v5.go
@@ -0,0 +1,98 @@
+package reward
+
+import (
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
+ reward5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/reward"
+ smoothing5 "github.com/filecoin-project/specs-actors/v5/actors/util/smoothing"
+)
+
+var _ State = (*state5)(nil)
+
+func load5(store adt.Store, root cid.Cid) (State, error) {
+ out := state5{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make5(store adt.Store, currRealizedPower abi.StoragePower) (State, error) {
+ out := state5{store: store}
+ out.State = *reward5.ConstructState(currRealizedPower)
+ return &out, nil
+}
+
+type state5 struct {
+ reward5.State
+ store adt.Store
+}
+
+func (s *state5) ThisEpochReward() (abi.TokenAmount, error) {
+ return s.State.ThisEpochReward, nil
+}
+
+func (s *state5) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
+
+ return builtin.FilterEstimate{
+ PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate,
+ VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate,
+ }, nil
+
+}
+
+func (s *state5) ThisEpochBaselinePower() (abi.StoragePower, error) {
+ return s.State.ThisEpochBaselinePower, nil
+}
+
+func (s *state5) TotalStoragePowerReward() (abi.TokenAmount, error) {
+ return s.State.TotalStoragePowerReward, nil
+}
+
+func (s *state5) EffectiveBaselinePower() (abi.StoragePower, error) {
+ return s.State.EffectiveBaselinePower, nil
+}
+
+func (s *state5) EffectiveNetworkTime() (abi.ChainEpoch, error) {
+ return s.State.EffectiveNetworkTime, nil
+}
+
+func (s *state5) CumsumBaseline() (reward5.Spacetime, error) {
+ return s.State.CumsumBaseline, nil
+}
+
+func (s *state5) CumsumRealized() (reward5.Spacetime, error) {
+ return s.State.CumsumRealized, nil
+}
+
+func (s *state5) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
+ return miner5.InitialPledgeForPower(
+ qaPower,
+ s.State.ThisEpochBaselinePower,
+ s.State.ThisEpochRewardSmoothed,
+ smoothing5.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ circSupply,
+ ), nil
+}
+
+func (s *state5) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
+ return miner5.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
+ smoothing5.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ sectorWeight), nil
+}
+
+func (s *state5) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/system/actor.go.template b/chain/actors/builtin/system/actor.go.template
new file mode 100644
index 00000000000..9253199709c
--- /dev/null
+++ b/chain/actors/builtin/system/actor.go.template
@@ -0,0 +1,41 @@
+package system
+
+import (
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "golang.org/x/xerrors"
+ "github.com/ipfs/go-cid"
+
+{{range .versions}}
+ builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
+{{end}}
+)
+
+var (
+ Address = builtin{{.latestVersion}}.SystemActorAddr
+)
+
+func MakeState(store adt.Store, av actors.Version) (State, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return make{{.}}(store)
+{{end}}
+}
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return builtin{{.}}.SystemActorCodeID, nil
+{{end}}
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
+type State interface {
+ GetState() interface{}
+}
diff --git a/chain/actors/builtin/system/state.go.template b/chain/actors/builtin/system/state.go.template
new file mode 100644
index 00000000000..fa644f8c755
--- /dev/null
+++ b/chain/actors/builtin/system/state.go.template
@@ -0,0 +1,35 @@
+package system
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ system{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/system"
+)
+
+var _ State = (*state{{.v}})(nil)
+
+func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
+ out := state{{.v}}{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make{{.v}}(store adt.Store) (State, error) {
+ out := state{{.v}}{store: store}
+ out.State = system{{.v}}.State{}
+ return &out, nil
+}
+
+type state{{.v}} struct {
+ system{{.v}}.State
+ store adt.Store
+}
+
+func (s *state{{.v}}) GetState() interface{} {
+ return &s.State
+}
\ No newline at end of file
diff --git a/chain/actors/builtin/system/system.go b/chain/actors/builtin/system/system.go
new file mode 100644
index 00000000000..289fb4d5de6
--- /dev/null
+++ b/chain/actors/builtin/system/system.go
@@ -0,0 +1,71 @@
+package system
+
+import (
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+)
+
+var (
+ Address = builtin5.SystemActorAddr
+)
+
+func MakeState(store adt.Store, av actors.Version) (State, error) {
+ switch av {
+
+ case actors.Version0:
+ return make0(store)
+
+ case actors.Version2:
+ return make2(store)
+
+ case actors.Version3:
+ return make3(store)
+
+ case actors.Version4:
+ return make4(store)
+
+ case actors.Version5:
+ return make5(store)
+
+ }
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+
+ case actors.Version0:
+ return builtin0.SystemActorCodeID, nil
+
+ case actors.Version2:
+ return builtin2.SystemActorCodeID, nil
+
+ case actors.Version3:
+ return builtin3.SystemActorCodeID, nil
+
+ case actors.Version4:
+ return builtin4.SystemActorCodeID, nil
+
+ case actors.Version5:
+ return builtin5.SystemActorCodeID, nil
+
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
+type State interface {
+ GetState() interface{}
+}
diff --git a/chain/actors/builtin/system/v0.go b/chain/actors/builtin/system/v0.go
new file mode 100644
index 00000000000..64c6f53d3cf
--- /dev/null
+++ b/chain/actors/builtin/system/v0.go
@@ -0,0 +1,35 @@
+package system
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ system0 "github.com/filecoin-project/specs-actors/actors/builtin/system"
+)
+
+var _ State = (*state0)(nil)
+
+func load0(store adt.Store, root cid.Cid) (State, error) {
+ out := state0{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make0(store adt.Store) (State, error) {
+ out := state0{store: store}
+ out.State = system0.State{}
+ return &out, nil
+}
+
+type state0 struct {
+ system0.State
+ store adt.Store
+}
+
+func (s *state0) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/system/v2.go b/chain/actors/builtin/system/v2.go
new file mode 100644
index 00000000000..eb540891cc3
--- /dev/null
+++ b/chain/actors/builtin/system/v2.go
@@ -0,0 +1,35 @@
+package system
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ system2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/system"
+)
+
+var _ State = (*state2)(nil)
+
+func load2(store adt.Store, root cid.Cid) (State, error) {
+ out := state2{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make2(store adt.Store) (State, error) {
+ out := state2{store: store}
+ out.State = system2.State{}
+ return &out, nil
+}
+
+type state2 struct {
+ system2.State
+ store adt.Store
+}
+
+func (s *state2) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/system/v3.go b/chain/actors/builtin/system/v3.go
new file mode 100644
index 00000000000..5b04e189ee6
--- /dev/null
+++ b/chain/actors/builtin/system/v3.go
@@ -0,0 +1,35 @@
+package system
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ system3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/system"
+)
+
+var _ State = (*state3)(nil)
+
+func load3(store adt.Store, root cid.Cid) (State, error) {
+ out := state3{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make3(store adt.Store) (State, error) {
+ out := state3{store: store}
+ out.State = system3.State{}
+ return &out, nil
+}
+
+type state3 struct {
+ system3.State
+ store adt.Store
+}
+
+func (s *state3) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/system/v4.go b/chain/actors/builtin/system/v4.go
new file mode 100644
index 00000000000..b6c92497884
--- /dev/null
+++ b/chain/actors/builtin/system/v4.go
@@ -0,0 +1,35 @@
+package system
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ system4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/system"
+)
+
+var _ State = (*state4)(nil)
+
+func load4(store adt.Store, root cid.Cid) (State, error) {
+ out := state4{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make4(store adt.Store) (State, error) {
+ out := state4{store: store}
+ out.State = system4.State{}
+ return &out, nil
+}
+
+type state4 struct {
+ system4.State
+ store adt.Store
+}
+
+func (s *state4) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/system/v5.go b/chain/actors/builtin/system/v5.go
new file mode 100644
index 00000000000..77d2a8478be
--- /dev/null
+++ b/chain/actors/builtin/system/v5.go
@@ -0,0 +1,35 @@
+package system
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ system5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/system"
+)
+
+var _ State = (*state5)(nil)
+
+func load5(store adt.Store, root cid.Cid) (State, error) {
+ out := state5{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make5(store adt.Store) (State, error) {
+ out := state5{store: store}
+ out.State = system5.State{}
+ return &out, nil
+}
+
+type state5 struct {
+ system5.State
+ store adt.Store
+}
+
+func (s *state5) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/verifreg/actor.go.template b/chain/actors/builtin/verifreg/actor.go.template
new file mode 100644
index 00000000000..9ea8e155aec
--- /dev/null
+++ b/chain/actors/builtin/verifreg/actor.go.template
@@ -0,0 +1,75 @@
+package verifreg
+
+import (
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ "github.com/filecoin-project/go-state-types/cbor"
+{{range .versions}}
+ builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
+{{end}}
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func init() {
+{{range .versions}}
+ builtin.RegisterActorState(builtin{{.}}.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load{{.}}(store, root)
+ })
+{{end}}
+}
+
+var (
+ Address = builtin{{.latestVersion}}.VerifiedRegistryActorAddr
+ Methods = builtin{{.latestVersion}}.MethodsVerifiedRegistry
+)
+
+func Load(store adt.Store, act *types.Actor) (State, error) {
+ switch act.Code {
+{{range .versions}}
+ case builtin{{.}}.VerifiedRegistryActorCodeID:
+ return load{{.}}(store, act.Head)
+{{end}}
+ }
+ return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+func MakeState(store adt.Store, av actors.Version, rootKeyAddress address.Address) (State, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return make{{.}}(store, rootKeyAddress)
+{{end}}
+}
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+{{range .versions}}
+ case actors.Version{{.}}:
+ return builtin{{.}}.VerifiedRegistryActorCodeID, nil
+{{end}}
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
+
+type State interface {
+ cbor.Marshaler
+
+ RootKey() (address.Address, error)
+ VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error)
+ VerifierDataCap(address.Address) (bool, abi.StoragePower, error)
+ ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error
+ ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error
+ GetState() interface{}
+}
diff --git a/chain/actors/builtin/verifreg/state.go.template b/chain/actors/builtin/verifreg/state.go.template
new file mode 100644
index 00000000000..b59cfb6289d
--- /dev/null
+++ b/chain/actors/builtin/verifreg/state.go.template
@@ -0,0 +1,82 @@
+package verifreg
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+{{if (ge .v 3)}} builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin"
+{{end}} verifreg{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/verifreg"
+ adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt"
+)
+
+var _ State = (*state{{.v}})(nil)
+
+func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
+ out := state{{.v}}{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make{{.v}}(store adt.Store, rootKeyAddress address.Address) (State, error) {
+ out := state{{.v}}{store: store}
+ {{if (le .v 2)}}
+ em, err := adt{{.v}}.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *verifreg{{.v}}.ConstructState(em, rootKeyAddress)
+ {{else}}
+ s, err := verifreg{{.v}}.ConstructState(store, rootKeyAddress)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+ {{end}}
+ return &out, nil
+}
+
+type state{{.v}} struct {
+ verifreg{{.v}}.State
+ store adt.Store
+}
+
+func (s *state{{.v}}) RootKey() (address.Address, error) {
+ return s.State.RootKey, nil
+}
+
+func (s *state{{.v}}) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+ return getDataCap(s.store, actors.Version{{.v}}, s.verifiedClients, addr)
+}
+
+func (s *state{{.v}}) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+ return getDataCap(s.store, actors.Version{{.v}}, s.verifiers, addr)
+}
+
+func (s *state{{.v}}) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ return forEachCap(s.store, actors.Version{{.v}}, s.verifiers, cb)
+}
+
+func (s *state{{.v}}) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ return forEachCap(s.store, actors.Version{{.v}}, s.verifiedClients, cb)
+}
+
+func (s *state{{.v}}) verifiedClients() (adt.Map, error) {
+ return adt{{.v}}.AsMap(s.store, s.VerifiedClients{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}})
+}
+
+func (s *state{{.v}}) verifiers() (adt.Map, error) {
+ return adt{{.v}}.AsMap(s.store, s.Verifiers{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}})
+}
+
+func (s *state{{.v}}) GetState() interface{} {
+ return &s.State
+}
\ No newline at end of file
diff --git a/chain/actors/builtin/verifreg/util.go b/chain/actors/builtin/verifreg/util.go
index 4136c0c3077..16e50c50a77 100644
--- a/chain/actors/builtin/verifreg/util.go
+++ b/chain/actors/builtin/verifreg/util.go
@@ -6,16 +6,21 @@ import (
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
- "github.com/ipfs/go-cid"
"golang.org/x/xerrors"
)
-func getDataCap(store adt.Store, ver actors.Version, root cid.Cid, addr address.Address) (bool, abi.StoragePower, error) {
+// taking this as a function instead of asking the caller to call it helps reduce some of the error
+// checking boilerplate.
+//
+// "go made me do it"
+type rootFunc func() (adt.Map, error)
+
+// Assumes that the bitwidth for v3 HAMTs is the DefaultHamtBitwidth
+func getDataCap(store adt.Store, ver actors.Version, root rootFunc, addr address.Address) (bool, abi.StoragePower, error) {
if addr.Protocol() != address.ID {
return false, big.Zero(), xerrors.Errorf("can only look up ID addresses")
}
-
- vh, err := adt.AsMap(store, root, ver)
+ vh, err := root()
if err != nil {
return false, big.Zero(), xerrors.Errorf("loading verifreg: %w", err)
}
@@ -30,8 +35,9 @@ func getDataCap(store adt.Store, ver actors.Version, root cid.Cid, addr address.
return true, dcap, nil
}
-func forEachCap(store adt.Store, ver actors.Version, root cid.Cid, cb func(addr address.Address, dcap abi.StoragePower) error) error {
- vh, err := adt.AsMap(store, root, ver)
+// Assumes that the bitwidth for v3 HAMTs is the DefaultHamtBitwidth
+func forEachCap(store adt.Store, ver actors.Version, root rootFunc, cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ vh, err := root()
if err != nil {
return xerrors.Errorf("loading verified clients: %w", err)
}
diff --git a/chain/actors/builtin/verifreg/v0.go b/chain/actors/builtin/verifreg/v0.go
index 64def470695..e70b0e3c92d 100644
--- a/chain/actors/builtin/verifreg/v0.go
+++ b/chain/actors/builtin/verifreg/v0.go
@@ -9,6 +9,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/adt"
verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
)
var _ State = (*state0)(nil)
@@ -22,6 +23,19 @@ func load0(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make0(store adt.Store, rootKeyAddress address.Address) (State, error) {
+ out := state0{store: store}
+
+ em, err := adt0.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *verifreg0.ConstructState(em, rootKeyAddress)
+
+ return &out, nil
+}
+
type state0 struct {
verifreg0.State
store adt.Store
@@ -32,17 +46,29 @@ func (s *state0) RootKey() (address.Address, error) {
}
func (s *state0) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
- return getDataCap(s.store, actors.Version0, s.State.VerifiedClients, addr)
+ return getDataCap(s.store, actors.Version0, s.verifiedClients, addr)
}
func (s *state0) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
- return getDataCap(s.store, actors.Version0, s.State.Verifiers, addr)
+ return getDataCap(s.store, actors.Version0, s.verifiers, addr)
}
func (s *state0) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
- return forEachCap(s.store, actors.Version0, s.State.Verifiers, cb)
+ return forEachCap(s.store, actors.Version0, s.verifiers, cb)
}
func (s *state0) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
- return forEachCap(s.store, actors.Version0, s.State.VerifiedClients, cb)
+ return forEachCap(s.store, actors.Version0, s.verifiedClients, cb)
+}
+
+func (s *state0) verifiedClients() (adt.Map, error) {
+ return adt0.AsMap(s.store, s.VerifiedClients)
+}
+
+func (s *state0) verifiers() (adt.Map, error) {
+ return adt0.AsMap(s.store, s.Verifiers)
+}
+
+func (s *state0) GetState() interface{} {
+ return &s.State
}
diff --git a/chain/actors/builtin/verifreg/v2.go b/chain/actors/builtin/verifreg/v2.go
index 5ee3bad05d7..0bcbe02121d 100644
--- a/chain/actors/builtin/verifreg/v2.go
+++ b/chain/actors/builtin/verifreg/v2.go
@@ -9,6 +9,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/adt"
verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg"
+ adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
)
var _ State = (*state2)(nil)
@@ -22,6 +23,19 @@ func load2(store adt.Store, root cid.Cid) (State, error) {
return &out, nil
}
+func make2(store adt.Store, rootKeyAddress address.Address) (State, error) {
+ out := state2{store: store}
+
+ em, err := adt2.MakeEmptyMap(store).Root()
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *verifreg2.ConstructState(em, rootKeyAddress)
+
+ return &out, nil
+}
+
type state2 struct {
verifreg2.State
store adt.Store
@@ -32,17 +46,29 @@ func (s *state2) RootKey() (address.Address, error) {
}
func (s *state2) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
- return getDataCap(s.store, actors.Version2, s.State.VerifiedClients, addr)
+ return getDataCap(s.store, actors.Version2, s.verifiedClients, addr)
}
func (s *state2) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
- return getDataCap(s.store, actors.Version2, s.State.Verifiers, addr)
+ return getDataCap(s.store, actors.Version2, s.verifiers, addr)
}
func (s *state2) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
- return forEachCap(s.store, actors.Version2, s.State.Verifiers, cb)
+ return forEachCap(s.store, actors.Version2, s.verifiers, cb)
}
func (s *state2) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
- return forEachCap(s.store, actors.Version2, s.State.VerifiedClients, cb)
+ return forEachCap(s.store, actors.Version2, s.verifiedClients, cb)
+}
+
+func (s *state2) verifiedClients() (adt.Map, error) {
+ return adt2.AsMap(s.store, s.VerifiedClients)
+}
+
+func (s *state2) verifiers() (adt.Map, error) {
+ return adt2.AsMap(s.store, s.Verifiers)
+}
+
+func (s *state2) GetState() interface{} {
+ return &s.State
}
diff --git a/chain/actors/builtin/verifreg/v3.go b/chain/actors/builtin/verifreg/v3.go
new file mode 100644
index 00000000000..32003ca3a30
--- /dev/null
+++ b/chain/actors/builtin/verifreg/v3.go
@@ -0,0 +1,75 @@
+package verifreg
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+ verifreg3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/verifreg"
+ adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
+)
+
+var _ State = (*state3)(nil)
+
+func load3(store adt.Store, root cid.Cid) (State, error) {
+ out := state3{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make3(store adt.Store, rootKeyAddress address.Address) (State, error) {
+ out := state3{store: store}
+
+ s, err := verifreg3.ConstructState(store, rootKeyAddress)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state3 struct {
+ verifreg3.State
+ store adt.Store
+}
+
+func (s *state3) RootKey() (address.Address, error) {
+ return s.State.RootKey, nil
+}
+
+func (s *state3) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+ return getDataCap(s.store, actors.Version3, s.verifiedClients, addr)
+}
+
+func (s *state3) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+ return getDataCap(s.store, actors.Version3, s.verifiers, addr)
+}
+
+func (s *state3) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ return forEachCap(s.store, actors.Version3, s.verifiers, cb)
+}
+
+func (s *state3) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ return forEachCap(s.store, actors.Version3, s.verifiedClients, cb)
+}
+
+func (s *state3) verifiedClients() (adt.Map, error) {
+ return adt3.AsMap(s.store, s.VerifiedClients, builtin3.DefaultHamtBitwidth)
+}
+
+func (s *state3) verifiers() (adt.Map, error) {
+ return adt3.AsMap(s.store, s.Verifiers, builtin3.DefaultHamtBitwidth)
+}
+
+func (s *state3) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/verifreg/v4.go b/chain/actors/builtin/verifreg/v4.go
new file mode 100644
index 00000000000..b752e747bb3
--- /dev/null
+++ b/chain/actors/builtin/verifreg/v4.go
@@ -0,0 +1,75 @@
+package verifreg
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+ verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
+ adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt"
+)
+
+var _ State = (*state4)(nil)
+
+func load4(store adt.Store, root cid.Cid) (State, error) {
+ out := state4{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make4(store adt.Store, rootKeyAddress address.Address) (State, error) {
+ out := state4{store: store}
+
+ s, err := verifreg4.ConstructState(store, rootKeyAddress)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state4 struct {
+ verifreg4.State
+ store adt.Store
+}
+
+func (s *state4) RootKey() (address.Address, error) {
+ return s.State.RootKey, nil
+}
+
+func (s *state4) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+ return getDataCap(s.store, actors.Version4, s.verifiedClients, addr)
+}
+
+func (s *state4) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+ return getDataCap(s.store, actors.Version4, s.verifiers, addr)
+}
+
+func (s *state4) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ return forEachCap(s.store, actors.Version4, s.verifiers, cb)
+}
+
+func (s *state4) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ return forEachCap(s.store, actors.Version4, s.verifiedClients, cb)
+}
+
+func (s *state4) verifiedClients() (adt.Map, error) {
+ return adt4.AsMap(s.store, s.VerifiedClients, builtin4.DefaultHamtBitwidth)
+}
+
+func (s *state4) verifiers() (adt.Map, error) {
+ return adt4.AsMap(s.store, s.Verifiers, builtin4.DefaultHamtBitwidth)
+}
+
+func (s *state4) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/verifreg/v5.go b/chain/actors/builtin/verifreg/v5.go
new file mode 100644
index 00000000000..6fefd711540
--- /dev/null
+++ b/chain/actors/builtin/verifreg/v5.go
@@ -0,0 +1,75 @@
+package verifreg
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+ verifreg5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/verifreg"
+ adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
+)
+
+var _ State = (*state5)(nil)
+
+func load5(store adt.Store, root cid.Cid) (State, error) {
+ out := state5{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make5(store adt.Store, rootKeyAddress address.Address) (State, error) {
+ out := state5{store: store}
+
+ s, err := verifreg5.ConstructState(store, rootKeyAddress)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state5 struct {
+ verifreg5.State
+ store adt.Store
+}
+
+func (s *state5) RootKey() (address.Address, error) {
+ return s.State.RootKey, nil
+}
+
+func (s *state5) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+ return getDataCap(s.store, actors.Version5, s.verifiedClients, addr)
+}
+
+func (s *state5) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+ return getDataCap(s.store, actors.Version5, s.verifiers, addr)
+}
+
+func (s *state5) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ return forEachCap(s.store, actors.Version5, s.verifiers, cb)
+}
+
+func (s *state5) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ return forEachCap(s.store, actors.Version5, s.verifiedClients, cb)
+}
+
+func (s *state5) verifiedClients() (adt.Map, error) {
+ return adt5.AsMap(s.store, s.VerifiedClients, builtin5.DefaultHamtBitwidth)
+}
+
+func (s *state5) verifiers() (adt.Map, error) {
+ return adt5.AsMap(s.store, s.Verifiers, builtin5.DefaultHamtBitwidth)
+}
+
+func (s *state5) GetState() interface{} {
+ return &s.State
+}
diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go
index a4468d8a0d2..88104ad6955 100644
--- a/chain/actors/builtin/verifreg/verifreg.go
+++ b/chain/actors/builtin/verifreg/verifreg.go
@@ -1,44 +1,126 @@
package verifreg
import (
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
"github.com/filecoin-project/go-state-types/cbor"
+
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+
+ "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types"
)
func init() {
+
builtin.RegisterActorState(builtin0.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load0(store, root)
})
+
builtin.RegisterActorState(builtin2.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load2(store, root)
})
+
+ builtin.RegisterActorState(builtin3.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load3(store, root)
+ })
+
+ builtin.RegisterActorState(builtin4.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load4(store, root)
+ })
+
+ builtin.RegisterActorState(builtin5.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
+ return load5(store, root)
+ })
+
}
var (
- Address = builtin2.VerifiedRegistryActorAddr
- Methods = builtin2.MethodsVerifiedRegistry
+ Address = builtin5.VerifiedRegistryActorAddr
+ Methods = builtin5.MethodsVerifiedRegistry
)
func Load(store adt.Store, act *types.Actor) (State, error) {
switch act.Code {
+
case builtin0.VerifiedRegistryActorCodeID:
return load0(store, act.Head)
+
case builtin2.VerifiedRegistryActorCodeID:
return load2(store, act.Head)
+
+ case builtin3.VerifiedRegistryActorCodeID:
+ return load3(store, act.Head)
+
+ case builtin4.VerifiedRegistryActorCodeID:
+ return load4(store, act.Head)
+
+ case builtin5.VerifiedRegistryActorCodeID:
+ return load5(store, act.Head)
+
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
+func MakeState(store adt.Store, av actors.Version, rootKeyAddress address.Address) (State, error) {
+ switch av {
+
+ case actors.Version0:
+ return make0(store, rootKeyAddress)
+
+ case actors.Version2:
+ return make2(store, rootKeyAddress)
+
+ case actors.Version3:
+ return make3(store, rootKeyAddress)
+
+ case actors.Version4:
+ return make4(store, rootKeyAddress)
+
+ case actors.Version5:
+ return make5(store, rootKeyAddress)
+
+ }
+ return nil, xerrors.Errorf("unknown actor version %d", av)
+}
+
+func GetActorCodeID(av actors.Version) (cid.Cid, error) {
+ switch av {
+
+ case actors.Version0:
+ return builtin0.VerifiedRegistryActorCodeID, nil
+
+ case actors.Version2:
+ return builtin2.VerifiedRegistryActorCodeID, nil
+
+ case actors.Version3:
+ return builtin3.VerifiedRegistryActorCodeID, nil
+
+ case actors.Version4:
+ return builtin4.VerifiedRegistryActorCodeID, nil
+
+ case actors.Version5:
+ return builtin5.VerifiedRegistryActorCodeID, nil
+
+ }
+
+ return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
+}
+
type State interface {
cbor.Marshaler
@@ -47,4 +129,5 @@ type State interface {
VerifierDataCap(address.Address) (bool, abi.StoragePower, error)
ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error
ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error
+ GetState() interface{}
}
diff --git a/chain/actors/params.go b/chain/actors/params.go
index e14dcafc9ff..6dc0b1084db 100644
--- a/chain/actors/params.go
+++ b/chain/actors/params.go
@@ -3,6 +3,8 @@ package actors
import (
"bytes"
+ "github.com/filecoin-project/go-state-types/exitcode"
+
"github.com/filecoin-project/lotus/chain/actors/aerrors"
cbg "github.com/whyrusleeping/cbor-gen"
)
@@ -11,7 +13,7 @@ func SerializeParams(i cbg.CBORMarshaler) ([]byte, aerrors.ActorError) {
buf := new(bytes.Buffer)
if err := i.MarshalCBOR(buf); err != nil {
// TODO: shouldnt this be a fatal error?
- return nil, aerrors.Absorb(err, 1, "failed to encode parameter")
+ return nil, aerrors.Absorb(err, exitcode.ErrSerialization, "failed to encode parameter")
}
return buf.Bytes(), nil
}
diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go
index c1a971db51e..c06c85d380c 100644
--- a/chain/actors/policy/policy.go
+++ b/chain/actors/policy/policy.go
@@ -3,45 +3,105 @@ package policy
import (
"sort"
+ "github.com/filecoin-project/go-state-types/big"
+
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/actors"
+
market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
+
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
- paych2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/paych"
verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg"
+
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+ market3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/market"
+ miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
+ verifreg3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/verifreg"
+
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+ market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market"
+ miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner"
+ verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
+
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
+ market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market"
+ miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
+ verifreg5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/verifreg"
+
+ paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych"
)
const (
- ChainFinality = miner0.ChainFinality
- SealRandomnessLookback = ChainFinality
- PaychSettleDelay = paych2.SettleDelay
+ ChainFinality = miner5.ChainFinality
+ SealRandomnessLookback = ChainFinality
+ PaychSettleDelay = paych5.SettleDelay
+ MaxPreCommitRandomnessLookback = builtin5.EpochsInDay + SealRandomnessLookback
)
// SetSupportedProofTypes sets supported proof types, across all actor versions.
// This should only be used for testing.
func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
- newTypes := make(map[abi.RegisteredSealProof]struct{}, len(types))
- for _, t := range types {
- newTypes[t] = struct{}{}
- }
- // Set for all miner versions.
- miner0.SupportedProofTypes = newTypes
- miner2.SupportedProofTypes = newTypes
+
+ miner0.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types))
+
+ miner2.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+ miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
+ miner2.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+
+ miner3.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+ miner3.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
+ miner3.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+
+ miner4.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+ miner4.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
+ miner4.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+
+ miner5.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+
+ AddSupportedProofTypes(types...)
}
// AddSupportedProofTypes sets supported proof types, across all actor versions.
// This should only be used for testing.
func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
for _, t := range types {
+ if t >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 {
+ panic("must specify v1 proof types only")
+ }
// Set for all miner versions.
+
miner0.SupportedProofTypes[t] = struct{}{}
- miner2.SupportedProofTypes[t] = struct{}{}
+
+ miner2.PreCommitSealProofTypesV0[t] = struct{}{}
+ miner2.PreCommitSealProofTypesV7[t] = struct{}{}
+ miner2.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
+ miner2.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
+
+ miner3.PreCommitSealProofTypesV0[t] = struct{}{}
+ miner3.PreCommitSealProofTypesV7[t] = struct{}{}
+ miner3.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
+ miner3.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
+
+ miner4.PreCommitSealProofTypesV0[t] = struct{}{}
+ miner4.PreCommitSealProofTypesV7[t] = struct{}{}
+ miner4.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
+ miner4.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
+
+ miner5.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
+ wpp, err := t.RegisteredWindowPoStProof()
+ if err != nil {
+ // Fine to panic, this is a test-only method
+ panic(err)
+ }
+
+ miner5.WindowPoStProofTypes[wpp] = struct{}{}
+
}
}
@@ -49,66 +109,187 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
// actors versions. Use for testing.
func SetPreCommitChallengeDelay(delay abi.ChainEpoch) {
// Set for all miner versions.
+
miner0.PreCommitChallengeDelay = delay
+
miner2.PreCommitChallengeDelay = delay
+
+ miner3.PreCommitChallengeDelay = delay
+
+ miner4.PreCommitChallengeDelay = delay
+
+ miner5.PreCommitChallengeDelay = delay
+
}
// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay.
func GetPreCommitChallengeDelay() abi.ChainEpoch {
- return miner0.PreCommitChallengeDelay
+ return miner5.PreCommitChallengeDelay
}
// SetConsensusMinerMinPower sets the minimum power of an individual miner must
// meet for leader election, across all actor versions. This should only be used
// for testing.
func SetConsensusMinerMinPower(p abi.StoragePower) {
+
power0.ConsensusMinerMinPower = p
+
for _, policy := range builtin2.SealProofPolicies {
policy.ConsensusMinerMinPower = p
}
+
+ for _, policy := range builtin3.PoStProofPolicies {
+ policy.ConsensusMinerMinPower = p
+ }
+
+ for _, policy := range builtin4.PoStProofPolicies {
+ policy.ConsensusMinerMinPower = p
+ }
+
+ for _, policy := range builtin5.PoStProofPolicies {
+ policy.ConsensusMinerMinPower = p
+ }
+
}
// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should
// only be used for testing.
func SetMinVerifiedDealSize(size abi.StoragePower) {
+
verifreg0.MinVerifiedDealSize = size
+
verifreg2.MinVerifiedDealSize = size
+
+ verifreg3.MinVerifiedDealSize = size
+
+ verifreg4.MinVerifiedDealSize = size
+
+ verifreg5.MinVerifiedDealSize = size
+
}
func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch {
switch ver {
+
case actors.Version0:
+
return miner0.MaxSealDuration[t]
+
case actors.Version2:
+
return miner2.MaxProveCommitDuration[t]
+
+ case actors.Version3:
+
+ return miner3.MaxProveCommitDuration[t]
+
+ case actors.Version4:
+
+ return miner4.MaxProveCommitDuration[t]
+
+ case actors.Version5:
+
+ return miner5.MaxProveCommitDuration[t]
+
default:
panic("unsupported actors version")
}
}
+// SetProviderCollateralSupplyTarget sets the percentage of normalized circulating
+// supply that must be covered by provider collateral in a deal. This should
+// only be used for testing.
+func SetProviderCollateralSupplyTarget(num, denom big.Int) {
+
+ market2.ProviderCollateralSupplyTarget = builtin2.BigFrac{
+ Numerator: num,
+ Denominator: denom,
+ }
+
+ market3.ProviderCollateralSupplyTarget = builtin3.BigFrac{
+ Numerator: num,
+ Denominator: denom,
+ }
+
+ market4.ProviderCollateralSupplyTarget = builtin4.BigFrac{
+ Numerator: num,
+ Denominator: denom,
+ }
+
+ market5.ProviderCollateralSupplyTarget = builtin5.BigFrac{
+ Numerator: num,
+ Denominator: denom,
+ }
+
+}
+
func DealProviderCollateralBounds(
size abi.PaddedPieceSize, verified bool,
rawBytePower, qaPower, baselinePower abi.StoragePower,
circulatingFil abi.TokenAmount, nwVer network.Version,
) (min, max abi.TokenAmount) {
switch actors.VersionForNetwork(nwVer) {
+
case actors.Version0:
+
return market0.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer)
+
case actors.Version2:
+
return market2.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
+
+ case actors.Version3:
+
+ return market3.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
+
+ case actors.Version4:
+
+ return market4.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
+
+ case actors.Version5:
+
+ return market5.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
+
default:
- panic("unsupported network version")
+ panic("unsupported actors version")
}
}
+func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) {
+ return market5.DealDurationBounds(pieceSize)
+}
+
// Sets the challenge window and scales the proving period to match (such that
// there are always 48 challenge windows in a proving period).
func SetWPoStChallengeWindow(period abi.ChainEpoch) {
+
miner0.WPoStChallengeWindow = period
miner0.WPoStProvingPeriod = period * abi.ChainEpoch(miner0.WPoStPeriodDeadlines)
miner2.WPoStChallengeWindow = period
miner2.WPoStProvingPeriod = period * abi.ChainEpoch(miner2.WPoStPeriodDeadlines)
+
+ miner3.WPoStChallengeWindow = period
+ miner3.WPoStProvingPeriod = period * abi.ChainEpoch(miner3.WPoStPeriodDeadlines)
+
+ // by default, this is 2x finality which is 30 periods.
+ // scale it if we're scaling the challenge period.
+ miner3.WPoStDisputeWindow = period * 30
+
+ miner4.WPoStChallengeWindow = period
+ miner4.WPoStProvingPeriod = period * abi.ChainEpoch(miner4.WPoStPeriodDeadlines)
+
+ // by default, this is 2x finality which is 30 periods.
+ // scale it if we're scaling the challenge period.
+ miner4.WPoStDisputeWindow = period * 30
+
+ miner5.WPoStChallengeWindow = period
+ miner5.WPoStProvingPeriod = period * abi.ChainEpoch(miner5.WPoStPeriodDeadlines)
+
+ // by default, this is 2x finality which is 30 periods.
+ // scale it if we're scaling the challenge period.
+ miner5.WPoStDisputeWindow = period * 30
+
}
func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
@@ -116,26 +297,27 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
return 10
}
+ // NOTE: if this ever changes, adjust it in a (*Miner).mineOne() logline as well
return ChainFinality
}
func GetMaxSectorExpirationExtension() abi.ChainEpoch {
- return miner0.MaxSectorExpirationExtension
+ return miner5.MaxSectorExpirationExtension
}
-// TODO: we'll probably need to abstract over this better in the future.
-func GetMaxPoStPartitions(p abi.RegisteredPoStProof) (int, error) {
- sectorsPerPart, err := builtin2.PoStProofWindowPoStPartitionSectors(p)
+func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) {
+ sectorsPerPart, err := builtin5.PoStProofWindowPoStPartitionSectors(p)
if err != nil {
return 0, err
}
- return int(miner2.AddressedSectorsMax / sectorsPerPart), nil
+ maxSectors := uint64(GetAddressedSectorsMax(nv))
+ return int(maxSectors / sectorsPerPart), nil
}
func GetDefaultSectorSize() abi.SectorSize {
- // supported proof types are the same across versions.
- szs := make([]abi.SectorSize, 0, len(miner2.SupportedProofTypes))
- for spt := range miner2.SupportedProofTypes {
+ // supported sector sizes are the same across versions.
+ szs := make([]abi.SectorSize, 0, len(miner5.PreCommitSealProofTypesV8))
+ for spt := range miner5.PreCommitSealProofTypesV8 {
ss, err := spt.SectorSize()
if err != nil {
panic(err)
@@ -150,3 +332,95 @@ func GetDefaultSectorSize() abi.SectorSize {
return szs[0]
}
+
+func GetDefaultAggregationProof() abi.RegisteredAggregationProof {
+ return abi.RegisteredAggregationProof_SnarkPackV1
+}
+
+func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch {
+ if nwVer <= network.Version10 {
+ return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime
+ }
+
+ return builtin5.SealProofPoliciesV11[proof].SectorMaxLifetime
+}
+
+func GetAddressedSectorsMax(nwVer network.Version) int {
+ switch actors.VersionForNetwork(nwVer) {
+
+ case actors.Version0:
+ return miner0.AddressedSectorsMax
+
+ case actors.Version2:
+ return miner2.AddressedSectorsMax
+
+ case actors.Version3:
+ return miner3.AddressedSectorsMax
+
+ case actors.Version4:
+ return miner4.AddressedSectorsMax
+
+ case actors.Version5:
+ return miner5.AddressedSectorsMax
+
+ default:
+ panic("unsupported network version")
+ }
+}
+
+func GetDeclarationsMax(nwVer network.Version) int {
+ switch actors.VersionForNetwork(nwVer) {
+
+ case actors.Version0:
+
+ // TODO: Should we instead panic here since the concept doesn't exist yet?
+ return miner0.AddressedPartitionsMax
+
+ case actors.Version2:
+
+ return miner2.DeclarationsMax
+
+ case actors.Version3:
+
+ return miner3.DeclarationsMax
+
+ case actors.Version4:
+
+ return miner4.DeclarationsMax
+
+ case actors.Version5:
+
+ return miner5.DeclarationsMax
+
+ default:
+ panic("unsupported network version")
+ }
+}
+
+func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount {
+ switch actors.VersionForNetwork(nwVer) {
+
+ case actors.Version0:
+
+ return big.Zero()
+
+ case actors.Version2:
+
+ return big.Zero()
+
+ case actors.Version3:
+
+ return big.Zero()
+
+ case actors.Version4:
+
+ return big.Zero()
+
+ case actors.Version5:
+
+ return miner5.AggregateNetworkFee(aggregateSize, baseFee)
+
+ default:
+ panic("unsupported network version")
+ }
+}
diff --git a/chain/actors/policy/policy.go.template b/chain/actors/policy/policy.go.template
new file mode 100644
index 00000000000..3257feffd41
--- /dev/null
+++ b/chain/actors/policy/policy.go.template
@@ -0,0 +1,279 @@
+package policy
+
+import (
+ "sort"
+
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/filecoin-project/lotus/chain/actors"
+
+ {{range .versions}}
+ {{if (ge . 2)}} builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" {{end}}
+ market{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/market"
+ miner{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/miner"
+ verifreg{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/verifreg"
+ {{if (eq . 0)}} power{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/power" {{end}}
+ {{end}}
+
+ paych{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/paych"
+)
+
+const (
+ ChainFinality = miner{{.latestVersion}}.ChainFinality
+ SealRandomnessLookback = ChainFinality
+ PaychSettleDelay = paych{{.latestVersion}}.SettleDelay
+ MaxPreCommitRandomnessLookback = builtin{{.latestVersion}}.EpochsInDay + SealRandomnessLookback
+)
+
+// SetSupportedProofTypes sets supported proof types, across all actor versions.
+// This should only be used for testing.
+func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
+ {{range .versions}}
+ {{if (eq . 0)}}
+ miner{{.}}.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types))
+ {{else if (le . 4)}}
+ miner{{.}}.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+ miner{{.}}.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
+ miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+ {{else}}
+ miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
+ {{end}}
+ {{end}}
+
+ AddSupportedProofTypes(types...)
+}
+
+// AddSupportedProofTypes sets supported proof types, across all actor versions.
+// This should only be used for testing.
+func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
+ for _, t := range types {
+ if t >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 {
+ panic("must specify v1 proof types only")
+ }
+ // Set for all miner versions.
+
+ {{range .versions}}
+ {{if (eq . 0)}}
+ miner{{.}}.SupportedProofTypes[t] = struct{}{}
+ {{else if (le . 4)}}
+ miner{{.}}.PreCommitSealProofTypesV0[t] = struct{}{}
+ miner{{.}}.PreCommitSealProofTypesV7[t] = struct{}{}
+ miner{{.}}.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
+ miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
+ {{else}}
+ miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
+ wpp, err := t.RegisteredWindowPoStProof()
+ if err != nil {
+ // Fine to panic, this is a test-only method
+ panic(err)
+ }
+
+ miner{{.}}.WindowPoStProofTypes[wpp] = struct{}{}
+ {{end}}
+ {{end}}
+ }
+}
+
+// SetPreCommitChallengeDelay sets the pre-commit challenge delay across all
+// actors versions. Use for testing.
+func SetPreCommitChallengeDelay(delay abi.ChainEpoch) {
+ // Set for all miner versions.
+ {{range .versions}}
+ miner{{.}}.PreCommitChallengeDelay = delay
+ {{end}}
+}
+
+// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay.
+func GetPreCommitChallengeDelay() abi.ChainEpoch {
+ return miner{{.latestVersion}}.PreCommitChallengeDelay
+}
+
+// SetConsensusMinerMinPower sets the minimum power of an individual miner must
+// meet for leader election, across all actor versions. This should only be used
+// for testing.
+func SetConsensusMinerMinPower(p abi.StoragePower) {
+ {{range .versions}}
+ {{if (eq . 0)}}
+ power{{.}}.ConsensusMinerMinPower = p
+ {{else if (eq . 2)}}
+ for _, policy := range builtin{{.}}.SealProofPolicies {
+ policy.ConsensusMinerMinPower = p
+ }
+ {{else}}
+ for _, policy := range builtin{{.}}.PoStProofPolicies {
+ policy.ConsensusMinerMinPower = p
+ }
+ {{end}}
+ {{end}}
+}
+
+// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should
+// only be used for testing.
+func SetMinVerifiedDealSize(size abi.StoragePower) {
+ {{range .versions}}
+ verifreg{{.}}.MinVerifiedDealSize = size
+ {{end}}
+}
+
+func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch {
+ switch ver {
+ {{range .versions}}
+ case actors.Version{{.}}:
+ {{if (eq . 0)}}
+ return miner{{.}}.MaxSealDuration[t]
+ {{else}}
+ return miner{{.}}.MaxProveCommitDuration[t]
+ {{end}}
+ {{end}}
+ default:
+ panic("unsupported actors version")
+ }
+}
+
+// SetProviderCollateralSupplyTarget sets the percentage of normalized circulating
+// supply that must be covered by provider collateral in a deal. This should
+// only be used for testing.
+func SetProviderCollateralSupplyTarget(num, denom big.Int) {
+{{range .versions}}
+ {{if (ge . 2)}}
+ market{{.}}.ProviderCollateralSupplyTarget = builtin{{.}}.BigFrac{
+ Numerator: num,
+ Denominator: denom,
+ }
+ {{end}}
+{{end}}
+}
+
+func DealProviderCollateralBounds(
+ size abi.PaddedPieceSize, verified bool,
+ rawBytePower, qaPower, baselinePower abi.StoragePower,
+ circulatingFil abi.TokenAmount, nwVer network.Version,
+) (min, max abi.TokenAmount) {
+ switch actors.VersionForNetwork(nwVer) {
+ {{range .versions}}
+ case actors.Version{{.}}:
+ {{if (eq . 0)}}
+ return market{{.}}.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer)
+ {{else}}
+ return market{{.}}.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
+ {{end}}
+ {{end}}
+ default:
+ panic("unsupported actors version")
+ }
+}
+
+func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) {
+ return market{{.latestVersion}}.DealDurationBounds(pieceSize)
+}
+
+// Sets the challenge window and scales the proving period to match (such that
+// there are always 48 challenge windows in a proving period).
+func SetWPoStChallengeWindow(period abi.ChainEpoch) {
+ {{range .versions}}
+ miner{{.}}.WPoStChallengeWindow = period
+ miner{{.}}.WPoStProvingPeriod = period * abi.ChainEpoch(miner{{.}}.WPoStPeriodDeadlines)
+ {{if (ge . 3)}}
+ // by default, this is 2x finality which is 30 periods.
+ // scale it if we're scaling the challenge period.
+ miner{{.}}.WPoStDisputeWindow = period * 30
+ {{end}}
+ {{end}}
+}
+
+func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
+ if nwVer <= network.Version3 {
+ return 10
+ }
+
+ // NOTE: if this ever changes, adjust it in a (*Miner).mineOne() logline as well
+ return ChainFinality
+}
+
+func GetMaxSectorExpirationExtension() abi.ChainEpoch {
+ return miner{{.latestVersion}}.MaxSectorExpirationExtension
+}
+
+func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) {
+ sectorsPerPart, err := builtin{{.latestVersion}}.PoStProofWindowPoStPartitionSectors(p)
+ if err != nil {
+ return 0, err
+ }
+ maxSectors := uint64(GetAddressedSectorsMax(nv))
+ return int(maxSectors / sectorsPerPart), nil
+}
+
+func GetDefaultSectorSize() abi.SectorSize {
+ // supported sector sizes are the same across versions.
+ szs := make([]abi.SectorSize, 0, len(miner{{.latestVersion}}.PreCommitSealProofTypesV8))
+ for spt := range miner{{.latestVersion}}.PreCommitSealProofTypesV8 {
+ ss, err := spt.SectorSize()
+ if err != nil {
+ panic(err)
+ }
+
+ szs = append(szs, ss)
+ }
+
+ sort.Slice(szs, func(i, j int) bool {
+ return szs[i] < szs[j]
+ })
+
+ return szs[0]
+}
+
+func GetDefaultAggregationProof() abi.RegisteredAggregationProof {
+ return abi.RegisteredAggregationProof_SnarkPackV1
+}
+
+func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch {
+ if nwVer <= network.Version10 {
+ return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime
+ }
+
+ return builtin{{.latestVersion}}.SealProofPoliciesV11[proof].SectorMaxLifetime
+}
+
+func GetAddressedSectorsMax(nwVer network.Version) int {
+ switch actors.VersionForNetwork(nwVer) {
+ {{range .versions}}
+ case actors.Version{{.}}:
+ return miner{{.}}.AddressedSectorsMax
+ {{end}}
+ default:
+ panic("unsupported network version")
+ }
+}
+
+func GetDeclarationsMax(nwVer network.Version) int {
+ switch actors.VersionForNetwork(nwVer) {
+ {{range .versions}}
+ case actors.Version{{.}}:
+ {{if (eq . 0)}}
+ // TODO: Should we instead panic here since the concept doesn't exist yet?
+ return miner{{.}}.AddressedPartitionsMax
+ {{else}}
+ return miner{{.}}.DeclarationsMax
+ {{end}}
+ {{end}}
+ default:
+ panic("unsupported network version")
+ }
+}
+
+func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount {
+ switch actors.VersionForNetwork(nwVer) {
+ {{range .versions}}
+ case actors.Version{{.}}:
+ {{if (le . 4)}}
+ return big.Zero()
+ {{else}}
+ return miner{{.}}.AggregateNetworkFee(aggregateSize, baseFee)
+ {{end}}
+ {{end}}
+ default:
+ panic("unsupported network version")
+ }
+}
diff --git a/chain/actors/policy/policy_test.go b/chain/actors/policy/policy_test.go
index af600cc75cd..f40250fba8e 100644
--- a/chain/actors/policy/policy_test.go
+++ b/chain/actors/policy/policy_test.go
@@ -6,6 +6,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/network"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych"
@@ -44,7 +45,7 @@ func TestSupportedProofTypes(t *testing.T) {
// Tests assumptions about policies being the same between actor versions.
func TestAssumptions(t *testing.T) {
- require.EqualValues(t, miner0.SupportedProofTypes, miner2.SupportedProofTypes)
+ require.EqualValues(t, miner0.SupportedProofTypes, miner2.PreCommitSealProofTypesV0)
require.Equal(t, miner0.PreCommitChallengeDelay, miner2.PreCommitChallengeDelay)
require.Equal(t, miner0.MaxSectorExpirationExtension, miner2.MaxSectorExpirationExtension)
require.Equal(t, miner0.ChainFinality, miner2.ChainFinality)
@@ -57,10 +58,10 @@ func TestAssumptions(t *testing.T) {
}
func TestPartitionSizes(t *testing.T) {
- for p := range abi.PoStSealProofTypes {
- sizeNew, err := builtin2.PoStProofWindowPoStPartitionSectors(p)
+ for _, p := range abi.SealProofInfos {
+ sizeNew, err := builtin2.PoStProofWindowPoStPartitionSectors(p.WindowPoStProof)
require.NoError(t, err)
- sizeOld, err := builtin0.PoStProofWindowPoStPartitionSectors(p)
+ sizeOld, err := builtin0.PoStProofWindowPoStPartitionSectors(p.WindowPoStProof)
if err != nil {
// new proof type.
continue
@@ -68,3 +69,12 @@ func TestPartitionSizes(t *testing.T) {
require.Equal(t, sizeOld, sizeNew)
}
}
+
+func TestPoStSize(t *testing.T) {
+ v12PoStSize, err := GetMaxPoStPartitions(network.Version12, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1)
+ require.Equal(t, 4, v12PoStSize)
+ require.NoError(t, err)
+ v13PoStSize, err := GetMaxPoStPartitions(network.Version13, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1)
+ require.NoError(t, err)
+ require.Equal(t, 10, v13PoStSize)
+}
diff --git a/chain/actors/version.go b/chain/actors/version.go
index fe16d521eb2..9710e62fa8f 100644
--- a/chain/actors/version.go
+++ b/chain/actors/version.go
@@ -8,9 +8,16 @@ import (
type Version int
+var LatestVersion = 5
+
+var Versions = []int{0, 2, 3, 4, LatestVersion}
+
const (
Version0 Version = 0
Version2 Version = 2
+ Version3 Version = 3
+ Version4 Version = 4
+ Version5 Version = 5
)
// Converts a network version into an actors adt version.
@@ -18,8 +25,14 @@ func VersionForNetwork(version network.Version) Version {
switch version {
case network.Version0, network.Version1, network.Version2, network.Version3:
return Version0
- case network.Version4, network.Version5, network.Version6:
+ case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8, network.Version9:
return Version2
+ case network.Version10, network.Version11:
+ return Version3
+ case network.Version12:
+ return Version4
+ case network.Version13:
+ return Version5
default:
panic(fmt.Sprintf("unsupported network version %d", version))
}
diff --git a/chain/beacon/beacon.go b/chain/beacon/beacon.go
index 9543bec54b3..220057282fd 100644
--- a/chain/beacon/beacon.go
+++ b/chain/beacon/beacon.go
@@ -4,7 +4,7 @@ import (
"context"
"github.com/filecoin-project/go-state-types/abi"
- logging "github.com/ipfs/go-log"
+ logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/build"
diff --git a/chain/beacon/drand/drand.go b/chain/beacon/drand/drand.go
index 4abc12d2902..e7f673d7f66 100644
--- a/chain/beacon/drand/drand.go
+++ b/chain/beacon/drand/drand.go
@@ -3,7 +3,6 @@ package drand
import (
"bytes"
"context"
- "sync"
"time"
dchain "github.com/drand/drand/chain"
@@ -13,10 +12,11 @@ import (
gclient "github.com/drand/drand/lp2p/client"
"github.com/drand/kyber"
kzap "github.com/go-kit/kit/log/zap"
+ lru "github.com/hashicorp/golang-lru"
"go.uber.org/zap/zapcore"
"golang.org/x/xerrors"
- logging "github.com/ipfs/go-log"
+ logging "github.com/ipfs/go-log/v2"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/filecoin-project/go-state-types/abi"
@@ -61,8 +61,7 @@ type DrandBeacon struct {
filGenTime uint64
filRoundTime uint64
- cacheLk sync.Mutex
- localCache map[uint64]types.BeaconEntry
+ localCache *lru.Cache
}
// DrandHTTPClient interface overrides the user agent used by drand
@@ -111,9 +110,14 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes
return nil, xerrors.Errorf("creating drand client")
}
+ lc, err := lru.New(1024)
+ if err != nil {
+ return nil, err
+ }
+
db := &DrandBeacon{
client: client,
- localCache: make(map[uint64]types.BeaconEntry),
+ localCache: lc,
}
db.pubkey = drandChain.PublicKey
@@ -156,19 +160,16 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Re
return out
}
func (db *DrandBeacon) cacheValue(e types.BeaconEntry) {
- db.cacheLk.Lock()
- defer db.cacheLk.Unlock()
- db.localCache[e.Round] = e
+ db.localCache.Add(e.Round, e)
}
func (db *DrandBeacon) getCachedValue(round uint64) *types.BeaconEntry {
- db.cacheLk.Lock()
- defer db.cacheLk.Unlock()
- v, ok := db.localCache[round]
+ v, ok := db.localCache.Get(round)
if !ok {
return nil
}
- return &v
+ e, _ := v.(types.BeaconEntry)
+ return &e
}
func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntry) error {
@@ -177,6 +178,9 @@ func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntr
return nil
}
if be := db.getCachedValue(curr.Round); be != nil {
+ if !bytes.Equal(curr.Data, be.Data) {
+ return xerrors.New("invalid beacon value, does not match cached good value")
+ }
// return no error if the value is in the cache already
return nil
}
diff --git a/chain/checkpoint.go b/chain/checkpoint.go
index 8f99d73e43d..a3660a45ce4 100644
--- a/chain/checkpoint.go
+++ b/chain/checkpoint.go
@@ -1,81 +1,57 @@
package chain
import (
- "encoding/json"
+ "context"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/node/modules/dtypes"
- "github.com/ipfs/go-datastore"
"golang.org/x/xerrors"
)
-var CheckpointKey = datastore.NewKey("/chain/checks")
-
-func loadCheckpoint(ds dtypes.MetadataDS) (types.TipSetKey, error) {
- haveChks, err := ds.Has(CheckpointKey)
- if err != nil {
- return types.EmptyTSK, err
- }
-
- if !haveChks {
- return types.EmptyTSK, nil
+func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error {
+ if tsk == types.EmptyTSK {
+ return xerrors.Errorf("called with empty tsk")
}
- tskBytes, err := ds.Get(CheckpointKey)
+ ts, err := syncer.ChainStore().LoadTipSet(tsk)
if err != nil {
- return types.EmptyTSK, err
+ tss, err := syncer.Exchange.GetBlocks(ctx, tsk, 1)
+ if err != nil {
+ return xerrors.Errorf("failed to fetch tipset: %w", err)
+ } else if len(tss) != 1 {
+ return xerrors.Errorf("expected 1 tipset, got %d", len(tss))
+ }
+ ts = tss[0]
}
- var tsk types.TipSetKey
- err = json.Unmarshal(tskBytes, &tsk)
- if err != nil {
- return types.EmptyTSK, err
+ if err := syncer.switchChain(ctx, ts); err != nil {
+ return xerrors.Errorf("failed to switch chain when syncing checkpoint: %w", err)
}
- return tsk, err
-}
-
-func (syncer *Syncer) SetCheckpoint(tsk types.TipSetKey) error {
- if tsk == types.EmptyTSK {
- return xerrors.Errorf("called with empty tsk")
+ if err := syncer.ChainStore().SetCheckpoint(ts); err != nil {
+ return xerrors.Errorf("failed to set the chain checkpoint: %w", err)
}
- syncer.checkptLk.Lock()
- defer syncer.checkptLk.Unlock()
-
- ts, err := syncer.ChainStore().LoadTipSet(tsk)
- if err != nil {
- return xerrors.Errorf("cannot find tipset: %w", err)
- }
+ return nil
+}
+func (syncer *Syncer) switchChain(ctx context.Context, ts *types.TipSet) error {
hts := syncer.ChainStore().GetHeaviestTipSet()
- anc, err := syncer.ChainStore().IsAncestorOf(ts, hts)
- if err != nil {
- return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err)
+ if hts.Equals(ts) {
+ return nil
}
- if !hts.Equals(ts) && !anc {
- return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err)
+ if anc, err := syncer.store.IsAncestorOf(ts, hts); err == nil && anc {
+ return nil
}
- tskBytes, err := json.Marshal(tsk)
- if err != nil {
- return err
+ // Otherwise, sync the chain and set the head.
+ if err := syncer.collectChain(ctx, ts, hts, true); err != nil {
+ return xerrors.Errorf("failed to collect chain for checkpoint: %w", err)
}
- err = syncer.ds.Put(CheckpointKey, tskBytes)
- if err != nil {
- return err
+ if err := syncer.ChainStore().SetHead(ts); err != nil {
+ return xerrors.Errorf("failed to set the chain head: %w", err)
}
-
- syncer.checkpt = tsk
-
return nil
}
-
-func (syncer *Syncer) GetCheckpoint() types.TipSetKey {
- syncer.checkptLk.Lock()
- defer syncer.checkptLk.Unlock()
- return syncer.checkpt
-}
diff --git a/chain/events/events.go b/chain/events/events.go
index e35e91366c3..8511de9217b 100644
--- a/chain/events/events.go
+++ b/chain/events/events.go
@@ -20,8 +20,10 @@ import (
var log = logging.Logger("events")
// HeightHandler `curH`-`ts.Height` = `confidence`
-type HeightHandler func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error
-type RevertHandler func(ctx context.Context, ts *types.TipSet) error
+type (
+ HeightHandler func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error
+ RevertHandler func(ctx context.Context, ts *types.TipSet) error
+)
type heightHandler struct {
confidence int
@@ -31,33 +33,33 @@ type heightHandler struct {
revert RevertHandler
}
-type eventAPI interface {
+type EventAPI interface {
ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error)
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
ChainHead(context.Context) (*types.TipSet, error)
- StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
+ StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error)
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) // optional / for CalledMsg
}
type Events struct {
- api eventAPI
+ api EventAPI
tsc *tipSetCache
lk sync.Mutex
- ready sync.WaitGroup
+ ready chan struct{}
readyOnce sync.Once
heightEvents
*hcEvents
-}
-func NewEvents(ctx context.Context, api eventAPI) *Events {
- gcConfidence := 2 * build.ForkLengthThreshold
+ observers []TipSetObserver
+}
+func NewEventsWithConfidence(ctx context.Context, api EventAPI, gcConfidence abi.ChainEpoch) *Events {
tsc := newTSCache(gcConfidence, api)
e := &Events{
@@ -75,20 +77,27 @@ func NewEvents(ctx context.Context, api eventAPI) *Events {
htHeights: map[abi.ChainEpoch][]uint64{},
},
- hcEvents: newHCEvents(ctx, api, tsc, uint64(gcConfidence)),
+ hcEvents: newHCEvents(ctx, api, tsc, uint64(gcConfidence)),
+ ready: make(chan struct{}),
+ observers: []TipSetObserver{},
}
- e.ready.Add(1)
-
go e.listenHeadChanges(ctx)
- e.ready.Wait()
-
- // TODO: cleanup/gc goroutine
+ // Wait for the first tipset to be seen or bail if shutting down
+ select {
+ case <-e.ready:
+ case <-ctx.Done():
+ }
return e
}
+func NewEvents(ctx context.Context, api EventAPI) *Events {
+ gcConfidence := 2 * build.ForkLengthThreshold
+ return NewEventsWithConfidence(ctx, api, gcConfidence)
+}
+
func (e *Events) listenHeadChanges(ctx context.Context) {
for {
if err := e.listenHeadChangesOnce(ctx); err != nil {
@@ -96,11 +105,13 @@ func (e *Events) listenHeadChanges(ctx context.Context) {
} else {
log.Warn("listenHeadChanges quit")
}
- if ctx.Err() != nil {
+ select {
+ case <-build.Clock.After(time.Second):
+ case <-ctx.Done():
log.Warnf("not restarting listenHeadChanges: context error: %s", ctx.Err())
return
}
- build.Clock.Sleep(time.Second)
+
log.Info("restarting listenHeadChanges")
}
}
@@ -111,13 +122,21 @@ func (e *Events) listenHeadChangesOnce(ctx context.Context) error {
notifs, err := e.api.ChainNotify(ctx)
if err != nil {
- // TODO: retry
+ // Retry is handled by caller
return xerrors.Errorf("listenHeadChanges ChainNotify call failed: %w", err)
}
- cur, ok := <-notifs // TODO: timeout?
- if !ok {
- return xerrors.Errorf("notification channel closed")
+ var cur []*api.HeadChange
+ var ok bool
+
+ // Wait for first tipset or bail
+ select {
+ case cur, ok = <-notifs:
+ if !ok {
+ return xerrors.Errorf("notification channel closed")
+ }
+ case <-ctx.Done():
+ return ctx.Err()
}
if len(cur) != 1 {
@@ -129,13 +148,13 @@ func (e *Events) listenHeadChangesOnce(ctx context.Context) error {
}
if err := e.tsc.add(cur[0].Val); err != nil {
- log.Warn("tsc.add: adding current tipset failed: %w", err)
+ log.Warnf("tsc.add: adding current tipset failed: %v", err)
}
e.readyOnce.Do(func() {
e.lastTs = cur[0].Val
-
- e.ready.Done()
+ // Signal that we have seen first tipset
+ close(e.ready)
})
for notif := range notifs {
@@ -151,7 +170,7 @@ func (e *Events) listenHeadChangesOnce(ctx context.Context) error {
}
}
- if err := e.headChange(rev, app); err != nil {
+ if err := e.headChange(ctx, rev, app); err != nil {
log.Warnf("headChange failed: %s", err)
}
@@ -164,7 +183,7 @@ func (e *Events) listenHeadChangesOnce(ctx context.Context) error {
return nil
}
-func (e *Events) headChange(rev, app []*types.TipSet) error {
+func (e *Events) headChange(ctx context.Context, rev, app []*types.TipSet) error {
if len(app) == 0 {
return xerrors.New("events.headChange expected at least one applied tipset")
}
@@ -176,5 +195,39 @@ func (e *Events) headChange(rev, app []*types.TipSet) error {
return err
}
+ if err := e.observeChanges(ctx, rev, app); err != nil {
+ return err
+ }
return e.processHeadChangeEvent(rev, app)
}
+
+// A TipSetObserver receives notifications of tipsets
+type TipSetObserver interface {
+ Apply(ctx context.Context, ts *types.TipSet) error
+ Revert(ctx context.Context, ts *types.TipSet) error
+}
+
+// TODO: add a confidence level so we can have observers with difference levels of confidence
+func (e *Events) Observe(obs TipSetObserver) error {
+ e.lk.Lock()
+ defer e.lk.Unlock()
+ e.observers = append(e.observers, obs)
+ return nil
+}
+
+// observeChanges expects caller to hold e.lk
+func (e *Events) observeChanges(ctx context.Context, rev, app []*types.TipSet) error {
+ for _, ts := range rev {
+ for _, o := range e.observers {
+ _ = o.Revert(ctx, ts)
+ }
+ }
+
+ for _, ts := range app {
+ for _, o := range e.observers {
+ _ = o.Apply(ctx, ts)
+ }
+ }
+
+ return nil
+}
diff --git a/chain/events/events_called.go b/chain/events/events_called.go
index 7532060937d..1f0b80169e1 100644
--- a/chain/events/events_called.go
+++ b/chain/events/events_called.go
@@ -5,6 +5,11 @@ import (
"math"
"sync"
+ "github.com/filecoin-project/lotus/api"
+ lru "github.com/hashicorp/golang-lru"
+
+ "github.com/filecoin-project/lotus/chain/stmgr"
+
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
@@ -66,7 +71,7 @@ type queuedEvent struct {
// Manages chain head change events, which may be forward (new tipset added to
// chain) or backward (chain branch discarded in favour of heavier branch)
type hcEvents struct {
- cs eventAPI
+ cs EventAPI
tsc *tipSetCache
ctx context.Context
gcConfidence uint64
@@ -93,7 +98,7 @@ type hcEvents struct {
watcherEvents
}
-func newHCEvents(ctx context.Context, cs eventAPI, tsc *tipSetCache, gcConfidence uint64) *hcEvents {
+func newHCEvents(ctx context.Context, cs EventAPI, tsc *tipSetCache, gcConfidence uint64) *hcEvents {
e := hcEvents{
ctx: ctx,
cs: cs,
@@ -142,8 +147,10 @@ func (e *hcEvents) processHeadChangeEvent(rev, app []*types.TipSet) error {
// Queue up calls until there have been enough blocks to reach
// confidence on the message calls
- for tid, data := range newCalls {
- e.queueForConfidence(tid, data, nil, ts)
+ for tid, calls := range newCalls {
+ for _, data := range calls {
+ e.queueForConfidence(tid, data, nil, ts)
+ }
}
for at := e.lastTs.Height(); at <= ts.Height(); at++ {
@@ -353,14 +360,14 @@ type headChangeAPI interface {
// watcherEvents watches for a state change
type watcherEvents struct {
ctx context.Context
- cs eventAPI
+ cs EventAPI
hcAPI headChangeAPI
lk sync.RWMutex
matchers map[triggerID]StateMatchFunc
}
-func newWatcherEvents(ctx context.Context, hcAPI headChangeAPI, cs eventAPI) watcherEvents {
+func newWatcherEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) watcherEvents {
return watcherEvents{
ctx: ctx,
cs: cs,
@@ -455,24 +462,30 @@ func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler,
// messageEvents watches for message calls to actors
type messageEvents struct {
ctx context.Context
- cs eventAPI
+ cs EventAPI
hcAPI headChangeAPI
lk sync.RWMutex
- matchers map[triggerID][]MsgMatchFunc
+ matchers map[triggerID]MsgMatchFunc
+
+ blockMsgLk sync.Mutex
+ blockMsgCache *lru.ARCCache
}
-func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs eventAPI) messageEvents {
+func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) messageEvents {
+ blsMsgCache, _ := lru.NewARC(500)
return messageEvents{
- ctx: ctx,
- cs: cs,
- hcAPI: hcAPI,
- matchers: map[triggerID][]MsgMatchFunc{},
+ ctx: ctx,
+ cs: cs,
+ hcAPI: hcAPI,
+ matchers: make(map[triggerID]MsgMatchFunc),
+ blockMsgLk: sync.Mutex{},
+ blockMsgCache: blsMsgCache,
}
}
// Check if there are any new actor calls
-func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventData, error) {
+func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID][]eventData, error) {
pts, err := me.cs.ChainGetTipSet(me.ctx, ts.Parents()) // we actually care about messages in the parent tipset here
if err != nil {
log.Errorf("getting parent tipset in checkNewCalls: %s", err)
@@ -482,32 +495,23 @@ func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventDat
me.lk.RLock()
defer me.lk.RUnlock()
- res := make(map[triggerID]eventData)
+ // For each message in the tipset
+ res := make(map[triggerID][]eventData)
me.messagesForTs(pts, func(msg *types.Message) {
// TODO: provide receipts
- for tid, matchFns := range me.matchers {
- var matched bool
- var once bool
- for _, matchFn := range matchFns {
- matchOne, ok, err := matchFn(msg)
- if err != nil {
- log.Errorf("event matcher failed: %s", err)
- continue
- }
- matched = ok
- once = matchOne
-
- if matched {
- break
- }
+ // Run each trigger's matcher against the message
+ for tid, matchFn := range me.matchers {
+ matched, err := matchFn(msg)
+ if err != nil {
+ log.Errorf("event matcher failed: %s", err)
+ continue
}
+ // If there was a match, include the message in the results for the
+ // trigger
if matched {
- res[tid] = msg
- if once {
- break
- }
+ res[tid] = append(res[tid], msg)
}
}
})
@@ -520,14 +524,21 @@ func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Mes
seen := map[cid.Cid]struct{}{}
for _, tsb := range ts.Blocks() {
-
- msgs, err := me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid())
- if err != nil {
- log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err)
- // this is quite bad, but probably better than missing all the other updates
- continue
+ me.blockMsgLk.Lock()
+ msgsI, ok := me.blockMsgCache.Get(tsb.Cid())
+ var err error
+ if !ok {
+ msgsI, err = me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid())
+ if err != nil {
+ log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err)
+ // this is quite bad, but probably better than missing all the other updates
+ me.blockMsgLk.Unlock()
+ continue
+ }
+ me.blockMsgCache.Add(tsb.Cid(), msgsI)
}
-
+ me.blockMsgLk.Unlock()
+ msgs := msgsI.(*api.BlockMessages)
for _, m := range msgs.BlsMessages {
_, ok := seen[m.Cid()]
if ok {
@@ -555,7 +566,7 @@ func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Mes
// `curH`-`ts.Height` = `confidence`
type MsgHandler func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error)
-type MsgMatchFunc func(msg *types.Message) (matchOnce bool, matched bool, err error)
+type MsgMatchFunc func(msg *types.Message) (matched bool, err error)
// Called registers a callback which is triggered when a specified method is
// called on an actor, or a timeout is reached.
@@ -592,12 +603,16 @@ func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHa
panic("expected msg")
}
- rec, err := me.cs.StateGetReceipt(me.ctx, msg.Cid(), ts.Key())
+ ml, err := me.cs.StateSearchMsg(me.ctx, ts.Key(), msg.Cid(), stmgr.LookbackNoLimit, true)
if err != nil {
return false, err
}
- return msgHnd(msg, rec, ts, height)
+ if ml == nil {
+ return msgHnd(msg, nil, ts, height)
+ }
+
+ return msgHnd(msg, &ml.Receipt, ts, height)
}
id, err := me.hcAPI.onHeadChanged(check, hnd, rev, confidence, timeout)
@@ -607,7 +622,7 @@ func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHa
me.lk.Lock()
defer me.lk.Unlock()
- me.matchers[id] = append(me.matchers[id], mf)
+ me.matchers[id] = mf
return nil
}
diff --git a/chain/events/events_height.go b/chain/events/events_height.go
index c8dd905d9b1..1fcff9e68f1 100644
--- a/chain/events/events_height.go
+++ b/chain/events/events_height.go
@@ -153,6 +153,7 @@ func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence
best, err := e.tsc.best()
if err != nil {
+ e.lk.Unlock()
return xerrors.Errorf("error getting best tipset: %w", err)
}
@@ -177,6 +178,7 @@ func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence
e.lk.Lock()
best, err = e.tsc.best()
if err != nil {
+ e.lk.Unlock()
return xerrors.Errorf("error getting best tipset: %w", err)
}
bestH = best.Height()
diff --git a/chain/events/events_test.go b/chain/events/events_test.go
index 0e4fd34b213..04f938055f1 100644
--- a/chain/events/events_test.go
+++ b/chain/events/events_test.go
@@ -6,6 +6,8 @@ import (
"sync"
"testing"
+ "gotest.tools/assert"
+
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
"github.com/stretchr/testify/require"
@@ -44,25 +46,43 @@ type fakeCS struct {
tipsets map[types.TipSetKey]*types.TipSet
sub func(rev, app []*types.TipSet)
+
+ callNumberLk sync.Mutex
+ callNumber map[string]int
}
func (fcs *fakeCS) ChainHead(ctx context.Context) (*types.TipSet, error) {
+ fcs.callNumberLk.Lock()
+ defer fcs.callNumberLk.Unlock()
+ fcs.callNumber["ChainHead"] = fcs.callNumber["ChainHead"] + 1
panic("implement me")
}
func (fcs *fakeCS) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) {
+ fcs.callNumberLk.Lock()
+ defer fcs.callNumberLk.Unlock()
+ fcs.callNumber["ChainGetTipSet"] = fcs.callNumber["ChainGetTipSet"] + 1
return fcs.tipsets[key], nil
}
-func (fcs *fakeCS) StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) {
+func (fcs *fakeCS) StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) {
+ fcs.callNumberLk.Lock()
+ defer fcs.callNumberLk.Unlock()
+ fcs.callNumber["StateSearchMsg"] = fcs.callNumber["StateSearchMsg"] + 1
return nil, nil
}
func (fcs *fakeCS) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) {
+ fcs.callNumberLk.Lock()
+ defer fcs.callNumberLk.Unlock()
+ fcs.callNumber["StateGetActor"] = fcs.callNumber["StateGetActor"] + 1
panic("Not Implemented")
}
func (fcs *fakeCS) ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) {
+ fcs.callNumberLk.Lock()
+ defer fcs.callNumberLk.Unlock()
+ fcs.callNumber["ChainGetTipSetByHeight"] = fcs.callNumber["ChainGetTipSetByHeight"] + 1
panic("Not Implemented")
}
@@ -113,6 +133,10 @@ func (fcs *fakeCS) makeTs(t *testing.T, parents []cid.Cid, h abi.ChainEpoch, msg
}
func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*api.HeadChange, error) {
+ fcs.callNumberLk.Lock()
+ defer fcs.callNumberLk.Unlock()
+ fcs.callNumber["ChainNotify"] = fcs.callNumber["ChainNotify"] + 1
+
out := make(chan []*api.HeadChange, 1)
best, err := fcs.tsc.best()
if err != nil {
@@ -143,6 +167,9 @@ func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*api.HeadChange, error
}
func (fcs *fakeCS) ChainGetBlockMessages(ctx context.Context, blk cid.Cid) (*api.BlockMessages, error) {
+ fcs.callNumberLk.Lock()
+ defer fcs.callNumberLk.Unlock()
+ fcs.callNumber["ChainGetBlockMessages"] = fcs.callNumber["ChainGetBlockMessages"] + 1
messages, ok := fcs.blkMsgs[blk]
if !ok {
return &api.BlockMessages{}, nil
@@ -152,8 +179,8 @@ func (fcs *fakeCS) ChainGetBlockMessages(ctx context.Context, blk cid.Cid) (*api
if !ok {
return &api.BlockMessages{}, nil
}
- return &api.BlockMessages{BlsMessages: ms.bmsgs, SecpkMessages: ms.smsgs}, nil
+ return &api.BlockMessages{BlsMessages: ms.bmsgs, SecpkMessages: ms.smsgs}, nil
}
func (fcs *fakeCS) fakeMsgs(m fakeMsg) cid.Cid {
@@ -229,13 +256,14 @@ func (fcs *fakeCS) notifDone() {
fcs.sync.Unlock()
}
-var _ eventAPI = &fakeCS{}
+var _ EventAPI = &fakeCS{}
func TestAt(t *testing.T) {
fcs := &fakeCS{
- t: t,
- h: 1,
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ t: t,
+ h: 1,
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -298,9 +326,10 @@ func TestAt(t *testing.T) {
func TestAtDoubleTrigger(t *testing.T) {
fcs := &fakeCS{
- t: t,
- h: 1,
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ t: t,
+ h: 1,
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -340,9 +369,10 @@ func TestAtDoubleTrigger(t *testing.T) {
func TestAtNullTrigger(t *testing.T) {
fcs := &fakeCS{
- t: t,
- h: 1,
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ t: t,
+ h: 1,
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -374,9 +404,10 @@ func TestAtNullTrigger(t *testing.T) {
func TestAtNullConf(t *testing.T) {
fcs := &fakeCS{
- t: t,
- h: 1,
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ t: t,
+ h: 1,
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -413,9 +444,10 @@ func TestAtNullConf(t *testing.T) {
func TestAtStart(t *testing.T) {
fcs := &fakeCS{
- t: t,
- h: 1,
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ t: t,
+ h: 1,
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -447,9 +479,10 @@ func TestAtStart(t *testing.T) {
func TestAtStartConfidence(t *testing.T) {
fcs := &fakeCS{
- t: t,
- h: 1,
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ t: t,
+ h: 1,
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -477,9 +510,10 @@ func TestAtStartConfidence(t *testing.T) {
func TestAtChained(t *testing.T) {
fcs := &fakeCS{
- t: t,
- h: 1,
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ t: t,
+ h: 1,
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -511,9 +545,10 @@ func TestAtChained(t *testing.T) {
func TestAtChainedConfidence(t *testing.T) {
fcs := &fakeCS{
- t: t,
- h: 1,
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ t: t,
+ h: 1,
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -545,9 +580,10 @@ func TestAtChainedConfidence(t *testing.T) {
func TestAtChainedConfidenceNull(t *testing.T) {
fcs := &fakeCS{
- t: t,
- h: 1,
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ t: t,
+ h: 1,
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -572,9 +608,9 @@ func TestAtChainedConfidenceNull(t *testing.T) {
require.Equal(t, false, reverted)
}
-func matchAddrMethod(to address.Address, m abi.MethodNum) func(msg *types.Message) (matchOnce bool, matched bool, err error) {
- return func(msg *types.Message) (matchOnce bool, matched bool, err error) {
- return true, to == msg.To && m == msg.Method, nil
+func matchAddrMethod(to address.Address, m abi.MethodNum) func(msg *types.Message) (matched bool, err error) {
+ return func(msg *types.Message) (matched bool, err error) {
+ return to == msg.To && m == msg.Method, nil
}
}
@@ -583,9 +619,10 @@ func TestCalled(t *testing.T) {
t: t,
h: 1,
- msgs: map[cid.Cid]fakeMsg{},
- blkMsgs: map[cid.Cid]cid.Cid{},
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ msgs: map[cid.Cid]fakeMsg{},
+ blkMsgs: map[cid.Cid]cid.Cid{},
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -795,9 +832,10 @@ func TestCalledTimeout(t *testing.T) {
t: t,
h: 1,
- msgs: map[cid.Cid]fakeMsg{},
- blkMsgs: map[cid.Cid]cid.Cid{},
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ msgs: map[cid.Cid]fakeMsg{},
+ blkMsgs: map[cid.Cid]cid.Cid{},
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -835,9 +873,10 @@ func TestCalledTimeout(t *testing.T) {
t: t,
h: 1,
- msgs: map[cid.Cid]fakeMsg{},
- blkMsgs: map[cid.Cid]cid.Cid{},
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ msgs: map[cid.Cid]fakeMsg{},
+ blkMsgs: map[cid.Cid]cid.Cid{},
+ callNumber: map[string]int{},
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -869,9 +908,10 @@ func TestCalledOrder(t *testing.T) {
t: t,
h: 1,
- msgs: map[cid.Cid]fakeMsg{},
- blkMsgs: map[cid.Cid]cid.Cid{},
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ msgs: map[cid.Cid]fakeMsg{},
+ blkMsgs: map[cid.Cid]cid.Cid{},
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -932,9 +972,10 @@ func TestCalledNull(t *testing.T) {
t: t,
h: 1,
- msgs: map[cid.Cid]fakeMsg{},
- blkMsgs: map[cid.Cid]cid.Cid{},
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ msgs: map[cid.Cid]fakeMsg{},
+ blkMsgs: map[cid.Cid]cid.Cid{},
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -997,9 +1038,10 @@ func TestRemoveTriggersOnMessage(t *testing.T) {
t: t,
h: 1,
- msgs: map[cid.Cid]fakeMsg{},
- blkMsgs: map[cid.Cid]cid.Cid{},
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ msgs: map[cid.Cid]fakeMsg{},
+ blkMsgs: map[cid.Cid]cid.Cid{},
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -1087,9 +1129,10 @@ func TestStateChanged(t *testing.T) {
t: t,
h: 1,
- msgs: map[cid.Cid]fakeMsg{},
- blkMsgs: map[cid.Cid]cid.Cid{},
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ msgs: map[cid.Cid]fakeMsg{},
+ blkMsgs: map[cid.Cid]cid.Cid{},
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -1175,9 +1218,10 @@ func TestStateChangedRevert(t *testing.T) {
t: t,
h: 1,
- msgs: map[cid.Cid]fakeMsg{},
- blkMsgs: map[cid.Cid]cid.Cid{},
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ msgs: map[cid.Cid]fakeMsg{},
+ blkMsgs: map[cid.Cid]cid.Cid{},
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -1253,9 +1297,10 @@ func TestStateChangedTimeout(t *testing.T) {
t: t,
h: 1,
- msgs: map[cid.Cid]fakeMsg{},
- blkMsgs: map[cid.Cid]cid.Cid{},
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ msgs: map[cid.Cid]fakeMsg{},
+ blkMsgs: map[cid.Cid]cid.Cid{},
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -1293,9 +1338,10 @@ func TestStateChangedTimeout(t *testing.T) {
t: t,
h: 1,
- msgs: map[cid.Cid]fakeMsg{},
- blkMsgs: map[cid.Cid]cid.Cid{},
- tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ msgs: map[cid.Cid]fakeMsg{},
+ blkMsgs: map[cid.Cid]cid.Cid{},
+ callNumber: map[string]int{},
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@@ -1323,3 +1369,84 @@ func TestStateChangedTimeout(t *testing.T) {
fcs.advance(0, 5, nil)
require.False(t, called)
}
+
+func TestCalledMultiplePerEpoch(t *testing.T) {
+ fcs := &fakeCS{
+ t: t,
+ h: 1,
+
+ msgs: map[cid.Cid]fakeMsg{},
+ blkMsgs: map[cid.Cid]cid.Cid{},
+ callNumber: map[string]int{},
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ }
+ require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
+
+ events := NewEvents(context.Background(), fcs)
+
+ t0123, err := address.NewFromString("t0123")
+ require.NoError(t, err)
+
+ at := 0
+
+ err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) {
+ return false, true, nil
+ }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) {
+ switch at {
+ case 0:
+ require.Equal(t, uint64(1), msg.Nonce)
+ require.Equal(t, abi.ChainEpoch(4), ts.Height())
+ case 1:
+ require.Equal(t, uint64(2), msg.Nonce)
+ require.Equal(t, abi.ChainEpoch(4), ts.Height())
+ default:
+ t.Fatal("apply should only get called twice, at: ", at)
+ }
+ at++
+ return true, nil
+ }, func(_ context.Context, ts *types.TipSet) error {
+ switch at {
+ case 2:
+ require.Equal(t, abi.ChainEpoch(4), ts.Height())
+ case 3:
+ require.Equal(t, abi.ChainEpoch(4), ts.Height())
+ default:
+ t.Fatal("revert should only get called twice, at: ", at)
+ }
+ at++
+ return nil
+ }, 3, 20, matchAddrMethod(t0123, 5))
+ require.NoError(t, err)
+
+ fcs.advance(0, 10, map[int]cid.Cid{
+ 1: fcs.fakeMsgs(fakeMsg{
+ bmsgs: []*types.Message{
+ {To: t0123, From: t0123, Method: 5, Nonce: 1},
+ {To: t0123, From: t0123, Method: 5, Nonce: 2},
+ },
+ }),
+ })
+
+ fcs.advance(9, 1, nil)
+}
+
+func TestCachedSameBlock(t *testing.T) {
+ fcs := &fakeCS{
+ t: t,
+ h: 1,
+
+ msgs: map[cid.Cid]fakeMsg{},
+ blkMsgs: map[cid.Cid]cid.Cid{},
+ callNumber: map[string]int{},
+ tsc: newTSCache(2*build.ForkLengthThreshold, nil),
+ }
+ require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
+
+ _ = NewEvents(context.Background(), fcs)
+
+ fcs.advance(0, 10, map[int]cid.Cid{})
+ assert.Assert(t, fcs.callNumber["ChainGetBlockMessages"] == 20, "expect call ChainGetBlockMessages %d but got ", 20, fcs.callNumber["ChainGetBlockMessages"])
+
+ fcs.advance(5, 10, map[int]cid.Cid{})
+ assert.Assert(t, fcs.callNumber["ChainGetBlockMessages"] == 30, "expect call ChainGetBlockMessages %d but got ", 30, fcs.callNumber["ChainGetBlockMessages"])
+}
diff --git a/chain/events/state/fastapi.go b/chain/events/state/fastapi.go
new file mode 100644
index 00000000000..9375d9d7846
--- /dev/null
+++ b/chain/events/state/fastapi.go
@@ -0,0 +1,34 @@
+package state
+
+import (
+ "context"
+
+ "github.com/filecoin-project/go-address"
+
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type FastChainApiAPI interface {
+ ChainAPI
+
+ ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
+}
+
+type fastAPI struct {
+ FastChainApiAPI
+}
+
+func WrapFastAPI(api FastChainApiAPI) ChainAPI {
+ return &fastAPI{
+ api,
+ }
+}
+
+func (a *fastAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) {
+ ts, err := a.FastChainApiAPI.ChainGetTipSet(ctx, tsk)
+ if err != nil {
+ return nil, err
+ }
+
+ return a.FastChainApiAPI.StateGetActor(ctx, actor, ts.Parents())
+}
diff --git a/chain/events/state/mock/api.go b/chain/events/state/mock/api.go
new file mode 100644
index 00000000000..2ed48dc39c3
--- /dev/null
+++ b/chain/events/state/mock/api.go
@@ -0,0 +1,69 @@
+package test
+
+import (
+ "context"
+ "sync"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+)
+
+type MockAPI struct {
+ bs blockstore.Blockstore
+
+ lk sync.Mutex
+ ts map[types.TipSetKey]*types.Actor
+ stateGetActorCalled int
+}
+
+func NewMockAPI(bs blockstore.Blockstore) *MockAPI {
+ return &MockAPI{
+ bs: bs,
+ ts: make(map[types.TipSetKey]*types.Actor),
+ }
+}
+
+func (m *MockAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) {
+ return m.bs.Has(c)
+}
+
+func (m *MockAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) {
+ blk, err := m.bs.Get(c)
+ if err != nil {
+ return nil, xerrors.Errorf("blockstore get: %w", err)
+ }
+
+ return blk.RawData(), nil
+}
+
+func (m *MockAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) {
+ m.lk.Lock()
+ defer m.lk.Unlock()
+
+ m.stateGetActorCalled++
+ return m.ts[tsk], nil
+}
+
+func (m *MockAPI) StateGetActorCallCount() int {
+ m.lk.Lock()
+ defer m.lk.Unlock()
+
+ return m.stateGetActorCalled
+}
+
+func (m *MockAPI) ResetCallCounts() {
+ m.lk.Lock()
+ defer m.lk.Unlock()
+
+ m.stateGetActorCalled = 0
+}
+
+func (m *MockAPI) SetActor(tsk types.TipSetKey, act *types.Actor) {
+ m.lk.Lock()
+ defer m.lk.Unlock()
+
+ m.ts[tsk] = act
+}
diff --git a/chain/events/state/mock/state.go b/chain/events/state/mock/state.go
new file mode 100644
index 00000000000..bac06b59fcb
--- /dev/null
+++ b/chain/events/state/mock/state.go
@@ -0,0 +1,32 @@
+package test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
+ "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
+ "github.com/stretchr/testify/require"
+)
+
+func CreateEmptyMarketState(t *testing.T, store adt.Store) *market.State {
+ emptyArrayCid, err := adt.MakeEmptyArray(store).Root()
+ require.NoError(t, err)
+ emptyMap, err := adt.MakeEmptyMap(store).Root()
+ require.NoError(t, err)
+ return market.ConstructState(emptyArrayCid, emptyMap, emptyMap)
+}
+
+func CreateDealAMT(ctx context.Context, t *testing.T, store adt.Store, deals map[abi.DealID]*market.DealState) cid.Cid {
+ root := adt.MakeEmptyArray(store)
+ for dealID, dealState := range deals {
+ err := root.Set(uint64(dealID), dealState)
+ require.NoError(t, err)
+ }
+ rootCid, err := root.Root()
+ require.NoError(t, err)
+ return rootCid
+}
diff --git a/chain/events/state/mock/tipset.go b/chain/events/state/mock/tipset.go
new file mode 100644
index 00000000000..39d42d6e54d
--- /dev/null
+++ b/chain/events/state/mock/tipset.go
@@ -0,0 +1,27 @@
+package test
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/ipfs/go-cid"
+)
+
+var dummyCid cid.Cid
+
+func init() {
+ dummyCid, _ = cid.Parse("bafkqaaa")
+}
+
+func MockTipset(minerAddr address.Address, timestamp uint64) (*types.TipSet, error) {
+ return types.NewTipSet([]*types.BlockHeader{{
+ Miner: minerAddr,
+ Height: 5,
+ ParentStateRoot: dummyCid,
+ Messages: dummyCid,
+ ParentMessageReceipts: dummyCid,
+ BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS},
+ BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS},
+ Timestamp: timestamp,
+ }})
+}
diff --git a/chain/events/state/predicates.go b/chain/events/state/predicates.go
index 99b8480dc52..33f49628978 100644
--- a/chain/events/state/predicates.go
+++ b/chain/events/state/predicates.go
@@ -1,18 +1,17 @@
package state
import (
- "bytes"
"context"
+ "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
cbor "github.com/ipfs/go-ipld-cbor"
- typegen "github.com/whyrusleeping/cbor-gen"
- "github.com/filecoin-project/lotus/api/apibstore"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/actors/adt"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
@@ -25,7 +24,7 @@ type UserData interface{}
// ChainAPI abstracts out calls made by this class to external APIs
type ChainAPI interface {
- apibstore.ChainIO
+ api.ChainIO
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error)
}
@@ -38,7 +37,7 @@ type StatePredicates struct {
func NewStatePredicates(api ChainAPI) *StatePredicates {
return &StatePredicates{
api: api,
- cst: cbor.NewCborStore(apibstore.NewAPIBlockstore(api)),
+ cst: cbor.NewCborStore(blockstore.NewAPIBlockstore(api)),
}
}
@@ -419,179 +418,17 @@ type AddressPair struct {
PK address.Address
}
-type InitActorAddressChanges struct {
- Added []AddressPair
- Modified []AddressChange
- Removed []AddressPair
-}
-
-type AddressChange struct {
- From AddressPair
- To AddressPair
-}
-
type DiffInitActorStateFunc func(ctx context.Context, oldState init_.State, newState init_.State) (changed bool, user UserData, err error)
-func (i *InitActorAddressChanges) AsKey(key string) (abi.Keyer, error) {
- addr, err := address.NewFromBytes([]byte(key))
- if err != nil {
- return nil, err
- }
- return abi.AddrKey(addr), nil
-}
-
-func (i *InitActorAddressChanges) Add(key string, val *typegen.Deferred) error {
- pkAddr, err := address.NewFromBytes([]byte(key))
- if err != nil {
- return err
- }
- id := new(typegen.CborInt)
- if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
- return err
- }
- idAddr, err := address.NewIDAddress(uint64(*id))
- if err != nil {
- return err
- }
- i.Added = append(i.Added, AddressPair{
- ID: idAddr,
- PK: pkAddr,
- })
- return nil
-}
-
-func (i *InitActorAddressChanges) Modify(key string, from, to *typegen.Deferred) error {
- pkAddr, err := address.NewFromBytes([]byte(key))
- if err != nil {
- return err
- }
-
- fromID := new(typegen.CborInt)
- if err := fromID.UnmarshalCBOR(bytes.NewReader(from.Raw)); err != nil {
- return err
- }
- fromIDAddr, err := address.NewIDAddress(uint64(*fromID))
- if err != nil {
- return err
- }
-
- toID := new(typegen.CborInt)
- if err := toID.UnmarshalCBOR(bytes.NewReader(to.Raw)); err != nil {
- return err
- }
- toIDAddr, err := address.NewIDAddress(uint64(*toID))
- if err != nil {
- return err
- }
-
- i.Modified = append(i.Modified, AddressChange{
- From: AddressPair{
- ID: fromIDAddr,
- PK: pkAddr,
- },
- To: AddressPair{
- ID: toIDAddr,
- PK: pkAddr,
- },
- })
- return nil
-}
-
-func (i *InitActorAddressChanges) Remove(key string, val *typegen.Deferred) error {
- pkAddr, err := address.NewFromBytes([]byte(key))
- if err != nil {
- return err
- }
- id := new(typegen.CborInt)
- if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
- return err
- }
- idAddr, err := address.NewIDAddress(uint64(*id))
- if err != nil {
- return err
- }
- i.Removed = append(i.Removed, AddressPair{
- ID: idAddr,
- PK: pkAddr,
- })
- return nil
-}
-
func (sp *StatePredicates) OnAddressMapChange() DiffInitActorStateFunc {
return func(ctx context.Context, oldState, newState init_.State) (changed bool, user UserData, err error) {
- addressChanges := &InitActorAddressChanges{
- Added: []AddressPair{},
- Modified: []AddressChange{},
- Removed: []AddressPair{},
- }
-
- err = oldState.ForEachActor(func(oldId abi.ActorID, oldAddress address.Address) error {
- oldIdAddress, err := address.NewIDAddress(uint64(oldId))
- if err != nil {
- return err
- }
-
- newIdAddress, found, err := newState.ResolveAddress(oldAddress)
- if err != nil {
- return err
- }
-
- if !found {
- addressChanges.Removed = append(addressChanges.Removed, AddressPair{
- ID: oldIdAddress,
- PK: oldAddress,
- })
- }
-
- if oldIdAddress != newIdAddress {
- addressChanges.Modified = append(addressChanges.Modified, AddressChange{
- From: AddressPair{
- ID: oldIdAddress,
- PK: oldAddress,
- },
- To: AddressPair{
- ID: newIdAddress,
- PK: oldAddress,
- },
- })
- }
-
- return nil
- })
-
+ addressChanges, err := init_.DiffAddressMap(oldState, newState)
if err != nil {
return false, nil, err
}
-
- err = newState.ForEachActor(func(newId abi.ActorID, newAddress address.Address) error {
- newIdAddress, err := address.NewIDAddress(uint64(newId))
- if err != nil {
- return err
- }
-
- _, found, err := newState.ResolveAddress(newAddress)
- if err != nil {
- return err
- }
-
- if !found {
- addressChanges.Added = append(addressChanges.Added, AddressPair{
- ID: newIdAddress,
- PK: newAddress,
- })
- }
-
- return nil
- })
-
- if err != nil {
- return false, nil, err
- }
-
- if len(addressChanges.Added)+len(addressChanges.Removed)+len(addressChanges.Modified) == 0 {
+ if len(addressChanges.Added)+len(addressChanges.Modified)+len(addressChanges.Removed) == 0 {
return false, nil, nil
}
-
return true, addressChanges, nil
}
}
diff --git a/chain/events/state/predicates_test.go b/chain/events/state/predicates_test.go
index 9b393f6e4ba..8af3bb6a0b9 100644
--- a/chain/events/state/predicates_test.go
+++ b/chain/events/state/predicates_test.go
@@ -4,30 +4,28 @@ import (
"context"
"testing"
+ test "github.com/filecoin-project/lotus/chain/events/state/mock"
+
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/go-bitfield"
- "github.com/stretchr/testify/require"
- "golang.org/x/xerrors"
-
"github.com/ipfs/go-cid"
cbornode "github.com/ipfs/go-ipld-cbor"
+ "github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
- "github.com/filecoin-project/go-state-types/crypto"
-
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
+ bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/types"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
)
var dummyCid cid.Cid
@@ -36,42 +34,9 @@ func init() {
dummyCid, _ = cid.Parse("bafkqaaa")
}
-type mockAPI struct {
- ts map[types.TipSetKey]*types.Actor
- bs bstore.Blockstore
-}
-
-func newMockAPI(bs bstore.Blockstore) *mockAPI {
- return &mockAPI{
- bs: bs,
- ts: make(map[types.TipSetKey]*types.Actor),
- }
-}
-
-func (m mockAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) {
- return m.bs.Has(c)
-}
-
-func (m mockAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) {
- blk, err := m.bs.Get(c)
- if err != nil {
- return nil, xerrors.Errorf("blockstore get: %w", err)
- }
-
- return blk.RawData(), nil
-}
-
-func (m mockAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) {
- return m.ts[tsk], nil
-}
-
-func (m mockAPI) setActor(tsk types.TipSetKey, act *types.Actor) {
- m.ts[tsk] = act
-}
-
func TestMarketPredicates(t *testing.T) {
ctx := context.Background()
- bs := bstore.NewTemporarySync()
+ bs := bstore.NewMemorySync()
store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs))
oldDeal1 := &market2.DealState{
@@ -177,14 +142,14 @@ func TestMarketPredicates(t *testing.T) {
minerAddr, err := address.NewFromString("t00")
require.NoError(t, err)
- oldState, err := mockTipset(minerAddr, 1)
+ oldState, err := test.MockTipset(minerAddr, 1)
require.NoError(t, err)
- newState, err := mockTipset(minerAddr, 2)
+ newState, err := test.MockTipset(minerAddr, 2)
require.NoError(t, err)
- api := newMockAPI(bs)
- api.setActor(oldState.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: oldStateC})
- api.setActor(newState.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: newStateC})
+ api := test.NewMockAPI(bs)
+ api.SetActor(oldState.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: oldStateC})
+ api.SetActor(newState.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: newStateC})
t.Run("deal ID predicate", func(t *testing.T) {
preds := NewStatePredicates(api)
@@ -239,7 +204,7 @@ func TestMarketPredicates(t *testing.T) {
t.Fatal("No state change so this should not be called")
return false, nil, nil
})
- marketState0 := createEmptyMarketState(t, store)
+ marketState0 := test.CreateEmptyMarketState(t, store)
marketCid, err := store.Put(ctx, marketState0)
require.NoError(t, err)
marketState, err := market.Load(store, &types.Actor{
@@ -352,7 +317,7 @@ func TestMarketPredicates(t *testing.T) {
t.Fatal("No state change so this should not be called")
return false, nil, nil
})
- marketState0 := createEmptyMarketState(t, store)
+ marketState0 := test.CreateEmptyMarketState(t, store)
marketCid, err := store.Put(ctx, marketState0)
require.NoError(t, err)
marketState, err := market.Load(store, &types.Actor{
@@ -369,7 +334,7 @@ func TestMarketPredicates(t *testing.T) {
func TestMinerSectorChange(t *testing.T) {
ctx := context.Background()
- bs := bstore.NewTemporarySync()
+ bs := bstore.NewMemorySync()
store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs))
nextID := uint64(0)
@@ -394,14 +359,14 @@ func TestMinerSectorChange(t *testing.T) {
newMinerC := createMinerState(ctx, t, store, owner, worker, []miner.SectorOnChainInfo{si1Ext, si2, si3})
minerAddr := nextIDAddrF()
- oldState, err := mockTipset(minerAddr, 1)
+ oldState, err := test.MockTipset(minerAddr, 1)
require.NoError(t, err)
- newState, err := mockTipset(minerAddr, 2)
+ newState, err := test.MockTipset(minerAddr, 2)
require.NoError(t, err)
- api := newMockAPI(bs)
- api.setActor(oldState.Key(), &types.Actor{Head: oldMinerC, Code: builtin2.StorageMinerActorCodeID})
- api.setActor(newState.Key(), &types.Actor{Head: newMinerC, Code: builtin2.StorageMinerActorCodeID})
+ api := test.NewMockAPI(bs)
+ api.SetActor(oldState.Key(), &types.Actor{Head: oldMinerC, Code: builtin2.StorageMinerActorCodeID})
+ api.SetActor(newState.Key(), &types.Actor{Head: newMinerC, Code: builtin2.StorageMinerActorCodeID})
preds := NewStatePredicates(api)
@@ -449,29 +414,16 @@ func TestMinerSectorChange(t *testing.T) {
require.Equal(t, si1Ext, sectorChanges.Extended[0].From)
}
-func mockTipset(minerAddr address.Address, timestamp uint64) (*types.TipSet, error) {
- return types.NewTipSet([]*types.BlockHeader{{
- Miner: minerAddr,
- Height: 5,
- ParentStateRoot: dummyCid,
- Messages: dummyCid,
- ParentMessageReceipts: dummyCid,
- BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS},
- BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS},
- Timestamp: timestamp,
- }})
-}
-
type balance struct {
available abi.TokenAmount
locked abi.TokenAmount
}
func createMarketState(ctx context.Context, t *testing.T, store adt2.Store, deals map[abi.DealID]*market2.DealState, props map[abi.DealID]*market2.DealProposal, balances map[address.Address]balance) cid.Cid {
- dealRootCid := createDealAMT(ctx, t, store, deals)
+ dealRootCid := test.CreateDealAMT(ctx, t, store, deals)
propRootCid := createProposalAMT(ctx, t, store, props)
balancesCids := createBalanceTable(ctx, t, store, balances)
- state := createEmptyMarketState(t, store)
+ state := test.CreateEmptyMarketState(t, store)
state.States = dealRootCid
state.Proposals = propRootCid
state.EscrowTable = balancesCids[0]
@@ -482,25 +434,6 @@ func createMarketState(ctx context.Context, t *testing.T, store adt2.Store, deal
return stateC
}
-func createEmptyMarketState(t *testing.T, store adt2.Store) *market2.State {
- emptyArrayCid, err := adt2.MakeEmptyArray(store).Root()
- require.NoError(t, err)
- emptyMap, err := adt2.MakeEmptyMap(store).Root()
- require.NoError(t, err)
- return market2.ConstructState(emptyArrayCid, emptyMap, emptyMap)
-}
-
-func createDealAMT(ctx context.Context, t *testing.T, store adt2.Store, deals map[abi.DealID]*market2.DealState) cid.Cid {
- root := adt2.MakeEmptyArray(store)
- for dealID, dealState := range deals {
- err := root.Set(uint64(dealID), dealState)
- require.NoError(t, err)
- }
- rootCid, err := root.Root()
- require.NoError(t, err)
- return rootCid
-}
-
func createProposalAMT(ctx context.Context, t *testing.T, store adt2.Store, props map[abi.DealID]*market2.DealProposal) cid.Cid {
root := adt2.MakeEmptyArray(store)
for dealID, prop := range props {
diff --git a/chain/events/utils.go b/chain/events/utils.go
index e50dbc6feeb..91ea0cd7a07 100644
--- a/chain/events/utils.go
+++ b/chain/events/utils.go
@@ -3,6 +3,8 @@ package events
import (
"context"
+ "github.com/filecoin-project/lotus/chain/stmgr"
+
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/types"
@@ -22,23 +24,27 @@ func (me *messageEvents) CheckMsg(ctx context.Context, smsg types.ChainMsg, hnd
return false, true, nil
}
- rec, err := me.cs.StateGetReceipt(ctx, smsg.VMMessage().Cid(), ts.Key())
+ ml, err := me.cs.StateSearchMsg(me.ctx, ts.Key(), msg.Cid(), stmgr.LookbackNoLimit, true)
if err != nil {
return false, true, xerrors.Errorf("getting receipt in CheckMsg: %w", err)
}
- more, err = hnd(msg, rec, ts, ts.Height())
+ if ml == nil {
+ more, err = hnd(msg, nil, ts, ts.Height())
+ } else {
+ more, err = hnd(msg, &ml.Receipt, ts, ts.Height())
+ }
return true, more, err
}
}
func (me *messageEvents) MatchMsg(inmsg *types.Message) MsgMatchFunc {
- return func(msg *types.Message) (matchOnce bool, matched bool, err error) {
+ return func(msg *types.Message) (matched bool, err error) {
if msg.From == inmsg.From && msg.Nonce == inmsg.Nonce && !inmsg.Equals(msg) {
- return true, false, xerrors.Errorf("matching msg %s from %s, nonce %d: got duplicate origin/nonce msg %d", inmsg.Cid(), inmsg.From, inmsg.Nonce, msg.Nonce)
+ return false, xerrors.Errorf("matching msg %s from %s, nonce %d: got duplicate origin/nonce msg %d", inmsg.Cid(), inmsg.From, inmsg.Nonce, msg.Nonce)
}
- return true, inmsg.Equals(msg), nil
+ return inmsg.Equals(msg), nil
}
}
diff --git a/chain/exchange/cbor_gen.go b/chain/exchange/cbor_gen.go
index 29b2580813a..da5f7cbe25b 100644
--- a/chain/exchange/cbor_gen.go
+++ b/chain/exchange/cbor_gen.go
@@ -5,6 +5,7 @@ package exchange
import (
"fmt"
"io"
+ "sort"
types "github.com/filecoin-project/lotus/chain/types"
cid "github.com/ipfs/go-cid"
@@ -13,6 +14,8 @@ import (
)
var _ = xerrors.Errorf
+var _ = cid.Undef
+var _ = sort.Sort
var lengthBufRequest = []byte{131}
diff --git a/chain/exchange/client.go b/chain/exchange/client.go
index cb030bcf785..fa9ed2974cd 100644
--- a/chain/exchange/client.go
+++ b/chain/exchange/client.go
@@ -7,7 +7,6 @@ import (
"math/rand"
"time"
- "github.com/libp2p/go-libp2p-core/helpers"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
@@ -412,11 +411,7 @@ func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *Reque
return nil, xerrors.Errorf("failed to open stream to peer: %w", err)
}
- defer func() {
- // Note: this will become just stream.Close once we've completed the go-libp2p migration to
- // go-libp2p-core 0.7.0
- go helpers.FullClose(stream) //nolint:errcheck
- }()
+ defer stream.Close() //nolint:errcheck
// Write request.
_ = stream.SetWriteDeadline(time.Now().Add(WriteReqDeadline))
diff --git a/chain/exchange/peer_tracker.go b/chain/exchange/peer_tracker.go
index 902baadcee0..835a5b8a479 100644
--- a/chain/exchange/peer_tracker.go
+++ b/chain/exchange/peer_tracker.go
@@ -38,20 +38,26 @@ func newPeerTracker(lc fx.Lifecycle, h host.Host, pmgr *peermgr.PeerMgr) *bsPeer
pmgr: pmgr,
}
- sub, err := h.EventBus().Subscribe(new(peermgr.NewFilPeer))
+ evtSub, err := h.EventBus().Subscribe(new(peermgr.FilPeerEvt))
if err != nil {
panic(err)
}
go func() {
- for newPeer := range sub.Out() {
- bsPt.addPeer(newPeer.(peermgr.NewFilPeer).Id)
+ for evt := range evtSub.Out() {
+ pEvt := evt.(peermgr.FilPeerEvt)
+ switch pEvt.Type {
+ case peermgr.AddFilPeerEvt:
+ bsPt.addPeer(pEvt.ID)
+ case peermgr.RemoveFilPeerEvt:
+ bsPt.removePeer(pEvt.ID)
+ }
}
}()
lc.Append(fx.Hook{
OnStop: func(ctx context.Context) error {
- return sub.Close()
+ return evtSub.Close()
},
})
diff --git a/chain/exchange/protocol.go b/chain/exchange/protocol.go
index 2114793359f..d0977e54c79 100644
--- a/chain/exchange/protocol.go
+++ b/chain/exchange/protocol.go
@@ -7,7 +7,7 @@ import (
"github.com/filecoin-project/lotus/chain/store"
"github.com/ipfs/go-cid"
- logging "github.com/ipfs/go-log"
+ logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/types"
diff --git a/chain/exchange/server.go b/chain/exchange/server.go
index dcdb5b3a5bf..7c1624e579a 100644
--- a/chain/exchange/server.go
+++ b/chain/exchange/server.go
@@ -15,7 +15,6 @@ import (
"github.com/filecoin-project/lotus/chain/types"
"github.com/ipfs/go-cid"
- "github.com/libp2p/go-libp2p-core/helpers"
inet "github.com/libp2p/go-libp2p-core/network"
)
@@ -40,16 +39,14 @@ func (s *server) HandleStream(stream inet.Stream) {
ctx, span := trace.StartSpan(context.Background(), "chainxchg.HandleStream")
defer span.End()
- // Note: this will become just stream.Close once we've completed the go-libp2p migration to
- // go-libp2p-core 0.7.0
- defer helpers.FullClose(stream) //nolint:errcheck
+ defer stream.Close() //nolint:errcheck
var req Request
if err := cborutil.ReadCborRPC(bufio.NewReader(stream), &req); err != nil {
log.Warnf("failed to read block sync request: %s", err)
return
}
- log.Infow("block sync request",
+ log.Debugw("block sync request",
"start", req.Head, "len", req.Length)
resp, err := s.processRequest(ctx, &req)
@@ -59,7 +56,11 @@ func (s *server) HandleStream(stream inet.Stream) {
}
_ = stream.SetDeadline(time.Now().Add(WriteResDeadline))
- if err := cborutil.WriteCborRPC(stream, resp); err != nil {
+ buffered := bufio.NewWriter(stream)
+ if err = cborutil.WriteCborRPC(buffered, resp); err == nil {
+ err = buffered.Flush()
+ }
+ if err != nil {
_ = stream.SetDeadline(time.Time{})
log.Warnw("failed to write back response for handle stream",
"err", err, "peer", stream.Conn().RemotePeer())
diff --git a/chain/gen/gen.go b/chain/gen/gen.go
index d56f285a019..424ee6edcb6 100644
--- a/chain/gen/gen.go
+++ b/chain/gen/gen.go
@@ -4,16 +4,18 @@ import (
"bytes"
"context"
"fmt"
+ "io"
"io/ioutil"
"sync/atomic"
"time"
+ "github.com/filecoin-project/go-state-types/network"
+
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/google/uuid"
- block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
offline "github.com/ipfs/go-ipfs-exchange-offline"
@@ -24,9 +26,10 @@ import (
"go.opencensus.io/trace"
"golang.org/x/xerrors"
- proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
+ proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/beacon"
@@ -40,7 +43,6 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/genesis"
"github.com/filecoin-project/lotus/journal"
- "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/node/repo"
)
@@ -50,7 +52,7 @@ const msgsPerBlock = 20
//nolint:deadcode,varcheck
var log = logging.Logger("gen")
-var ValidWpostForTesting = []proof2.PoStProof{{
+var ValidWpostForTesting = []proof5.PoStProof{{
ProofBytes: []byte("valid proof"),
}}
@@ -74,9 +76,10 @@ type ChainGen struct {
w *wallet.LocalWallet
- eppProvs map[address.Address]WinningPoStProver
- Miners []address.Address
- receivers []address.Address
+ eppProvs map[address.Address]WinningPoStProver
+ Miners []address.Address
+ receivers []address.Address
+ // a SecP address
banker address.Address
bankerNonce uint64
@@ -84,19 +87,6 @@ type ChainGen struct {
lr repo.LockedRepo
}
-type mybs struct {
- blockstore.Blockstore
-}
-
-func (m mybs) Get(c cid.Cid) (block.Block, error) {
- b, err := m.Blockstore.Get(c)
- if err != nil {
- return nil, err
- }
-
- return b, nil
-}
-
var rootkeyMultisig = genesis.MultisigMeta{
Signers: []address.Address{remAccTestKey},
Threshold: 1,
@@ -122,7 +112,7 @@ var DefaultRemainderAccountActor = genesis.Actor{
Meta: remAccMeta.ActorMeta(),
}
-func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
+func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeSchedule) (*ChainGen, error) {
j := journal.NilJournal()
// TODO: we really shouldn't modify a global variable here.
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
@@ -133,17 +123,23 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
return nil, xerrors.Errorf("taking mem-repo lock failed: %w", err)
}
- ds, err := lr.Datastore("/metadata")
+ ds, err := lr.Datastore(context.TODO(), "/metadata")
if err != nil {
return nil, xerrors.Errorf("failed to get metadata datastore: %w", err)
}
- bds, err := lr.Datastore("/chain")
+ bs, err := lr.Blockstore(context.TODO(), repo.UniversalBlockstore)
if err != nil {
- return nil, xerrors.Errorf("failed to get blocks datastore: %w", err)
+ return nil, err
}
- bs := mybs{blockstore.NewBlockstore(bds)}
+ defer func() {
+ if c, ok := bs.(io.Closer); ok {
+ if err := c.Close(); err != nil {
+ log.Warnf("failed to close blockstore: %s", err)
+ }
+ }
+ }()
ks, err := lr.KeyStore()
if err != nil {
@@ -204,6 +200,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
sys := vm.Syscalls(&genFakeVerifier{})
tpl := genesis.Template{
+ NetworkVersion: network.Version0,
Accounts: []genesis.Actor{
{
Type: genesis.TAccount,
@@ -236,7 +233,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
return nil, xerrors.Errorf("make genesis block failed: %w", err)
}
- cs := store.NewChainStore(bs, ds, sys, j)
+ cs := store.NewChainStore(bs, bs, ds, sys, j)
genfb := &types.FullBlock{Header: genb.Genesis}
gents := store.NewFullTipSet([]*types.FullBlock{genfb})
@@ -250,7 +247,10 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
mgen[genesis2.MinerAddress(uint64(i))] = &wppProvider{}
}
- sm := stmgr.NewStateManager(cs)
+ sm, err := stmgr.NewStateManagerWithUpgradeSchedule(cs, us)
+ if err != nil {
+ return nil, xerrors.Errorf("initing stmgr: %w", err)
+ }
miners := []address.Address{maddr1, maddr2}
@@ -288,6 +288,14 @@ func NewGenerator() (*ChainGen, error) {
return NewGeneratorWithSectors(1)
}
+func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
+ return NewGeneratorWithSectorsAndUpgradeSchedule(numSectors, stmgr.DefaultUpgradeSchedule())
+}
+
+func NewGeneratorWithUpgradeSchedule(us stmgr.UpgradeSchedule) (*ChainGen, error) {
+ return NewGeneratorWithSectorsAndUpgradeSchedule(1, us)
+}
+
func (cg *ChainGen) StateManager() *stmgr.StateManager {
return cg.sm
}
@@ -338,14 +346,8 @@ func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m add
return nil, nil, nil, xerrors.Errorf("get miner base info: %w", err)
}
- prev := mbi.PrevBeaconEntry
-
- entries, err := beacon.BeaconEntriesForBlock(ctx, cg.beacon, round, pts.Height(), prev)
- if err != nil {
- return nil, nil, nil, xerrors.Errorf("get beacon entries for block: %w", err)
- }
-
- rbase := prev
+ entries := mbi.BeaconEntries
+ rbase := mbi.PrevBeaconEntry
if len(entries) > 0 {
rbase = entries[len(entries)-1]
}
@@ -396,7 +398,7 @@ type MinedTipSet struct {
}
func (cg *ChainGen) NextTipSet() (*MinedTipSet, error) {
- mts, err := cg.NextTipSetFromMiners(cg.CurTipset.TipSet(), cg.Miners)
+ mts, err := cg.NextTipSetFromMiners(cg.CurTipset.TipSet(), cg.Miners, 0)
if err != nil {
return nil, err
}
@@ -409,7 +411,7 @@ func (cg *ChainGen) SetWinningPoStProver(m address.Address, wpp WinningPoStProve
cg.eppProvs[m] = wpp
}
-func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address) (*MinedTipSet, error) {
+func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address, nulls abi.ChainEpoch) (*MinedTipSet, error) {
ms, err := cg.GetMessages(cg)
if err != nil {
return nil, xerrors.Errorf("get random messages: %w", err)
@@ -420,21 +422,23 @@ func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Ad
msgs[i] = ms
}
- fts, err := cg.NextTipSetFromMinersWithMessages(base, miners, msgs)
+ fts, err := cg.NextTipSetFromMinersWithMessagesAndNulls(base, miners, msgs, nulls)
if err != nil {
return nil, err
}
+ cg.CurTipset = fts
+
return &MinedTipSet{
TipSet: fts,
Messages: ms,
}, nil
}
-func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage) (*store.FullTipSet, error) {
+func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch) (*store.FullTipSet, error) {
var blks []*types.FullBlock
- for round := base.Height() + 1; len(blks) == 0; round++ {
+ for round := base.Height() + nulls + 1; len(blks) == 0; round++ {
for mi, m := range miners {
bvals, et, ticket, err := cg.nextBlockProof(context.TODO(), base, m, round)
if err != nil {
@@ -462,12 +466,19 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners
}
}
- return store.NewFullTipSet(blks), nil
+ fts := store.NewFullTipSet(blks)
+ if err := cg.cs.PutTipSet(context.TODO(), fts.TipSet()); err != nil {
+ return nil, err
+ }
+
+ cg.CurTipset = fts
+
+ return fts, nil
}
func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket,
eticket *types.ElectionProof, bvals []types.BeaconEntry, height abi.ChainEpoch,
- wpost []proof2.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) {
+ wpost []proof5.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) {
var ts uint64
if cg.Timestamper != nil {
@@ -581,7 +592,11 @@ func (mca mca) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipS
return nil, xerrors.Errorf("loading tipset key: %w", err)
}
- return mca.sm.ChainStore().GetChainRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy)
+ if randEpoch > build.UpgradeHyperdriveHeight {
+ return mca.sm.ChainStore().GetChainRandomnessLookingForward(ctx, pts.Cids(), personalization, randEpoch, entropy)
+ }
+
+ return mca.sm.ChainStore().GetChainRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy)
}
func (mca mca) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
@@ -590,7 +605,11 @@ func (mca mca) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSe
return nil, xerrors.Errorf("loading tipset key: %w", err)
}
- return mca.sm.ChainStore().GetBeaconRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy)
+ if randEpoch > build.UpgradeHyperdriveHeight {
+ return mca.sm.ChainStore().GetBeaconRandomnessLookingForward(ctx, pts.Cids(), personalization, randEpoch, entropy)
+ }
+
+ return mca.sm.ChainStore().GetBeaconRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy)
}
func (mca mca) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) {
@@ -605,7 +624,7 @@ func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*cr
type WinningPoStProver interface {
GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error)
- ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error)
+ ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error)
}
type wppProvider struct{}
@@ -614,7 +633,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom
return []uint64{0}, nil
}
-func (wpp *wppProvider) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) {
+func (wpp *wppProvider) ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) {
return ValidWpostForTesting, nil
}
@@ -681,15 +700,19 @@ type genFakeVerifier struct{}
var _ ffiwrapper.Verifier = (*genFakeVerifier)(nil)
-func (m genFakeVerifier) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) {
+func (m genFakeVerifier) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) {
return true, nil
}
-func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) {
+func (m genFakeVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
+ panic("not supported")
+}
+
+func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
panic("not supported")
}
-func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) {
+func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
panic("not supported")
}
diff --git a/chain/gen/genesis/f00_system.go b/chain/gen/genesis/f00_system.go
new file mode 100644
index 00000000000..4fde2710745
--- /dev/null
+++ b/chain/gen/genesis/f00_system.go
@@ -0,0 +1,42 @@
+package genesis
+
+import (
+ "context"
+
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/system"
+
+ cbor "github.com/ipfs/go-ipld-cbor"
+
+ bstore "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func SetupSystemActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) {
+
+ cst := cbor.NewCborStore(bs)
+ st, err := system.MakeState(adt.WrapStore(ctx, cst), av)
+ if err != nil {
+ return nil, err
+ }
+
+ statecid, err := cst.Put(ctx, st.GetState())
+ if err != nil {
+ return nil, err
+ }
+
+ actcid, err := system.GetActorCodeID(av)
+ if err != nil {
+ return nil, err
+ }
+
+ act := &types.Actor{
+ Code: actcid,
+ Head: statecid,
+ Balance: big.Zero(),
+ }
+
+ return act, nil
+}
diff --git a/chain/gen/genesis/t01_init.go b/chain/gen/genesis/f01_init.go
similarity index 61%
rename from chain/gen/genesis/t01_init.go
rename to chain/gen/genesis/f01_init.go
index 667079a6db6..61ec917036a 100644
--- a/chain/gen/genesis/t01_init.go
+++ b/chain/gen/genesis/f01_init.go
@@ -5,33 +5,44 @@ import (
"encoding/json"
"fmt"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
- "github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/util/adt"
- init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
cbor "github.com/ipfs/go-ipld-cbor"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
+ bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/genesis"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
)
-func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesis.Actor, rootVerifier genesis.Actor) (int64, *types.Actor, map[address.Address]address.Address, error) {
+func SetupInitActor(ctx context.Context, bs bstore.Blockstore, netname string, initialActors []genesis.Actor, rootVerifier genesis.Actor, remainder genesis.Actor, av actors.Version) (int64, *types.Actor, map[address.Address]address.Address, error) {
if len(initialActors) > MaxAccounts {
return 0, nil, nil, xerrors.New("too many initial actors")
}
- var ias init_.State
- ias.NextID = MinerStart
- ias.NetworkName = netname
+ cst := cbor.NewCborStore(bs)
+ ist, err := init_.MakeState(adt.WrapStore(ctx, cst), av, netname)
+ if err != nil {
+ return 0, nil, nil, err
+ }
+
+ if err = ist.SetNextID(MinerStart); err != nil {
+ return 0, nil, nil, err
+ }
- store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs))
- amap := adt.MakeEmptyMap(store)
+ amap, err := ist.AddressMap()
+ if err != nil {
+ return 0, nil, nil, err
+ }
keyToId := map[address.Address]address.Address{}
counter := int64(AccountStart)
@@ -90,19 +101,10 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
}
}
- if rootVerifier.Type == genesis.TAccount {
- var ainfo genesis.AccountMeta
- if err := json.Unmarshal(rootVerifier.Meta, &ainfo); err != nil {
- return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
- }
- value := cbg.CborInt(80)
- if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil {
- return 0, nil, nil, err
- }
- } else if rootVerifier.Type == genesis.TMultisig {
+ setupMsig := func(meta json.RawMessage) error {
var ainfo genesis.MultisigMeta
- if err := json.Unmarshal(rootVerifier.Meta, &ainfo); err != nil {
- return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
+ if err := json.Unmarshal(meta, &ainfo); err != nil {
+ return xerrors.Errorf("unmarshaling account meta: %w", err)
}
for _, e := range ainfo.Signers {
if _, ok := keyToId[e]; ok {
@@ -112,32 +114,77 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
value := cbg.CborInt(counter)
if err := amap.Put(abi.AddrKey(e), &value); err != nil {
- return 0, nil, nil, err
+ return err
}
counter = counter + 1
var err error
keyToId[e], err = address.NewIDAddress(uint64(value))
if err != nil {
- return 0, nil, nil, err
+ return err
}
}
+
+ return nil
+ }
+
+ if rootVerifier.Type == genesis.TAccount {
+ var ainfo genesis.AccountMeta
+ if err := json.Unmarshal(rootVerifier.Meta, &ainfo); err != nil {
+ return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
+ }
+ value := cbg.CborInt(80)
+ if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil {
+ return 0, nil, nil, err
+ }
+ } else if rootVerifier.Type == genesis.TMultisig {
+ err := setupMsig(rootVerifier.Meta)
+ if err != nil {
+ return 0, nil, nil, xerrors.Errorf("setting up root verifier msig: %w", err)
+ }
+ }
+
+ if remainder.Type == genesis.TAccount {
+ var ainfo genesis.AccountMeta
+ if err := json.Unmarshal(remainder.Meta, &ainfo); err != nil {
+ return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
+ }
+
+ // TODO: Use builtin.ReserveAddress...
+ value := cbg.CborInt(90)
+ if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil {
+ return 0, nil, nil, err
+ }
+ } else if remainder.Type == genesis.TMultisig {
+ err := setupMsig(remainder.Meta)
+ if err != nil {
+ return 0, nil, nil, xerrors.Errorf("setting up remainder msig: %w", err)
+ }
}
amapaddr, err := amap.Root()
if err != nil {
return 0, nil, nil, err
}
- ias.AddressMap = amapaddr
- statecid, err := store.Put(store.Context(), &ias)
+ if err = ist.SetAddressMap(amapaddr); err != nil {
+ return 0, nil, nil, err
+ }
+
+ statecid, err := cst.Put(ctx, ist.GetState())
+ if err != nil {
+ return 0, nil, nil, err
+ }
+
+ actcid, err := init_.GetActorCodeID(av)
if err != nil {
return 0, nil, nil, err
}
act := &types.Actor{
- Code: builtin.InitActorCodeID,
- Head: statecid,
+ Code: actcid,
+ Head: statecid,
+ Balance: big.Zero(),
}
return counter, act, keyToId, nil
diff --git a/chain/gen/genesis/f02_reward.go b/chain/gen/genesis/f02_reward.go
new file mode 100644
index 00000000000..c8f479722f1
--- /dev/null
+++ b/chain/gen/genesis/f02_reward.go
@@ -0,0 +1,43 @@
+package genesis
+
+import (
+ "context"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/reward"
+
+ "github.com/filecoin-project/go-state-types/big"
+
+ cbor "github.com/ipfs/go-ipld-cbor"
+
+ bstore "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func SetupRewardActor(ctx context.Context, bs bstore.Blockstore, qaPower big.Int, av actors.Version) (*types.Actor, error) {
+ cst := cbor.NewCborStore(bs)
+ rst, err := reward.MakeState(adt.WrapStore(ctx, cst), av, qaPower)
+ if err != nil {
+ return nil, err
+ }
+
+ statecid, err := cst.Put(ctx, rst.GetState())
+ if err != nil {
+ return nil, err
+ }
+
+ actcid, err := reward.GetActorCodeID(av)
+ if err != nil {
+ return nil, err
+ }
+
+ act := &types.Actor{
+ Code: actcid,
+ Balance: types.BigInt{Int: build.InitialRewardBalance},
+ Head: statecid,
+ }
+
+ return act, nil
+}
diff --git a/chain/gen/genesis/f03_cron.go b/chain/gen/genesis/f03_cron.go
new file mode 100644
index 00000000000..c9dd0d34117
--- /dev/null
+++ b/chain/gen/genesis/f03_cron.go
@@ -0,0 +1,41 @@
+package genesis
+
+import (
+ "context"
+
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/cron"
+
+ cbor "github.com/ipfs/go-ipld-cbor"
+
+ bstore "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func SetupCronActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) {
+ cst := cbor.NewCborStore(bs)
+ st, err := cron.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av)
+ if err != nil {
+ return nil, err
+ }
+
+ statecid, err := cst.Put(ctx, st.GetState())
+ if err != nil {
+ return nil, err
+ }
+
+ actcid, err := cron.GetActorCodeID(av)
+ if err != nil {
+ return nil, err
+ }
+
+ act := &types.Actor{
+ Code: actcid,
+ Head: statecid,
+ Balance: big.Zero(),
+ }
+
+ return act, nil
+}
diff --git a/chain/gen/genesis/f04_power.go b/chain/gen/genesis/f04_power.go
new file mode 100644
index 00000000000..b5e08cebe5a
--- /dev/null
+++ b/chain/gen/genesis/f04_power.go
@@ -0,0 +1,43 @@
+package genesis
+
+import (
+ "context"
+
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/specs-actors/actors/util/adt"
+
+ cbor "github.com/ipfs/go-ipld-cbor"
+
+ bstore "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func SetupStoragePowerActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) {
+
+ cst := cbor.NewCborStore(bs)
+ pst, err := power.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av)
+ if err != nil {
+ return nil, err
+ }
+
+ statecid, err := cst.Put(ctx, pst.GetState())
+ if err != nil {
+ return nil, err
+ }
+
+ actcid, err := power.GetActorCodeID(av)
+ if err != nil {
+ return nil, err
+ }
+
+ act := &types.Actor{
+ Code: actcid,
+ Head: statecid,
+ Balance: big.Zero(),
+ }
+
+ return act, nil
+}
diff --git a/chain/gen/genesis/f05_market.go b/chain/gen/genesis/f05_market.go
new file mode 100644
index 00000000000..ac32294c9f9
--- /dev/null
+++ b/chain/gen/genesis/f05_market.go
@@ -0,0 +1,41 @@
+package genesis
+
+import (
+ "context"
+
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+
+ cbor "github.com/ipfs/go-ipld-cbor"
+
+ bstore "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+func SetupStorageMarketActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) {
+ cst := cbor.NewCborStore(bs)
+ mst, err := market.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av)
+ if err != nil {
+ return nil, err
+ }
+
+ statecid, err := cst.Put(ctx, mst.GetState())
+ if err != nil {
+ return nil, err
+ }
+
+ actcid, err := market.GetActorCodeID(av)
+ if err != nil {
+ return nil, err
+ }
+
+ act := &types.Actor{
+ Code: actcid,
+ Head: statecid,
+ Balance: big.Zero(),
+ }
+
+ return act, nil
+}
diff --git a/chain/gen/genesis/f06_vreg.go b/chain/gen/genesis/f06_vreg.go
new file mode 100644
index 00000000000..e61c951f50c
--- /dev/null
+++ b/chain/gen/genesis/f06_vreg.go
@@ -0,0 +1,56 @@
+package genesis
+
+import (
+ "context"
+
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+
+ "github.com/filecoin-project/go-address"
+ cbor "github.com/ipfs/go-ipld-cbor"
+
+ "github.com/filecoin-project/specs-actors/actors/util/adt"
+
+ bstore "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+var RootVerifierID address.Address
+
+func init() {
+
+ idk, err := address.NewFromString("t080")
+ if err != nil {
+ panic(err)
+ }
+
+ RootVerifierID = idk
+}
+
+func SetupVerifiedRegistryActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) {
+ cst := cbor.NewCborStore(bs)
+ vst, err := verifreg.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av, RootVerifierID)
+ if err != nil {
+ return nil, err
+ }
+
+ statecid, err := cst.Put(ctx, vst.GetState())
+ if err != nil {
+ return nil, err
+ }
+
+ actcid, err := verifreg.GetActorCodeID(av)
+ if err != nil {
+ return nil, err
+ }
+
+ act := &types.Actor{
+ Code: actcid,
+ Head: statecid,
+ Balance: big.Zero(),
+ }
+
+ return act, nil
+}
diff --git a/chain/gen/genesis/genesis.go b/chain/gen/genesis/genesis.go
index 6a1090784d2..6dec3fea6d4 100644
--- a/chain/gen/genesis/genesis.go
+++ b/chain/gen/genesis/genesis.go
@@ -6,7 +6,34 @@ import (
"encoding/json"
"fmt"
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+
+ "github.com/filecoin-project/go-state-types/network"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/account"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/cron"
+
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/reward"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/system"
+
"github.com/filecoin-project/lotus/chain/actors/builtin"
+
"github.com/filecoin-project/lotus/journal"
"github.com/ipfs/go-cid"
@@ -20,19 +47,14 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
- builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
- account0 "github.com/filecoin-project/specs-actors/actors/builtin/account"
- multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
- verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
- adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+ bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/genesis"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/lib/sigs"
)
@@ -117,94 +139,92 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return nil, nil, xerrors.Errorf("putting empty object: %w", err)
}
- state, err := state.NewStateTree(cst, types.StateTreeVersion0)
+ sv, err := state.VersionForNetwork(template.NetworkVersion)
+ if err != nil {
+ return nil, nil, xerrors.Errorf("getting state tree version: %w", err)
+ }
+
+ state, err := state.NewStateTree(cst, sv)
if err != nil {
return nil, nil, xerrors.Errorf("making new state tree: %w", err)
}
+ av := actors.VersionForNetwork(template.NetworkVersion)
+
// Create system actor
- sysact, err := SetupSystemActor(bs)
+ sysact, err := SetupSystemActor(ctx, bs, av)
if err != nil {
- return nil, nil, xerrors.Errorf("setup init actor: %w", err)
+ return nil, nil, xerrors.Errorf("setup system actor: %w", err)
}
- if err := state.SetActor(builtin0.SystemActorAddr, sysact); err != nil {
- return nil, nil, xerrors.Errorf("set init actor: %w", err)
+ if err := state.SetActor(system.Address, sysact); err != nil {
+ return nil, nil, xerrors.Errorf("set system actor: %w", err)
}
// Create init actor
- idStart, initact, keyIDs, err := SetupInitActor(bs, template.NetworkName, template.Accounts, template.VerifregRootKey)
+ idStart, initact, keyIDs, err := SetupInitActor(ctx, bs, template.NetworkName, template.Accounts, template.VerifregRootKey, template.RemainderAccount, av)
if err != nil {
return nil, nil, xerrors.Errorf("setup init actor: %w", err)
}
- if err := state.SetActor(builtin0.InitActorAddr, initact); err != nil {
+ if err := state.SetActor(init_.Address, initact); err != nil {
return nil, nil, xerrors.Errorf("set init actor: %w", err)
}
// Setup reward
- // RewardActor's state is overrwritten by SetupStorageMiners
- rewact, err := SetupRewardActor(bs, big.Zero())
+ // RewardActor's state is overwritten by SetupStorageMiners, but needs to exist for miner creation messages
+ rewact, err := SetupRewardActor(ctx, bs, big.Zero(), av)
if err != nil {
- return nil, nil, xerrors.Errorf("setup init actor: %w", err)
+ return nil, nil, xerrors.Errorf("setup reward actor: %w", err)
}
- err = state.SetActor(builtin0.RewardActorAddr, rewact)
+ err = state.SetActor(reward.Address, rewact)
if err != nil {
- return nil, nil, xerrors.Errorf("set network account actor: %w", err)
+ return nil, nil, xerrors.Errorf("set reward actor: %w", err)
}
// Setup cron
- cronact, err := SetupCronActor(bs)
+ cronact, err := SetupCronActor(ctx, bs, av)
if err != nil {
return nil, nil, xerrors.Errorf("setup cron actor: %w", err)
}
- if err := state.SetActor(builtin0.CronActorAddr, cronact); err != nil {
+ if err := state.SetActor(cron.Address, cronact); err != nil {
return nil, nil, xerrors.Errorf("set cron actor: %w", err)
}
// Create empty power actor
- spact, err := SetupStoragePowerActor(bs)
+ spact, err := SetupStoragePowerActor(ctx, bs, av)
if err != nil {
- return nil, nil, xerrors.Errorf("setup storage market actor: %w", err)
+ return nil, nil, xerrors.Errorf("setup storage power actor: %w", err)
}
- if err := state.SetActor(builtin0.StoragePowerActorAddr, spact); err != nil {
- return nil, nil, xerrors.Errorf("set storage market actor: %w", err)
+ if err := state.SetActor(power.Address, spact); err != nil {
+ return nil, nil, xerrors.Errorf("set storage power actor: %w", err)
}
// Create empty market actor
- marketact, err := SetupStorageMarketActor(bs)
+ marketact, err := SetupStorageMarketActor(ctx, bs, av)
if err != nil {
return nil, nil, xerrors.Errorf("setup storage market actor: %w", err)
}
- if err := state.SetActor(builtin0.StorageMarketActorAddr, marketact); err != nil {
- return nil, nil, xerrors.Errorf("set market actor: %w", err)
+ if err := state.SetActor(market.Address, marketact); err != nil {
+ return nil, nil, xerrors.Errorf("set storage market actor: %w", err)
}
// Create verified registry
- verifact, err := SetupVerifiedRegistryActor(bs)
+ verifact, err := SetupVerifiedRegistryActor(ctx, bs, av)
if err != nil {
- return nil, nil, xerrors.Errorf("setup storage market actor: %w", err)
+ return nil, nil, xerrors.Errorf("setup verified registry market actor: %w", err)
}
- if err := state.SetActor(builtin0.VerifiedRegistryActorAddr, verifact); err != nil {
- return nil, nil, xerrors.Errorf("set market actor: %w", err)
+ if err := state.SetActor(verifreg.Address, verifact); err != nil {
+ return nil, nil, xerrors.Errorf("set verified registry actor: %w", err)
}
- burntRoot, err := cst.Put(ctx, &account0.State{
- Address: builtin0.BurntFundsActorAddr,
- })
+ bact, err := makeAccountActor(ctx, cst, av, builtin.BurntFundsActorAddr, big.Zero())
if err != nil {
- return nil, nil, xerrors.Errorf("failed to setup burnt funds actor state: %w", err)
+ return nil, nil, xerrors.Errorf("setup burnt funds actor state: %w", err)
}
-
- // Setup burnt-funds
- err = state.SetActor(builtin0.BurntFundsActorAddr, &types.Actor{
- Code: builtin0.AccountActorCodeID,
- Balance: types.NewInt(0),
- Head: burntRoot,
- })
- if err != nil {
- return nil, nil, xerrors.Errorf("set burnt funds account actor: %w", err)
+ if err := state.SetActor(builtin.BurntFundsActorAddr, bact); err != nil {
+ return nil, nil, xerrors.Errorf("set burnt funds actor: %w", err)
}
// Create accounts
@@ -212,7 +232,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
switch info.Type {
case genesis.TAccount:
- if err := createAccountActor(ctx, cst, state, info, keyIDs); err != nil {
+ if err := createAccountActor(ctx, cst, state, info, keyIDs, av); err != nil {
return nil, nil, xerrors.Errorf("failed to create account actor: %w", err)
}
@@ -224,7 +244,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
}
idStart++
- if err := createMultisigAccount(ctx, bs, cst, state, ida, info, keyIDs); err != nil {
+ if err := createMultisigAccount(ctx, cst, state, ida, info, keyIDs, av); err != nil {
return nil, nil, err
}
default:
@@ -233,13 +253,31 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
}
- vregroot, err := address.NewIDAddress(80)
- if err != nil {
- return nil, nil, err
- }
+ switch template.VerifregRootKey.Type {
+ case genesis.TAccount:
+ var ainfo genesis.AccountMeta
+ if err := json.Unmarshal(template.VerifregRootKey.Meta, &ainfo); err != nil {
+ return nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
+ }
- if err = createMultisigAccount(ctx, bs, cst, state, vregroot, template.VerifregRootKey, keyIDs); err != nil {
- return nil, nil, xerrors.Errorf("failed to set up verified registry signer: %w", err)
+ _, ok := keyIDs[ainfo.Owner]
+ if ok {
+ return nil, nil, fmt.Errorf("rootkey account has already been declared, cannot be assigned 80: %s", ainfo.Owner)
+ }
+
+ vact, err := makeAccountActor(ctx, cst, av, ainfo.Owner, template.VerifregRootKey.Balance)
+ if err != nil {
+ return nil, nil, xerrors.Errorf("setup verifreg rootkey account state: %w", err)
+ }
+ if err = state.SetActor(builtin.RootVerifierAddress, vact); err != nil {
+ return nil, nil, xerrors.Errorf("set verifreg rootkey account actor: %w", err)
+ }
+ case genesis.TMultisig:
+ if err = createMultisigAccount(ctx, cst, state, builtin.RootVerifierAddress, template.VerifregRootKey, keyIDs, av); err != nil {
+ return nil, nil, xerrors.Errorf("failed to set up verified registry signer: %w", err)
+ }
+ default:
+ return nil, nil, xerrors.Errorf("unknown account type for verifreg rootkey: %w", err)
}
// Setup the first verifier as ID-address 81
@@ -264,27 +302,21 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return nil, nil, err
}
- verifierState, err := cst.Put(ctx, &account0.State{Address: verifierAd})
+ verifierAct, err := makeAccountActor(ctx, cst, av, verifierAd, big.Zero())
if err != nil {
- return nil, nil, err
+ return nil, nil, xerrors.Errorf("setup first verifier state: %w", err)
}
- err = state.SetActor(verifierId, &types.Actor{
- Code: builtin0.AccountActorCodeID,
- Balance: types.NewInt(0),
- Head: verifierState,
- })
- if err != nil {
- return nil, nil, xerrors.Errorf("setting account from actmap: %w", err)
+ if err = state.SetActor(verifierId, verifierAct); err != nil {
+ return nil, nil, xerrors.Errorf("set first verifier actor: %w", err)
}
totalFilAllocated := big.Zero()
- // flush as ForEach works on the HAMT
- if _, err := state.Flush(ctx); err != nil {
- return nil, nil, err
- }
err = state.ForEach(func(addr address.Address, act *types.Actor) error {
+ if act.Balance.Nil() {
+ panic(fmt.Sprintf("actor %s (%s) has nil balance", addr, builtin.ActorNameByCode(act.Code)))
+ }
totalFilAllocated = big.Add(totalFilAllocated, act.Balance)
return nil
})
@@ -300,19 +332,67 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
template.RemainderAccount.Balance = remainingFil
- if err := createMultisigAccount(ctx, bs, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs); err != nil {
- return nil, nil, xerrors.Errorf("failed to set up remainder account: %w", err)
+ switch template.RemainderAccount.Type {
+ case genesis.TAccount:
+ var ainfo genesis.AccountMeta
+ if err := json.Unmarshal(template.RemainderAccount.Meta, &ainfo); err != nil {
+ return nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
+ }
+
+ _, ok := keyIDs[ainfo.Owner]
+ if ok {
+ return nil, nil, fmt.Errorf("remainder account has already been declared, cannot be assigned 90: %s", ainfo.Owner)
+ }
+
+ keyIDs[ainfo.Owner] = builtin.ReserveAddress
+ err = createAccountActor(ctx, cst, state, template.RemainderAccount, keyIDs, av)
+ if err != nil {
+ return nil, nil, xerrors.Errorf("creating remainder acct: %w", err)
+ }
+
+ case genesis.TMultisig:
+ if err = createMultisigAccount(ctx, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs, av); err != nil {
+ return nil, nil, xerrors.Errorf("failed to set up remainder: %w", err)
+ }
+ default:
+ return nil, nil, xerrors.Errorf("unknown account type for remainder: %w", err)
}
return state, keyIDs, nil
}
-func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, info genesis.Actor, keyIDs map[address.Address]address.Address) error {
+func makeAccountActor(ctx context.Context, cst cbor.IpldStore, av actors.Version, addr address.Address, bal types.BigInt) (*types.Actor, error) {
+ ast, err := account.MakeState(adt.WrapStore(ctx, cst), av, addr)
+ if err != nil {
+ return nil, err
+ }
+
+ statecid, err := cst.Put(ctx, ast.GetState())
+ if err != nil {
+ return nil, err
+ }
+
+ actcid, err := account.GetActorCodeID(av)
+ if err != nil {
+ return nil, err
+ }
+
+ act := &types.Actor{
+ Code: actcid,
+ Head: statecid,
+ Balance: bal,
+ }
+
+ return act, nil
+}
+
+func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, info genesis.Actor, keyIDs map[address.Address]address.Address, av actors.Version) error {
var ainfo genesis.AccountMeta
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
return xerrors.Errorf("unmarshaling account meta: %w", err)
}
- st, err := cst.Put(ctx, &account0.State{Address: ainfo.Owner})
+
+ aa, err := makeAccountActor(ctx, cst, av, ainfo.Owner, info.Balance)
if err != nil {
return err
}
@@ -322,18 +402,14 @@ func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.St
return fmt.Errorf("no registered ID for account actor: %s", ainfo.Owner)
}
- err = state.SetActor(ida, &types.Actor{
- Code: builtin0.AccountActorCodeID,
- Balance: info.Balance,
- Head: st,
- })
+ err = state.SetActor(ida, aa)
if err != nil {
return xerrors.Errorf("setting account from actmap: %w", err)
}
return nil
}
-func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.IpldStore, state *state.StateTree, ida address.Address, info genesis.Actor, keyIDs map[address.Address]address.Address) error {
+func createMultisigAccount(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, ida address.Address, info genesis.Actor, keyIDs map[address.Address]address.Address, av actors.Version) error {
if info.Type != genesis.TMultisig {
return fmt.Errorf("can only call createMultisigAccount with multisig Actor info")
}
@@ -341,10 +417,6 @@ func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.I
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
return xerrors.Errorf("unmarshaling account meta: %w", err)
}
- pending, err := adt0.MakeEmptyMap(adt0.WrapStore(ctx, cst)).Root()
- if err != nil {
- return xerrors.Errorf("failed to create empty map: %v", err)
- }
var signers []address.Address
@@ -361,44 +433,45 @@ func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.I
continue
}
- st, err := cst.Put(ctx, &account0.State{Address: e})
+ aa, err := makeAccountActor(ctx, cst, av, e, big.Zero())
if err != nil {
return err
}
- err = state.SetActor(idAddress, &types.Actor{
- Code: builtin0.AccountActorCodeID,
- Balance: types.NewInt(0),
- Head: st,
- })
- if err != nil {
+
+ if err = state.SetActor(idAddress, aa); err != nil {
return xerrors.Errorf("setting account from actmap: %w", err)
}
signers = append(signers, idAddress)
}
- st, err := cst.Put(ctx, &multisig0.State{
- Signers: signers,
- NumApprovalsThreshold: uint64(ainfo.Threshold),
- StartEpoch: abi.ChainEpoch(ainfo.VestingStart),
- UnlockDuration: abi.ChainEpoch(ainfo.VestingDuration),
- PendingTxns: pending,
- InitialBalance: info.Balance,
- })
+ mst, err := multisig.MakeState(adt.WrapStore(ctx, cst), av, signers, uint64(ainfo.Threshold), abi.ChainEpoch(ainfo.VestingStart), abi.ChainEpoch(ainfo.VestingDuration), info.Balance)
if err != nil {
return err
}
+
+ statecid, err := cst.Put(ctx, mst.GetState())
+ if err != nil {
+ return err
+ }
+
+ actcid, err := multisig.GetActorCodeID(av)
+ if err != nil {
+ return err
+ }
+
err = state.SetActor(ida, &types.Actor{
- Code: builtin0.MultisigActorCodeID,
+ Code: actcid,
Balance: info.Balance,
- Head: st,
+ Head: statecid,
})
if err != nil {
return xerrors.Errorf("setting account from actmap: %w", err)
}
+
return nil
}
-func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot cid.Cid, template genesis.Template, keyIDs map[address.Address]address.Address) (cid.Cid, error) {
+func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot cid.Cid, template genesis.Template, keyIDs map[address.Address]address.Address, nv network.Version) (cid.Cid, error) {
verifNeeds := make(map[address.Address]abi.PaddedPieceSize)
var sum abi.PaddedPieceSize
@@ -406,11 +479,13 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci
StateBase: stateroot,
Epoch: 0,
Rand: &fakeRand{},
- Bstore: cs.Blockstore(),
+ Bstore: cs.StateBlockstore(),
Syscalls: mkFakedSigSyscalls(cs.VMSys()),
CircSupplyCalc: nil,
- NtwkVersion: genesisNetworkVersion,
- BaseFee: types.NewInt(0),
+ NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version {
+ return nv
+ },
+ BaseFee: types.NewInt(0),
}
vm, err := vm.NewVM(ctx, &vmopt)
if err != nil {
@@ -439,7 +514,8 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci
return cid.Undef, err
}
- _, err = doExecValue(ctx, vm, builtin0.VerifiedRegistryActorAddr, verifregRoot, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifier, mustEnc(&verifreg0.AddVerifierParams{
+ // Note: This is brittle, if the methodNum / param changes, it could break things
+ _, err = doExecValue(ctx, vm, verifreg.Address, verifregRoot, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifier, mustEnc(&verifreg0.AddVerifierParams{
Address: verifier,
Allowance: abi.NewStoragePower(int64(sum)), // eh, close enough
@@ -450,7 +526,8 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci
}
for c, amt := range verifNeeds {
- _, err := doExecValue(ctx, vm, builtin0.VerifiedRegistryActorAddr, verifier, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifiedClient, mustEnc(&verifreg0.AddVerifiedClientParams{
+ // Note: This is brittle, if the methodNum / param changes, it could break things
+ _, err := doExecValue(ctx, vm, verifreg.Address, verifier, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifiedClient, mustEnc(&verifreg0.AddVerifiedClientParams{
Address: c,
Allowance: abi.NewStoragePower(int64(amt)),
}))
@@ -482,20 +559,20 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto
}
// temp chainstore
- cs := store.NewChainStore(bs, datastore.NewMapDatastore(), sys, j)
+ cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), sys, j)
// Verify PreSealed Data
- stateroot, err = VerifyPreSealedData(ctx, cs, stateroot, template, keyIDs)
+ stateroot, err = VerifyPreSealedData(ctx, cs, stateroot, template, keyIDs, template.NetworkVersion)
if err != nil {
return nil, xerrors.Errorf("failed to verify presealed data: %w", err)
}
- stateroot, err = SetupStorageMiners(ctx, cs, stateroot, template.Miners)
+ stateroot, err = SetupStorageMiners(ctx, cs, stateroot, template.Miners, template.NetworkVersion)
if err != nil {
return nil, xerrors.Errorf("setup miners failed: %w", err)
}
- store := adt0.WrapStore(ctx, cbor.NewCborStore(bs))
+ store := adt.WrapStore(ctx, cbor.NewCborStore(bs))
emptyroot, err := adt0.MakeEmptyArray(store).Root()
if err != nil {
return nil, xerrors.Errorf("amt build failed: %w", err)
@@ -544,7 +621,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto
}
b := &types.BlockHeader{
- Miner: builtin0.SystemActorAddr,
+ Miner: system.Address,
Ticket: genesisticket,
Parents: []cid.Cid{filecoinGenesisCid},
Height: 0,
diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go
index be83a871134..e6f17d6779a 100644
--- a/chain/gen/genesis/miners.go
+++ b/chain/gen/genesis/miners.go
@@ -6,6 +6,22 @@ import (
"fmt"
"math/rand"
+ power4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/power"
+
+ reward4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/reward"
+
+ market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ "github.com/filecoin-project/go-state-types/network"
+
market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
@@ -23,13 +39,11 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
- "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
-
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward"
- runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime"
+ runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
@@ -48,7 +62,7 @@ func MinerAddress(genesisIndex uint64) address.Address {
}
type fakedSigSyscalls struct {
- runtime2.Syscalls
+ runtime5.Syscalls
}
func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error {
@@ -56,14 +70,19 @@ func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer
}
func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder {
- return func(ctx context.Context, rt *vm.Runtime) runtime2.Syscalls {
+ return func(ctx context.Context, rt *vm.Runtime) runtime5.Syscalls {
return &fakedSigSyscalls{
base(ctx, rt),
}
}
}
-func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid, miners []genesis.Miner) (cid.Cid, error) {
+// Note: Much of this is brittle, if the methodNum / param / return changes, it will break things
+func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid, miners []genesis.Miner, nv network.Version) (cid.Cid, error) {
+
+ cst := cbor.NewCborStore(cs.StateBlockstore())
+ av := actors.VersionForNetwork(nv)
+
csc := func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) {
return big.Zero(), nil
}
@@ -72,11 +91,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
StateBase: sroot,
Epoch: 0,
Rand: &fakeRand{},
- Bstore: cs.Blockstore(),
+ Bstore: cs.StateBlockstore(),
Syscalls: mkFakedSigSyscalls(cs.VMSys()),
CircSupplyCalc: csc,
- NtwkVersion: genesisNetworkVersion,
- BaseFee: types.NewInt(0),
+ NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version {
+ return nv
+ },
+ BaseFee: types.NewInt(0),
}
vm, err := vm.NewVM(ctx, vmopt)
@@ -96,12 +117,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
dealIDs []abi.DealID
}, len(miners))
+ maxPeriods := policy.GetMaxSectorExpirationExtension() / miner.WPoStProvingPeriod
for i, m := range miners {
// Create miner through power actor
i := i
m := m
- spt, err := ffiwrapper.SealProofTypeFromSectorSize(m.SectorSize)
+ spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, nv)
if err != nil {
return cid.Undef, err
}
@@ -115,7 +137,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
}
params := mustEnc(constructorParams)
- rval, err := doExecValue(ctx, vm, power.Address, m.Owner, m.PowerBalance, builtin0.MethodsPower.CreateMiner, params)
+ rval, err := doExecValue(ctx, vm, power.Address, m.Owner, m.PowerBalance, power.Methods.CreateMiner, params)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create genesis miner: %w", err)
}
@@ -131,23 +153,34 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
}
minerInfos[i].maddr = ma.IDAddress
- // TODO: ActorUpgrade
- err = vm.MutateState(ctx, minerInfos[i].maddr, func(cst cbor.IpldStore, st *miner0.State) error {
- maxPeriods := miner0.MaxSectorExpirationExtension / miner0.WPoStProvingPeriod
- minerInfos[i].presealExp = (maxPeriods-1)*miner0.WPoStProvingPeriod + st.ProvingPeriodStart - 1
+ _, err = vm.Flush(ctx)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("flushing vm: %w", err)
+ }
- return nil
- })
+ mact, err := vm.StateTree().GetActor(minerInfos[i].maddr)
if err != nil {
- return cid.Undef, xerrors.Errorf("mutating state: %w", err)
+ return cid.Undef, xerrors.Errorf("getting newly created miner actor: %w", err)
}
+
+ mst, err := miner.Load(adt.WrapStore(ctx, cst), mact)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting newly created miner state: %w", err)
+ }
+
+ pps, err := mst.GetProvingPeriodStart()
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting newly created miner proving period start: %w", err)
+ }
+
+ minerInfos[i].presealExp = (maxPeriods-1)*miner0.WPoStProvingPeriod + pps - 1
}
// Add market funds
if m.MarketBalance.GreaterThan(big.Zero()) {
params := mustEnc(&minerInfos[i].maddr)
- _, err := doExecValue(ctx, vm, market.Address, m.Worker, m.MarketBalance, builtin0.MethodsMarket.AddBalance, params)
+ _, err := doExecValue(ctx, vm, market.Address, m.Worker, m.MarketBalance, market.Methods.AddBalance, params)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create genesis miner (add balance): %w", err)
}
@@ -205,35 +238,66 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
for pi := range m.Sectors {
rawPow = types.BigAdd(rawPow, types.NewInt(uint64(m.SectorSize)))
- dweight, err := dealWeight(ctx, vm, minerInfos[i].maddr, []abi.DealID{minerInfos[i].dealIDs[pi]}, 0, minerInfos[i].presealExp)
+ dweight, vdweight, err := dealWeight(ctx, vm, minerInfos[i].maddr, []abi.DealID{minerInfos[i].dealIDs[pi]}, 0, minerInfos[i].presealExp, av)
if err != nil {
return cid.Undef, xerrors.Errorf("getting deal weight: %w", err)
}
- sectorWeight := miner0.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight.DealWeight, dweight.VerifiedDealWeight)
+ sectorWeight := builtin.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight, vdweight)
qaPow = types.BigAdd(qaPow, sectorWeight)
}
}
- err = vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error {
- st.TotalQualityAdjPower = qaPow
- st.TotalRawBytePower = rawPow
+ _, err = vm.Flush(ctx)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("flushing vm: %w", err)
+ }
+
+ pact, err := vm.StateTree().GetActor(power.Address)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting power actor: %w", err)
+ }
+
+ pst, err := power.Load(adt.WrapStore(ctx, cst), pact)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting power state: %w", err)
+ }
+
+ if err = pst.SetTotalQualityAdjPower(qaPow); err != nil {
+ return cid.Undef, xerrors.Errorf("setting TotalQualityAdjPower in power state: %w", err)
+ }
+
+ if err = pst.SetTotalRawBytePower(rawPow); err != nil {
+ return cid.Undef, xerrors.Errorf("setting TotalRawBytePower in power state: %w", err)
+ }
+
+ if err = pst.SetThisEpochQualityAdjPower(qaPow); err != nil {
+ return cid.Undef, xerrors.Errorf("setting ThisEpochQualityAdjPower in power state: %w", err)
+ }
+
+ if err = pst.SetThisEpochRawBytePower(rawPow); err != nil {
+ return cid.Undef, xerrors.Errorf("setting ThisEpochRawBytePower in power state: %w", err)
+ }
- st.ThisEpochQualityAdjPower = qaPow
- st.ThisEpochRawBytePower = rawPow
- return nil
- })
+ pcid, err := cst.Put(ctx, pst.GetState())
if err != nil {
- return cid.Undef, xerrors.Errorf("mutating state: %w", err)
+ return cid.Undef, xerrors.Errorf("putting power state: %w", err)
+ }
+
+ pact.Head = pcid
+
+ if err = vm.StateTree().SetActor(power.Address, pact); err != nil {
+ return cid.Undef, xerrors.Errorf("setting power state: %w", err)
}
- err = vm.MutateState(ctx, reward.Address, func(sct cbor.IpldStore, st *reward0.State) error {
- *st = *reward0.ConstructState(qaPow)
- return nil
- })
+ rewact, err := SetupRewardActor(ctx, cs.StateBlockstore(), big.Zero(), actors.VersionForNetwork(nv))
if err != nil {
- return cid.Undef, xerrors.Errorf("mutating state: %w", err)
+ return cid.Undef, xerrors.Errorf("setup reward actor: %w", err)
+ }
+
+ if err = vm.StateTree().SetActor(reward.Address, rewact); err != nil {
+ return cid.Undef, xerrors.Errorf("set reward actor: %w", err)
}
}
@@ -250,24 +314,55 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
Expiration: minerInfos[i].presealExp, // TODO: Allow setting externally!
}
- dweight, err := dealWeight(ctx, vm, minerInfos[i].maddr, params.DealIDs, 0, minerInfos[i].presealExp)
+ dweight, vdweight, err := dealWeight(ctx, vm, minerInfos[i].maddr, params.DealIDs, 0, minerInfos[i].presealExp, av)
if err != nil {
return cid.Undef, xerrors.Errorf("getting deal weight: %w", err)
}
- sectorWeight := miner0.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight.DealWeight, dweight.VerifiedDealWeight)
+ sectorWeight := builtin.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight, vdweight)
// we've added fake power for this sector above, remove it now
- err = vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error {
- st.TotalQualityAdjPower = types.BigSub(st.TotalQualityAdjPower, sectorWeight) //nolint:scopelint
- st.TotalRawBytePower = types.BigSub(st.TotalRawBytePower, types.NewInt(uint64(m.SectorSize)))
- return nil
- })
+
+ _, err = vm.Flush(ctx)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("flushing vm: %w", err)
+ }
+
+ pact, err := vm.StateTree().GetActor(power.Address)
if err != nil {
- return cid.Undef, xerrors.Errorf("removing fake power: %w", err)
+ return cid.Undef, xerrors.Errorf("getting power actor: %w", err)
+ }
+
+ pst, err := power.Load(adt.WrapStore(ctx, cst), pact)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting power state: %w", err)
+ }
+
+ pc, err := pst.TotalPower()
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting total power: %w", err)
+ }
+
+ if err = pst.SetTotalRawBytePower(types.BigSub(pc.RawBytePower, types.NewInt(uint64(m.SectorSize)))); err != nil {
+ return cid.Undef, xerrors.Errorf("setting TotalRawBytePower in power state: %w", err)
+ }
+
+ if err = pst.SetTotalQualityAdjPower(types.BigSub(pc.QualityAdjPower, sectorWeight)); err != nil {
+ return cid.Undef, xerrors.Errorf("setting TotalQualityAdjPower in power state: %w", err)
+ }
+
+ pcid, err := cst.Put(ctx, pst.GetState())
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("putting power state: %w", err)
+ }
+
+ pact.Head = pcid
+
+ if err = vm.StateTree().SetActor(power.Address, pact); err != nil {
+ return cid.Undef, xerrors.Errorf("setting power state: %w", err)
}
- epochReward, err := currentEpochBlockReward(ctx, vm, minerInfos[i].maddr)
+ baselinePower, rewardSmoothed, err := currentEpochBlockReward(ctx, vm, minerInfos[i].maddr, av)
if err != nil {
return cid.Undef, xerrors.Errorf("getting current epoch reward: %w", err)
}
@@ -277,13 +372,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
return cid.Undef, xerrors.Errorf("getting current total power: %w", err)
}
- pcd := miner0.PreCommitDepositForPower(epochReward.ThisEpochRewardSmoothed, tpow.QualityAdjPowerSmoothed, sectorWeight)
+ pcd := miner0.PreCommitDepositForPower(&rewardSmoothed, tpow.QualityAdjPowerSmoothed, sectorWeight)
pledge := miner0.InitialPledgeForPower(
sectorWeight,
- epochReward.ThisEpochBaselinePower,
+ baselinePower,
tpow.PledgeCollateral,
- epochReward.ThisEpochRewardSmoothed,
+ &rewardSmoothed,
tpow.QualityAdjPowerSmoothed,
circSupply(ctx, vm, minerInfos[i].maddr),
)
@@ -291,7 +386,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
pledge = big.Add(pcd, pledge)
fmt.Println(types.FIL(pledge))
- _, err = doExecValue(ctx, vm, minerInfos[i].maddr, m.Worker, pledge, builtin0.MethodsMiner.PreCommitSector, mustEnc(params))
+ _, err = doExecValue(ctx, vm, minerInfos[i].maddr, m.Worker, pledge, miner.Methods.PreCommitSector, mustEnc(params))
if err != nil {
return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err)
}
@@ -301,28 +396,84 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
Sectors: []abi.SectorNumber{preseal.SectorID},
}
- _, err = doExecValue(ctx, vm, minerInfos[i].maddr, power.Address, big.Zero(), builtin0.MethodsMiner.ConfirmSectorProofsValid, mustEnc(confirmParams))
+ _, err = doExecValue(ctx, vm, minerInfos[i].maddr, power.Address, big.Zero(), miner.Methods.ConfirmSectorProofsValid, mustEnc(confirmParams))
if err != nil {
return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err)
}
+
+ if av > actors.Version2 {
+ // post v2, we need to explicitly Claim this power since ConfirmSectorProofsValid doesn't do it anymore
+ claimParams := &power4.UpdateClaimedPowerParams{
+ RawByteDelta: types.NewInt(uint64(m.SectorSize)),
+ QualityAdjustedDelta: sectorWeight,
+ }
+
+ _, err = doExecValue(ctx, vm, power.Address, minerInfos[i].maddr, big.Zero(), power.Methods.UpdateClaimedPower, mustEnc(claimParams))
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err)
+ }
+
+ _, err = vm.Flush(ctx)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("flushing vm: %w", err)
+ }
+
+ mact, err := vm.StateTree().GetActor(minerInfos[i].maddr)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting miner actor: %w", err)
+ }
+
+ mst, err := miner.Load(adt.WrapStore(ctx, cst), mact)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting miner state: %w", err)
+ }
+
+ if err = mst.EraseAllUnproven(); err != nil {
+ return cid.Undef, xerrors.Errorf("failed to erase unproven sectors: %w", err)
+ }
+
+ mcid, err := cst.Put(ctx, mst.GetState())
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("putting miner state: %w", err)
+ }
+
+ mact.Head = mcid
+
+ if err = vm.StateTree().SetActor(minerInfos[i].maddr, mact); err != nil {
+ return cid.Undef, xerrors.Errorf("setting miner state: %w", err)
+ }
+ }
}
}
}
// Sanity-check total network power
- err = vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error {
- if !st.TotalRawBytePower.Equals(rawPow) {
- return xerrors.Errorf("st.TotalRawBytePower doesn't match previously calculated rawPow")
- }
+ _, err = vm.Flush(ctx)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("flushing vm: %w", err)
+ }
- if !st.TotalQualityAdjPower.Equals(qaPow) {
- return xerrors.Errorf("st.TotalQualityAdjPower doesn't match previously calculated qaPow")
- }
+ pact, err := vm.StateTree().GetActor(power.Address)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting power actor: %w", err)
+ }
- return nil
- })
+ pst, err := power.Load(adt.WrapStore(ctx, cst), pact)
if err != nil {
- return cid.Undef, xerrors.Errorf("mutating state: %w", err)
+ return cid.Undef, xerrors.Errorf("getting power state: %w", err)
+ }
+
+ pc, err := pst.TotalPower()
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting total power: %w", err)
+ }
+
+ if !pc.RawBytePower.Equals(rawPow) {
+ return cid.Undef, xerrors.Errorf("TotalRawBytePower (%s) doesn't match previously calculated rawPow (%s)", pc.RawBytePower, rawPow)
+ }
+
+ if !pc.QualityAdjPower.Equals(qaPow) {
+ return cid.Undef, xerrors.Errorf("QualityAdjPower (%s) doesn't match previously calculated qaPow (%s)", pc.QualityAdjPower, qaPow)
}
// TODO: Should we re-ConstructState for the reward actor using rawPow as currRealizedPower here?
@@ -337,13 +488,25 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
// TODO: copied from actors test harness, deduplicate or remove from here
type fakeRand struct{}
-func (fr *fakeRand) GetChainRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (fr *fakeRand) GetChainRandomnessLookingForward(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ out := make([]byte, 32)
+ _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint
+ return out, nil
+}
+
+func (fr *fakeRand) GetChainRandomnessLookingBack(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint
return out, nil
}
-func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (fr *fakeRand) GetBeaconRandomnessLookingForward(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ out := make([]byte, 32)
+ _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint
+ return out, nil
+}
+
+func (fr *fakeRand) GetBeaconRandomnessLookingBack(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint
return out, nil
@@ -362,43 +525,79 @@ func currentTotalPower(ctx context.Context, vm *vm.VM, maddr address.Address) (*
return &pwr, nil
}
-func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch) (market0.VerifyDealsForActivationReturn, error) {
- params := &market.VerifyDealsForActivationParams{
- DealIDs: dealIDs,
- SectorStart: sectorStart,
- SectorExpiry: sectorExpiry,
+func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch, av actors.Version) (abi.DealWeight, abi.DealWeight, error) {
+ // TODO: This hack should move to market actor wrapper
+ if av <= actors.Version2 {
+ params := &market0.VerifyDealsForActivationParams{
+ DealIDs: dealIDs,
+ SectorStart: sectorStart,
+ SectorExpiry: sectorExpiry,
+ }
+
+ var dealWeights market0.VerifyDealsForActivationReturn
+ ret, err := doExecValue(ctx, vm,
+ market.Address,
+ maddr,
+ abi.NewTokenAmount(0),
+ builtin0.MethodsMarket.VerifyDealsForActivation,
+ mustEnc(params),
+ )
+ if err != nil {
+ return big.Zero(), big.Zero(), err
+ }
+ if err := dealWeights.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
+ return big.Zero(), big.Zero(), err
+ }
+
+ return dealWeights.DealWeight, dealWeights.VerifiedDealWeight, nil
}
+ params := &market4.VerifyDealsForActivationParams{Sectors: []market4.SectorDeals{{
+ SectorExpiry: sectorExpiry,
+ DealIDs: dealIDs,
+ }}}
- var dealWeights market0.VerifyDealsForActivationReturn
+ var dealWeights market4.VerifyDealsForActivationReturn
ret, err := doExecValue(ctx, vm,
market.Address,
maddr,
abi.NewTokenAmount(0),
- builtin0.MethodsMarket.VerifyDealsForActivation,
+ market.Methods.VerifyDealsForActivation,
mustEnc(params),
)
if err != nil {
- return market0.VerifyDealsForActivationReturn{}, err
+ return big.Zero(), big.Zero(), err
}
if err := dealWeights.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
- return market0.VerifyDealsForActivationReturn{}, err
+ return big.Zero(), big.Zero(), err
}
- return dealWeights, nil
+ return dealWeights.Sectors[0].DealWeight, dealWeights.Sectors[0].VerifiedDealWeight, nil
}
-func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address) (*reward0.ThisEpochRewardReturn, error) {
- rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), builtin0.MethodsReward.ThisEpochReward, nil)
+func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address, av actors.Version) (abi.StoragePower, builtin.FilterEstimate, error) {
+ rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), reward.Methods.ThisEpochReward, nil)
if err != nil {
- return nil, err
+ return big.Zero(), builtin.FilterEstimate{}, err
}
- var epochReward reward0.ThisEpochRewardReturn
+ // TODO: This hack should move to reward actor wrapper
+ if av <= actors.Version2 {
+ var epochReward reward0.ThisEpochRewardReturn
+
+ if err := epochReward.UnmarshalCBOR(bytes.NewReader(rwret)); err != nil {
+ return big.Zero(), builtin.FilterEstimate{}, err
+ }
+
+ return epochReward.ThisEpochBaselinePower, *epochReward.ThisEpochRewardSmoothed, nil
+ }
+
+ var epochReward reward4.ThisEpochRewardReturn
+
if err := epochReward.UnmarshalCBOR(bytes.NewReader(rwret)); err != nil {
- return nil, err
+ return big.Zero(), builtin.FilterEstimate{}, err
}
- return &epochReward, nil
+ return epochReward.ThisEpochBaselinePower, builtin.FilterEstimate(epochReward.ThisEpochRewardSmoothed), nil
}
func circSupply(ctx context.Context, vmi *vm.VM, maddr address.Address) abi.TokenAmount {
diff --git a/chain/gen/genesis/t00_system.go b/chain/gen/genesis/t00_system.go
deleted file mode 100644
index 6e6cc976aba..00000000000
--- a/chain/gen/genesis/t00_system.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package genesis
-
-import (
- "context"
-
- "github.com/filecoin-project/specs-actors/actors/builtin/system"
-
- "github.com/filecoin-project/specs-actors/actors/builtin"
- cbor "github.com/ipfs/go-ipld-cbor"
-
- "github.com/filecoin-project/lotus/chain/types"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
-)
-
-func SetupSystemActor(bs bstore.Blockstore) (*types.Actor, error) {
- var st system.State
-
- cst := cbor.NewCborStore(bs)
-
- statecid, err := cst.Put(context.TODO(), &st)
- if err != nil {
- return nil, err
- }
-
- act := &types.Actor{
- Code: builtin.SystemActorCodeID,
- Head: statecid,
- }
-
- return act, nil
-}
diff --git a/chain/gen/genesis/t02_reward.go b/chain/gen/genesis/t02_reward.go
deleted file mode 100644
index 92531051b14..00000000000
--- a/chain/gen/genesis/t02_reward.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package genesis
-
-import (
- "context"
-
- "github.com/filecoin-project/go-state-types/big"
-
- "github.com/filecoin-project/specs-actors/actors/builtin"
- reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward"
- cbor "github.com/ipfs/go-ipld-cbor"
-
- "github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/chain/types"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
-)
-
-func SetupRewardActor(bs bstore.Blockstore, qaPower big.Int) (*types.Actor, error) {
- cst := cbor.NewCborStore(bs)
-
- st := reward0.ConstructState(qaPower)
-
- hcid, err := cst.Put(context.TODO(), st)
- if err != nil {
- return nil, err
- }
-
- return &types.Actor{
- Code: builtin.RewardActorCodeID,
- Balance: types.BigInt{Int: build.InitialRewardBalance},
- Head: hcid,
- }, nil
-}
diff --git a/chain/gen/genesis/t03_cron.go b/chain/gen/genesis/t03_cron.go
deleted file mode 100644
index cf2c0d7a741..00000000000
--- a/chain/gen/genesis/t03_cron.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package genesis
-
-import (
- "context"
-
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/cron"
- cbor "github.com/ipfs/go-ipld-cbor"
-
- "github.com/filecoin-project/lotus/chain/types"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
-)
-
-func SetupCronActor(bs bstore.Blockstore) (*types.Actor, error) {
- cst := cbor.NewCborStore(bs)
- cas := cron.ConstructState(cron.BuiltInEntries())
-
- stcid, err := cst.Put(context.TODO(), cas)
- if err != nil {
- return nil, err
- }
-
- return &types.Actor{
- Code: builtin.CronActorCodeID,
- Head: stcid,
- Nonce: 0,
- Balance: types.NewInt(0),
- }, nil
-}
diff --git a/chain/gen/genesis/t04_power.go b/chain/gen/genesis/t04_power.go
deleted file mode 100644
index 2f1303ba46c..00000000000
--- a/chain/gen/genesis/t04_power.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package genesis
-
-import (
- "context"
-
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
-
- power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
- cbor "github.com/ipfs/go-ipld-cbor"
-
- "github.com/filecoin-project/lotus/chain/types"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
-)
-
-func SetupStoragePowerActor(bs bstore.Blockstore) (*types.Actor, error) {
- store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs))
- emptyMap, err := adt.MakeEmptyMap(store).Root()
- if err != nil {
- return nil, err
- }
-
- multiMap, err := adt.AsMultimap(store, emptyMap)
- if err != nil {
- return nil, err
- }
-
- emptyMultiMap, err := multiMap.Root()
- if err != nil {
- return nil, err
- }
-
- sms := power0.ConstructState(emptyMap, emptyMultiMap)
-
- stcid, err := store.Put(store.Context(), sms)
- if err != nil {
- return nil, err
- }
-
- return &types.Actor{
- Code: builtin.StoragePowerActorCodeID,
- Head: stcid,
- Nonce: 0,
- Balance: types.NewInt(0),
- }, nil
-}
diff --git a/chain/gen/genesis/t05_market.go b/chain/gen/genesis/t05_market.go
deleted file mode 100644
index 615e8370ba5..00000000000
--- a/chain/gen/genesis/t05_market.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package genesis
-
-import (
- "context"
-
- "github.com/filecoin-project/specs-actors/actors/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
- cbor "github.com/ipfs/go-ipld-cbor"
-
- "github.com/filecoin-project/lotus/chain/types"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
-)
-
-func SetupStorageMarketActor(bs bstore.Blockstore) (*types.Actor, error) {
- store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs))
-
- a, err := adt.MakeEmptyArray(store).Root()
- if err != nil {
- return nil, err
- }
- h, err := adt.MakeEmptyMap(store).Root()
- if err != nil {
- return nil, err
- }
-
- sms := market.ConstructState(a, h, h)
-
- stcid, err := store.Put(store.Context(), sms)
- if err != nil {
- return nil, err
- }
-
- act := &types.Actor{
- Code: builtin.StorageMarketActorCodeID,
- Head: stcid,
- Balance: types.NewInt(0),
- }
-
- return act, nil
-}
diff --git a/chain/gen/genesis/t06_vreg.go b/chain/gen/genesis/t06_vreg.go
deleted file mode 100644
index 1709b205f1f..00000000000
--- a/chain/gen/genesis/t06_vreg.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package genesis
-
-import (
- "context"
-
- "github.com/filecoin-project/go-address"
- cbor "github.com/ipfs/go-ipld-cbor"
-
- "github.com/filecoin-project/specs-actors/actors/builtin"
- verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
- "github.com/filecoin-project/specs-actors/actors/util/adt"
-
- "github.com/filecoin-project/lotus/chain/types"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
-)
-
-var RootVerifierID address.Address
-
-func init() {
-
- idk, err := address.NewFromString("t080")
- if err != nil {
- panic(err)
- }
-
- RootVerifierID = idk
-}
-
-func SetupVerifiedRegistryActor(bs bstore.Blockstore) (*types.Actor, error) {
- store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs))
-
- h, err := adt.MakeEmptyMap(store).Root()
- if err != nil {
- return nil, err
- }
-
- sms := verifreg0.ConstructState(h, RootVerifierID)
-
- stcid, err := store.Put(store.Context(), sms)
- if err != nil {
- return nil, err
- }
-
- act := &types.Actor{
- Code: builtin.VerifiedRegistryActorCodeID,
- Head: stcid,
- Balance: types.NewInt(0),
- }
-
- return act, nil
-}
diff --git a/chain/gen/genesis/util.go b/chain/gen/genesis/util.go
index 54cc30cc168..67a4e9579a7 100644
--- a/chain/gen/genesis/util.go
+++ b/chain/gen/genesis/util.go
@@ -3,9 +3,6 @@ package genesis
import (
"context"
- "github.com/filecoin-project/go-state-types/network"
- "github.com/filecoin-project/lotus/build"
-
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
cbg "github.com/whyrusleeping/cbor-gen"
@@ -49,29 +46,3 @@ func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value
return ret.Return, nil
}
-
-// TODO: Get from build
-// TODO: make a list/schedule of these.
-var GenesisNetworkVersion = func() network.Version {
- // returns the version _before_ the first upgrade.
- if build.UpgradeBreezeHeight >= 0 {
- return network.Version0
- }
- if build.UpgradeSmokeHeight >= 0 {
- return network.Version1
- }
- if build.UpgradeIgnitionHeight >= 0 {
- return network.Version2
- }
- if build.UpgradeActorsV2Height >= 0 {
- return network.Version3
- }
- if build.UpgradeLiftoffHeight >= 0 {
- return network.Version3
- }
- return build.ActorUpgradeNetworkVersion - 1 // genesis requires actors v0.
-}()
-
-func genesisNetworkVersion(context.Context, abi.ChainEpoch) network.Version { // TODO: Get from build/
- return GenesisNetworkVersion // TODO: Get from build/
-} // TODO: Get from build/
diff --git a/chain/gen/mining.go b/chain/gen/mining.go
index cca4b61699a..1400c12c51f 100644
--- a/chain/gen/mining.go
+++ b/chain/gen/mining.go
@@ -9,13 +9,13 @@ import (
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
+ ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/lib/sigs/bls"
)
-func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletAPI, bt *api.BlockTemplate) (*types.FullBlock, error) {
+func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) {
pts, err := sm.ChainStore().LoadTipSet(bt.Parents)
if err != nil {
@@ -79,7 +79,7 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletA
}
}
- store := sm.ChainStore().Store(ctx)
+ store := sm.ChainStore().ActorStore(ctx)
blsmsgroot, err := toArray(store, blsMsgCids)
if err != nil {
return nil, xerrors.Errorf("building bls amt: %w", err)
@@ -140,35 +140,29 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletA
}
func aggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) {
- sigsS := make([][]byte, len(sigs))
+ sigsS := make([]ffi.Signature, len(sigs))
for i := 0; i < len(sigs); i++ {
- sigsS[i] = sigs[i].Data
+ copy(sigsS[i][:], sigs[i].Data[:ffi.SignatureBytes])
}
- aggregator := new(bls.AggregateSignature).AggregateCompressed(sigsS)
- if aggregator == nil {
+ aggSig := ffi.Aggregate(sigsS)
+ if aggSig == nil {
if len(sigs) > 0 {
return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs))
}
+ zeroSig := ffi.CreateZeroSignature()
+
// Note: for blst this condition should not happen - nil should not
// be returned
return &crypto.Signature{
Type: crypto.SigTypeBLS,
- Data: new(bls.Signature).Compress(),
- }, nil
- }
- aggSigAff := aggregator.ToAffine()
- if aggSigAff == nil {
- return &crypto.Signature{
- Type: crypto.SigTypeBLS,
- Data: new(bls.Signature).Compress(),
+ Data: zeroSig[:],
}, nil
}
- aggSig := aggSigAff.Compress()
return &crypto.Signature{
Type: crypto.SigTypeBLS,
- Data: aggSig,
+ Data: aggSig[:],
}, nil
}
diff --git a/chain/gen/slashfilter/slashfilter.go b/chain/gen/slashfilter/slashfilter.go
index ee04351566d..5edcd5439df 100644
--- a/chain/gen/slashfilter/slashfilter.go
+++ b/chain/gen/slashfilter/slashfilter.go
@@ -3,6 +3,8 @@ package slashfilter
import (
"fmt"
+ "github.com/filecoin-project/lotus/build"
+
"golang.org/x/xerrors"
"github.com/ipfs/go-cid"
@@ -26,6 +28,10 @@ func New(dstore ds.Batching) *SlashFilter {
}
func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error {
+ if build.IsNearUpgrade(bh.Height, build.UpgradeOrangeHeight) {
+ return nil
+ }
+
epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height))
{
// double-fork mining (2 blocks at one epoch)
diff --git a/chain/market/cbor_gen.go b/chain/market/cbor_gen.go
new file mode 100644
index 00000000000..7d9e55b3619
--- /dev/null
+++ b/chain/market/cbor_gen.go
@@ -0,0 +1,116 @@
+// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
+
+package market
+
+import (
+ "fmt"
+ "io"
+ "sort"
+
+ cid "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ xerrors "golang.org/x/xerrors"
+)
+
+var _ = xerrors.Errorf
+var _ = cid.Undef
+var _ = sort.Sort
+
+var lengthBufFundedAddressState = []byte{131}
+
+func (t *FundedAddressState) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+ if _, err := w.Write(lengthBufFundedAddressState); err != nil {
+ return err
+ }
+
+ scratch := make([]byte, 9)
+
+ // t.Addr (address.Address) (struct)
+ if err := t.Addr.MarshalCBOR(w); err != nil {
+ return err
+ }
+
+ // t.AmtReserved (big.Int) (struct)
+ if err := t.AmtReserved.MarshalCBOR(w); err != nil {
+ return err
+ }
+
+ // t.MsgCid (cid.Cid) (struct)
+
+ if t.MsgCid == nil {
+ if _, err := w.Write(cbg.CborNull); err != nil {
+ return err
+ }
+ } else {
+ if err := cbg.WriteCidBuf(scratch, w, *t.MsgCid); err != nil {
+ return xerrors.Errorf("failed to write cid field t.MsgCid: %w", err)
+ }
+ }
+
+ return nil
+}
+
+func (t *FundedAddressState) UnmarshalCBOR(r io.Reader) error {
+ *t = FundedAddressState{}
+
+ br := cbg.GetPeeker(r)
+ scratch := make([]byte, 8)
+
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 3 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.Addr (address.Address) (struct)
+
+ {
+
+ if err := t.Addr.UnmarshalCBOR(br); err != nil {
+ return xerrors.Errorf("unmarshaling t.Addr: %w", err)
+ }
+
+ }
+ // t.AmtReserved (big.Int) (struct)
+
+ {
+
+ if err := t.AmtReserved.UnmarshalCBOR(br); err != nil {
+ return xerrors.Errorf("unmarshaling t.AmtReserved: %w", err)
+ }
+
+ }
+ // t.MsgCid (cid.Cid) (struct)
+
+ {
+
+ b, err := br.ReadByte()
+ if err != nil {
+ return err
+ }
+ if b != cbg.CborNull[0] {
+ if err := br.UnreadByte(); err != nil {
+ return err
+ }
+
+ c, err := cbg.ReadCid(br)
+ if err != nil {
+ return xerrors.Errorf("failed to read cid field t.MsgCid: %w", err)
+ }
+
+ t.MsgCid = &c
+ }
+
+ }
+ return nil
+}
diff --git a/chain/market/fundmanager.go b/chain/market/fundmanager.go
new file mode 100644
index 00000000000..5becfdfa717
--- /dev/null
+++ b/chain/market/fundmanager.go
@@ -0,0 +1,726 @@
+package market
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/node/impl/full"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+ "github.com/ipfs/go-cid"
+ "github.com/ipfs/go-datastore"
+ logging "github.com/ipfs/go-log/v2"
+ "go.uber.org/fx"
+ "golang.org/x/xerrors"
+)
+
+var log = logging.Logger("market_adapter")
+
+// API is the fx dependencies need to run a fund manager
+type FundManagerAPI struct {
+ fx.In
+
+ full.StateAPI
+ full.MpoolAPI
+}
+
+// fundManagerAPI is the specific methods called by the FundManager
+// (used by the tests)
+type fundManagerAPI interface {
+ MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error)
+ StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error)
+ StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error)
+}
+
+// FundManager keeps track of funds in a set of addresses
+type FundManager struct {
+ ctx context.Context
+ shutdown context.CancelFunc
+ api fundManagerAPI
+ str *Store
+
+ lk sync.Mutex
+ fundedAddrs map[address.Address]*fundedAddress
+}
+
+func NewFundManager(lc fx.Lifecycle, api FundManagerAPI, ds dtypes.MetadataDS) *FundManager {
+ fm := newFundManager(&api, ds)
+ lc.Append(fx.Hook{
+ OnStart: func(ctx context.Context) error {
+ return fm.Start()
+ },
+ OnStop: func(ctx context.Context) error {
+ fm.Stop()
+ return nil
+ },
+ })
+ return fm
+}
+
+// newFundManager is used by the tests
+func newFundManager(api fundManagerAPI, ds datastore.Batching) *FundManager {
+ ctx, cancel := context.WithCancel(context.Background())
+ return &FundManager{
+ ctx: ctx,
+ shutdown: cancel,
+ api: api,
+ str: newStore(ds),
+ fundedAddrs: make(map[address.Address]*fundedAddress),
+ }
+}
+
+func (fm *FundManager) Stop() {
+ fm.shutdown()
+}
+
+func (fm *FundManager) Start() error {
+ fm.lk.Lock()
+ defer fm.lk.Unlock()
+
+ // TODO:
+ // To save memory:
+ // - in State() only load addresses with in-progress messages
+ // - load the others just-in-time from getFundedAddress
+ // - delete(fm.fundedAddrs, addr) when the queue has been processed
+ return fm.str.forEach(func(state *FundedAddressState) {
+ fa := newFundedAddress(fm, state.Addr)
+ fa.state = state
+ fm.fundedAddrs[fa.state.Addr] = fa
+ fa.start()
+ })
+}
+
+// Creates a fundedAddress if it doesn't already exist, and returns it
+func (fm *FundManager) getFundedAddress(addr address.Address) *fundedAddress {
+ fm.lk.Lock()
+ defer fm.lk.Unlock()
+
+ fa, ok := fm.fundedAddrs[addr]
+ if !ok {
+ fa = newFundedAddress(fm, addr)
+ fm.fundedAddrs[addr] = fa
+ }
+ return fa
+}
+
+// Reserve adds amt to `reserved`. If there are not enough available funds for
+// the address, submits a message on chain to top up available funds.
+// Returns the cid of the message that was submitted on chain, or cid.Undef if
+// the required funds were already available.
+func (fm *FundManager) Reserve(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) {
+ return fm.getFundedAddress(addr).reserve(ctx, wallet, amt)
+}
+
+// Subtract from `reserved`.
+func (fm *FundManager) Release(addr address.Address, amt abi.TokenAmount) error {
+ return fm.getFundedAddress(addr).release(amt)
+}
+
+// Withdraw unreserved funds. Only succeeds if there are enough unreserved
+// funds for the address.
+// Returns the cid of the message that was submitted on chain.
+func (fm *FundManager) Withdraw(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) {
+ return fm.getFundedAddress(addr).withdraw(ctx, wallet, amt)
+}
+
+// GetReserved returns the amount that is currently reserved for the address
+func (fm *FundManager) GetReserved(addr address.Address) abi.TokenAmount {
+ return fm.getFundedAddress(addr).getReserved()
+}
+
+// FundedAddressState keeps track of the state of an address with funds in the
+// datastore
+type FundedAddressState struct {
+ Addr address.Address
+ // AmtReserved is the amount that must be kept in the address (cannot be
+ // withdrawn)
+ AmtReserved abi.TokenAmount
+ // MsgCid is the cid of an in-progress on-chain message
+ MsgCid *cid.Cid
+}
+
+// fundedAddress keeps track of the state and request queues for a
+// particular address
+type fundedAddress struct {
+ ctx context.Context
+ env *fundManagerEnvironment
+ str *Store
+
+ lk sync.RWMutex
+ state *FundedAddressState
+
+ // Note: These request queues are ephemeral, they are not saved to store
+ reservations []*fundRequest
+ releases []*fundRequest
+ withdrawals []*fundRequest
+
+ // Used by the tests
+ onProcessStartListener func() bool
+}
+
+func newFundedAddress(fm *FundManager, addr address.Address) *fundedAddress {
+ return &fundedAddress{
+ ctx: fm.ctx,
+ env: &fundManagerEnvironment{api: fm.api},
+ str: fm.str,
+ state: &FundedAddressState{
+ Addr: addr,
+ AmtReserved: abi.NewTokenAmount(0),
+ },
+ }
+}
+
+// If there is an in-progress on-chain message, don't submit any more messages
+// on chain until it completes
+func (a *fundedAddress) start() {
+ a.lk.Lock()
+ defer a.lk.Unlock()
+
+ if a.state.MsgCid != nil {
+ a.debugf("restart: wait for %s", a.state.MsgCid)
+ a.startWaitForResults(*a.state.MsgCid)
+ }
+}
+
+func (a *fundedAddress) getReserved() abi.TokenAmount {
+ a.lk.RLock()
+ defer a.lk.RUnlock()
+
+ return a.state.AmtReserved
+}
+
+func (a *fundedAddress) reserve(ctx context.Context, wallet address.Address, amt abi.TokenAmount) (cid.Cid, error) {
+ return a.requestAndWait(ctx, wallet, amt, &a.reservations)
+}
+
+func (a *fundedAddress) release(amt abi.TokenAmount) error {
+ _, err := a.requestAndWait(context.Background(), address.Undef, amt, &a.releases)
+ return err
+}
+
+func (a *fundedAddress) withdraw(ctx context.Context, wallet address.Address, amt abi.TokenAmount) (cid.Cid, error) {
+ return a.requestAndWait(ctx, wallet, amt, &a.withdrawals)
+}
+
+func (a *fundedAddress) requestAndWait(ctx context.Context, wallet address.Address, amt abi.TokenAmount, reqs *[]*fundRequest) (cid.Cid, error) {
+ // Create a request and add it to the request queue
+ req := newFundRequest(ctx, wallet, amt)
+
+ a.lk.Lock()
+ *reqs = append(*reqs, req)
+ a.lk.Unlock()
+
+ // Process the queue
+ go a.process()
+
+ // Wait for the results
+ select {
+ case <-ctx.Done():
+ return cid.Undef, ctx.Err()
+ case r := <-req.Result:
+ return r.msgCid, r.err
+ }
+}
+
+// Used by the tests
+func (a *fundedAddress) onProcessStart(fn func() bool) {
+ a.lk.Lock()
+ defer a.lk.Unlock()
+
+ a.onProcessStartListener = fn
+}
+
+// Process queued requests
+func (a *fundedAddress) process() {
+ a.lk.Lock()
+ defer a.lk.Unlock()
+
+ // Used by the tests
+ if a.onProcessStartListener != nil {
+ done := a.onProcessStartListener()
+ if !done {
+ return
+ }
+ a.onProcessStartListener = nil
+ }
+
+ // Check if we're still waiting for the response to a message
+ if a.state.MsgCid != nil {
+ return
+ }
+
+ // Check if there's anything to do
+ haveReservations := len(a.reservations) > 0 || len(a.releases) > 0
+ haveWithdrawals := len(a.withdrawals) > 0
+ if !haveReservations && !haveWithdrawals {
+ return
+ }
+
+ // Process reservations / releases
+ if haveReservations {
+ res, err := a.processReservations(a.reservations, a.releases)
+ if err == nil {
+ a.applyStateChange(res.msgCid, res.amtReserved)
+ }
+ a.reservations = filterOutProcessedReqs(a.reservations)
+ a.releases = filterOutProcessedReqs(a.releases)
+ }
+
+ // If there was no message sent on chain by adding reservations, and all
+ // reservations have completed processing, process withdrawals
+ if haveWithdrawals && a.state.MsgCid == nil && len(a.reservations) == 0 {
+ withdrawalCid, err := a.processWithdrawals(a.withdrawals)
+ if err == nil && withdrawalCid != cid.Undef {
+ a.applyStateChange(&withdrawalCid, types.EmptyInt)
+ }
+ a.withdrawals = filterOutProcessedReqs(a.withdrawals)
+ }
+
+ // If a message was sent on-chain
+ if a.state.MsgCid != nil {
+ // Start waiting for results of message (async)
+ a.startWaitForResults(*a.state.MsgCid)
+ }
+
+ // Process any remaining queued requests
+ go a.process()
+}
+
+// Filter out completed requests
+func filterOutProcessedReqs(reqs []*fundRequest) []*fundRequest {
+ filtered := make([]*fundRequest, 0, len(reqs))
+ for _, req := range reqs {
+ if !req.Completed() {
+ filtered = append(filtered, req)
+ }
+ }
+ return filtered
+}
+
+// Apply the results of processing queues and save to the datastore
+func (a *fundedAddress) applyStateChange(msgCid *cid.Cid, amtReserved abi.TokenAmount) {
+ a.state.MsgCid = msgCid
+ if !amtReserved.Nil() {
+ a.state.AmtReserved = amtReserved
+ }
+ a.saveState()
+}
+
+// Clear the pending message cid so that a new message can be sent
+func (a *fundedAddress) clearWaitState() {
+ a.state.MsgCid = nil
+ a.saveState()
+}
+
+// Save state to datastore
+func (a *fundedAddress) saveState() {
+ // Not much we can do if saving to the datastore fails, just log
+ err := a.str.save(a.state)
+ if err != nil {
+ log.Errorf("saving state to store for addr %s: %v", a.state.Addr, err)
+ }
+}
+
+// The result of processing the reservation / release queues
+type processResult struct {
+ // Requests that completed without adding funds
+ covered []*fundRequest
+ // Requests that added funds
+ added []*fundRequest
+
+ // The new reserved amount
+ amtReserved abi.TokenAmount
+ // The message cid, if a message was submitted on-chain
+ msgCid *cid.Cid
+}
+
+// process reservations and releases, and return the resulting changes to state
+func (a *fundedAddress) processReservations(reservations []*fundRequest, releases []*fundRequest) (pr *processResult, prerr error) {
+ // When the function returns
+ defer func() {
+ // If there's an error, mark all requests as errored
+ if prerr != nil {
+ for _, req := range append(reservations, releases...) {
+ req.Complete(cid.Undef, prerr)
+ }
+ return
+ }
+
+ // Complete all release requests
+ for _, req := range releases {
+ req.Complete(cid.Undef, nil)
+ }
+
+ // Complete all requests that were covered by released amounts
+ for _, req := range pr.covered {
+ req.Complete(cid.Undef, nil)
+ }
+
+ // If a message was sent
+ if pr.msgCid != nil {
+ // Complete all add funds requests
+ for _, req := range pr.added {
+ req.Complete(*pr.msgCid, nil)
+ }
+ }
+ }()
+
+ // Split reservations into those that are covered by released amounts,
+ // and those to add to the reserved amount.
+ // Note that we process requests from the same wallet in batches. So some
+ // requests may not be included in covered if they don't match the first
+ // covered request's wallet. These will be processed on a subsequent
+ // invocation of processReservations.
+ toCancel, toAdd, reservedDelta := splitReservations(reservations, releases)
+
+ // Apply the reserved delta to the reserved amount
+ reserved := types.BigAdd(a.state.AmtReserved, reservedDelta)
+ if reserved.LessThan(abi.NewTokenAmount(0)) {
+ reserved = abi.NewTokenAmount(0)
+ }
+ res := &processResult{
+ amtReserved: reserved,
+ covered: toCancel,
+ }
+
+ // Work out the amount to add to the balance
+ amtToAdd := abi.NewTokenAmount(0)
+ if len(toAdd) > 0 && reserved.GreaterThan(abi.NewTokenAmount(0)) {
+ // Get available funds for address
+ avail, err := a.env.AvailableFunds(a.ctx, a.state.Addr)
+ if err != nil {
+ return res, err
+ }
+
+ // amount to add = new reserved amount - available
+ amtToAdd = types.BigSub(reserved, avail)
+ a.debugf("reserved %d - avail %d = to add %d", reserved, avail, amtToAdd)
+ }
+
+ // If there's nothing to add to the balance, bail out
+ if amtToAdd.LessThanEqual(abi.NewTokenAmount(0)) {
+ res.covered = append(res.covered, toAdd...)
+ return res, nil
+ }
+
+ // Add funds to address
+ a.debugf("add funds %d", amtToAdd)
+ addFundsCid, err := a.env.AddFunds(a.ctx, toAdd[0].Wallet, a.state.Addr, amtToAdd)
+ if err != nil {
+ return res, err
+ }
+
+ // Mark reservation requests as complete
+ res.added = toAdd
+
+ // Save the message CID to state
+ res.msgCid = &addFundsCid
+ return res, nil
+}
+
+// Split reservations into those that are under the total release amount
+// (covered) and those that exceed it (to add).
+// Note that we process requests from the same wallet in batches. So some
+// requests may not be included in covered if they don't match the first
+// covered request's wallet.
+func splitReservations(reservations []*fundRequest, releases []*fundRequest) ([]*fundRequest, []*fundRequest, abi.TokenAmount) {
+ toCancel := make([]*fundRequest, 0, len(reservations))
+ toAdd := make([]*fundRequest, 0, len(reservations))
+ toAddAmt := abi.NewTokenAmount(0)
+
+ // Sum release amounts
+ releaseAmt := abi.NewTokenAmount(0)
+ for _, req := range releases {
+ releaseAmt = types.BigAdd(releaseAmt, req.Amount())
+ }
+
+ // We only want to combine requests that come from the same wallet
+ batchWallet := address.Undef
+ for _, req := range reservations {
+ amt := req.Amount()
+
+ // If the amount to add to the reserve is cancelled out by a release
+ if amt.LessThanEqual(releaseAmt) {
+ // Cancel the request and update the release total
+ releaseAmt = types.BigSub(releaseAmt, amt)
+ toCancel = append(toCancel, req)
+ continue
+ }
+
+ // The amount to add is greater that the release total so we want
+ // to send an add funds request
+
+ // The first time the wallet will be undefined
+ if batchWallet == address.Undef {
+ batchWallet = req.Wallet
+ }
+ // If this request's wallet is the same as the batch wallet,
+ // the requests will be combined
+ if batchWallet == req.Wallet {
+ delta := types.BigSub(amt, releaseAmt)
+ toAddAmt = types.BigAdd(toAddAmt, delta)
+ releaseAmt = abi.NewTokenAmount(0)
+ toAdd = append(toAdd, req)
+ }
+ }
+
+ // The change in the reserved amount is "amount to add" - "amount to release"
+ reservedDelta := types.BigSub(toAddAmt, releaseAmt)
+
+ return toCancel, toAdd, reservedDelta
+}
+
+// process withdrawal queue
+func (a *fundedAddress) processWithdrawals(withdrawals []*fundRequest) (msgCid cid.Cid, prerr error) {
+ // If there's an error, mark all withdrawal requests as errored
+ defer func() {
+ if prerr != nil {
+ for _, req := range withdrawals {
+ req.Complete(cid.Undef, prerr)
+ }
+ }
+ }()
+
+ // Get the net available balance
+ avail, err := a.env.AvailableFunds(a.ctx, a.state.Addr)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ netAvail := types.BigSub(avail, a.state.AmtReserved)
+
+ // Fit as many withdrawals as possible into the available balance, and fail
+ // the rest
+ withdrawalAmt := abi.NewTokenAmount(0)
+ allowedAmt := abi.NewTokenAmount(0)
+ allowed := make([]*fundRequest, 0, len(withdrawals))
+ var batchWallet address.Address
+ for _, req := range withdrawals {
+ amt := req.Amount()
+ if amt.IsZero() {
+ // If the context for the request was cancelled, bail out
+ req.Complete(cid.Undef, err)
+ continue
+ }
+
+ // If the amount would exceed the available amount, complete the
+ // request with an error
+ newWithdrawalAmt := types.BigAdd(withdrawalAmt, amt)
+ if newWithdrawalAmt.GreaterThan(netAvail) {
+ msg := fmt.Sprintf("insufficient funds for withdrawal of %s: ", types.FIL(amt))
+ msg += fmt.Sprintf("net available (%s) = available (%s) - reserved (%s)",
+ types.FIL(types.BigSub(netAvail, withdrawalAmt)), types.FIL(avail), types.FIL(a.state.AmtReserved))
+ if !withdrawalAmt.IsZero() {
+ msg += fmt.Sprintf(" - queued withdrawals (%s)", types.FIL(withdrawalAmt))
+ }
+ err := xerrors.Errorf(msg)
+ a.debugf("%s", err)
+ req.Complete(cid.Undef, err)
+ continue
+ }
+
+ // If this is the first allowed withdrawal request in this batch, save
+ // its wallet address
+ if batchWallet == address.Undef {
+ batchWallet = req.Wallet
+ }
+ // If the request wallet doesn't match the batch wallet, bail out
+ // (the withdrawal will be processed after the current batch has
+ // completed)
+ if req.Wallet != batchWallet {
+ continue
+ }
+
+ // Include this withdrawal request in the batch
+ withdrawalAmt = newWithdrawalAmt
+ a.debugf("withdraw %d", amt)
+ allowed = append(allowed, req)
+ allowedAmt = types.BigAdd(allowedAmt, amt)
+ }
+
+ // Check if there is anything to withdraw.
+ // Note that if the context for a request is cancelled,
+ // req.Amount() returns zero
+ if allowedAmt.Equals(abi.NewTokenAmount(0)) {
+ // Mark allowed requests as complete
+ for _, req := range allowed {
+ req.Complete(cid.Undef, nil)
+ }
+ return cid.Undef, nil
+ }
+
+ // Withdraw funds
+ a.debugf("withdraw funds %d", allowedAmt)
+ withdrawFundsCid, err := a.env.WithdrawFunds(a.ctx, allowed[0].Wallet, a.state.Addr, allowedAmt)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ // Mark allowed requests as complete
+ for _, req := range allowed {
+ req.Complete(withdrawFundsCid, nil)
+ }
+
+ // Save the message CID to state
+ return withdrawFundsCid, nil
+}
+
+// asynchonously wait for results of message
+func (a *fundedAddress) startWaitForResults(msgCid cid.Cid) {
+ go func() {
+ err := a.env.WaitMsg(a.ctx, msgCid)
+ if err != nil {
+ // We don't really care about the results here, we're just waiting
+ // so as to only process one on-chain message at a time
+ log.Errorf("waiting for results of message %s for addr %s: %v", msgCid, a.state.Addr, err)
+ }
+
+ a.lk.Lock()
+ a.debugf("complete wait")
+ a.clearWaitState()
+ a.lk.Unlock()
+
+ a.process()
+ }()
+}
+
+func (a *fundedAddress) debugf(args ...interface{}) {
+ fmtStr := args[0].(string)
+ args = args[1:]
+ log.Debugf(a.state.Addr.String()+": "+fmtStr, args...)
+}
+
+// The result of a fund request
+type reqResult struct {
+ msgCid cid.Cid
+ err error
+}
+
+// A request to change funds
+type fundRequest struct {
+ ctx context.Context
+ amt abi.TokenAmount
+ completed chan struct{}
+ Wallet address.Address
+ Result chan reqResult
+}
+
+func newFundRequest(ctx context.Context, wallet address.Address, amt abi.TokenAmount) *fundRequest {
+ return &fundRequest{
+ ctx: ctx,
+ amt: amt,
+ Wallet: wallet,
+ Result: make(chan reqResult),
+ completed: make(chan struct{}),
+ }
+}
+
+// Amount returns zero if the context has expired
+func (frp *fundRequest) Amount() abi.TokenAmount {
+ if frp.ctx.Err() != nil {
+ return abi.NewTokenAmount(0)
+ }
+ return frp.amt
+}
+
+// Complete is called with the message CID when the funds request has been
+// started or with the error if there was an error
+func (frp *fundRequest) Complete(msgCid cid.Cid, err error) {
+ select {
+ case <-frp.completed:
+ case <-frp.ctx.Done():
+ case frp.Result <- reqResult{msgCid: msgCid, err: err}:
+ }
+ close(frp.completed)
+}
+
+// Completed indicates if Complete has already been called
+func (frp *fundRequest) Completed() bool {
+ select {
+ case <-frp.completed:
+ return true
+ default:
+ return false
+ }
+}
+
+// fundManagerEnvironment simplifies some API calls
+type fundManagerEnvironment struct {
+ api fundManagerAPI
+}
+
+func (env *fundManagerEnvironment) AvailableFunds(ctx context.Context, addr address.Address) (abi.TokenAmount, error) {
+ bal, err := env.api.StateMarketBalance(ctx, addr, types.EmptyTSK)
+ if err != nil {
+ return abi.NewTokenAmount(0), err
+ }
+
+ return types.BigSub(bal.Escrow, bal.Locked), nil
+}
+
+func (env *fundManagerEnvironment) AddFunds(
+ ctx context.Context,
+ wallet address.Address,
+ addr address.Address,
+ amt abi.TokenAmount,
+) (cid.Cid, error) {
+ params, err := actors.SerializeParams(&addr)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ smsg, aerr := env.api.MpoolPushMessage(ctx, &types.Message{
+ To: market.Address,
+ From: wallet,
+ Value: amt,
+ Method: market.Methods.AddBalance,
+ Params: params,
+ }, nil)
+
+ if aerr != nil {
+ return cid.Undef, aerr
+ }
+
+ return smsg.Cid(), nil
+}
+
+func (env *fundManagerEnvironment) WithdrawFunds(
+ ctx context.Context,
+ wallet address.Address,
+ addr address.Address,
+ amt abi.TokenAmount,
+) (cid.Cid, error) {
+ params, err := actors.SerializeParams(&market.WithdrawBalanceParams{
+ ProviderOrClientAddress: addr,
+ Amount: amt,
+ })
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("serializing params: %w", err)
+ }
+
+ smsg, aerr := env.api.MpoolPushMessage(ctx, &types.Message{
+ To: market.Address,
+ From: wallet,
+ Value: types.NewInt(0),
+ Method: market.Methods.WithdrawBalance,
+ Params: params,
+ }, nil)
+
+ if aerr != nil {
+ return cid.Undef, aerr
+ }
+
+ return smsg.Cid(), nil
+}
+
+func (env *fundManagerEnvironment) WaitMsg(ctx context.Context, c cid.Cid) error {
+ _, err := env.api.StateWaitMsg(ctx, c, build.MessageConfidence, api.LookbackNoLimit, true)
+ return err
+}
diff --git a/chain/market/fundmanager_test.go b/chain/market/fundmanager_test.go
new file mode 100644
index 00000000000..12530434311
--- /dev/null
+++ b/chain/market/fundmanager_test.go
@@ -0,0 +1,820 @@
+package market
+
+import (
+ "bytes"
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/wallet"
+ tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
+ "github.com/ipfs/go-cid"
+ ds "github.com/ipfs/go-datastore"
+ ds_sync "github.com/ipfs/go-datastore/sync"
+ "github.com/stretchr/testify/require"
+)
+
+// TestFundManagerBasic verifies that the basic fund manager operations work
+func TestFundManagerBasic(t *testing.T) {
+ s := setup(t)
+ defer s.fm.Stop()
+
+ // Reserve 10
+ // balance: 0 -> 10
+ // reserved: 0 -> 10
+ amt := abi.NewTokenAmount(10)
+ sentinel, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
+ require.NoError(t, err)
+
+ msg := s.mockApi.getSentMessage(sentinel)
+ checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt)
+
+ s.mockApi.completeMsg(sentinel)
+
+ // Reserve 7
+ // balance: 10 -> 17
+ // reserved: 10 -> 17
+ amt = abi.NewTokenAmount(7)
+ sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
+ require.NoError(t, err)
+
+ msg = s.mockApi.getSentMessage(sentinel)
+ checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt)
+
+ s.mockApi.completeMsg(sentinel)
+
+ // Release 5
+ // balance: 17
+ // reserved: 17 -> 12
+ amt = abi.NewTokenAmount(5)
+ err = s.fm.Release(s.acctAddr, amt)
+ require.NoError(t, err)
+
+ // Withdraw 2
+ // balance: 17 -> 15
+ // reserved: 12
+ amt = abi.NewTokenAmount(2)
+ sentinel, err = s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt)
+ require.NoError(t, err)
+
+ msg = s.mockApi.getSentMessage(sentinel)
+ checkWithdrawMessageFields(t, msg, s.walletAddr, s.acctAddr, amt)
+
+ s.mockApi.completeMsg(sentinel)
+
+ // Reserve 3
+ // balance: 15
+ // reserved: 12 -> 15
+ // Note: reserved (15) is <= balance (15) so should not send on-chain
+ // message
+ msgCount := s.mockApi.messageCount()
+ amt = abi.NewTokenAmount(3)
+ sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
+ require.NoError(t, err)
+ require.Equal(t, msgCount, s.mockApi.messageCount())
+ require.Equal(t, sentinel, cid.Undef)
+
+ // Reserve 1
+ // balance: 15 -> 16
+ // reserved: 15 -> 16
+ // Note: reserved (16) is above balance (15) so *should* send on-chain
+ // message to top up balance
+ amt = abi.NewTokenAmount(1)
+ topUp := abi.NewTokenAmount(1)
+ sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
+ require.NoError(t, err)
+
+ s.mockApi.completeMsg(sentinel)
+ msg = s.mockApi.getSentMessage(sentinel)
+ checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, topUp)
+
+ // Withdraw 1
+ // balance: 16
+ // reserved: 16
+ // Note: Expect failure because there is no available balance to withdraw:
+ // balance - reserved = 16 - 16 = 0
+ amt = abi.NewTokenAmount(1)
+ sentinel, err = s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt)
+ require.Error(t, err)
+}
+
+// TestFundManagerParallel verifies that operations can be run in parallel
+func TestFundManagerParallel(t *testing.T) {
+ s := setup(t)
+ defer s.fm.Stop()
+
+ // Reserve 10
+ amt := abi.NewTokenAmount(10)
+ sentinelReserve10, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
+ require.NoError(t, err)
+
+ // Wait until all the subsequent requests are queued up
+ queueReady := make(chan struct{})
+ fa := s.fm.getFundedAddress(s.acctAddr)
+ fa.onProcessStart(func() bool {
+ if len(fa.withdrawals) == 1 && len(fa.reservations) == 2 && len(fa.releases) == 1 {
+ close(queueReady)
+ return true
+ }
+ return false
+ })
+
+ // Withdraw 5 (should not run until after reserves / releases)
+ withdrawReady := make(chan error)
+ go func() {
+ amt = abi.NewTokenAmount(5)
+ _, err := s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt)
+ withdrawReady <- err
+ }()
+
+ reserveSentinels := make(chan cid.Cid)
+
+ // Reserve 3
+ go func() {
+ amt := abi.NewTokenAmount(3)
+ sentinelReserve3, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
+ require.NoError(t, err)
+ reserveSentinels <- sentinelReserve3
+ }()
+
+ // Reserve 5
+ go func() {
+ amt := abi.NewTokenAmount(5)
+ sentinelReserve5, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
+ require.NoError(t, err)
+ reserveSentinels <- sentinelReserve5
+ }()
+
+ // Release 2
+ go func() {
+ amt := abi.NewTokenAmount(2)
+ err = s.fm.Release(s.acctAddr, amt)
+ require.NoError(t, err)
+ }()
+
+ // Everything is queued up
+ <-queueReady
+
+ // Complete the "Reserve 10" message
+ s.mockApi.completeMsg(sentinelReserve10)
+ msg := s.mockApi.getSentMessage(sentinelReserve10)
+ checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, abi.NewTokenAmount(10))
+
+ // The other requests should now be combined and be submitted on-chain as
+ // a single message
+ rs1 := <-reserveSentinels
+ rs2 := <-reserveSentinels
+ require.Equal(t, rs1, rs2)
+
+ // Withdraw should not have been called yet, because reserve / release
+ // requests run first
+ select {
+ case <-withdrawReady:
+ require.Fail(t, "Withdraw should run after reserve / release")
+ default:
+ }
+
+ // Complete the message
+ s.mockApi.completeMsg(rs1)
+ msg = s.mockApi.getSentMessage(rs1)
+
+ // "Reserve 3" +3
+ // "Reserve 5" +5
+ // "Release 2" -2
+ // Result: 6
+ checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, abi.NewTokenAmount(6))
+
+ // Expect withdraw to fail because not enough available funds
+ err = <-withdrawReady
+ require.Error(t, err)
+}
+
+// TestFundManagerReserveByWallet verifies that reserve requests are grouped by wallet
+func TestFundManagerReserveByWallet(t *testing.T) {
+ s := setup(t)
+ defer s.fm.Stop()
+
+ walletAddrA, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1)
+ require.NoError(t, err)
+ walletAddrB, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1)
+ require.NoError(t, err)
+
+ // Wait until all the reservation requests are queued up
+ walletAQueuedUp := make(chan struct{})
+ queueReady := make(chan struct{})
+ fa := s.fm.getFundedAddress(s.acctAddr)
+ fa.onProcessStart(func() bool {
+ if len(fa.reservations) == 1 {
+ close(walletAQueuedUp)
+ }
+ if len(fa.reservations) == 3 {
+ close(queueReady)
+ return true
+ }
+ return false
+ })
+
+ type reserveResult struct {
+ ws cid.Cid
+ err error
+ }
+ results := make(chan *reserveResult)
+
+ amtA1 := abi.NewTokenAmount(1)
+ go func() {
+ // Wallet A: Reserve 1
+ sentinelA1, err := s.fm.Reserve(s.ctx, walletAddrA, s.acctAddr, amtA1)
+ results <- &reserveResult{
+ ws: sentinelA1,
+ err: err,
+ }
+ }()
+
+ amtB1 := abi.NewTokenAmount(2)
+ amtB2 := abi.NewTokenAmount(3)
+ go func() {
+ // Wait for reservation for wallet A to be queued up
+ <-walletAQueuedUp
+
+ // Wallet B: Reserve 2
+ go func() {
+ sentinelB1, err := s.fm.Reserve(s.ctx, walletAddrB, s.acctAddr, amtB1)
+ results <- &reserveResult{
+ ws: sentinelB1,
+ err: err,
+ }
+ }()
+
+ // Wallet B: Reserve 3
+ sentinelB2, err := s.fm.Reserve(s.ctx, walletAddrB, s.acctAddr, amtB2)
+ results <- &reserveResult{
+ ws: sentinelB2,
+ err: err,
+ }
+ }()
+
+ // All reservation requests are queued up
+ <-queueReady
+
+ resA := <-results
+ sentinelA1 := resA.ws
+
+ // Should send to wallet A
+ msg := s.mockApi.getSentMessage(sentinelA1)
+ checkAddMessageFields(t, msg, walletAddrA, s.acctAddr, amtA1)
+
+ // Complete wallet A message
+ s.mockApi.completeMsg(sentinelA1)
+
+ resB1 := <-results
+ resB2 := <-results
+ require.NoError(t, resB1.err)
+ require.NoError(t, resB2.err)
+ sentinelB1 := resB1.ws
+ sentinelB2 := resB2.ws
+
+ // Should send different message to wallet B
+ require.NotEqual(t, sentinelA1, sentinelB1)
+ // Should be single message combining amount 1 and 2
+ require.Equal(t, sentinelB1, sentinelB2)
+ msg = s.mockApi.getSentMessage(sentinelB1)
+ checkAddMessageFields(t, msg, walletAddrB, s.acctAddr, types.BigAdd(amtB1, amtB2))
+}
+
+// TestFundManagerWithdrawalLimit verifies that as many withdraw operations as
+// possible are processed
+func TestFundManagerWithdrawalLimit(t *testing.T) {
+ s := setup(t)
+ defer s.fm.Stop()
+
+ // Reserve 10
+ amt := abi.NewTokenAmount(10)
+ sentinelReserve10, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
+ require.NoError(t, err)
+
+ // Complete the "Reserve 10" message
+ s.mockApi.completeMsg(sentinelReserve10)
+
+ // Release 10
+ err = s.fm.Release(s.acctAddr, amt)
+ require.NoError(t, err)
+
+ // Queue up withdraw requests
+ queueReady := make(chan struct{})
+ fa := s.fm.getFundedAddress(s.acctAddr)
+ withdrawalReqTotal := 3
+ withdrawalReqEnqueued := 0
+ withdrawalReqQueue := make(chan func(), withdrawalReqTotal)
+ fa.onProcessStart(func() bool {
+ // If a new withdrawal request was enqueued
+ if len(fa.withdrawals) > withdrawalReqEnqueued {
+ withdrawalReqEnqueued++
+
+ // Pop the next request and run it
+ select {
+ case fn := <-withdrawalReqQueue:
+ go fn()
+ default:
+ }
+ }
+ // Once all the requests have arrived, we're ready to process the queue
+ if withdrawalReqEnqueued == withdrawalReqTotal {
+ close(queueReady)
+ return true
+ }
+ return false
+ })
+
+ type withdrawResult struct {
+ reqIndex int
+ ws cid.Cid
+ err error
+ }
+ withdrawRes := make(chan *withdrawResult)
+
+ // Queue up three "Withdraw 5" requests
+ enqueuedCount := 0
+ for i := 0; i < withdrawalReqTotal; i++ {
+ withdrawalReqQueue <- func() {
+ idx := enqueuedCount
+ enqueuedCount++
+
+ amt := abi.NewTokenAmount(5)
+ ws, err := s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt)
+ withdrawRes <- &withdrawResult{reqIndex: idx, ws: ws, err: err}
+ }
+ }
+ // Start the first request
+ fn := <-withdrawalReqQueue
+ go fn()
+
+ // All withdrawal requests are queued up and ready to be processed
+ <-queueReady
+
+ // Organize results in request order
+ results := make([]*withdrawResult, withdrawalReqTotal)
+ for i := 0; i < 3; i++ {
+ res := <-withdrawRes
+ results[res.reqIndex] = res
+ }
+
+ // Available 10
+ // Withdraw 5
+ // Expect Success
+ require.NoError(t, results[0].err)
+ // Available 5
+ // Withdraw 5
+ // Expect Success
+ require.NoError(t, results[1].err)
+ // Available 0
+ // Withdraw 5
+ // Expect FAIL
+ require.Error(t, results[2].err)
+
+ // Expect withdrawal requests that fit under reserved amount to be combined
+ // into a single message on-chain
+ require.Equal(t, results[0].ws, results[1].ws)
+}
+
+// TestFundManagerWithdrawByWallet verifies that withdraw requests are grouped by wallet
+func TestFundManagerWithdrawByWallet(t *testing.T) {
+ s := setup(t)
+ defer s.fm.Stop()
+
+ walletAddrA, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1)
+ require.NoError(t, err)
+ walletAddrB, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1)
+ require.NoError(t, err)
+
+ // Reserve 10
+ reserveAmt := abi.NewTokenAmount(10)
+ sentinelReserve, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, reserveAmt)
+ require.NoError(t, err)
+ s.mockApi.completeMsg(sentinelReserve)
+
+ time.Sleep(10 * time.Millisecond)
+
+ // Release 10
+ err = s.fm.Release(s.acctAddr, reserveAmt)
+ require.NoError(t, err)
+
+ type withdrawResult struct {
+ ws cid.Cid
+ err error
+ }
+ results := make(chan *withdrawResult)
+
+ // Wait until withdrawals are queued up
+ walletAQueuedUp := make(chan struct{})
+ queueReady := make(chan struct{})
+ withdrawalCount := 0
+ fa := s.fm.getFundedAddress(s.acctAddr)
+ fa.onProcessStart(func() bool {
+ if len(fa.withdrawals) == withdrawalCount {
+ return false
+ }
+ withdrawalCount = len(fa.withdrawals)
+
+ if withdrawalCount == 1 {
+ close(walletAQueuedUp)
+ } else if withdrawalCount == 3 {
+ close(queueReady)
+ return true
+ }
+ return false
+ })
+
+ amtA1 := abi.NewTokenAmount(1)
+ go func() {
+ // Wallet A: Withdraw 1
+ sentinelA1, err := s.fm.Withdraw(s.ctx, walletAddrA, s.acctAddr, amtA1)
+ results <- &withdrawResult{
+ ws: sentinelA1,
+ err: err,
+ }
+ }()
+
+ amtB1 := abi.NewTokenAmount(2)
+ amtB2 := abi.NewTokenAmount(3)
+ go func() {
+ // Wait until withdraw for wallet A is queued up
+ <-walletAQueuedUp
+
+ // Wallet B: Withdraw 2
+ go func() {
+ sentinelB1, err := s.fm.Withdraw(s.ctx, walletAddrB, s.acctAddr, amtB1)
+ results <- &withdrawResult{
+ ws: sentinelB1,
+ err: err,
+ }
+ }()
+
+ // Wallet B: Withdraw 3
+ sentinelB2, err := s.fm.Withdraw(s.ctx, walletAddrB, s.acctAddr, amtB2)
+ results <- &withdrawResult{
+ ws: sentinelB2,
+ err: err,
+ }
+ }()
+
+ // Withdrawals are queued up
+ <-queueReady
+
+ // Should withdraw from wallet A first
+ resA1 := <-results
+ sentinelA1 := resA1.ws
+ msg := s.mockApi.getSentMessage(sentinelA1)
+ checkWithdrawMessageFields(t, msg, walletAddrA, s.acctAddr, amtA1)
+
+ // Complete wallet A message
+ s.mockApi.completeMsg(sentinelA1)
+
+ resB1 := <-results
+ resB2 := <-results
+ require.NoError(t, resB1.err)
+ require.NoError(t, resB2.err)
+ sentinelB1 := resB1.ws
+ sentinelB2 := resB2.ws
+
+	// Should send a different message for wallet B than for wallet A
+ require.NotEqual(t, sentinelA1, sentinelB1)
+ // Should be single message combining amount 1 and 2
+ require.Equal(t, sentinelB1, sentinelB2)
+ msg = s.mockApi.getSentMessage(sentinelB1)
+ checkWithdrawMessageFields(t, msg, walletAddrB, s.acctAddr, types.BigAdd(amtB1, amtB2))
+}
+
+// TestFundManagerRestart verifies that waiting for incomplete requests resumes
+// on restart
+func TestFundManagerRestart(t *testing.T) {
+ s := setup(t)
+ defer s.fm.Stop()
+
+ acctAddr2 := tutils.NewActorAddr(t, "addr2")
+
+ // Address 1: Reserve 10
+ amt := abi.NewTokenAmount(10)
+ sentinelAddr1, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
+ require.NoError(t, err)
+
+ msg := s.mockApi.getSentMessage(sentinelAddr1)
+ checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt)
+
+ // Address 2: Reserve 7
+ amt2 := abi.NewTokenAmount(7)
+ sentinelAddr2Res7, err := s.fm.Reserve(s.ctx, s.walletAddr, acctAddr2, amt2)
+ require.NoError(t, err)
+
+ msg2 := s.mockApi.getSentMessage(sentinelAddr2Res7)
+ checkAddMessageFields(t, msg2, s.walletAddr, acctAddr2, amt2)
+
+ // Complete "Address 1: Reserve 10"
+ s.mockApi.completeMsg(sentinelAddr1)
+
+ // Give the completed state a moment to be stored before restart
+ time.Sleep(time.Millisecond * 10)
+
+ // Restart
+ mockApiAfter := s.mockApi
+ fmAfter := newFundManager(mockApiAfter, s.ds)
+ err = fmAfter.Start()
+ require.NoError(t, err)
+
+ amt3 := abi.NewTokenAmount(9)
+ reserveSentinel := make(chan cid.Cid)
+ go func() {
+ // Address 2: Reserve 9
+ sentinel3, err := fmAfter.Reserve(s.ctx, s.walletAddr, acctAddr2, amt3)
+ require.NoError(t, err)
+ reserveSentinel <- sentinel3
+ }()
+
+ // Expect no message to be sent, because still waiting for previous
+ // message "Address 2: Reserve 7" to complete on-chain
+ select {
+ case <-reserveSentinel:
+ require.Fail(t, "Expected no message to be sent")
+ case <-time.After(10 * time.Millisecond):
+ }
+
+ // Complete "Address 2: Reserve 7"
+ mockApiAfter.completeMsg(sentinelAddr2Res7)
+
+ // Expect waiting message to now be sent
+ sentinel3 := <-reserveSentinel
+ msg3 := mockApiAfter.getSentMessage(sentinel3)
+ checkAddMessageFields(t, msg3, s.walletAddr, acctAddr2, amt3)
+}
+
+// TestFundManagerReleaseAfterPublish verifies that release is successful in
+// the following scenario:
+// 1. Deal A adds 5 to addr1: reserved 0 -> 5 available 0 -> 5
+// 2. Deal B adds 7 to addr1: reserved 5 -> 12 available 5 -> 12
+// 3. Deal B completes, reducing addr1 by 7: reserved 12 available 12 -> 5
+// 4. Deal A releases 5 from addr1: reserved 12 -> 7 available 5
+func TestFundManagerReleaseAfterPublish(t *testing.T) {
+ s := setup(t)
+ defer s.fm.Stop()
+
+ // Deal A: Reserve 5
+ // balance: 0 -> 5
+ // reserved: 0 -> 5
+ amt := abi.NewTokenAmount(5)
+ sentinel, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
+ require.NoError(t, err)
+ s.mockApi.completeMsg(sentinel)
+
+ // Deal B: Reserve 7
+ // balance: 5 -> 12
+ // reserved: 5 -> 12
+ amt = abi.NewTokenAmount(7)
+ sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt)
+ require.NoError(t, err)
+ s.mockApi.completeMsg(sentinel)
+
+ // Deal B: Publish (removes Deal B amount from balance)
+ // balance: 12 -> 5
+ // reserved: 12
+ amt = abi.NewTokenAmount(7)
+ s.mockApi.publish(s.acctAddr, amt)
+
+ // Deal A: Release 5
+ // balance: 5
+ // reserved: 12 -> 7
+ amt = abi.NewTokenAmount(5)
+ err = s.fm.Release(s.acctAddr, amt)
+ require.NoError(t, err)
+
+	// Deal B: Release 5
+	// balance: 5
+	// reserved: 7 -> 2
+ amt = abi.NewTokenAmount(5)
+ err = s.fm.Release(s.acctAddr, amt)
+ require.NoError(t, err)
+}
+
+type scaffold struct {
+ ctx context.Context
+ ds *ds_sync.MutexDatastore
+ wllt *wallet.LocalWallet
+ walletAddr address.Address
+ acctAddr address.Address
+ mockApi *mockFundManagerAPI
+ fm *FundManager
+}
+
+func setup(t *testing.T) *scaffold {
+ ctx := context.Background()
+
+ wllt, err := wallet.NewWallet(wallet.NewMemKeyStore())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ walletAddr, err := wllt.WalletNew(context.Background(), types.KTSecp256k1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ acctAddr := tutils.NewActorAddr(t, "addr")
+
+ mockApi := newMockFundManagerAPI(walletAddr)
+ dstore := ds_sync.MutexWrap(ds.NewMapDatastore())
+ fm := newFundManager(mockApi, dstore)
+ return &scaffold{
+ ctx: ctx,
+ ds: dstore,
+ wllt: wllt,
+ walletAddr: walletAddr,
+ acctAddr: acctAddr,
+ mockApi: mockApi,
+ fm: fm,
+ }
+}
+
+func checkAddMessageFields(t *testing.T, msg *types.Message, from address.Address, to address.Address, amt abi.TokenAmount) {
+ require.Equal(t, from, msg.From)
+ require.Equal(t, market.Address, msg.To)
+ require.Equal(t, amt, msg.Value)
+
+ var paramsTo address.Address
+ err := paramsTo.UnmarshalCBOR(bytes.NewReader(msg.Params))
+ require.NoError(t, err)
+ require.Equal(t, to, paramsTo)
+}
+
+func checkWithdrawMessageFields(t *testing.T, msg *types.Message, from address.Address, addr address.Address, amt abi.TokenAmount) {
+ require.Equal(t, from, msg.From)
+ require.Equal(t, market.Address, msg.To)
+ require.Equal(t, abi.NewTokenAmount(0), msg.Value)
+
+ var params market.WithdrawBalanceParams
+ err := params.UnmarshalCBOR(bytes.NewReader(msg.Params))
+ require.NoError(t, err)
+ require.Equal(t, addr, params.ProviderOrClientAddress)
+ require.Equal(t, amt, params.Amount)
+}
+
+type sentMsg struct {
+ msg *types.SignedMessage
+ ready chan struct{}
+}
+
+type mockFundManagerAPI struct {
+ wallet address.Address
+
+ lk sync.Mutex
+ escrow map[address.Address]abi.TokenAmount
+ sentMsgs map[cid.Cid]*sentMsg
+ completedMsgs map[cid.Cid]struct{}
+ waitingFor map[cid.Cid]chan struct{}
+}
+
+func newMockFundManagerAPI(wallet address.Address) *mockFundManagerAPI {
+ return &mockFundManagerAPI{
+ wallet: wallet,
+ escrow: make(map[address.Address]abi.TokenAmount),
+ sentMsgs: make(map[cid.Cid]*sentMsg),
+ completedMsgs: make(map[cid.Cid]struct{}),
+ waitingFor: make(map[cid.Cid]chan struct{}),
+ }
+}
+
+func (mapi *mockFundManagerAPI) MpoolPushMessage(ctx context.Context, message *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
+ mapi.lk.Lock()
+ defer mapi.lk.Unlock()
+
+ smsg := &types.SignedMessage{Message: *message}
+ mapi.sentMsgs[smsg.Cid()] = &sentMsg{msg: smsg, ready: make(chan struct{})}
+
+ return smsg, nil
+}
+
+func (mapi *mockFundManagerAPI) getSentMessage(c cid.Cid) *types.Message {
+ mapi.lk.Lock()
+ defer mapi.lk.Unlock()
+
+ for i := 0; i < 1000; i++ {
+ if pending, ok := mapi.sentMsgs[c]; ok {
+ return &pending.msg.Message
+ }
+ time.Sleep(time.Millisecond)
+ }
+ panic("expected message to be sent")
+}
+
+func (mapi *mockFundManagerAPI) messageCount() int {
+ mapi.lk.Lock()
+ defer mapi.lk.Unlock()
+
+ return len(mapi.sentMsgs)
+}
+
+func (mapi *mockFundManagerAPI) completeMsg(msgCid cid.Cid) {
+ mapi.lk.Lock()
+
+ pmsg, ok := mapi.sentMsgs[msgCid]
+ if ok {
+ if pmsg.msg.Message.Method == market.Methods.AddBalance {
+ var escrowAcct address.Address
+ err := escrowAcct.UnmarshalCBOR(bytes.NewReader(pmsg.msg.Message.Params))
+ if err != nil {
+ panic(err)
+ }
+
+ escrow := mapi.getEscrow(escrowAcct)
+ before := escrow
+ escrow = types.BigAdd(escrow, pmsg.msg.Message.Value)
+ mapi.escrow[escrowAcct] = escrow
+ log.Debugf("%s: escrow %d -> %d", escrowAcct, before, escrow)
+ } else {
+ var params market.WithdrawBalanceParams
+ err := params.UnmarshalCBOR(bytes.NewReader(pmsg.msg.Message.Params))
+ if err != nil {
+ panic(err)
+ }
+ escrowAcct := params.ProviderOrClientAddress
+
+ escrow := mapi.getEscrow(escrowAcct)
+ before := escrow
+ escrow = types.BigSub(escrow, params.Amount)
+ mapi.escrow[escrowAcct] = escrow
+ log.Debugf("%s: escrow %d -> %d", escrowAcct, before, escrow)
+ }
+ }
+
+ mapi.completedMsgs[msgCid] = struct{}{}
+
+ ready, ok := mapi.waitingFor[msgCid]
+
+ mapi.lk.Unlock()
+
+ if ok {
+ close(ready)
+ }
+}
+
+func (mapi *mockFundManagerAPI) StateMarketBalance(ctx context.Context, a address.Address, key types.TipSetKey) (api.MarketBalance, error) {
+ mapi.lk.Lock()
+ defer mapi.lk.Unlock()
+
+ return api.MarketBalance{
+ Locked: abi.NewTokenAmount(0),
+ Escrow: mapi.getEscrow(a),
+ }, nil
+}
+
+func (mapi *mockFundManagerAPI) getEscrow(a address.Address) abi.TokenAmount {
+ escrow := mapi.escrow[a]
+ if escrow.Nil() {
+ return abi.NewTokenAmount(0)
+ }
+ return escrow
+}
+
+func (mapi *mockFundManagerAPI) publish(addr address.Address, amt abi.TokenAmount) {
+ mapi.lk.Lock()
+ defer mapi.lk.Unlock()
+
+ escrow := mapi.escrow[addr]
+ if escrow.Nil() {
+ return
+ }
+ escrow = types.BigSub(escrow, amt)
+ if escrow.LessThan(abi.NewTokenAmount(0)) {
+ escrow = abi.NewTokenAmount(0)
+ }
+ mapi.escrow[addr] = escrow
+}
+
+func (mapi *mockFundManagerAPI) StateWaitMsg(ctx context.Context, c cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) {
+ res := &api.MsgLookup{
+ Message: c,
+ Receipt: types.MessageReceipt{
+ ExitCode: 0,
+ Return: nil,
+ },
+ }
+ ready := make(chan struct{})
+
+ mapi.lk.Lock()
+ _, ok := mapi.completedMsgs[c]
+ if !ok {
+ mapi.waitingFor[c] = ready
+ }
+ mapi.lk.Unlock()
+
+ if !ok {
+ select {
+ case <-ctx.Done():
+ case <-ready:
+ }
+ }
+ return res, nil
+}
diff --git a/chain/market/fundmgr.go b/chain/market/fundmgr.go
deleted file mode 100644
index 50467a6e153..00000000000
--- a/chain/market/fundmgr.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package market
-
-import (
- "context"
- "sync"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/big"
- "github.com/ipfs/go-cid"
- logging "github.com/ipfs/go-log"
- "go.uber.org/fx"
-
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/chain/actors"
- "github.com/filecoin-project/lotus/chain/actors/builtin/market"
- "github.com/filecoin-project/lotus/chain/events"
- "github.com/filecoin-project/lotus/chain/events/state"
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/node/impl/full"
-)
-
-var log = logging.Logger("market_adapter")
-
-// API is the dependencies need to run a fund manager
-type API struct {
- fx.In
-
- full.ChainAPI
- full.StateAPI
- full.MpoolAPI
-}
-
-// FundMgr monitors available balances and adds funds when EnsureAvailable is called
-type FundMgr struct {
- api fundMgrAPI
-
- lk sync.RWMutex
- available map[address.Address]types.BigInt
-}
-
-// StartFundManager creates a new fund manager and sets up event hooks to manage state changes
-func StartFundManager(lc fx.Lifecycle, api API) *FundMgr {
- fm := newFundMgr(&api)
- lc.Append(fx.Hook{
- OnStart: func(ctx context.Context) error {
- ev := events.NewEvents(ctx, &api)
- preds := state.NewStatePredicates(&api)
- dealDiffFn := preds.OnStorageMarketActorChanged(preds.OnBalanceChanged(preds.AvailableBalanceChangedForAddresses(fm.getAddresses)))
- match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
- return dealDiffFn(ctx, oldTs.Key(), newTs.Key())
- }
- return ev.StateChanged(fm.checkFunc, fm.stateChanged, fm.revert, 0, events.NoTimeout, match)
- },
- })
- return fm
-}
-
-type fundMgrAPI interface {
- StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error)
- MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error)
- StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error)
-}
-
-func newFundMgr(api fundMgrAPI) *FundMgr {
- return &FundMgr{
- api: api,
- available: map[address.Address]types.BigInt{},
- }
-}
-
-// checkFunc tells the events api to simply proceed (we always want to watch)
-func (fm *FundMgr) checkFunc(ts *types.TipSet) (done bool, more bool, err error) {
- return false, true, nil
-}
-
-// revert handles reverts to balances
-func (fm *FundMgr) revert(ctx context.Context, ts *types.TipSet) error {
- // TODO: Is it ok to just ignore this?
- log.Warn("balance change reverted; TODO: actually handle this!")
- return nil
-}
-
-// stateChanged handles balance changes monitored on the chain from one tipset to the next
-func (fm *FundMgr) stateChanged(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) {
- changedBalances, ok := states.(state.ChangedBalances)
- if !ok {
- panic("Expected state.ChangedBalances")
- }
- // overwrite our in memory cache with new values from chain (chain is canonical)
- fm.lk.Lock()
- for addr, balanceChange := range changedBalances {
- if fm.available[addr].Int != nil {
- log.Infof("State balance change recorded, prev: %s, new: %s", fm.available[addr].String(), balanceChange.To.String())
- }
-
- fm.available[addr] = balanceChange.To
- }
- fm.lk.Unlock()
- return true, nil
-}
-
-func (fm *FundMgr) getAddresses() []address.Address {
- fm.lk.RLock()
- defer fm.lk.RUnlock()
- addrs := make([]address.Address, 0, len(fm.available))
- for addr := range fm.available {
- addrs = append(addrs, addr)
- }
- return addrs
-}
-
-// EnsureAvailable looks at the available balance in escrow for a given
-// address, and if less than the passed in amount, adds the difference
-func (fm *FundMgr) EnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) {
- idAddr, err := fm.api.StateLookupID(ctx, addr, types.EmptyTSK)
- if err != nil {
- return cid.Undef, err
- }
- fm.lk.Lock()
- defer fm.lk.Unlock()
-
- bal, err := fm.api.StateMarketBalance(ctx, addr, types.EmptyTSK)
- if err != nil {
- return cid.Undef, err
- }
-
- stateAvail := types.BigSub(bal.Escrow, bal.Locked)
-
- avail, ok := fm.available[idAddr]
- if !ok {
- avail = stateAvail
- }
-
- toAdd := types.BigSub(amt, avail)
- if toAdd.LessThan(types.NewInt(0)) {
- toAdd = types.NewInt(0)
- }
- fm.available[idAddr] = big.Add(avail, toAdd)
-
- log.Infof("Funds operation w/ Expected Balance: %s, In State: %s, Requested: %s, Adding: %s", avail.String(), stateAvail.String(), amt.String(), toAdd.String())
-
- if toAdd.LessThanEqual(big.Zero()) {
- return cid.Undef, nil
- }
-
- params, err := actors.SerializeParams(&addr)
- if err != nil {
- fm.available[idAddr] = avail
- return cid.Undef, err
- }
-
- smsg, err := fm.api.MpoolPushMessage(ctx, &types.Message{
- To: market.Address,
- From: wallet,
- Value: toAdd,
- Method: market.Methods.AddBalance,
- Params: params,
- }, nil)
- if err != nil {
- fm.available[idAddr] = avail
- return cid.Undef, err
- }
-
- return smsg.Cid(), nil
-}
diff --git a/chain/market/fundmgr_test.go b/chain/market/fundmgr_test.go
deleted file mode 100644
index 88ca2e16fa4..00000000000
--- a/chain/market/fundmgr_test.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package market
-
-import (
- "context"
- "errors"
- "math/rand"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/crypto"
-
- tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
-
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/chain/actors"
- "github.com/filecoin-project/lotus/chain/actors/builtin/market"
- "github.com/filecoin-project/lotus/chain/types"
-)
-
-type fakeAPI struct {
- returnedBalance api.MarketBalance
- returnedBalanceErr error
- signature crypto.Signature
- receivedMessage *types.Message
- pushMessageErr error
- lookupIDErr error
-}
-
-func (fapi *fakeAPI) StateLookupID(_ context.Context, addr address.Address, _ types.TipSetKey) (address.Address, error) {
- return addr, fapi.lookupIDErr
-}
-func (fapi *fakeAPI) StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) {
- return fapi.returnedBalance, fapi.returnedBalanceErr
-}
-
-func (fapi *fakeAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
- fapi.receivedMessage = msg
- return &types.SignedMessage{
- Message: *msg,
- Signature: fapi.signature,
- }, fapi.pushMessageErr
-}
-
-func addFundsMsg(toAdd abi.TokenAmount, addr address.Address, wallet address.Address) *types.Message {
- params, _ := actors.SerializeParams(&addr)
- return &types.Message{
- To: market.Address,
- From: wallet,
- Value: toAdd,
- Method: market.Methods.AddBalance,
- Params: params,
- }
-}
-
-type expectedResult struct {
- addAmt abi.TokenAmount
- shouldAdd bool
- err error
- cachedAvailable abi.TokenAmount
-}
-
-func TestAddFunds(t *testing.T) {
- ctx := context.Background()
- testCases := map[string]struct {
- returnedBalanceErr error
- returnedBalance api.MarketBalance
- addAmounts []abi.TokenAmount
- pushMessageErr error
- expectedResults []expectedResult
- lookupIDErr error
- }{
- "succeeds, trivial case": {
- returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(0), Locked: abi.NewTokenAmount(0)},
- addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)},
- expectedResults: []expectedResult{
- {
- addAmt: abi.NewTokenAmount(100),
- shouldAdd: true,
- err: nil,
- },
- },
- },
- "succeeds, money already present": {
- returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(150), Locked: abi.NewTokenAmount(50)},
- addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)},
- expectedResults: []expectedResult{
- {
- shouldAdd: false,
- err: nil,
- cachedAvailable: abi.NewTokenAmount(100),
- },
- },
- },
- "succeeds, multiple adds": {
- returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(150), Locked: abi.NewTokenAmount(50)},
- addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100), abi.NewTokenAmount(200), abi.NewTokenAmount(250), abi.NewTokenAmount(250)},
- expectedResults: []expectedResult{
- {
- shouldAdd: false,
- err: nil,
- },
- {
- addAmt: abi.NewTokenAmount(100),
- shouldAdd: true,
- err: nil,
- cachedAvailable: abi.NewTokenAmount(200),
- },
- {
- addAmt: abi.NewTokenAmount(50),
- shouldAdd: true,
- err: nil,
- cachedAvailable: abi.NewTokenAmount(250),
- },
- {
- shouldAdd: false,
- err: nil,
- cachedAvailable: abi.NewTokenAmount(250),
- },
- },
- },
- "error on market balance": {
- returnedBalanceErr: errors.New("something went wrong"),
- addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)},
- expectedResults: []expectedResult{
- {
- err: errors.New("something went wrong"),
- },
- },
- },
- "error on push message": {
- returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(0), Locked: abi.NewTokenAmount(0)},
- pushMessageErr: errors.New("something went wrong"),
- addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)},
- expectedResults: []expectedResult{
- {
- err: errors.New("something went wrong"),
- cachedAvailable: abi.NewTokenAmount(0),
- },
- },
- },
- "error looking up address": {
- lookupIDErr: errors.New("something went wrong"),
- addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)},
- expectedResults: []expectedResult{
- {
- err: errors.New("something went wrong"),
- },
- },
- },
- }
-
- for testCase, data := range testCases {
- //nolint:scopelint
- t.Run(testCase, func(t *testing.T) {
- ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
- defer cancel()
- sig := make([]byte, 100)
- _, err := rand.Read(sig)
- require.NoError(t, err)
- fapi := &fakeAPI{
- returnedBalance: data.returnedBalance,
- returnedBalanceErr: data.returnedBalanceErr,
- signature: crypto.Signature{
- Type: crypto.SigTypeUnknown,
- Data: sig,
- },
- pushMessageErr: data.pushMessageErr,
- lookupIDErr: data.lookupIDErr,
- }
- fundMgr := newFundMgr(fapi)
- addr := tutils.NewIDAddr(t, uint64(rand.Uint32()))
- wallet := tutils.NewIDAddr(t, uint64(rand.Uint32()))
- for i, amount := range data.addAmounts {
- fapi.receivedMessage = nil
- _, err := fundMgr.EnsureAvailable(ctx, addr, wallet, amount)
- expected := data.expectedResults[i]
- if expected.err == nil {
- require.NoError(t, err)
- if expected.shouldAdd {
- expectedMessage := addFundsMsg(expected.addAmt, addr, wallet)
- require.Equal(t, expectedMessage, fapi.receivedMessage)
- } else {
- require.Nil(t, fapi.receivedMessage)
- }
- } else {
- require.EqualError(t, err, expected.err.Error())
- }
-
- if !expected.cachedAvailable.Nil() {
- require.Equal(t, expected.cachedAvailable, fundMgr.available[addr])
- }
- }
- })
- }
-}
diff --git a/chain/market/store.go b/chain/market/store.go
new file mode 100644
index 00000000000..e0d0e10be38
--- /dev/null
+++ b/chain/market/store.go
@@ -0,0 +1,90 @@
+package market
+
+import (
+ "bytes"
+
+ cborrpc "github.com/filecoin-project/go-cbor-util"
+ "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/namespace"
+ dsq "github.com/ipfs/go-datastore/query"
+
+ "github.com/filecoin-project/go-address"
+
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+)
+
+const dsKeyAddr = "Addr"
+
+type Store struct {
+ ds datastore.Batching
+}
+
+func newStore(ds dtypes.MetadataDS) *Store {
+ ds = namespace.Wrap(ds, datastore.NewKey("/fundmgr/"))
+ return &Store{
+ ds: ds,
+ }
+}
+
+// save the state to the datastore
+func (ps *Store) save(state *FundedAddressState) error {
+ k := dskeyForAddr(state.Addr)
+
+ b, err := cborrpc.Dump(state)
+ if err != nil {
+ return err
+ }
+
+ return ps.ds.Put(k, b)
+}
+
+// get the state for the given address
+func (ps *Store) get(addr address.Address) (*FundedAddressState, error) {
+ k := dskeyForAddr(addr)
+
+ data, err := ps.ds.Get(k)
+ if err != nil {
+ return nil, err
+ }
+
+ var state FundedAddressState
+ err = cborrpc.ReadCborRPC(bytes.NewReader(data), &state)
+ if err != nil {
+ return nil, err
+ }
+ return &state, nil
+}
+
+// forEach calls iter with each address in the datastore
+func (ps *Store) forEach(iter func(*FundedAddressState)) error {
+ res, err := ps.ds.Query(dsq.Query{Prefix: dsKeyAddr})
+ if err != nil {
+ return err
+ }
+ defer res.Close() //nolint:errcheck
+
+ for {
+ res, ok := res.NextSync()
+ if !ok {
+ break
+ }
+
+ if res.Error != nil {
+ return err
+ }
+
+ var stored FundedAddressState
+ if err := stored.UnmarshalCBOR(bytes.NewReader(res.Value)); err != nil {
+ return err
+ }
+
+ iter(&stored)
+ }
+
+ return nil
+}
+
+// The datastore key used to identify the address state
+func dskeyForAddr(addr address.Address) datastore.Key {
+ return datastore.KeyWithNamespaces([]string{dsKeyAddr, addr.String()})
+}
diff --git a/chain/messagepool/check.go b/chain/messagepool/check.go
new file mode 100644
index 00000000000..11203e7dffd
--- /dev/null
+++ b/chain/messagepool/check.go
@@ -0,0 +1,431 @@
+package messagepool
+
+import (
+ "context"
+ "fmt"
+ stdbig "math/big"
+ "sort"
+
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
+)
+
+var baseFeeUpperBoundFactor = types.NewInt(10)
+
+// CheckMessages performs a set of logic checks for a list of messages, prior to submitting it to the mpool
+func (mp *MessagePool) CheckMessages(ctx context.Context, protos []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) {
+ flex := make([]bool, len(protos))
+ msgs := make([]*types.Message, len(protos))
+ for i, p := range protos {
+ flex[i] = !p.ValidNonce
+ msgs[i] = &p.Message
+ }
+ return mp.checkMessages(ctx, msgs, false, flex)
+}
+
+// CheckPendingMessages performs a set of logical sets for all messages pending from a given actor
+func (mp *MessagePool) CheckPendingMessages(ctx context.Context, from address.Address) ([][]api.MessageCheckStatus, error) {
+ var msgs []*types.Message
+ mp.lk.Lock()
+ mset, ok := mp.pending[from]
+ if ok {
+ for _, sm := range mset.msgs {
+ msgs = append(msgs, &sm.Message)
+ }
+ }
+ mp.lk.Unlock()
+
+ if len(msgs) == 0 {
+ return nil, nil
+ }
+
+ sort.Slice(msgs, func(i, j int) bool {
+ return msgs[i].Nonce < msgs[j].Nonce
+ })
+
+ return mp.checkMessages(ctx, msgs, true, nil)
+}
+
+// CheckReplaceMessages performs a set of logical checks for related messages while performing a
+// replacement.
+func (mp *MessagePool) CheckReplaceMessages(ctx context.Context, replace []*types.Message) ([][]api.MessageCheckStatus, error) {
+ msgMap := make(map[address.Address]map[uint64]*types.Message)
+ count := 0
+
+ mp.lk.Lock()
+ for _, m := range replace {
+ mmap, ok := msgMap[m.From]
+ if !ok {
+ mmap = make(map[uint64]*types.Message)
+ msgMap[m.From] = mmap
+ mset, ok := mp.pending[m.From]
+ if ok {
+ count += len(mset.msgs)
+ for _, sm := range mset.msgs {
+ mmap[sm.Message.Nonce] = &sm.Message
+ }
+ } else {
+ count++
+ }
+ }
+ mmap[m.Nonce] = m
+ }
+ mp.lk.Unlock()
+
+ msgs := make([]*types.Message, 0, count)
+ start := 0
+ for _, mmap := range msgMap {
+ end := start + len(mmap)
+
+ for _, m := range mmap {
+ msgs = append(msgs, m)
+ }
+
+ sort.Slice(msgs[start:end], func(i, j int) bool {
+ return msgs[start+i].Nonce < msgs[start+j].Nonce
+ })
+
+ start = end
+ }
+
+ return mp.checkMessages(ctx, msgs, true, nil)
+}
+
+// flexibleNonces should be either nil or of len(msgs), it signifies that message at given index
+// has non-determied nonce at this point
+func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message, interned bool, flexibleNonces []bool) (result [][]api.MessageCheckStatus, err error) {
+ if mp.api.IsLite() {
+ return nil, nil
+ }
+ mp.curTsLk.Lock()
+ curTs := mp.curTs
+ mp.curTsLk.Unlock()
+
+ epoch := curTs.Height()
+
+ var baseFee big.Int
+ if len(curTs.Blocks()) > 0 {
+ baseFee = curTs.Blocks()[0].ParentBaseFee
+ } else {
+ baseFee, err = mp.api.ChainComputeBaseFee(context.Background(), curTs)
+ if err != nil {
+ return nil, xerrors.Errorf("error computing basefee: %w", err)
+ }
+ }
+
+ baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)
+ baseFeeUpperBound := types.BigMul(baseFee, baseFeeUpperBoundFactor)
+
+ type actorState struct {
+ nextNonce uint64
+ requiredFunds *stdbig.Int
+ }
+
+ state := make(map[address.Address]*actorState)
+ balances := make(map[address.Address]big.Int)
+
+ result = make([][]api.MessageCheckStatus, len(msgs))
+
+ for i, m := range msgs {
+ // pre-check: actor nonce
+ check := api.MessageCheckStatus{
+ Cid: m.Cid(),
+ CheckStatus: api.CheckStatus{
+ Code: api.CheckStatusMessageGetStateNonce,
+ },
+ }
+
+ st, ok := state[m.From]
+ if !ok {
+ mp.lk.Lock()
+ mset, ok := mp.pending[m.From]
+ if ok && !interned {
+ st = &actorState{nextNonce: mset.nextNonce, requiredFunds: mset.requiredFunds}
+ for _, m := range mset.msgs {
+ st.requiredFunds = new(stdbig.Int).Add(st.requiredFunds, m.Message.Value.Int)
+ }
+ state[m.From] = st
+ mp.lk.Unlock()
+
+ check.OK = true
+ check.Hint = map[string]interface{}{
+ "nonce": st.nextNonce,
+ }
+ } else {
+ mp.lk.Unlock()
+
+ stateNonce, err := mp.getStateNonce(ctx, m.From, curTs)
+ if err != nil {
+ check.OK = false
+ check.Err = fmt.Sprintf("error retrieving state nonce: %s", err.Error())
+ } else {
+ check.OK = true
+ check.Hint = map[string]interface{}{
+ "nonce": stateNonce,
+ }
+ }
+
+ st = &actorState{nextNonce: stateNonce, requiredFunds: new(stdbig.Int)}
+ state[m.From] = st
+ }
+ } else {
+ check.OK = true
+ }
+
+ result[i] = append(result[i], check)
+ if !check.OK {
+ continue
+ }
+
+ // pre-check: actor balance
+ check = api.MessageCheckStatus{
+ Cid: m.Cid(),
+ CheckStatus: api.CheckStatus{
+ Code: api.CheckStatusMessageGetStateBalance,
+ },
+ }
+
+ balance, ok := balances[m.From]
+ if !ok {
+ balance, err = mp.getStateBalance(ctx, m.From, curTs)
+ if err != nil {
+ check.OK = false
+ check.Err = fmt.Sprintf("error retrieving state balance: %s", err)
+ } else {
+ check.OK = true
+ check.Hint = map[string]interface{}{
+ "balance": balance,
+ }
+ }
+
+ balances[m.From] = balance
+ } else {
+ check.OK = true
+ check.Hint = map[string]interface{}{
+ "balance": balance,
+ }
+ }
+
+ result[i] = append(result[i], check)
+ if !check.OK {
+ continue
+ }
+
+ // 1. Serialization
+ check = api.MessageCheckStatus{
+ Cid: m.Cid(),
+ CheckStatus: api.CheckStatus{
+ Code: api.CheckStatusMessageSerialize,
+ },
+ }
+
+ bytes, err := m.Serialize()
+ if err != nil {
+ check.OK = false
+ check.Err = err.Error()
+ } else {
+ check.OK = true
+ }
+
+ result[i] = append(result[i], check)
+
+ // 2. Message size
+ check = api.MessageCheckStatus{
+ Cid: m.Cid(),
+ CheckStatus: api.CheckStatus{
+ Code: api.CheckStatusMessageSize,
+ },
+ }
+
+ if len(bytes) > MaxMessageSize-128 { // 128 bytes to account for signature size
+ check.OK = false
+ check.Err = "message too big"
+ } else {
+ check.OK = true
+ }
+
+ result[i] = append(result[i], check)
+
+ // 3. Syntactic validation
+ check = api.MessageCheckStatus{
+ Cid: m.Cid(),
+ CheckStatus: api.CheckStatus{
+ Code: api.CheckStatusMessageValidity,
+ },
+ }
+
+ if err := m.ValidForBlockInclusion(0, build.NewestNetworkVersion); err != nil {
+ check.OK = false
+ check.Err = fmt.Sprintf("syntactically invalid message: %s", err.Error())
+ } else {
+ check.OK = true
+ }
+
+ result[i] = append(result[i], check)
+ if !check.OK {
+ // skip remaining checks if it is a syntatically invalid message
+ continue
+ }
+
+ // gas checks
+
+ // 4. Min Gas
+ minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
+
+ check = api.MessageCheckStatus{
+ Cid: m.Cid(),
+ CheckStatus: api.CheckStatus{
+ Code: api.CheckStatusMessageMinGas,
+ Hint: map[string]interface{}{
+ "minGas": minGas,
+ },
+ },
+ }
+
+ if m.GasLimit < minGas.Total() {
+ check.OK = false
+ check.Err = "GasLimit less than epoch minimum gas"
+ } else {
+ check.OK = true
+ }
+
+ result[i] = append(result[i], check)
+
+ // 5. Min Base Fee
+ check = api.MessageCheckStatus{
+ Cid: m.Cid(),
+ CheckStatus: api.CheckStatus{
+ Code: api.CheckStatusMessageMinBaseFee,
+ },
+ }
+
+ if m.GasFeeCap.LessThan(minimumBaseFee) {
+ check.OK = false
+ check.Err = "GasFeeCap less than minimum base fee"
+ } else {
+ check.OK = true
+ }
+
+ result[i] = append(result[i], check)
+ if !check.OK {
+ goto checkState
+ }
+
+ // 6. Base Fee
+ check = api.MessageCheckStatus{
+ Cid: m.Cid(),
+ CheckStatus: api.CheckStatus{
+ Code: api.CheckStatusMessageBaseFee,
+ Hint: map[string]interface{}{
+ "baseFee": baseFee,
+ },
+ },
+ }
+
+ if m.GasFeeCap.LessThan(baseFee) {
+ check.OK = false
+ check.Err = "GasFeeCap less than current base fee"
+ } else {
+ check.OK = true
+ }
+
+ result[i] = append(result[i], check)
+
+ // 7. Base Fee lower bound
+ check = api.MessageCheckStatus{
+ Cid: m.Cid(),
+ CheckStatus: api.CheckStatus{
+ Code: api.CheckStatusMessageBaseFeeLowerBound,
+ Hint: map[string]interface{}{
+ "baseFeeLowerBound": baseFeeLowerBound,
+ "baseFee": baseFee,
+ },
+ },
+ }
+
+ if m.GasFeeCap.LessThan(baseFeeLowerBound) {
+ check.OK = false
+ check.Err = "GasFeeCap less than base fee lower bound for inclusion in next 20 epochs"
+ } else {
+ check.OK = true
+ }
+
+ result[i] = append(result[i], check)
+
+ // 8. Base Fee upper bound
+ check = api.MessageCheckStatus{
+ Cid: m.Cid(),
+ CheckStatus: api.CheckStatus{
+ Code: api.CheckStatusMessageBaseFeeUpperBound,
+ Hint: map[string]interface{}{
+ "baseFeeUpperBound": baseFeeUpperBound,
+ "baseFee": baseFee,
+ },
+ },
+ }
+
+ if m.GasFeeCap.LessThan(baseFeeUpperBound) {
+ check.OK = true // on purpose, the checks is more of a warning
+ check.Err = "GasFeeCap less than base fee upper bound for inclusion in next 20 epochs"
+ } else {
+ check.OK = true
+ }
+
+ result[i] = append(result[i], check)
+
+ // stateful checks
+ checkState:
+ // 9. Message Nonce
+ check = api.MessageCheckStatus{
+ Cid: m.Cid(),
+ CheckStatus: api.CheckStatus{
+ Code: api.CheckStatusMessageNonce,
+ Hint: map[string]interface{}{
+ "nextNonce": st.nextNonce,
+ },
+ },
+ }
+
+ if (flexibleNonces == nil || !flexibleNonces[i]) && st.nextNonce != m.Nonce {
+ check.OK = false
+ check.Err = fmt.Sprintf("message nonce doesn't match next nonce (%d)", st.nextNonce)
+ } else {
+ check.OK = true
+ st.nextNonce++
+ }
+
+ result[i] = append(result[i], check)
+
+ // check required funds -vs- balance
+ st.requiredFunds = new(stdbig.Int).Add(st.requiredFunds, m.RequiredFunds().Int)
+ st.requiredFunds.Add(st.requiredFunds, m.Value.Int)
+
+ // 10. Balance
+ check = api.MessageCheckStatus{
+ Cid: m.Cid(),
+ CheckStatus: api.CheckStatus{
+ Code: api.CheckStatusMessageBalance,
+ Hint: map[string]interface{}{
+ "requiredFunds": big.Int{Int: stdbig.NewInt(0).Set(st.requiredFunds)},
+ },
+ },
+ }
+
+ if balance.Int.Cmp(st.requiredFunds) < 0 {
+ check.OK = false
+ check.Err = "insufficient balance"
+ } else {
+ check.OK = true
+ }
+
+ result[i] = append(result[i], check)
+ }
+
+ return result, nil
+}
diff --git a/chain/messagepool/config.go b/chain/messagepool/config.go
index f8f0ee98583..a511f84b7f4 100644
--- a/chain/messagepool/config.go
+++ b/chain/messagepool/config.go
@@ -48,9 +48,13 @@ func saveConfig(cfg *types.MpoolConfig, ds dtypes.MetadataDS) error {
}
func (mp *MessagePool) GetConfig() *types.MpoolConfig {
- mp.cfgLk.Lock()
- defer mp.cfgLk.Unlock()
- return mp.cfg.Clone()
+ return mp.getConfig().Clone()
+}
+
+func (mp *MessagePool) getConfig() *types.MpoolConfig {
+ mp.cfgLk.RLock()
+ defer mp.cfgLk.RUnlock()
+ return mp.cfg
}
func validateConfg(cfg *types.MpoolConfig) error {
diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go
index 79ab572ba91..f6c8e3ac998 100644
--- a/chain/messagepool/messagepool.go
+++ b/chain/messagepool/messagepool.go
@@ -34,6 +34,7 @@ import (
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/lib/sigs"
+ "github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/raulk/clock"
@@ -59,7 +60,7 @@ var MaxUntrustedActorPendingMessages = 10
var MaxNonceGap = uint64(4)
-var DefaultMaxFee = abi.TokenAmount(types.MustParseFIL("0.007"))
+const MaxMessageSize = 64 << 10 // 64KiB
var (
ErrMessageTooBig = errors.New("message too big")
@@ -128,14 +129,18 @@ type MessagePool struct {
republished map[cid.Cid]struct{}
+ // do NOT access this map directly, use isLocal, setLocal, and forEachLocal respectively
localAddrs map[address.Address]struct{}
+ // do NOT access this map directly, use getPendingMset, setPendingMset, deletePendingMset, forEachPending, and clearPending respectively
pending map[address.Address]*msgSet
+ keyCache map[address.Address]address.Address
+
curTsLk sync.Mutex // DO NOT LOCK INSIDE lk
curTs *types.TipSet
- cfgLk sync.Mutex
+ cfgLk sync.RWMutex
cfg *types.MpoolConfig
api Provider
@@ -183,9 +188,18 @@ func ComputeMinRBF(curPrem abi.TokenAmount) abi.TokenAmount {
return types.BigAdd(minPrice, types.NewInt(1))
}
-func CapGasFee(msg *types.Message, maxFee abi.TokenAmount) {
- if maxFee.Equals(big.Zero()) {
- maxFee = DefaultMaxFee
+func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSepc *api.MessageSendSpec) {
+ var maxFee abi.TokenAmount
+ if sendSepc != nil {
+ maxFee = sendSepc.MaxFee
+ }
+ if maxFee.Int == nil || maxFee.Equals(big.Zero()) {
+ mf, err := mff()
+ if err != nil {
+ log.Errorf("failed to get default max gas fee: %+v", err)
+ mf = big.Zero()
+ }
+ maxFee = mf
}
gl := types.NewInt(uint64(msg.GasLimit))
@@ -236,10 +250,13 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted
// check if RBF passes
minPrice := ComputeMinRBF(exms.Message.GasPremium)
if types.BigCmp(m.Message.GasPremium, minPrice) >= 0 {
- log.Infow("add with RBF", "oldpremium", exms.Message.GasPremium,
+ log.Debugw("add with RBF", "oldpremium", exms.Message.GasPremium,
"newpremium", m.Message.GasPremium, "addr", m.Message.From, "nonce", m.Message.Nonce)
} else {
- log.Info("add with duplicate nonce")
+ log.Debugf("add with duplicate nonce. message from %s with nonce %d already in mpool,"+
+ " increase GasPremium to %s from %s to trigger replace by fee: %s",
+ m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium,
+ ErrRBFTooLowPremium)
return false, xerrors.Errorf("message from %s with nonce %d already in mpool,"+
" increase GasPremium to %s from %s to trigger replace by fee: %w",
m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium,
@@ -260,7 +277,7 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted
}
if strict && nonceGap {
- log.Warnf("adding nonce-gapped message from %s (nonce: %d, nextNonce: %d)",
+ log.Debugf("adding nonce-gapped message from %s (nonce: %d, nextNonce: %d)",
m.Message.From, m.Message.Nonce, nextNonce)
}
@@ -319,6 +336,20 @@ func (ms *msgSet) getRequiredFunds(nonce uint64) types.BigInt {
return types.BigInt{Int: requiredFunds}
}
+func (ms *msgSet) toSlice() []*types.SignedMessage {
+ set := make([]*types.SignedMessage, 0, len(ms.msgs))
+
+ for _, m := range ms.msgs {
+ set = append(set, m)
+ }
+
+ sort.Slice(set, func(i, j int) bool {
+ return set[i].Message.Nonce < set[j].Message.Nonce
+ })
+
+ return set
+}
+
func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
cache, _ := lru.New2Q(build.BlsSignatureCacheSize)
verifcache, _ := lru.New2Q(build.VerifSigCacheSize)
@@ -340,6 +371,7 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
repubTrigger: make(chan struct{}, 1),
localAddrs: make(map[address.Address]struct{}),
pending: make(map[address.Address]*msgSet),
+ keyCache: make(map[address.Address]address.Address),
minGasPrice: types.NewInt(0),
pruneTrigger: make(chan struct{}, 1),
pruneCooldown: make(chan struct{}, 1),
@@ -361,9 +393,11 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
// enable initial prunes
mp.pruneCooldown <- struct{}{}
+ ctx, cancel := context.WithCancel(context.TODO())
+
// load the current tipset and subscribe to head changes _before_ loading local messages
mp.curTs = api.SubscribeHeadChanges(func(rev, app []*types.TipSet) error {
- err := mp.HeadChange(rev, app)
+ err := mp.HeadChange(ctx, rev, app)
if err != nil {
log.Errorf("mpool head notif handler error: %+v", err)
}
@@ -374,7 +408,8 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
mp.lk.Lock()
go func() {
- err := mp.loadLocal()
+ defer cancel()
+ err := mp.loadLocal(ctx)
mp.lk.Unlock()
mp.curTsLk.Unlock()
@@ -385,12 +420,127 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
log.Info("mpool ready")
- mp.runLoop()
+ mp.runLoop(ctx)
}()
return mp, nil
}
+func (mp *MessagePool) ForEachPendingMessage(f func(cid.Cid) error) error {
+ mp.lk.Lock()
+ defer mp.lk.Unlock()
+
+ for _, mset := range mp.pending {
+ for _, m := range mset.msgs {
+ err := f(m.Cid())
+ if err != nil {
+ return err
+ }
+
+ err = f(m.Message.Cid())
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (address.Address, error) {
+ // check the cache
+ a, f := mp.keyCache[addr]
+ if f {
+ return a, nil
+ }
+
+ // resolve the address
+ ka, err := mp.api.StateAccountKeyAtFinality(ctx, addr, mp.curTs)
+ if err != nil {
+ return address.Undef, err
+ }
+
+ // place both entries in the cache (may both be key addresses, which is fine)
+ mp.keyCache[addr] = ka
+ mp.keyCache[ka] = ka
+
+ return ka, nil
+}
+
+func (mp *MessagePool) getPendingMset(ctx context.Context, addr address.Address) (*msgSet, bool, error) {
+ ra, err := mp.resolveToKey(ctx, addr)
+ if err != nil {
+ return nil, false, err
+ }
+
+ ms, f := mp.pending[ra]
+
+ return ms, f, nil
+}
+
+func (mp *MessagePool) setPendingMset(ctx context.Context, addr address.Address, ms *msgSet) error {
+ ra, err := mp.resolveToKey(ctx, addr)
+ if err != nil {
+ return err
+ }
+
+ mp.pending[ra] = ms
+
+ return nil
+}
+
+// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have
+func (mp *MessagePool) forEachPending(f func(address.Address, *msgSet)) {
+ for la, ms := range mp.pending {
+ f(la, ms)
+ }
+}
+
+func (mp *MessagePool) deletePendingMset(ctx context.Context, addr address.Address) error {
+ ra, err := mp.resolveToKey(ctx, addr)
+ if err != nil {
+ return err
+ }
+
+ delete(mp.pending, ra)
+
+ return nil
+}
+
+// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have
+func (mp *MessagePool) clearPending() {
+ mp.pending = make(map[address.Address]*msgSet)
+}
+
+func (mp *MessagePool) isLocal(ctx context.Context, addr address.Address) (bool, error) {
+ ra, err := mp.resolveToKey(ctx, addr)
+ if err != nil {
+ return false, err
+ }
+
+ _, f := mp.localAddrs[ra]
+
+ return f, nil
+}
+
+func (mp *MessagePool) setLocal(ctx context.Context, addr address.Address) error {
+ ra, err := mp.resolveToKey(ctx, addr)
+ if err != nil {
+ return err
+ }
+
+ mp.localAddrs[ra] = struct{}{}
+
+ return nil
+}
+
+// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have
+func (mp *MessagePool) forEachLocal(ctx context.Context, f func(context.Context, address.Address)) {
+ for la := range mp.localAddrs {
+ f(ctx, la)
+ }
+}
+
func (mp *MessagePool) Close() error {
close(mp.closer)
return nil
@@ -408,15 +558,15 @@ func (mp *MessagePool) Prune() {
mp.pruneTrigger <- struct{}{}
}
-func (mp *MessagePool) runLoop() {
+func (mp *MessagePool) runLoop(ctx context.Context) {
for {
select {
case <-mp.repubTk.C:
- if err := mp.republishPendingMessages(); err != nil {
+ if err := mp.republishPendingMessages(ctx); err != nil {
log.Errorf("error while republishing messages: %s", err)
}
case <-mp.repubTrigger:
- if err := mp.republishPendingMessages(); err != nil {
+ if err := mp.republishPendingMessages(ctx); err != nil {
log.Errorf("error while republishing messages: %s", err)
}
@@ -432,8 +582,10 @@ func (mp *MessagePool) runLoop() {
}
}
-func (mp *MessagePool) addLocal(m *types.SignedMessage) error {
- mp.localAddrs[m.Message.From] = struct{}{}
+func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) error {
+ if err := mp.setLocal(ctx, m.Message.From); err != nil {
+ return err
+ }
msgb, err := m.Serialize()
if err != nil {
@@ -447,7 +599,7 @@ func (mp *MessagePool) addLocal(m *types.SignedMessage) error {
return nil
}
-// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusio
+// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusion
// and whether the message has enough funds to be included in the next 20 blocks.
// If the message is not valid for block inclusion, it returns an error.
// For local messages, if the message can be included in the next 20 blocks, it returns true to
@@ -461,11 +613,11 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T
epoch := curTs.Height()
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
- if err := m.VMMessage().ValidForBlockInclusion(minGas.Total()); err != nil {
+ if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil {
return false, xerrors.Errorf("message will not be included in a block: %w", err)
}
- // this checks if the GasFeeCap is suffisciently high for inclusion in the next 20 blocks
+ // this checks if the GasFeeCap is sufficiently high for inclusion in the next 20 blocks
// if the GasFeeCap is too low, we soft reject the message (Ignore in pubsub) and rely
// on republish to push it through later, if the baseFee has fallen.
// this is a defensive check that stops minimum baseFee spam attacks from overloading validation
@@ -500,7 +652,10 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T
return publish, nil
}
-func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
+func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) {
+ done := metrics.Timer(ctx, metrics.MpoolPushDuration)
+ defer done()
+
err := mp.checkMessage(m)
if err != nil {
return cid.Undef, err
@@ -513,7 +668,7 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
}()
mp.curTsLk.Lock()
- publish, err := mp.addTs(m, mp.curTs, true, false)
+ publish, err := mp.addTs(ctx, m, mp.curTs, true, false)
if err != nil {
mp.curTsLk.Unlock()
return cid.Undef, err
@@ -537,12 +692,12 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
// big messages are bad, anti DOS
- if m.Size() > 32*1024 {
+ if m.Size() > MaxMessageSize {
return xerrors.Errorf("mpool message too large (%dB): %w", m.Size(), ErrMessageTooBig)
}
// Perform syntactic validation, minGas=0 as we check the actual mingas before we add it
- if err := m.Message.ValidForBlockInclusion(0); err != nil {
+ if err := m.Message.ValidForBlockInclusion(0, build.NewestNetworkVersion); err != nil {
return xerrors.Errorf("message not valid for block inclusion: %w", err)
}
@@ -566,7 +721,10 @@ func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
return nil
}
-func (mp *MessagePool) Add(m *types.SignedMessage) error {
+func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error {
+ done := metrics.Timer(ctx, metrics.MpoolAddDuration)
+ defer done()
+
err := mp.checkMessage(m)
if err != nil {
return err
@@ -581,7 +739,7 @@ func (mp *MessagePool) Add(m *types.SignedMessage) error {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
- _, err = mp.addTs(m, mp.curTs, false, false)
+ _, err = mp.addTs(ctx, m, mp.curTs, false, false)
return err
}
@@ -621,8 +779,8 @@ func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error {
return nil
}
-func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet) error {
- balance, err := mp.getStateBalance(m.Message.From, curTs)
+func (mp *MessagePool) checkBalance(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet) error {
+ balance, err := mp.getStateBalance(ctx, m.Message.From, curTs)
if err != nil {
return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrSoftValidationFailure)
}
@@ -635,7 +793,12 @@ func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet)
// add Value for soft failure check
//requiredFunds = types.BigAdd(requiredFunds, m.Message.Value)
- mset, ok := mp.pending[m.Message.From]
+ mset, ok, err := mp.getPendingMset(ctx, m.Message.From)
+ if err != nil {
+ log.Debugf("mpoolcheckbalance failed to get pending mset: %s", err)
+ return err
+ }
+
if ok {
requiredFunds = types.BigAdd(requiredFunds, mset.getRequiredFunds(m.Message.Nonce))
}
@@ -649,8 +812,11 @@ func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet)
return nil
}
-func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local, untrusted bool) (bool, error) {
- snonce, err := mp.getStateNonce(m.Message.From, curTs)
+func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet, local, untrusted bool) (bool, error) {
+ done := metrics.Timer(ctx, metrics.MpoolAddTsDuration)
+ defer done()
+
+ snonce, err := mp.getStateNonce(ctx, m.Message.From, curTs)
if err != nil {
return false, xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
}
@@ -667,17 +833,17 @@ func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local,
return false, err
}
- if err := mp.checkBalance(m, curTs); err != nil {
+ if err := mp.checkBalance(ctx, m, curTs); err != nil {
return false, err
}
- err = mp.addLocked(m, !local, untrusted)
+ err = mp.addLocked(ctx, m, !local, untrusted)
if err != nil {
return false, err
}
if local {
- err = mp.addLocal(m)
+ err = mp.addLocal(ctx, m)
if err != nil {
return false, xerrors.Errorf("error persisting local message: %w", err)
}
@@ -686,7 +852,7 @@ func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local,
return publish, nil
}
-func (mp *MessagePool) addLoaded(m *types.SignedMessage) error {
+func (mp *MessagePool) addLoaded(ctx context.Context, m *types.SignedMessage) error {
err := mp.checkMessage(m)
if err != nil {
return err
@@ -698,7 +864,7 @@ func (mp *MessagePool) addLoaded(m *types.SignedMessage) error {
return xerrors.Errorf("current tipset not loaded")
}
- snonce, err := mp.getStateNonce(m.Message.From, curTs)
+ snonce, err := mp.getStateNonce(ctx, m.Message.From, curTs)
if err != nil {
return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
}
@@ -712,21 +878,21 @@ func (mp *MessagePool) addLoaded(m *types.SignedMessage) error {
return err
}
- if err := mp.checkBalance(m, curTs); err != nil {
+ if err := mp.checkBalance(ctx, m, curTs); err != nil {
return err
}
- return mp.addLocked(m, false, false)
+ return mp.addLocked(ctx, m, false, false)
}
-func (mp *MessagePool) addSkipChecks(m *types.SignedMessage) error {
+func (mp *MessagePool) addSkipChecks(ctx context.Context, m *types.SignedMessage) error {
mp.lk.Lock()
defer mp.lk.Unlock()
- return mp.addLocked(m, false, false)
+ return mp.addLocked(ctx, m, false, false)
}
-func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool) error {
+func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, strict, untrusted bool) error {
log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce)
if m.Signature.Type == crypto.SigTypeBLS {
mp.blsSigCache.Add(m.Cid(), m.Signature)
@@ -742,15 +908,23 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool)
return err
}
- mset, ok := mp.pending[m.Message.From]
+ // Note: If performance becomes an issue, making this getOrCreatePendingMset will save some work
+ mset, ok, err := mp.getPendingMset(ctx, m.Message.From)
+ if err != nil {
+ log.Debug(err)
+ return err
+ }
+
if !ok {
- nonce, err := mp.getStateNonce(m.Message.From, mp.curTs)
+ nonce, err := mp.getStateNonce(ctx, m.Message.From, mp.curTs)
if err != nil {
return xerrors.Errorf("failed to get initial actor nonce: %w", err)
}
mset = newMsgSet(nonce)
- mp.pending[m.Message.From] = mset
+ if err = mp.setPendingMset(ctx, m.Message.From, mset); err != nil {
+ return xerrors.Errorf("failed to set pending mset: %w", err)
+ }
}
incr, err := mset.add(m, mp, strict, untrusted)
@@ -761,7 +935,7 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool)
if incr {
mp.currentSize++
- if mp.currentSize > mp.cfg.SizeLimitHigh {
+ if mp.currentSize > mp.getConfig().SizeLimitHigh {
// send signal to prune messages if it hasnt already been sent
select {
case mp.pruneTrigger <- struct{}{}:
@@ -785,23 +959,35 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool)
return nil
}
-func (mp *MessagePool) GetNonce(addr address.Address) (uint64, error) {
+func (mp *MessagePool) GetNonce(ctx context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
mp.lk.Lock()
defer mp.lk.Unlock()
- return mp.getNonceLocked(addr, mp.curTs)
+ return mp.getNonceLocked(ctx, addr, mp.curTs)
+}
+
+// GetActor should not be used. It is only here to satisfy interface mess caused by lite node handling
+func (mp *MessagePool) GetActor(_ context.Context, addr address.Address, _ types.TipSetKey) (*types.Actor, error) {
+ mp.curTsLk.Lock()
+ defer mp.curTsLk.Unlock()
+ return mp.api.GetActorAfter(addr, mp.curTs)
}
-func (mp *MessagePool) getNonceLocked(addr address.Address, curTs *types.TipSet) (uint64, error) {
- stateNonce, err := mp.getStateNonce(addr, curTs) // sanity check
+func (mp *MessagePool) getNonceLocked(ctx context.Context, addr address.Address, curTs *types.TipSet) (uint64, error) {
+ stateNonce, err := mp.getStateNonce(ctx, addr, curTs) // sanity check
if err != nil {
return 0, err
}
- mset, ok := mp.pending[addr]
+ mset, ok, err := mp.getPendingMset(ctx, addr)
+ if err != nil {
+ log.Debugf("mpoolgetnonce failed to get mset: %s", err)
+ return 0, err
+ }
+
if ok {
if stateNonce > mset.nextNonce {
log.Errorf("state nonce was larger than mset.nextNonce (%d > %d)", stateNonce, mset.nextNonce)
@@ -815,8 +1001,11 @@ func (mp *MessagePool) getNonceLocked(addr address.Address, curTs *types.TipSet)
return stateNonce, nil
}
-func (mp *MessagePool) getStateNonce(addr address.Address, curTs *types.TipSet) (uint64, error) {
- act, err := mp.api.GetActorAfter(addr, curTs)
+func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address, ts *types.TipSet) (uint64, error) {
+ done := metrics.Timer(ctx, metrics.MpoolGetNonceDuration)
+ defer done()
+
+ act, err := mp.api.GetActorAfter(addr, ts)
if err != nil {
return 0, err
}
@@ -824,7 +1013,10 @@ func (mp *MessagePool) getStateNonce(addr address.Address, curTs *types.TipSet)
return act.Nonce, nil
}
-func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (types.BigInt, error) {
+func (mp *MessagePool) getStateBalance(ctx context.Context, addr address.Address, ts *types.TipSet) (types.BigInt, error) {
+ done := metrics.Timer(ctx, metrics.MpoolGetBalanceDuration)
+ defer done()
+
act, err := mp.api.GetActorAfter(addr, ts)
if err != nil {
return types.EmptyInt, err
@@ -838,7 +1030,7 @@ func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (
// - strict checks are enabled
// - extra strict add checks are used when adding the messages to the msgSet
// that means: no nonce gaps, at most 10 pending messages for the actor
-func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) {
+func (mp *MessagePool) PushUntrusted(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) {
err := mp.checkMessage(m)
if err != nil {
return cid.Undef, err
@@ -851,7 +1043,7 @@ func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) {
}()
mp.curTsLk.Lock()
- publish, err := mp.addTs(m, mp.curTs, false, true)
+ publish, err := mp.addTs(ctx, m, mp.curTs, true, true)
if err != nil {
mp.curTsLk.Unlock()
return cid.Undef, err
@@ -873,15 +1065,20 @@ func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) {
return m.Cid(), nil
}
-func (mp *MessagePool) Remove(from address.Address, nonce uint64, applied bool) {
+func (mp *MessagePool) Remove(ctx context.Context, from address.Address, nonce uint64, applied bool) {
mp.lk.Lock()
defer mp.lk.Unlock()
- mp.remove(from, nonce, applied)
+ mp.remove(ctx, from, nonce, applied)
}
-func (mp *MessagePool) remove(from address.Address, nonce uint64, applied bool) {
- mset, ok := mp.pending[from]
+func (mp *MessagePool) remove(ctx context.Context, from address.Address, nonce uint64, applied bool) {
+ mset, ok, err := mp.getPendingMset(ctx, from)
+ if err != nil {
+ log.Debugf("mpoolremove failed to get mset: %s", err)
+ return
+ }
+
if !ok {
return
}
@@ -906,58 +1103,57 @@ func (mp *MessagePool) remove(from address.Address, nonce uint64, applied bool)
mset.rm(nonce, applied)
if len(mset.msgs) == 0 {
- delete(mp.pending, from)
+ if err = mp.deletePendingMset(ctx, from); err != nil {
+ log.Debugf("mpoolremove failed to delete mset: %s", err)
+ return
+ }
}
}
-func (mp *MessagePool) Pending() ([]*types.SignedMessage, *types.TipSet) {
+func (mp *MessagePool) Pending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
mp.lk.Lock()
defer mp.lk.Unlock()
- return mp.allPending()
+ return mp.allPending(ctx)
}
-func (mp *MessagePool) allPending() ([]*types.SignedMessage, *types.TipSet) {
+func (mp *MessagePool) allPending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) {
out := make([]*types.SignedMessage, 0)
- for a := range mp.pending {
- out = append(out, mp.pendingFor(a)...)
- }
+
+ mp.forEachPending(func(a address.Address, mset *msgSet) {
+ out = append(out, mset.toSlice()...)
+ })
return out, mp.curTs
}
-func (mp *MessagePool) PendingFor(a address.Address) ([]*types.SignedMessage, *types.TipSet) {
+func (mp *MessagePool) PendingFor(ctx context.Context, a address.Address) ([]*types.SignedMessage, *types.TipSet) {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
mp.lk.Lock()
defer mp.lk.Unlock()
- return mp.pendingFor(a), mp.curTs
+ return mp.pendingFor(ctx, a), mp.curTs
}
-func (mp *MessagePool) pendingFor(a address.Address) []*types.SignedMessage {
- mset := mp.pending[a]
- if mset == nil || len(mset.msgs) == 0 {
+func (mp *MessagePool) pendingFor(ctx context.Context, a address.Address) []*types.SignedMessage {
+ mset, ok, err := mp.getPendingMset(ctx, a)
+ if err != nil {
+ log.Debugf("mpoolpendingfor failed to get mset: %s", err)
return nil
}
- set := make([]*types.SignedMessage, 0, len(mset.msgs))
-
- for _, m := range mset.msgs {
- set = append(set, m)
+ if mset == nil || !ok || len(mset.msgs) == 0 {
+ return nil
}
- sort.Slice(set, func(i, j int) bool {
- return set[i].Message.Nonce < set[j].Message.Nonce
- })
-
- return set
+ return mset.toSlice()
}
-func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) error {
+func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, apply []*types.TipSet) error {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
@@ -974,7 +1170,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
rm := func(from address.Address, nonce uint64) {
s, ok := rmsgs[from]
if !ok {
- mp.Remove(from, nonce, true)
+ mp.Remove(ctx, from, nonce, true)
return
}
@@ -983,7 +1179,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
return
}
- mp.Remove(from, nonce, true)
+ mp.Remove(ctx, from, nonce, true)
}
maybeRepub := func(cid cid.Cid) {
@@ -1054,7 +1250,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
for _, s := range rmsgs {
for _, msg := range s {
- if err := mp.addSkipChecks(msg); err != nil {
+ if err := mp.addSkipChecks(ctx, msg); err != nil {
log.Errorf("Failed to readd message from reorg to mpool: %s", err)
}
}
@@ -1062,7 +1258,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
if len(revert) > 0 && futureDebug {
mp.lk.Lock()
- msgs, ts := mp.allPending()
+ msgs, ts := mp.allPending(ctx)
mp.lk.Unlock()
buckets := map[address.Address]*statBucket{}
@@ -1215,7 +1411,7 @@ func (mp *MessagePool) MessagesForBlocks(blks []*types.BlockHeader) ([]*types.Si
if smsg != nil {
out = append(out, smsg)
} else {
- log.Warnf("could not recover signature for bls message %s", msg.Cid())
+ log.Debugf("could not recover signature for bls message %s", msg.Cid())
}
}
}
@@ -1269,7 +1465,7 @@ func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, err
return out, nil
}
-func (mp *MessagePool) loadLocal() error {
+func (mp *MessagePool) loadLocal(ctx context.Context) error {
res, err := mp.localMsgs.Query(query.Query{})
if err != nil {
return xerrors.Errorf("query local messages: %w", err)
@@ -1285,7 +1481,7 @@ func (mp *MessagePool) loadLocal() error {
return xerrors.Errorf("unmarshaling local message: %w", err)
}
- if err := mp.addLoaded(&sm); err != nil {
+ if err := mp.addLoaded(ctx, &sm); err != nil {
if xerrors.Is(err, ErrNonceTooLow) {
continue // todo: drop the message from local cache (if above certain confidence threshold)
}
@@ -1293,47 +1489,61 @@ func (mp *MessagePool) loadLocal() error {
log.Errorf("adding local message: %+v", err)
}
- mp.localAddrs[sm.Message.From] = struct{}{}
+ if err = mp.setLocal(ctx, sm.Message.From); err != nil {
+ log.Debugf("mpoolloadLocal errored: %s", err)
+ return err
+ }
}
return nil
}
-func (mp *MessagePool) Clear(local bool) {
+func (mp *MessagePool) Clear(ctx context.Context, local bool) {
mp.lk.Lock()
defer mp.lk.Unlock()
// remove everything if local is true, including removing local messages from
// the datastore
if local {
- for a := range mp.localAddrs {
- mset, ok := mp.pending[a]
- if !ok {
- continue
+ mp.forEachLocal(ctx, func(ctx context.Context, la address.Address) {
+ mset, ok, err := mp.getPendingMset(ctx, la)
+ if err != nil {
+ log.Warnf("errored while getting pending mset: %w", err)
+ return
}
- for _, m := range mset.msgs {
- err := mp.localMsgs.Delete(datastore.NewKey(string(m.Cid().Bytes())))
- if err != nil {
- log.Warnf("error deleting local message: %s", err)
+ if ok {
+ for _, m := range mset.msgs {
+ err := mp.localMsgs.Delete(datastore.NewKey(string(m.Cid().Bytes())))
+ if err != nil {
+ log.Warnf("error deleting local message: %s", err)
+ }
}
}
- }
+ })
- mp.pending = make(map[address.Address]*msgSet)
+ mp.clearPending()
mp.republished = nil
return
}
- // remove everything except the local messages
- for a := range mp.pending {
- _, isLocal := mp.localAddrs[a]
+ mp.forEachPending(func(a address.Address, ms *msgSet) {
+ isLocal, err := mp.isLocal(ctx, a)
+ if err != nil {
+ log.Warnf("errored while determining isLocal: %w", err)
+ return
+ }
+
if isLocal {
- continue
+ return
}
- delete(mp.pending, a)
- }
+
+ if err = mp.deletePendingMset(ctx, a); err != nil {
+ log.Warnf("errored while deleting mset: %w", err)
+ return
+ }
+ })
}
func getBaseFeeLowerBound(baseFee, factor types.BigInt) types.BigInt {
diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go
index e31df936c8f..2ea8fdec054 100644
--- a/chain/messagepool/messagepool_test.go
+++ b/chain/messagepool/messagepool_test.go
@@ -14,12 +14,14 @@ import (
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/lotus/chain/wallet"
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
+ "github.com/stretchr/testify/assert"
)
func init() {
@@ -104,6 +106,10 @@ func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) {
return cid.Undef, nil
}
+func (tma *testMpoolAPI) IsLite() bool {
+ return false
+}
+
func (tma *testMpoolAPI) PubSubPublish(string, []byte) error {
tma.published++
return nil
@@ -150,7 +156,7 @@ func (tma *testMpoolAPI) GetActorAfter(addr address.Address, ts *types.TipSet) (
}, nil
}
-func (tma *testMpoolAPI) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
+func (tma *testMpoolAPI) StateAccountKeyAtFinality(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
if addr.Protocol() != address.BLS && addr.Protocol() != address.SECP256K1 {
return address.Undef, fmt.Errorf("given address was not a key addr")
}
@@ -199,7 +205,7 @@ func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipS
func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) {
t.Helper()
- n, err := mp.GetNonce(addr)
+ n, err := mp.GetNonce(context.TODO(), addr, types.EmptyTSK)
if err != nil {
t.Fatal(err)
}
@@ -211,7 +217,7 @@ func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64
func mustAdd(t *testing.T, mp *MessagePool, msg *types.SignedMessage) {
t.Helper()
- if err := mp.Add(msg); err != nil {
+ if err := mp.Add(context.TODO(), msg); err != nil {
t.Fatal(err)
}
}
@@ -257,6 +263,72 @@ func TestMessagePool(t *testing.T) {
assertNonce(t, mp, sender, 2)
}
+func TestCheckMessageBig(t *testing.T) {
+ tma := newTestMpoolAPI()
+
+ w, err := wallet.NewWallet(wallet.NewMemKeyStore())
+ assert.NoError(t, err)
+
+ from, err := w.WalletNew(context.Background(), types.KTBLS)
+ assert.NoError(t, err)
+
+ tma.setBalance(from, 1000e9)
+
+ ds := datastore.NewMapDatastore()
+
+ mp, err := New(tma, ds, "mptest", nil)
+ assert.NoError(t, err)
+
+ to := mock.Address(1001)
+
+ {
+ msg := &types.Message{
+ To: to,
+ From: from,
+ Value: types.NewInt(1),
+ Nonce: 0,
+ GasLimit: 50000000,
+ GasFeeCap: types.NewInt(100),
+ GasPremium: types.NewInt(1),
+ Params: make([]byte, 41<<10), // 41KiB payload
+ }
+
+ sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
+ if err != nil {
+ panic(err)
+ }
+ sm := &types.SignedMessage{
+ Message: *msg,
+ Signature: *sig,
+ }
+ mustAdd(t, mp, sm)
+ }
+
+ {
+ msg := &types.Message{
+ To: to,
+ From: from,
+ Value: types.NewInt(1),
+ Nonce: 0,
+ GasLimit: 50000000,
+ GasFeeCap: types.NewInt(100),
+ GasPremium: types.NewInt(1),
+ Params: make([]byte, 64<<10), // 64KiB payload
+ }
+
+ sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
+ if err != nil {
+ panic(err)
+ }
+ sm := &types.SignedMessage{
+ Message: *msg,
+ Signature: *sig,
+ }
+ err = mp.Add(context.TODO(), sm)
+ assert.ErrorIs(t, err, ErrMessageTooBig)
+ }
+}
+
func TestMessagePoolMessagesInEachBlock(t *testing.T) {
tma := newTestMpoolAPI()
@@ -293,9 +365,9 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) {
tma.applyBlock(t, a)
tsa := mock.TipSet(a)
- _, _ = mp.Pending()
+ _, _ = mp.Pending(context.TODO())
- selm, _ := mp.SelectMessages(tsa, 1)
+ selm, _ := mp.SelectMessages(context.Background(), tsa, 1)
if len(selm) == 0 {
t.Fatal("should have returned the rest of the messages")
}
@@ -355,7 +427,7 @@ func TestRevertMessages(t *testing.T) {
assertNonce(t, mp, sender, 4)
- p, _ := mp.Pending()
+ p, _ := mp.Pending(context.TODO())
fmt.Printf("%+v\n", p)
if len(p) != 3 {
t.Fatal("expected three messages in mempool")
@@ -396,14 +468,14 @@ func TestPruningSimple(t *testing.T) {
for i := 0; i < 5; i++ {
smsg := mock.MkMessage(sender, target, uint64(i), w)
- if err := mp.Add(smsg); err != nil {
+ if err := mp.Add(context.TODO(), smsg); err != nil {
t.Fatal(err)
}
}
for i := 10; i < 50; i++ {
smsg := mock.MkMessage(sender, target, uint64(i), w)
- if err := mp.Add(smsg); err != nil {
+ if err := mp.Add(context.TODO(), smsg); err != nil {
t.Fatal(err)
}
}
@@ -413,7 +485,7 @@ func TestPruningSimple(t *testing.T) {
mp.Prune()
- msgs, _ := mp.Pending()
+ msgs, _ := mp.Pending(context.TODO())
if len(msgs) != 5 {
t.Fatal("expected only 5 messages in pool, got: ", len(msgs))
}
@@ -455,7 +527,7 @@ func TestLoadLocal(t *testing.T) {
msgs := make(map[cid.Cid]struct{})
for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
- cid, err := mp.Push(m)
+ cid, err := mp.Push(context.TODO(), m)
if err != nil {
t.Fatal(err)
}
@@ -471,7 +543,7 @@ func TestLoadLocal(t *testing.T) {
t.Fatal(err)
}
- pmsgs, _ := mp.Pending()
+ pmsgs, _ := mp.Pending(context.TODO())
if len(msgs) != len(pmsgs) {
t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs))
}
@@ -526,7 +598,7 @@ func TestClearAll(t *testing.T) {
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}]
for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
- _, err := mp.Push(m)
+ _, err := mp.Push(context.TODO(), m)
if err != nil {
t.Fatal(err)
}
@@ -537,9 +609,9 @@ func TestClearAll(t *testing.T) {
mustAdd(t, mp, m)
}
- mp.Clear(true)
+ mp.Clear(context.Background(), true)
- pending, _ := mp.Pending()
+ pending, _ := mp.Pending(context.TODO())
if len(pending) > 0 {
t.Fatalf("cleared the mpool, but got %d pending messages", len(pending))
}
@@ -581,7 +653,7 @@ func TestClearNonLocal(t *testing.T) {
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}]
for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
- _, err := mp.Push(m)
+ _, err := mp.Push(context.TODO(), m)
if err != nil {
t.Fatal(err)
}
@@ -592,9 +664,9 @@ func TestClearNonLocal(t *testing.T) {
mustAdd(t, mp, m)
}
- mp.Clear(false)
+ mp.Clear(context.Background(), false)
- pending, _ := mp.Pending()
+ pending, _ := mp.Pending(context.TODO())
if len(pending) != 10 {
t.Fatalf("expected 10 pending messages, but got %d instead", len(pending))
}
@@ -651,7 +723,7 @@ func TestUpdates(t *testing.T) {
for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
- _, err := mp.Push(m)
+ _, err := mp.Push(context.TODO(), m)
if err != nil {
t.Fatal(err)
}
diff --git a/chain/messagepool/provider.go b/chain/messagepool/provider.go
index 347e90044d5..0f904c52c49 100644
--- a/chain/messagepool/provider.go
+++ b/chain/messagepool/provider.go
@@ -2,39 +2,67 @@ package messagepool
import (
"context"
+ "time"
"github.com/ipfs/go-cid"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/chain/messagesigner"
"github.com/filecoin-project/lotus/chain/stmgr"
+ "github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
)
+var (
+ HeadChangeCoalesceMinDelay = 2 * time.Second
+ HeadChangeCoalesceMaxDelay = 6 * time.Second
+ HeadChangeCoalesceMergeInterval = time.Second
+)
+
type Provider interface {
SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet
PutMessage(m types.ChainMsg) (cid.Cid, error)
PubSubPublish(string, []byte) error
GetActorAfter(address.Address, *types.TipSet) (*types.Actor, error)
- StateAccountKey(context.Context, address.Address, *types.TipSet) (address.Address, error)
+ StateAccountKeyAtFinality(context.Context, address.Address, *types.TipSet) (address.Address, error)
MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error)
MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error)
LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error)
ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error)
+ IsLite() bool
}
type mpoolProvider struct {
sm *stmgr.StateManager
ps *pubsub.PubSub
+
+ lite messagesigner.MpoolNonceAPI
}
+var _ Provider = (*mpoolProvider)(nil)
+
func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider {
return &mpoolProvider{sm: sm, ps: ps}
}
+func NewProviderLite(sm *stmgr.StateManager, ps *pubsub.PubSub, noncer messagesigner.MpoolNonceAPI) Provider {
+ return &mpoolProvider{sm: sm, ps: ps, lite: noncer}
+}
+
+func (mpp *mpoolProvider) IsLite() bool {
+ return mpp.lite != nil
+}
+
func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet {
- mpp.sm.ChainStore().SubscribeHeadChanges(cb)
+ mpp.sm.ChainStore().SubscribeHeadChanges(
+ store.WrapHeadChangeCoalescer(
+ cb,
+ HeadChangeCoalesceMinDelay,
+ HeadChangeCoalesceMaxDelay,
+ HeadChangeCoalesceMergeInterval,
+ ))
return mpp.sm.ChainStore().GetHeaviestTipSet()
}
@@ -47,6 +75,19 @@ func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error {
}
func (mpp *mpoolProvider) GetActorAfter(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
+ if mpp.IsLite() {
+ n, err := mpp.lite.GetNonce(context.TODO(), addr, ts.Key())
+ if err != nil {
+ return nil, xerrors.Errorf("getting nonce over lite: %w", err)
+ }
+ a, err := mpp.lite.GetActor(context.TODO(), addr, ts.Key())
+ if err != nil {
+ return nil, xerrors.Errorf("getting actor over lite: %w", err)
+ }
+ a.Nonce = n
+ return a, nil
+ }
+
stcid, _, err := mpp.sm.TipSetState(context.TODO(), ts)
if err != nil {
return nil, xerrors.Errorf("computing tipset state for GetActor: %w", err)
@@ -58,8 +99,8 @@ func (mpp *mpoolProvider) GetActorAfter(addr address.Address, ts *types.TipSet)
return st.GetActor(addr)
}
-func (mpp *mpoolProvider) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
- return mpp.sm.ResolveToKeyAddress(ctx, addr, ts)
+func (mpp *mpoolProvider) StateAccountKeyAtFinality(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
+ return mpp.sm.ResolveToKeyAddressAtFinality(ctx, addr, ts)
}
func (mpp *mpoolProvider) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) {
diff --git a/chain/messagepool/pruning.go b/chain/messagepool/pruning.go
index d0e53795ad6..c10239b8e4a 100644
--- a/chain/messagepool/pruning.go
+++ b/chain/messagepool/pruning.go
@@ -19,7 +19,8 @@ func (mp *MessagePool) pruneExcessMessages() error {
mp.lk.Lock()
defer mp.lk.Unlock()
- if mp.currentSize < mp.cfg.SizeLimitHigh {
+ mpCfg := mp.getConfig()
+ if mp.currentSize < mpCfg.SizeLimitHigh {
return nil
}
@@ -27,7 +28,7 @@ func (mp *MessagePool) pruneExcessMessages() error {
case <-mp.pruneCooldown:
err := mp.pruneMessages(context.TODO(), ts)
go func() {
- time.Sleep(mp.cfg.PruneCooldown)
+ time.Sleep(mpCfg.PruneCooldown)
mp.pruneCooldown <- struct{}{}
}()
return err
@@ -53,15 +54,21 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro
// protected actors -- not pruned
protected := make(map[address.Address]struct{})
+ mpCfg := mp.getConfig()
// we never prune priority addresses
- for _, actor := range mp.cfg.PriorityAddrs {
- protected[actor] = struct{}{}
+ for _, actor := range mpCfg.PriorityAddrs {
+ pk, err := mp.resolveToKey(ctx, actor)
+ if err != nil {
+ log.Debugf("pruneMessages failed to resolve priority address: %s", err)
+ }
+
+ protected[pk] = struct{}{}
}
// we also never prune locally published messages
- for actor := range mp.localAddrs {
+ mp.forEachLocal(ctx, func(ctx context.Context, actor address.Address) {
protected[actor] = struct{}{}
- }
+ })
// Collect all messages to track which ones to remove and create chains for block inclusion
pruneMsgs := make(map[cid.Cid]*types.SignedMessage, mp.currentSize)
@@ -90,7 +97,7 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro
})
// Keep messages (remove them from pruneMsgs) from chains while we are under the low water mark
- loWaterMark := mp.cfg.SizeLimitLow
+ loWaterMark := mpCfg.SizeLimitLow
keepLoop:
for _, chain := range chains {
for _, m := range chain.msgs {
@@ -106,7 +113,7 @@ keepLoop:
// and remove all messages that are still in pruneMsgs after processing the chains
log.Infof("Pruning %d messages", len(pruneMsgs))
for _, m := range pruneMsgs {
- mp.remove(m.Message.From, m.Message.Nonce, false)
+ mp.remove(ctx, m.Message.From, m.Message.Nonce, false)
}
return nil
diff --git a/chain/messagepool/repub.go b/chain/messagepool/repub.go
index cdd169e1d38..4323bdee197 100644
--- a/chain/messagepool/repub.go
+++ b/chain/messagepool/repub.go
@@ -18,7 +18,7 @@ const repubMsgLimit = 30
var RepublishBatchDelay = 100 * time.Millisecond
-func (mp *MessagePool) republishPendingMessages() error {
+func (mp *MessagePool) republishPendingMessages(ctx context.Context) error {
mp.curTsLk.Lock()
ts := mp.curTs
@@ -32,13 +32,18 @@ func (mp *MessagePool) republishPendingMessages() error {
pending := make(map[address.Address]map[uint64]*types.SignedMessage)
mp.lk.Lock()
mp.republished = nil // clear this to avoid races triggering an early republish
- for actor := range mp.localAddrs {
- mset, ok := mp.pending[actor]
+ mp.forEachLocal(ctx, func(ctx context.Context, actor address.Address) {
+ mset, ok, err := mp.getPendingMset(ctx, actor)
+ if err != nil {
+ log.Debugf("failed to get mset: %w", err)
+ return
+ }
+
if !ok {
- continue
+ return
}
if len(mset.msgs) == 0 {
- continue
+ return
}
// we need to copy this while holding the lock to avoid races with concurrent modification
pend := make(map[uint64]*types.SignedMessage, len(mset.msgs))
@@ -46,7 +51,8 @@ func (mp *MessagePool) republishPendingMessages() error {
pend[nonce] = m
}
pending[actor] = pend
- }
+ })
+
mp.lk.Unlock()
mp.curTsLk.Unlock()
@@ -100,7 +106,7 @@ loop:
// check the baseFee lower bound -- only republish messages that can be included in the chain
// within the next 20 blocks.
for _, m := range chain.msgs {
- if !allowNegativeChains(ts.Height()) && m.Message.GasFeeCap.LessThan(baseFeeLowerBound) {
+ if m.Message.GasFeeCap.LessThan(baseFeeLowerBound) {
chain.Invalidate()
continue loop
}
@@ -115,7 +121,7 @@ loop:
// we can't fit the current chain but there is gas to spare
// trim it and push it down
- chain.Trim(gasLimit, mp, baseFee, true)
+ chain.Trim(gasLimit, mp, baseFee)
for j := i; j < len(chains)-1; j++ {
if chains[j].Before(chains[j+1]) {
break
diff --git a/chain/messagepool/repub_test.go b/chain/messagepool/repub_test.go
index 8da64f97493..580231f7af5 100644
--- a/chain/messagepool/repub_test.go
+++ b/chain/messagepool/repub_test.go
@@ -56,7 +56,7 @@ func TestRepubMessages(t *testing.T) {
for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
- _, err := mp.Push(m)
+ _, err := mp.Push(context.TODO(), m)
if err != nil {
t.Fatal(err)
}
diff --git a/chain/messagepool/selection.go b/chain/messagepool/selection.go
index 4ade92a799c..611ab8e5fcd 100644
--- a/chain/messagepool/selection.go
+++ b/chain/messagepool/selection.go
@@ -10,7 +10,6 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-state-types/abi"
tbig "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
@@ -21,11 +20,7 @@ import (
var bigBlockGasLimit = big.NewInt(build.BlockGasLimit)
-// this is *temporary* mutilation until we have implemented uncapped miner penalties -- it will go
-// away in the next fork.
-func allowNegativeChains(epoch abi.ChainEpoch) bool {
- return epoch < build.UpgradeBreezeHeight+5
-}
+var MaxBlockMessages = 16000
const MaxBlocks = 15
@@ -43,7 +38,7 @@ type msgChain struct {
prev *msgChain
}
-func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) {
+func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) (msgs []*types.SignedMessage, err error) {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
@@ -54,13 +49,23 @@ func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) ([]*types.Si
// than any other block, then we don't bother with optimal selection because the
// first block will always have higher effective performance
if tq > 0.84 {
- return mp.selectMessagesGreedy(mp.curTs, ts)
+ msgs, err = mp.selectMessagesGreedy(ctx, mp.curTs, ts)
+ } else {
+ msgs, err = mp.selectMessagesOptimal(ctx, mp.curTs, ts, tq)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ if len(msgs) > MaxBlockMessages {
+ msgs = msgs[:MaxBlockMessages]
}
- return mp.selectMessagesOptimal(mp.curTs, ts, tq)
+ return msgs, nil
}
-func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) {
+func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) {
start := time.Now()
baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts)
@@ -86,7 +91,7 @@ func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64
// 0b. Select all priority messages that fit in the block
minGas := int64(gasguess.MinGas)
- result, gasLimit := mp.selectPriorityMessages(pending, baseFee, ts)
+ result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts)
// have we filled the block?
if gasLimit < minGas {
@@ -109,7 +114,7 @@ func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64
return chains[i].Before(chains[j])
})
- if !allowNegativeChains(curTs.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 {
+ if len(chains) != 0 && chains[0].gasPerf < 0 {
log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf)
return result, nil
}
@@ -162,7 +167,7 @@ func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64
last := len(chains)
for i, chain := range chains {
// did we run out of performing chains?
- if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
+ if chain.gasPerf < 0 {
break
}
@@ -228,7 +233,7 @@ tailLoop:
for gasLimit >= minGas && last < len(chains) {
// trim if necessary
if chains[last].gasLimit > gasLimit {
- chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(curTs.Height()))
+ chains[last].Trim(gasLimit, mp, baseFee)
}
// push down if it hasn't been invalidated
@@ -254,7 +259,7 @@ tailLoop:
}
// if gasPerf < 0 we have no more profitable chains
- if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
+ if chain.gasPerf < 0 {
break tailLoop
}
@@ -295,7 +300,7 @@ tailLoop:
}
// dependencies fit, just trim it
- chain.Trim(gasLimit-depGasLimit, mp, baseFee, allowNegativeChains(curTs.Height()))
+ chain.Trim(gasLimit-depGasLimit, mp, baseFee)
last += i
continue tailLoop
}
@@ -328,7 +333,7 @@ tailLoop:
}
// is it negative?
- if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
+ if chain.gasPerf < 0 {
continue
}
@@ -350,7 +355,7 @@ tailLoop:
// do they fit as is? if it doesn't, trim to make it fit if possible
if chainGasLimit > gasLimit {
- chain.Trim(gasLimit-depGasLimit, mp, baseFee, allowNegativeChains(curTs.Height()))
+ chain.Trim(gasLimit-depGasLimit, mp, baseFee)
if !chain.valid {
continue
@@ -384,7 +389,7 @@ tailLoop:
return result, nil
}
-func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.SignedMessage, error) {
+func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *types.TipSet) ([]*types.SignedMessage, error) {
start := time.Now()
baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts)
@@ -410,7 +415,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S
// 0b. Select all priority messages that fit in the block
minGas := int64(gasguess.MinGas)
- result, gasLimit := mp.selectPriorityMessages(pending, baseFee, ts)
+ result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts)
// have we filled the block?
if gasLimit < minGas {
@@ -433,7 +438,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S
return chains[i].Before(chains[j])
})
- if !allowNegativeChains(curTs.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 {
+ if len(chains) != 0 && chains[0].gasPerf < 0 {
log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf)
return result, nil
}
@@ -444,7 +449,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S
last := len(chains)
for i, chain := range chains {
// did we run out of performing chains?
- if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
+ if chain.gasPerf < 0 {
break
}
@@ -473,7 +478,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S
tailLoop:
for gasLimit >= minGas && last < len(chains) {
// trim
- chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(curTs.Height()))
+ chains[last].Trim(gasLimit, mp, baseFee)
// push down if it hasn't been invalidated
if chains[last].valid {
@@ -493,7 +498,7 @@ tailLoop:
}
// if gasPerf < 0 we have no more profitable chains
- if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
+ if chain.gasPerf < 0 {
break tailLoop
}
@@ -520,26 +525,32 @@ tailLoop:
return result, nil
}
-func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) {
+func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) {
start := time.Now()
defer func() {
if dt := time.Since(start); dt > time.Millisecond {
log.Infow("select priority messages done", "took", dt)
}
}()
-
- result := make([]*types.SignedMessage, 0, mp.cfg.SizeLimitLow)
+ mpCfg := mp.getConfig()
+ result := make([]*types.SignedMessage, 0, mpCfg.SizeLimitLow)
gasLimit := int64(build.BlockGasLimit)
minGas := int64(gasguess.MinGas)
// 1. Get priority actor chains
var chains []*msgChain
- priority := mp.cfg.PriorityAddrs
+ priority := mpCfg.PriorityAddrs
for _, actor := range priority {
- mset, ok := pending[actor]
+ pk, err := mp.resolveToKey(ctx, actor)
+ if err != nil {
+ log.Debugf("mpooladdlocal failed to resolve sender: %s", err)
+ return nil, gasLimit
+ }
+
+ mset, ok := pending[pk]
if ok {
// remove actor from pending set as we are already processed these messages
- delete(pending, actor)
+ delete(pending, pk)
// create chains for the priority actor
next := mp.createMessageChains(actor, mset, baseFee, ts)
chains = append(chains, next...)
@@ -555,7 +566,7 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
return chains[i].Before(chains[j])
})
- if !allowNegativeChains(ts.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 {
+ if len(chains) != 0 && chains[0].gasPerf < 0 {
log.Warnw("all priority messages in mpool have negative gas performance", "bestGasPerf", chains[0].gasPerf)
return nil, gasLimit
}
@@ -563,7 +574,7 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
// 3. Merge chains until the block limit, as long as they have non-negative gas performance
last := len(chains)
for i, chain := range chains {
- if !allowNegativeChains(ts.Height()) && chain.gasPerf < 0 {
+ if chain.gasPerf < 0 {
break
}
@@ -581,7 +592,7 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
tailLoop:
for gasLimit >= minGas && last < len(chains) {
// trim, discarding negative performing messages
- chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(ts.Height()))
+ chains[last].Trim(gasLimit, mp, baseFee)
// push down if it hasn't been invalidated
if chains[last].valid {
@@ -601,7 +612,7 @@ tailLoop:
}
// if gasPerf < 0 we have no more profitable chains
- if !allowNegativeChains(ts.Height()) && chain.gasPerf < 0 {
+ if chain.gasPerf < 0 {
break tailLoop
}
@@ -641,8 +652,7 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.
inSync = true
}
- // first add our current pending messages
- for a, mset := range mp.pending {
+ mp.forEachPending(func(a address.Address, mset *msgSet) {
if inSync {
// no need to copy the map
result[a] = mset.msgs
@@ -655,7 +665,7 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.
result[a] = msetCopy
}
- }
+ })
// we are in sync, that's the happy path
if inSync {
@@ -677,6 +687,10 @@ func (*MessagePool) getGasReward(msg *types.SignedMessage, baseFee types.BigInt)
}
gasReward := tbig.Mul(maxPremium, types.NewInt(uint64(msg.Message.GasLimit)))
+ if gasReward.Sign() == -1 {
+ // penalty multiplier
+ gasReward = tbig.Mul(gasReward, types.NewInt(3))
+ }
return gasReward.Int
}
@@ -710,7 +724,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
// the balance
a, err := mp.api.GetActorAfter(actor, ts)
if err != nil {
- log.Errorf("failed to load actor state, not building chain for %s: %w", actor, err)
+ log.Errorf("failed to load actor state, not building chain for %s: %v", actor, err)
return nil
}
@@ -749,14 +763,11 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
if balance.Cmp(required) < 0 {
break
}
+
balance = new(big.Int).Sub(balance, required)
value := m.Message.Value.Int
- if balance.Cmp(value) >= 0 {
- // Note: we only account for the value if the balance doesn't drop below 0
- // otherwise the message will fail and the miner can reap the gas rewards
- balance = new(big.Int).Sub(balance, value)
- }
+ balance = new(big.Int).Sub(balance, value)
gasReward := mp.getGasReward(m, baseFee)
rewards = append(rewards, gasReward)
@@ -859,9 +870,9 @@ func (mc *msgChain) Before(other *msgChain) bool {
(mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0)
}
-func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt, allowNegative bool) {
+func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt) {
i := len(mc.msgs) - 1
- for i >= 0 && (mc.gasLimit > gasLimit || (!allowNegative && mc.gasPerf < 0)) {
+ for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0) {
gasReward := mp.getGasReward(mc.msgs[i], baseFee)
mc.gasReward = new(big.Int).Sub(mc.gasReward, gasReward)
mc.gasLimit -= mc.msgs[i].Message.GasLimit
diff --git a/chain/messagepool/selection_test.go b/chain/messagepool/selection_test.go
index 08cf286c8d8..4634732298f 100644
--- a/chain/messagepool/selection_test.go
+++ b/chain/messagepool/selection_test.go
@@ -16,7 +16,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
- logging "github.com/ipfs/go-log"
+ logging "github.com/ipfs/go-log/v2"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
@@ -427,7 +427,7 @@ func TestBasicMessageSelection(t *testing.T) {
mustAdd(t, mp, m)
}
- msgs, err := mp.SelectMessages(ts, 1.0)
+ msgs, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil {
t.Fatal(err)
}
@@ -464,7 +464,7 @@ func TestBasicMessageSelection(t *testing.T) {
tma.applyBlock(t, block2)
// we should have no pending messages in the mpool
- pend, _ := mp.Pending()
+ pend, _ := mp.Pending(context.TODO())
if len(pend) != 0 {
t.Fatalf("expected no pending messages, but got %d", len(pend))
}
@@ -495,7 +495,7 @@ func TestBasicMessageSelection(t *testing.T) {
tma.setStateNonce(a1, 10)
tma.setStateNonce(a2, 10)
- msgs, err = mp.SelectMessages(ts3, 1.0)
+ msgs, err = mp.SelectMessages(context.Background(), ts3, 1.0)
if err != nil {
t.Fatal(err)
}
@@ -569,7 +569,7 @@ func TestMessageSelectionTrimming(t *testing.T) {
mustAdd(t, mp, m)
}
- msgs, err := mp.SelectMessages(ts, 1.0)
+ msgs, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil {
t.Fatal(err)
}
@@ -633,7 +633,7 @@ func TestPriorityMessageSelection(t *testing.T) {
mustAdd(t, mp, m)
}
- msgs, err := mp.SelectMessages(ts, 1.0)
+ msgs, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil {
t.Fatal(err)
}
@@ -712,7 +712,7 @@ func TestPriorityMessageSelection2(t *testing.T) {
mustAdd(t, mp, m)
}
- msgs, err := mp.SelectMessages(ts, 1.0)
+ msgs, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil {
t.Fatal(err)
}
@@ -736,8 +736,6 @@ func TestPriorityMessageSelection2(t *testing.T) {
}
func TestPriorityMessageSelection3(t *testing.T) {
- t.Skip("reenable after removing allow negative")
-
mp, tma := makeTestMpool()
// the actors
@@ -784,7 +782,7 @@ func TestPriorityMessageSelection3(t *testing.T) {
}
// test greedy selection
- msgs, err := mp.SelectMessages(ts, 1.0)
+ msgs, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil {
t.Fatal(err)
}
@@ -807,7 +805,7 @@ func TestPriorityMessageSelection3(t *testing.T) {
}
// test optimal selection
- msgs, err = mp.SelectMessages(ts, 0.1)
+ msgs, err = mp.SelectMessages(context.Background(), ts, 0.1)
if err != nil {
t.Fatal(err)
}
@@ -874,7 +872,7 @@ func TestOptimalMessageSelection1(t *testing.T) {
mustAdd(t, mp, m)
}
- msgs, err := mp.SelectMessages(ts, 0.25)
+ msgs, err := mp.SelectMessages(context.Background(), ts, 0.25)
if err != nil {
t.Fatal(err)
}
@@ -943,7 +941,7 @@ func TestOptimalMessageSelection2(t *testing.T) {
mustAdd(t, mp, m)
}
- msgs, err := mp.SelectMessages(ts, 0.1)
+ msgs, err := mp.SelectMessages(context.Background(), ts, 0.1)
if err != nil {
t.Fatal(err)
}
@@ -1022,7 +1020,7 @@ func TestOptimalMessageSelection3(t *testing.T) {
}
}
- msgs, err := mp.SelectMessages(ts, 0.1)
+ msgs, err := mp.SelectMessages(context.Background(), ts, 0.1)
if err != nil {
t.Fatal(err)
}
@@ -1110,7 +1108,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu
logging.SetLogLevel("messagepool", "error")
// 1. greedy selection
- greedyMsgs, err := mp.selectMessagesGreedy(ts, ts)
+ greedyMsgs, err := mp.selectMessagesGreedy(context.Background(), ts, ts)
if err != nil {
t.Fatal(err)
}
@@ -1139,7 +1137,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu
var bestMsgs []*types.SignedMessage
for j := 0; j < nMiners; j++ {
tq := rng.Float64()
- msgs, err := mp.SelectMessages(ts, tq)
+ msgs, err := mp.SelectMessages(context.Background(), ts, tq)
if err != nil {
t.Fatal(err)
}
@@ -1241,6 +1239,9 @@ func TestCompetitiveMessageSelectionExp(t *testing.T) {
}
func TestCompetitiveMessageSelectionZipf(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
var capacityBoost, rewardBoost, tqReward float64
seeds := []int64{1947, 1976, 2020, 2100, 10000, 143324, 432432, 131, 32, 45}
for _, seed := range seeds {
@@ -1268,9 +1269,9 @@ func TestGasReward(t *testing.T) {
GasReward int64
}{
{Premium: 100, FeeCap: 200, BaseFee: 100, GasReward: 100},
- {Premium: 100, FeeCap: 200, BaseFee: 210, GasReward: -10},
+ {Premium: 100, FeeCap: 200, BaseFee: 210, GasReward: -10 * 3},
{Premium: 200, FeeCap: 250, BaseFee: 210, GasReward: 40},
- {Premium: 200, FeeCap: 250, BaseFee: 2000, GasReward: -1750},
+ {Premium: 200, FeeCap: 250, BaseFee: 2000, GasReward: -1750 * 3},
}
mp := new(MessagePool)
@@ -1332,7 +1333,7 @@ readLoop:
}
actorMap := make(map[address.Address]address.Address)
- actorWallets := make(map[address.Address]api.WalletAPI)
+ actorWallets := make(map[address.Address]api.Wallet)
for _, m := range msgs {
baseNonce := baseNonces[m.Message.From]
@@ -1395,7 +1396,7 @@ readLoop:
minGasLimit := int64(0.9 * float64(build.BlockGasLimit))
// greedy first
- selected, err := mp.SelectMessages(ts, 1.0)
+ selected, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil {
t.Fatal(err)
}
@@ -1409,7 +1410,7 @@ readLoop:
}
// high quality ticket
- selected, err = mp.SelectMessages(ts, .8)
+ selected, err = mp.SelectMessages(context.Background(), ts, .8)
if err != nil {
t.Fatal(err)
}
@@ -1423,7 +1424,7 @@ readLoop:
}
// mid quality ticket
- selected, err = mp.SelectMessages(ts, .4)
+ selected, err = mp.SelectMessages(context.Background(), ts, .4)
if err != nil {
t.Fatal(err)
}
@@ -1437,7 +1438,7 @@ readLoop:
}
// low quality ticket
- selected, err = mp.SelectMessages(ts, .1)
+ selected, err = mp.SelectMessages(context.Background(), ts, .1)
if err != nil {
t.Fatal(err)
}
@@ -1451,7 +1452,7 @@ readLoop:
}
// very low quality ticket
- selected, err = mp.SelectMessages(ts, .01)
+ selected, err = mp.SelectMessages(context.Background(), ts, .01)
if err != nil {
t.Fatal(err)
}
diff --git a/chain/messagesigner/messagesigner.go b/chain/messagesigner/messagesigner.go
index 9b8f86b6459..063d1aa7d1a 100644
--- a/chain/messagesigner/messagesigner.go
+++ b/chain/messagesigner/messagesigner.go
@@ -23,19 +23,20 @@ const dsKeyActorNonce = "ActorNextNonce"
var log = logging.Logger("messagesigner")
type MpoolNonceAPI interface {
- GetNonce(address.Address) (uint64, error)
+ GetNonce(context.Context, address.Address, types.TipSetKey) (uint64, error)
+ GetActor(context.Context, address.Address, types.TipSetKey) (*types.Actor, error)
}
// MessageSigner keeps track of nonces per address, and increments the nonce
// when signing a message
type MessageSigner struct {
- wallet api.WalletAPI
+ wallet api.Wallet
lk sync.Mutex
mpool MpoolNonceAPI
ds datastore.Batching
}
-func NewMessageSigner(wallet api.WalletAPI, mpool MpoolNonceAPI, ds dtypes.MetadataDS) *MessageSigner {
+func NewMessageSigner(wallet api.Wallet, mpool MpoolNonceAPI, ds dtypes.MetadataDS) *MessageSigner {
ds = namespace.Wrap(ds, datastore.NewKey("/message-signer/"))
return &MessageSigner{
wallet: wallet,
@@ -51,7 +52,7 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb
defer ms.lk.Unlock()
// Get the next message nonce
- nonce, err := ms.nextNonce(msg.From)
+ nonce, err := ms.nextNonce(ctx, msg.From)
if err != nil {
return nil, xerrors.Errorf("failed to create nonce: %w", err)
}
@@ -92,12 +93,12 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb
// nextNonce gets the next nonce for the given address.
// If there is no nonce in the datastore, gets the nonce from the message pool.
-func (ms *MessageSigner) nextNonce(addr address.Address) (uint64, error) {
+func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (uint64, error) {
// Nonces used to be created by the mempool and we need to support nodes
// that have mempool nonces, so first check the mempool for a nonce for
// this address. Note that the mempool returns the actor state's nonce
// by default.
- nonce, err := ms.mpool.GetNonce(addr)
+ nonce, err := ms.mpool.GetNonce(ctx, addr, types.EmptyTSK)
if err != nil {
return 0, xerrors.Errorf("failed to get nonce from mempool: %w", err)
}
diff --git a/chain/messagesigner/messagesigner_test.go b/chain/messagesigner/messagesigner_test.go
index 5eebd36da5e..20d9af38bb1 100644
--- a/chain/messagesigner/messagesigner_test.go
+++ b/chain/messagesigner/messagesigner_test.go
@@ -24,6 +24,8 @@ type mockMpool struct {
nonces map[address.Address]uint64
}
+var _ MpoolNonceAPI = (*mockMpool)(nil)
+
func newMockMpool() *mockMpool {
return &mockMpool{nonces: make(map[address.Address]uint64)}
}
@@ -35,12 +37,15 @@ func (mp *mockMpool) setNonce(addr address.Address, nonce uint64) {
mp.nonces[addr] = nonce
}
-func (mp *mockMpool) GetNonce(addr address.Address) (uint64, error) {
+func (mp *mockMpool) GetNonce(_ context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) {
mp.lk.RLock()
defer mp.lk.RUnlock()
return mp.nonces[addr], nil
}
+func (mp *mockMpool) GetActor(_ context.Context, addr address.Address, _ types.TipSetKey) (*types.Actor, error) {
+ panic("don't use it")
+}
func TestMessageSignerSignMessage(t *testing.T) {
ctx := context.Background()
diff --git a/chain/state/statetree.go b/chain/state/statetree.go
index 7fa55b31c8d..8705aeff81b 100644
--- a/chain/state/statetree.go
+++ b/chain/state/statetree.go
@@ -14,12 +14,17 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
- "github.com/filecoin-project/lotus/chain/actors"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/types"
+
+ states0 "github.com/filecoin-project/specs-actors/actors/states"
+ states2 "github.com/filecoin-project/specs-actors/v2/actors/states"
+ states3 "github.com/filecoin-project/specs-actors/v3/actors/states"
+ states4 "github.com/filecoin-project/specs-actors/v4/actors/states"
+ states5 "github.com/filecoin-project/specs-actors/v5/actors/states"
)
var log = logging.Logger("statetree")
@@ -137,21 +142,20 @@ func (ss *stateSnaps) deleteActor(addr address.Address) {
// VersionForNetwork returns the state tree version for the given network
// version.
-func VersionForNetwork(ver network.Version) types.StateTreeVersion {
- if actors.VersionForNetwork(ver) == actors.Version0 {
- return types.StateTreeVersion0
- }
- return types.StateTreeVersion1
-}
-
-func adtForSTVersion(ver types.StateTreeVersion) actors.Version {
+func VersionForNetwork(ver network.Version) (types.StateTreeVersion, error) {
switch ver {
- case types.StateTreeVersion0:
- return actors.Version0
- case types.StateTreeVersion1:
- return actors.Version2
+ case network.Version0, network.Version1, network.Version2, network.Version3:
+ return types.StateTreeVersion0, nil
+ case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8, network.Version9:
+ return types.StateTreeVersion1, nil
+ case network.Version10, network.Version11:
+ return types.StateTreeVersion2, nil
+ case network.Version12:
+ return types.StateTreeVersion3, nil
+ case network.Version13:
+ return types.StateTreeVersion4, nil
default:
- panic("unhandled state tree version")
+ panic(fmt.Sprintf("unsupported network version %d", ver))
}
}
@@ -160,7 +164,7 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e
switch ver {
case types.StateTreeVersion0:
// info is undefined
- case types.StateTreeVersion1:
+ case types.StateTreeVersion1, types.StateTreeVersion2, types.StateTreeVersion3, types.StateTreeVersion4:
var err error
info, err = cst.Put(context.TODO(), new(types.StateInfo0))
if err != nil {
@@ -169,13 +173,46 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e
default:
return nil, xerrors.Errorf("unsupported state tree version: %d", ver)
}
- root, err := adt.NewMap(adt.WrapStore(context.TODO(), cst), adtForSTVersion(ver))
- if err != nil {
- return nil, err
+
+ store := adt.WrapStore(context.TODO(), cst)
+ var hamt adt.Map
+ switch ver {
+ case types.StateTreeVersion0:
+ tree, err := states0.NewTree(store)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create state tree: %w", err)
+ }
+ hamt = tree.Map
+ case types.StateTreeVersion1:
+ tree, err := states2.NewTree(store)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create state tree: %w", err)
+ }
+ hamt = tree.Map
+ case types.StateTreeVersion2:
+ tree, err := states3.NewTree(store)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create state tree: %w", err)
+ }
+ hamt = tree.Map
+ case types.StateTreeVersion3:
+ tree, err := states4.NewTree(store)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create state tree: %w", err)
+ }
+ hamt = tree.Map
+ case types.StateTreeVersion4:
+ tree, err := states5.NewTree(store)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create state tree: %w", err)
+ }
+ hamt = tree.Map
+ default:
+ return nil, xerrors.Errorf("unsupported state tree version: %d", ver)
}
s := &StateTree{
- root: root,
+ root: hamt,
info: info,
version: ver,
Store: cst,
@@ -194,30 +231,61 @@ func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) {
root.Version = types.StateTreeVersion0
}
+ store := adt.WrapStore(context.TODO(), cst)
+
+ var (
+ hamt adt.Map
+ err error
+ )
switch root.Version {
- case types.StateTreeVersion0, types.StateTreeVersion1:
- // Load the actual state-tree HAMT.
- nd, err := adt.AsMap(
- adt.WrapStore(context.TODO(), cst), root.Actors,
- adtForSTVersion(root.Version),
- )
- if err != nil {
- log.Errorf("loading hamt node %s failed: %s", c, err)
- return nil, err
+ case types.StateTreeVersion0:
+ var tree *states0.Tree
+ tree, err = states0.LoadTree(store, root.Actors)
+ if tree != nil {
+ hamt = tree.Map
}
-
- s := &StateTree{
- root: nd,
- info: root.Info,
- version: root.Version,
- Store: cst,
- snaps: newStateSnaps(),
+ case types.StateTreeVersion1:
+ var tree *states2.Tree
+ tree, err = states2.LoadTree(store, root.Actors)
+ if tree != nil {
+ hamt = tree.Map
+ }
+ case types.StateTreeVersion2:
+ var tree *states3.Tree
+ tree, err = states3.LoadTree(store, root.Actors)
+ if tree != nil {
+ hamt = tree.Map
+ }
+ case types.StateTreeVersion3:
+ var tree *states4.Tree
+ tree, err = states4.LoadTree(store, root.Actors)
+ if tree != nil {
+ hamt = tree.Map
+ }
+ case types.StateTreeVersion4:
+ var tree *states5.Tree
+ tree, err = states5.LoadTree(store, root.Actors)
+ if tree != nil {
+ hamt = tree.Map
}
- s.lookupIDFun = s.lookupIDinternal
- return s, nil
default:
return nil, xerrors.Errorf("unsupported state tree version: %d", root.Version)
}
+ if err != nil {
+ log.Errorf("failed to load state tree: %s", err)
+ return nil, xerrors.Errorf("failed to load state tree: %w", err)
+ }
+
+ s := &StateTree{
+ root: hamt,
+ info: root.Info,
+ version: root.Version,
+ Store: cst,
+ snaps: newStateSnaps(),
+ }
+ s.lookupIDFun = s.lookupIDinternal
+
+ return s, nil
}
func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error {
@@ -436,6 +504,26 @@ func (st *StateTree) MutateActor(addr address.Address, f func(*types.Actor) erro
}
func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error {
+ // Walk through layers, if any.
+ seen := make(map[address.Address]struct{})
+ for i := len(st.snaps.layers) - 1; i >= 0; i-- {
+ for addr, op := range st.snaps.layers[i].actors {
+ if _, ok := seen[addr]; ok {
+ continue
+ }
+ seen[addr] = struct{}{}
+ if op.Delete {
+ continue
+ }
+ act := op.Act // copy
+ if err := f(addr, &act); err != nil {
+ return err
+ }
+ }
+
+ }
+
+ // Now walk through the saved actors.
var act types.Actor
return st.root.ForEach(&act, func(k string) error {
act := act // copy
@@ -444,6 +532,12 @@ func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error
return xerrors.Errorf("invalid address (%x) found in state tree key: %w", []byte(k), err)
}
+ // no need to record anything here, there are no duplicates in the actors HAMT
+ // itself.
+ if _, ok := seen[addr]; ok {
+ return nil
+ }
+
return f(addr, &act)
})
}
@@ -453,7 +547,7 @@ func (st *StateTree) Version() types.StateTreeVersion {
return st.version
}
-func Diff(oldTree, newTree *StateTree) (map[string]types.Actor, error) {
+func Diff(ctx context.Context, oldTree, newTree *StateTree) (map[string]types.Actor, error) {
out := map[string]types.Actor{}
var (
@@ -461,33 +555,38 @@ func Diff(oldTree, newTree *StateTree) (map[string]types.Actor, error) {
buf = bytes.NewReader(nil)
)
if err := newTree.root.ForEach(&ncval, func(k string) error {
- var act types.Actor
-
- addr, err := address.NewFromBytes([]byte(k))
- if err != nil {
- return xerrors.Errorf("address in state tree was not valid: %w", err)
- }
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ var act types.Actor
+
+ addr, err := address.NewFromBytes([]byte(k))
+ if err != nil {
+ return xerrors.Errorf("address in state tree was not valid: %w", err)
+ }
- found, err := oldTree.root.Get(abi.AddrKey(addr), &ocval)
- if err != nil {
- return err
- }
+ found, err := oldTree.root.Get(abi.AddrKey(addr), &ocval)
+ if err != nil {
+ return err
+ }
- if found && bytes.Equal(ocval.Raw, ncval.Raw) {
- return nil // not changed
- }
+ if found && bytes.Equal(ocval.Raw, ncval.Raw) {
+ return nil // not changed
+ }
- buf.Reset(ncval.Raw)
- err = act.UnmarshalCBOR(buf)
- buf.Reset(nil)
+ buf.Reset(ncval.Raw)
+ err = act.UnmarshalCBOR(buf)
+ buf.Reset(nil)
- if err != nil {
- return err
- }
+ if err != nil {
+ return err
+ }
- out[addr.String()] = act
+ out[addr.String()] = act
- return nil
+ return nil
+ }
}); err != nil {
return nil, err
}
diff --git a/chain/state/statetree_test.go b/chain/state/statetree_test.go
index 91674337b88..9177af31219 100644
--- a/chain/state/statetree_test.go
+++ b/chain/state/statetree_test.go
@@ -5,11 +5,12 @@ import (
"fmt"
"testing"
+ "github.com/filecoin-project/go-state-types/network"
+
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
address "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-state-types/network"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
"github.com/filecoin-project/lotus/build"
@@ -45,7 +46,12 @@ func BenchmarkStateTreeSet(b *testing.B) {
func BenchmarkStateTreeSetFlush(b *testing.B) {
cst := cbor.NewMemCborStore()
- st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion))
+ sv, err := VersionForNetwork(build.NewestNetworkVersion)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ st, err := NewStateTree(cst, sv)
if err != nil {
b.Fatal(err)
}
@@ -75,7 +81,12 @@ func BenchmarkStateTreeSetFlush(b *testing.B) {
func TestResolveCache(t *testing.T) {
cst := cbor.NewMemCborStore()
- st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion))
+ sv, err := VersionForNetwork(build.NewestNetworkVersion)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ st, err := NewStateTree(cst, sv)
if err != nil {
t.Fatal(err)
}
@@ -172,7 +183,12 @@ func TestResolveCache(t *testing.T) {
func BenchmarkStateTree10kGetActor(b *testing.B) {
cst := cbor.NewMemCborStore()
- st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion))
+ sv, err := VersionForNetwork(build.NewestNetworkVersion)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ st, err := NewStateTree(cst, sv)
if err != nil {
b.Fatal(err)
}
@@ -214,7 +230,12 @@ func BenchmarkStateTree10kGetActor(b *testing.B) {
func TestSetCache(t *testing.T) {
cst := cbor.NewMemCborStore()
- st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion))
+ sv, err := VersionForNetwork(build.NewestNetworkVersion)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ st, err := NewStateTree(cst, sv)
if err != nil {
t.Fatal(err)
}
@@ -251,7 +272,13 @@ func TestSetCache(t *testing.T) {
func TestSnapshots(t *testing.T) {
ctx := context.Background()
cst := cbor.NewMemCborStore()
- st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion))
+
+ sv, err := VersionForNetwork(build.NewestNetworkVersion)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ st, err := NewStateTree(cst, sv)
if err != nil {
t.Fatal(err)
}
@@ -334,8 +361,15 @@ func assertNotHas(t *testing.T, st *StateTree, addr address.Address) {
func TestStateTreeConsistency(t *testing.T) {
cst := cbor.NewMemCborStore()
+
// TODO: ActorUpgrade: this test tests pre actors v2
- st, err := NewStateTree(cst, VersionForNetwork(network.Version3))
+
+ sv, err := VersionForNetwork(network.Version3)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ st, err := NewStateTree(cst, sv)
if err != nil {
t.Fatal(err)
}
diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go
index bb0f0e5ecd1..dc6da0f9c19 100644
--- a/chain/stmgr/call.go
+++ b/chain/stmgr/call.go
@@ -39,27 +39,31 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
}
bstate := ts.ParentState()
- bheight := ts.Height()
+ pts, err := sm.cs.LoadTipSet(ts.Parents())
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load parent tipset: %w", err)
+ }
+ pheight := pts.Height()
// If we have to run an expensive migration, and we're not at genesis,
// return an error because the migration will take too long.
//
// We allow this at height 0 for at-genesis migrations (for testing).
- if bheight-1 > 0 && sm.hasExpensiveFork(ctx, bheight-1) {
+ if pheight > 0 && sm.hasExpensiveFork(ctx, pheight) {
return nil, ErrExpensiveFork
}
// Run the (not expensive) migration.
- bstate, err := sm.handleStateForks(ctx, bstate, bheight-1, nil, ts)
+ bstate, err = sm.handleStateForks(ctx, bstate, pheight, nil, ts)
if err != nil {
return nil, fmt.Errorf("failed to handle fork: %w", err)
}
vmopt := &vm.VMOpts{
StateBase: bstate,
- Epoch: bheight,
+ Epoch: pheight + 1,
Rand: store.NewChainRand(sm.cs, ts.Cids()),
- Bstore: sm.cs.Blockstore(),
+ Bstore: sm.cs.StateBlockstore(),
Syscalls: sm.cs.VMSys(),
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
@@ -174,7 +178,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
StateBase: state,
Epoch: ts.Height() + 1,
Rand: r,
- Bstore: sm.cs.Blockstore(),
+ Bstore: sm.cs.StateBlockstore(),
Syscalls: sm.cs.VMSys(),
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
@@ -244,24 +248,18 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
var errHaltExecution = fmt.Errorf("halt")
func (sm *StateManager) Replay(ctx context.Context, ts *types.TipSet, mcid cid.Cid) (*types.Message, *vm.ApplyRet, error) {
- var outm *types.Message
- var outr *vm.ApplyRet
-
- _, _, err := sm.computeTipSetState(ctx, ts, func(c cid.Cid, m *types.Message, ret *vm.ApplyRet) error {
- if c == mcid {
- outm = m
- outr = ret
- return errHaltExecution
- }
- return nil
- })
- if err != nil && err != errHaltExecution {
+ var finder messageFinder
+ // message to find
+ finder.mcid = mcid
+
+ _, _, err := sm.computeTipSetState(ctx, ts, &finder)
+ if err != nil && !xerrors.Is(err, errHaltExecution) {
return nil, nil, xerrors.Errorf("unexpected error during execution: %w", err)
}
- if outr == nil {
+ if finder.outr == nil {
return nil, nil, xerrors.Errorf("given message not found in tipset")
}
- return outm, outr, nil
+ return finder.outm, finder.outr, nil
}
diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go
index e089a108467..bb87da44cf4 100644
--- a/chain/stmgr/forks.go
+++ b/chain/stmgr/forks.go
@@ -4,57 +4,127 @@ import (
"bytes"
"context"
"encoding/binary"
- "math"
+ "runtime"
+ "sort"
+ "sync"
+ "time"
- "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/specs-actors/v5/actors/migration/nv13"
+
+ "github.com/filecoin-project/go-state-types/rt"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
- "github.com/ipfs/go-cid"
- cbor "github.com/ipfs/go-ipld-cbor"
- "golang.org/x/xerrors"
-
- builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
- miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
- multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
- power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
- adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
-
- "github.com/filecoin-project/specs-actors/actors/migration/nv3"
- m2 "github.com/filecoin-project/specs-actors/v2/actors/migration"
-
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
- "github.com/filecoin-project/lotus/lib/bufbstore"
+ builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+ miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+ multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+ power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
+ "github.com/filecoin-project/specs-actors/actors/migration/nv3"
+ adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
+ "github.com/filecoin-project/specs-actors/v2/actors/migration/nv4"
+ "github.com/filecoin-project/specs-actors/v2/actors/migration/nv7"
+ "github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"
+ "github.com/filecoin-project/specs-actors/v4/actors/migration/nv12"
+ "github.com/ipfs/go-cid"
+ cbor "github.com/ipfs/go-ipld-cbor"
+ "golang.org/x/xerrors"
)
-// UpgradeFunc is a migration function run at every upgrade.
+// MigrationCache can be used to cache information used by a migration. This is primarily useful to
+// "pre-compute" some migration state ahead of time, and make it accessible in the migration itself.
+type MigrationCache interface {
+ Write(key string, value cid.Cid) error
+ Read(key string) (bool, cid.Cid, error)
+ Load(key string, loadFunc func() (cid.Cid, error)) (cid.Cid, error)
+}
+
+// MigrationFunc is a migration function run at every upgrade.
//
+// - The cache is a per-upgrade cache, pre-populated by pre-migrations.
// - The oldState is the state produced by the upgrade epoch.
// - The returned newState is the new state that will be used by the next epoch.
// - The height is the upgrade epoch height (already executed).
// - The tipset is the tipset for the last non-null block before the upgrade. Do
// not assume that ts.Height() is the upgrade height.
-type UpgradeFunc func(ctx context.Context, sm *StateManager, cb ExecCallback, oldState cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (newState cid.Cid, err error)
+type MigrationFunc func(
+ ctx context.Context,
+ sm *StateManager, cache MigrationCache,
+ cb ExecMonitor, oldState cid.Cid,
+ height abi.ChainEpoch, ts *types.TipSet,
+) (newState cid.Cid, err error)
+
+// PreMigrationFunc is a function run _before_ a network upgrade to pre-compute part of the network
+// upgrade and speed it up.
+type PreMigrationFunc func(
+ ctx context.Context,
+ sm *StateManager, cache MigrationCache,
+ oldState cid.Cid,
+ height abi.ChainEpoch, ts *types.TipSet,
+) error
+
+// PreMigration describes a pre-migration step to prepare for a network state upgrade. Pre-migrations
+// are optimizations, are not guaranteed to run, and may be canceled and/or run multiple times.
+type PreMigration struct {
+ // PreMigration is the pre-migration function to run at the specified time. This function is
+ // run asynchronously and must abort promptly when canceled.
+ PreMigration PreMigrationFunc
+
+ // StartWithin specifies that this pre-migration should be started at most StartWithin
+ // epochs before the upgrade.
+ StartWithin abi.ChainEpoch
+
+ // DontStartWithin specifies that this pre-migration should not be started DontStartWithin
+ // epochs before the final upgrade epoch.
+ //
+ // This should be set such that the pre-migration is likely to complete before StopWithin.
+ DontStartWithin abi.ChainEpoch
+
+ // StopWithin specifies that this pre-migration should be stopped StopWithin epochs of the
+ // final upgrade epoch.
+ StopWithin abi.ChainEpoch
+}
type Upgrade struct {
Height abi.ChainEpoch
Network network.Version
Expensive bool
- Migration UpgradeFunc
+ Migration MigrationFunc
+
+ // PreMigrations specifies a set of pre-migration functions to run at the indicated epochs.
+ // These functions should fill the given cache with information that can speed up the
+ // eventual full migration at the upgrade epoch.
+ PreMigrations []PreMigration
}
type UpgradeSchedule []Upgrade
+type migrationLogger struct{}
+
+func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{}) {
+ switch level {
+ case rt.DEBUG:
+ log.Debugf(msg, args...)
+ case rt.INFO:
+ log.Infof(msg, args...)
+ case rt.WARN:
+ log.Warnf(msg, args...)
+ case rt.ERROR:
+ log.Errorf(msg, args...)
+ }
+}
+
func DefaultUpgradeSchedule() UpgradeSchedule {
var us UpgradeSchedule
@@ -75,13 +145,14 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
Network: network.Version3,
Migration: UpgradeRefuel,
}, {
- Height: build.UpgradeActorsV2Height,
+ Height: build.UpgradeAssemblyHeight,
Network: network.Version4,
Expensive: true,
Migration: UpgradeActorsV2,
}, {
- Height: build.UpgradeTapeHeight,
- Network: network.Version5,
+ Height: build.UpgradeTapeHeight,
+ Network: network.Version5,
+ Migration: nil,
}, {
Height: build.UpgradeLiftoffHeight,
Network: network.Version5,
@@ -90,31 +161,70 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
Height: build.UpgradeKumquatHeight,
Network: network.Version6,
Migration: nil,
- }}
-
- if build.UpgradeActorsV2Height == math.MaxInt64 { // disable actors upgrade
- updates = []Upgrade{{
- Height: build.UpgradeBreezeHeight,
- Network: network.Version1,
- Migration: UpgradeFaucetBurnRecovery,
- }, {
- Height: build.UpgradeSmokeHeight,
- Network: network.Version2,
- Migration: nil,
+ }, {
+ Height: build.UpgradeCalicoHeight,
+ Network: network.Version7,
+ Migration: UpgradeCalico,
+ }, {
+ Height: build.UpgradePersianHeight,
+ Network: network.Version8,
+ Migration: nil,
+ }, {
+ Height: build.UpgradeOrangeHeight,
+ Network: network.Version9,
+ Migration: nil,
+ }, {
+ Height: build.UpgradeTrustHeight,
+ Network: network.Version10,
+ Migration: UpgradeActorsV3,
+ PreMigrations: []PreMigration{{
+ PreMigration: PreUpgradeActorsV3,
+ StartWithin: 120,
+ DontStartWithin: 60,
+ StopWithin: 35,
}, {
- Height: build.UpgradeIgnitionHeight,
- Network: network.Version3,
- Migration: UpgradeIgnition,
+ PreMigration: PreUpgradeActorsV3,
+ StartWithin: 30,
+ DontStartWithin: 15,
+ StopWithin: 5,
+ }},
+ Expensive: true,
+ }, {
+ Height: build.UpgradeNorwegianHeight,
+ Network: network.Version11,
+ Migration: nil,
+ }, {
+ Height: build.UpgradeTurboHeight,
+ Network: network.Version12,
+ Migration: UpgradeActorsV4,
+ PreMigrations: []PreMigration{{
+ PreMigration: PreUpgradeActorsV4,
+ StartWithin: 120,
+ DontStartWithin: 60,
+ StopWithin: 35,
}, {
- Height: build.UpgradeRefuelHeight,
- Network: network.Version3,
- Migration: UpgradeRefuel,
+ PreMigration: PreUpgradeActorsV4,
+ StartWithin: 30,
+ DontStartWithin: 15,
+ StopWithin: 5,
+ }},
+ Expensive: true,
+ }, {
+ Height: build.UpgradeHyperdriveHeight,
+ Network: network.Version13,
+ Migration: UpgradeActorsV5,
+ PreMigrations: []PreMigration{{
+ PreMigration: PreUpgradeActorsV5,
+ StartWithin: 120,
+ DontStartWithin: 60,
+ StopWithin: 35,
}, {
- Height: build.UpgradeLiftoffHeight,
- Network: network.Version3,
- Migration: UpgradeLiftoff,
- }}
- }
+ PreMigration: PreUpgradeActorsV5,
+ StartWithin: 30,
+ DontStartWithin: 15,
+ StopWithin: 5,
+ }},
+ Expensive: true}}
for _, u := range updates {
if u.Height < 0 {
@@ -127,14 +237,43 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
}
func (us UpgradeSchedule) Validate() error {
- // Make sure we're not trying to upgrade to version 0.
+ // Make sure each upgrade is valid.
for _, u := range us {
if u.Network <= 0 {
return xerrors.Errorf("cannot upgrade to version <= 0: %d", u.Network)
}
+
+ for _, m := range u.PreMigrations {
+ if m.StartWithin <= 0 {
+ return xerrors.Errorf("pre-migration must specify a positive start-within epoch")
+ }
+
+ if m.DontStartWithin < 0 || m.StopWithin < 0 {
+ return xerrors.Errorf("pre-migration must specify non-negative epochs")
+ }
+
+ if m.StartWithin <= m.StopWithin {
+ return xerrors.Errorf("pre-migration start-within must come before stop-within")
+ }
+
+ // If we have a dont-start-within.
+ if m.DontStartWithin != 0 {
+ if m.DontStartWithin < m.StopWithin {
+ return xerrors.Errorf("pre-migration dont-start-within must come before stop-within")
+ }
+ if m.StartWithin <= m.DontStartWithin {
+ return xerrors.Errorf("pre-migration start-within must come after dont-start-within")
+ }
+ }
+ }
+ if !sort.SliceIsSorted(u.PreMigrations, func(i, j int) bool {
+ return u.PreMigrations[i].StartWithin > u.PreMigrations[j].StartWithin //nolint:scopelint,gosec
+ }) {
+ return xerrors.Errorf("pre-migrations must be sorted by start epoch")
+ }
}
- // Make sure all the upgrades make sense.
+ // Make sure the upgrade order makes sense.
for i := 1; i < len(us); i++ {
prev := &us[i-1]
curr := &us[i]
@@ -153,15 +292,31 @@ func (us UpgradeSchedule) Validate() error {
return nil
}
-func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecCallback, ts *types.TipSet) (cid.Cid, error) {
+func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) {
retCid := root
var err error
- f, ok := sm.stateMigrations[height]
- if ok {
- retCid, err = f(ctx, sm, cb, root, height, ts)
+ u := sm.stateMigrations[height]
+ if u != nil && u.upgrade != nil {
+ startTime := time.Now()
+ log.Warnw("STARTING migration", "height", height, "from", root)
+ // Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. We may
+ // have to migrate multiple times.
+ tmpCache := u.cache.Clone()
+ retCid, err = u.upgrade(ctx, sm, tmpCache, cb, root, height, ts)
if err != nil {
+ log.Errorw("FAILED migration", "height", height, "from", root, "error", err)
return cid.Undef, err
}
+ // Yes, we update the cache, even for the final upgrade epoch. Why? Reverts. This
+ // can save us a _lot_ of time because very few actors will have changed if we
+ // do a small revert then need to re-run the migration.
+ u.cache.Update(tmpCache)
+ log.Warnw("COMPLETED migration",
+ "height", height,
+ "from", root,
+ "to", retCid,
+ "duration", time.Since(startTime),
+ )
}
return retCid, nil
@@ -172,6 +327,109 @@ func (sm *StateManager) hasExpensiveFork(ctx context.Context, height abi.ChainEp
return ok
}
+func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv10.MemMigrationCache, ts *types.TipSet) {
+ height := ts.Height()
+ parent := ts.ParentState()
+
+ startTime := time.Now()
+
+ log.Warn("STARTING pre-migration")
+ // Clone the cache so we don't actually _update_ it
+ // till we're done. Otherwise, if we fail, the next
+ // migration to use the cache may assume that
+ // certain blocks exist, even if they don't.
+ tmpCache := cache.Clone()
+ err := fn(ctx, sm, tmpCache, parent, height, ts)
+ if err != nil {
+ log.Errorw("FAILED pre-migration", "error", err)
+ return
+ }
+ // Finally, if everything worked, update the cache.
+ cache.Update(tmpCache)
+ log.Warnw("COMPLETED pre-migration", "duration", time.Since(startTime))
+}
+
+func (sm *StateManager) preMigrationWorker(ctx context.Context) {
+ defer close(sm.shutdown)
+
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ type op struct {
+ after abi.ChainEpoch
+ notAfter abi.ChainEpoch
+ run func(ts *types.TipSet)
+ }
+
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ // Turn each pre-migration into an operation in a schedule.
+ var schedule []op
+ for upgradeEpoch, migration := range sm.stateMigrations {
+ cache := migration.cache
+ for _, prem := range migration.preMigrations {
+ preCtx, preCancel := context.WithCancel(ctx)
+ migrationFunc := prem.PreMigration
+
+ afterEpoch := upgradeEpoch - prem.StartWithin
+ notAfterEpoch := upgradeEpoch - prem.DontStartWithin
+ stopEpoch := upgradeEpoch - prem.StopWithin
+ // We can't start after we stop.
+ if notAfterEpoch > stopEpoch {
+ notAfterEpoch = stopEpoch - 1
+ }
+
+ // Add an op to start a pre-migration.
+ schedule = append(schedule, op{
+ after: afterEpoch,
+ notAfter: notAfterEpoch,
+
+ // TODO: are these values correct?
+ run: func(ts *types.TipSet) {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ runPreMigration(preCtx, sm, migrationFunc, cache, ts)
+ }()
+ },
+ })
+
+ // Add an op to cancel the pre-migration if it's still running.
+ schedule = append(schedule, op{
+ after: stopEpoch,
+ notAfter: -1,
+ run: func(ts *types.TipSet) { preCancel() },
+ })
+ }
+ }
+
+ // Then sort by epoch.
+ sort.Slice(schedule, func(i, j int) bool {
+ return schedule[i].after < schedule[j].after
+ })
+
+ // Finally, when the head changes, see if there's anything we need to do.
+ //
+ // We're intentionally ignoring reorgs as they don't matter for our purposes.
+ for change := range sm.cs.SubHeadChanges(ctx) {
+ for _, head := range change {
+ for len(schedule) > 0 {
+ op := &schedule[0]
+ if head.Val.Height() < op.after {
+ break
+ }
+
+ // If we haven't passed the pre-migration height...
+ if op.notAfter < 0 || head.Val.Height() < op.notAfter {
+ op.run(head.Val)
+ }
+ schedule = schedule[1:]
+ }
+ }
+ }
+}
+
func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmount, cb func(trace types.ExecutionTrace)) error {
fromAct, err := tree.GetActor(from)
if err != nil {
@@ -201,20 +459,9 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo
if cb != nil {
// record the transfer in execution traces
- fakeMsg := &types.Message{
- From: from,
- To: to,
- Value: amt,
- }
- fakeRct := &types.MessageReceipt{
- ExitCode: 0,
- Return: nil,
- GasUsed: 0,
- }
-
cb(types.ExecutionTrace{
- Msg: fakeMsg,
- MsgRct: fakeRct,
+ Msg: makeFakeMsg(from, to, amt, 0),
+ MsgRct: makeFakeRct(),
Error: "",
Duration: 0,
GasCharges: nil,
@@ -225,7 +472,7 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo
return nil
}
-func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, em ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Some initial parameters
FundsForMiners := types.FromFil(1_000_000)
LookbackEpoch := abi.ChainEpoch(32000)
@@ -295,7 +542,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal
}
case builtin0.StorageMinerActorCodeID:
var st miner0.State
- if err := sm.ChainStore().Store(ctx).Get(ctx, act.Head, &st); err != nil {
+ if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil {
return xerrors.Errorf("failed to load miner state: %w", err)
}
@@ -339,7 +586,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal
return cid.Undef, xerrors.Errorf("failed to load power actor: %w", err)
}
- cst := cbor.NewCborStore(sm.ChainStore().Blockstore())
+ cst := cbor.NewCborStore(sm.ChainStore().StateBlockstore())
if err := cst.Get(ctx, powAct.Head, &ps); err != nil {
return cid.Undef, xerrors.Errorf("failed to get power actor state: %w", err)
}
@@ -373,7 +620,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal
}
case builtin0.StorageMinerActorCodeID:
var st miner0.State
- if err := sm.ChainStore().Store(ctx).Get(ctx, act.Head, &st); err != nil {
+ if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil {
return xerrors.Errorf("failed to load miner state: %w", err)
}
@@ -382,7 +629,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal
return xerrors.Errorf("failed to get miner info: %w", err)
}
- sectorsArr, err := adt0.AsArray(sm.ChainStore().Store(ctx), st.Sectors)
+ sectorsArr, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), st.Sectors)
if err != nil {
return xerrors.Errorf("failed to load sectors array: %w", err)
}
@@ -402,11 +649,11 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal
lbact, err := lbtree.GetActor(addr)
if err == nil {
var lbst miner0.State
- if err := sm.ChainStore().Store(ctx).Get(ctx, lbact.Head, &lbst); err != nil {
+ if err := sm.ChainStore().ActorStore(ctx).Get(ctx, lbact.Head, &lbst); err != nil {
return xerrors.Errorf("failed to load miner state: %w", err)
}
- lbsectors, err := adt0.AsArray(sm.ChainStore().Store(ctx), lbst.Sectors)
+ lbsectors, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), lbst.Sectors)
if err != nil {
return xerrors.Errorf("failed to load lb sectors array: %w", err)
}
@@ -475,27 +722,17 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal
return cid.Undef, xerrors.Errorf("resultant state tree account balance was not correct: %s", total)
}
- if cb != nil {
+ if em != nil {
// record the transfer in execution traces
- fakeMsg := &types.Message{
- From: builtin.SystemActorAddr,
- To: builtin.SystemActorAddr,
- Value: big.Zero(),
- Nonce: uint64(epoch),
- }
- fakeRct := &types.MessageReceipt{
- ExitCode: 0,
- Return: nil,
- GasUsed: 0,
- }
+ fakeMsg := makeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch))
- if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
- MessageReceipt: *fakeRct,
+ if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
+ MessageReceipt: *makeFakeRct(),
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{
Msg: fakeMsg,
- MsgRct: fakeRct,
+ MsgRct: makeFakeRct(),
Error: "",
Duration: 0,
GasCharges: nil,
@@ -503,7 +740,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal
},
Duration: 0,
GasCosts: nil,
- }); err != nil {
+ }, false); err != nil {
return cid.Undef, xerrors.Errorf("recording transfers: %w", err)
}
}
@@ -511,8 +748,8 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal
return tree.Flush(ctx)
}
-func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
- store := sm.cs.Store(ctx)
+func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+ store := sm.cs.ActorStore(ctx)
if build.UpgradeLiftoffHeight <= epoch {
return cid.Undef, xerrors.Errorf("liftoff height must be beyond ignition height")
@@ -548,12 +785,12 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, roo
return cid.Undef, xerrors.Errorf("resetting genesis msig start epochs: %w", err)
}
- err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch)
+ err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch, ts)
if err != nil {
return cid.Undef, xerrors.Errorf("splitting first msig: %w", err)
}
- err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch)
+ err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch, ts)
if err != nil {
return cid.Undef, xerrors.Errorf("splitting second msig: %w", err)
}
@@ -566,9 +803,9 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, roo
return tree.Flush(ctx)
}
-func UpgradeRefuel(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
- store := sm.cs.Store(ctx)
+ store := sm.cs.ActorStore(ctx)
tree, err := sm.StateTree(root)
if err != nil {
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
@@ -592,8 +829,8 @@ func UpgradeRefuel(ctx context.Context, sm *StateManager, cb ExecCallback, root
return tree.Flush(ctx)
}
-func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
- buf := bufbstore.NewTieredBstore(sm.cs.Blockstore(), bstore.NewTemporarySync())
+func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+ buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
info, err := store.Put(ctx, new(types.StateInfo0))
@@ -601,7 +838,7 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, roo
return cid.Undef, xerrors.Errorf("failed to create new state info for actors v2: %w", err)
}
- newHamtRoot, err := m2.MigrateStateTree(ctx, store, root, epoch, m2.DefaultConfig())
+ newHamtRoot, err := nv4.MigrateStateTree(ctx, store, root, epoch, nv4.DefaultConfig())
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err)
}
@@ -638,13 +875,13 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, roo
return newRoot, nil
}
-func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
tree, err := sm.StateTree(root)
if err != nil {
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
}
- err = setNetworkName(ctx, sm.cs.Store(ctx), tree, "mainnet")
+ err = setNetworkName(ctx, sm.cs.ActorStore(ctx), tree, "mainnet")
if err != nil {
return cid.Undef, xerrors.Errorf("setting network name: %w", err)
}
@@ -652,6 +889,386 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root
return tree.Flush(ctx)
}
+func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+ if build.BuildType != build.BuildMainnet {
+ return root, nil
+ }
+
+ store := sm.cs.ActorStore(ctx)
+ var stateRoot types.StateRoot
+ if err := store.Get(ctx, root, &stateRoot); err != nil {
+ return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
+ }
+
+ if stateRoot.Version != types.StateTreeVersion1 {
+ return cid.Undef, xerrors.Errorf(
+ "expected state root version 1 for calico upgrade, got %d",
+ stateRoot.Version,
+ )
+ }
+
+ newHamtRoot, err := nv7.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, nv7.DefaultConfig())
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("running nv7 migration: %w", err)
+ }
+
+ newRoot, err := store.Put(ctx, &types.StateRoot{
+ Version: stateRoot.Version,
+ Actors: newHamtRoot,
+ Info: stateRoot.Info,
+ })
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
+ }
+
+ // perform some basic sanity checks to make sure everything still works.
+ if newSm, err := state.LoadStateTree(store, newRoot); err != nil {
+ return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err)
+ } else if newRoot2, err := newSm.Flush(ctx); err != nil {
+ return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err)
+ } else if newRoot2 != newRoot {
+ return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2)
+ } else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil {
+ return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err)
+ }
+
+ return newRoot, nil
+}
+
+func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Address, em ExecMonitor, epoch abi.ChainEpoch, ts *types.TipSet) error {
+ a, err := tree.GetActor(addr)
+ if xerrors.Is(err, types.ErrActorNotFound) {
+ return types.ErrActorNotFound
+ } else if err != nil {
+ return xerrors.Errorf("failed to get actor to delete: %w", err)
+ }
+
+ var trace types.ExecutionTrace
+ if err := doTransfer(tree, addr, builtin.BurntFundsActorAddr, a.Balance, func(t types.ExecutionTrace) {
+ trace = t
+ }); err != nil {
+ return xerrors.Errorf("transferring terminated actor's balance: %w", err)
+ }
+
+ if em != nil {
+ // record the transfer in execution traces
+
+ fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
+
+ if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
+ MessageReceipt: *makeFakeRct(),
+ ActorErr: nil,
+ ExecutionTrace: trace,
+ Duration: 0,
+ GasCosts: nil,
+ }, false); err != nil {
+ return xerrors.Errorf("recording transfers: %w", err)
+ }
+ }
+
+ err = tree.DeleteActor(addr)
+ if err != nil {
+ return xerrors.Errorf("deleting actor from tree: %w", err)
+ }
+
+ ia, err := tree.GetActor(init_.Address)
+ if err != nil {
+ return xerrors.Errorf("loading init actor: %w", err)
+ }
+
+ ias, err := init_.Load(&state.AdtStore{IpldStore: tree.Store}, ia)
+ if err != nil {
+ return xerrors.Errorf("loading init actor state: %w", err)
+ }
+
+ if err := ias.Remove(addr); err != nil {
+ return xerrors.Errorf("deleting entry from address map: %w", err)
+ }
+
+ nih, err := tree.Store.Put(ctx, ias)
+ if err != nil {
+ return xerrors.Errorf("writing new init actor state: %w", err)
+ }
+
+ ia.Head = nih
+
+ return tree.SetActor(init_.Address, ia)
+}
+
+func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+ // Use all the CPUs except 3.
+ workerCount := runtime.NumCPU() - 3
+ if workerCount <= 0 {
+ workerCount = 1
+ }
+
+ config := nv10.Config{
+ MaxWorkers: uint(workerCount),
+ JobQueueSize: 1000,
+ ResultQueueSize: 100,
+ ProgressLogPeriod: 10 * time.Second,
+ }
+ newRoot, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("migrating actors v3 state: %w", err)
+ }
+
+ tree, err := sm.StateTree(newRoot)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
+ }
+
+ if build.BuildType == build.BuildMainnet {
+ err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts)
+ if err != nil && !xerrors.Is(err, types.ErrActorNotFound) {
+ return cid.Undef, xerrors.Errorf("deleting zero bls actor: %w", err)
+ }
+
+ newRoot, err = tree.Flush(ctx)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("flushing state tree: %w", err)
+ }
+ }
+
+ return newRoot, nil
+}
+
+func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
+ // Use half the CPUs for pre-migration, but leave at least 3.
+ workerCount := runtime.NumCPU()
+ if workerCount <= 4 {
+ workerCount = 1
+ } else {
+ workerCount /= 2
+ }
+ config := nv10.Config{MaxWorkers: uint(workerCount)}
+ _, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config)
+ return err
+}
+
+func upgradeActorsV3Common(
+ ctx context.Context, sm *StateManager, cache MigrationCache,
+ root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
+ config nv10.Config,
+) (cid.Cid, error) {
+ buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
+ store := store.ActorStore(ctx, buf)
+
+ // Load the state root.
+ var stateRoot types.StateRoot
+ if err := store.Get(ctx, root, &stateRoot); err != nil {
+ return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
+ }
+
+ if stateRoot.Version != types.StateTreeVersion1 {
+ return cid.Undef, xerrors.Errorf(
+ "expected state root version 1 for actors v3 upgrade, got %d",
+ stateRoot.Version,
+ )
+ }
+
+ // Perform the migration
+ newHamtRoot, err := nv10.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("upgrading to actors v3: %w", err)
+ }
+
+ // Persist the result.
+ newRoot, err := store.Put(ctx, &types.StateRoot{
+ Version: types.StateTreeVersion2,
+ Actors: newHamtRoot,
+ Info: stateRoot.Info,
+ })
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
+ }
+
+ // Persist the new tree.
+
+ {
+ from := buf
+ to := buf.Read()
+
+ if err := vm.Copy(ctx, from, to, newRoot); err != nil {
+ return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
+ }
+ }
+
+ return newRoot, nil
+}
+
+func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+ // Use all the CPUs except 3.
+ workerCount := runtime.NumCPU() - 3
+ if workerCount <= 0 {
+ workerCount = 1
+ }
+
+ config := nv12.Config{
+ MaxWorkers: uint(workerCount),
+ JobQueueSize: 1000,
+ ResultQueueSize: 100,
+ ProgressLogPeriod: 10 * time.Second,
+ }
+
+ newRoot, err := upgradeActorsV4Common(ctx, sm, cache, root, epoch, ts, config)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("migrating actors v4 state: %w", err)
+ }
+
+ return newRoot, nil
+}
+
+func PreUpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
+ // Use half the CPUs for pre-migration, but leave at least 3.
+ workerCount := runtime.NumCPU()
+ if workerCount <= 4 {
+ workerCount = 1
+ } else {
+ workerCount /= 2
+ }
+ config := nv12.Config{MaxWorkers: uint(workerCount)}
+ _, err := upgradeActorsV4Common(ctx, sm, cache, root, epoch, ts, config)
+ return err
+}
+
+func upgradeActorsV4Common(
+ ctx context.Context, sm *StateManager, cache MigrationCache,
+ root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
+ config nv12.Config,
+) (cid.Cid, error) {
+ buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
+ store := store.ActorStore(ctx, buf)
+
+ // Load the state root.
+ var stateRoot types.StateRoot
+ if err := store.Get(ctx, root, &stateRoot); err != nil {
+ return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
+ }
+
+ if stateRoot.Version != types.StateTreeVersion2 {
+ return cid.Undef, xerrors.Errorf(
+ "expected state root version 2 for actors v4 upgrade, got %d",
+ stateRoot.Version,
+ )
+ }
+
+ // Perform the migration
+ newHamtRoot, err := nv12.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("upgrading to actors v4: %w", err)
+ }
+
+ // Persist the result.
+ newRoot, err := store.Put(ctx, &types.StateRoot{
+ Version: types.StateTreeVersion3,
+ Actors: newHamtRoot,
+ Info: stateRoot.Info,
+ })
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
+ }
+
+ // Persist the new tree.
+
+ {
+ from := buf
+ to := buf.Read()
+
+ if err := vm.Copy(ctx, from, to, newRoot); err != nil {
+ return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
+ }
+ }
+
+ return newRoot, nil
+}
+
+func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+ // Use all the CPUs except 3.
+ workerCount := runtime.NumCPU() - 3
+ if workerCount <= 0 {
+ workerCount = 1
+ }
+
+ config := nv13.Config{
+ MaxWorkers: uint(workerCount),
+ JobQueueSize: 1000,
+ ResultQueueSize: 100,
+ ProgressLogPeriod: 10 * time.Second,
+ }
+
+ newRoot, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("migrating actors v5 state: %w", err)
+ }
+
+ return newRoot, nil
+}
+
+func PreUpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
+ // Use half the CPUs for pre-migration, but leave at least 3.
+ workerCount := runtime.NumCPU()
+ if workerCount <= 4 {
+ workerCount = 1
+ } else {
+ workerCount /= 2
+ }
+ config := nv13.Config{MaxWorkers: uint(workerCount)}
+ _, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config)
+ return err
+}
+
+func upgradeActorsV5Common(
+ ctx context.Context, sm *StateManager, cache MigrationCache,
+ root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
+ config nv13.Config,
+) (cid.Cid, error) {
+ buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
+ store := store.ActorStore(ctx, buf)
+
+ // Load the state root.
+ var stateRoot types.StateRoot
+ if err := store.Get(ctx, root, &stateRoot); err != nil {
+ return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
+ }
+
+ if stateRoot.Version != types.StateTreeVersion3 {
+ return cid.Undef, xerrors.Errorf(
+ "expected state root version 3 for actors v5 upgrade, got %d",
+ stateRoot.Version,
+ )
+ }
+
+ // Perform the migration
+ newHamtRoot, err := nv13.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err)
+ }
+
+ // Persist the result.
+ newRoot, err := store.Put(ctx, &types.StateRoot{
+ Version: types.StateTreeVersion4,
+ Actors: newHamtRoot,
+ Info: stateRoot.Info,
+ })
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
+ }
+
+ // Persist the new tree.
+
+ {
+ from := buf
+ to := buf.Read()
+
+ if err := vm.Copy(ctx, from, to, newRoot); err != nil {
+ return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
+ }
+ }
+
+ return newRoot, nil
+}
+
func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error {
ia, err := tree.GetActor(builtin0.InitActorAddr)
if err != nil {
@@ -679,7 +1296,7 @@ func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree,
return nil
}
-func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch) error {
+func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error {
if portions < 1 {
return xerrors.Errorf("cannot split into 0 portions")
}
@@ -776,27 +1393,17 @@ func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Ad
i++
}
- if cb != nil {
+ if em != nil {
// record the transfer in execution traces
- fakeMsg := &types.Message{
- From: builtin.SystemActorAddr,
- To: addr,
- Value: big.Zero(),
- Nonce: uint64(epoch),
- }
- fakeRct := &types.MessageReceipt{
- ExitCode: 0,
- Return: nil,
- GasUsed: 0,
- }
+ fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
- if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
- MessageReceipt: *fakeRct,
+ if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
+ MessageReceipt: *makeFakeRct(),
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{
Msg: fakeMsg,
- MsgRct: fakeRct,
+ MsgRct: makeFakeRct(),
Error: "",
Duration: 0,
GasCharges: nil,
@@ -804,7 +1411,7 @@ func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Ad
},
Duration: 0,
GasCosts: nil,
- }); err != nil {
+ }, false); err != nil {
return xerrors.Errorf("recording transfers: %w", err)
}
}
@@ -846,7 +1453,7 @@ func resetGenesisMsigs0(ctx context.Context, sm *StateManager, store adt0.Store,
return xerrors.Errorf("getting genesis tipset: %w", err)
}
- cst := cbor.NewCborStore(sm.cs.Blockstore())
+ cst := cbor.NewCborStore(sm.cs.StateBlockstore())
genesisTree, err := state.LoadStateTree(cst, gts.ParentState())
if err != nil {
return xerrors.Errorf("loading state tree: %w", err)
@@ -915,3 +1522,20 @@ func resetMultisigVesting0(ctx context.Context, store adt0.Store, tree *state.St
return nil
}
+
+func makeFakeMsg(from address.Address, to address.Address, amt abi.TokenAmount, nonce uint64) *types.Message {
+ return &types.Message{
+ From: from,
+ To: to,
+ Value: amt,
+ Nonce: nonce,
+ }
+}
+
+func makeFakeRct() *types.MessageReceipt {
+ return &types.MessageReceipt{
+ ExitCode: 0,
+ Return: nil,
+ GasUsed: 0,
+ }
+}
diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go
index a2b7a179fba..6d22a294d71 100644
--- a/chain/stmgr/forks_test.go
+++ b/chain/stmgr/forks_test.go
@@ -4,11 +4,12 @@ import (
"context"
"fmt"
"io"
+ "sync"
"testing"
"github.com/ipfs/go-cid"
ipldcbor "github.com/ipfs/go-ipld-cbor"
- logging "github.com/ipfs/go-log"
+ logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
@@ -122,9 +123,9 @@ func TestForkHeightTriggers(t *testing.T) {
cg.ChainStore(), UpgradeSchedule{{
Network: 1,
Height: testForkHeight,
- Migration: func(ctx context.Context, sm *StateManager, cb ExecCallback,
+ Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
- cst := ipldcbor.NewCborStore(sm.ChainStore().Blockstore())
+ cst := ipldcbor.NewCborStore(sm.ChainStore().StateBlockstore())
st, err := sm.StateTree(root)
if err != nil {
@@ -252,7 +253,7 @@ func TestForkRefuseCall(t *testing.T) {
Network: 1,
Expensive: true,
Height: testForkHeight,
- Migration: func(ctx context.Context, sm *StateManager, cb ExecCallback,
+ Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
return root, nil
}}})
@@ -296,24 +297,191 @@ func TestForkRefuseCall(t *testing.T) {
t.Fatal(err)
}
+ pts, err := cg.ChainStore().LoadTipSet(ts.TipSet.TipSet().Parents())
+ require.NoError(t, err)
+ parentHeight := pts.Height()
+ currentHeight := ts.TipSet.TipSet().Height()
+
+ // CallWithGas calls _at_ the current tipset.
ret, err := sm.CallWithGas(ctx, m, nil, ts.TipSet.TipSet())
- switch ts.TipSet.TipSet().Height() {
- case testForkHeight, testForkHeight + 1:
+ if parentHeight <= testForkHeight && currentHeight >= testForkHeight {
// If I had a fork, or I _will_ have a fork, it should fail.
require.Equal(t, ErrExpensiveFork, err)
- default:
+ } else {
require.NoError(t, err)
require.True(t, ret.MsgRct.ExitCode.IsSuccess())
}
- // Call just runs on the parent state for a tipset, so we only
- // expect an error at the fork height.
+
+ // Call always applies the message to the "next block" after the tipset's parent state.
ret, err = sm.Call(ctx, m, ts.TipSet.TipSet())
- switch ts.TipSet.TipSet().Height() {
- case testForkHeight + 1:
+ if parentHeight == testForkHeight {
require.Equal(t, ErrExpensiveFork, err)
- default:
+ } else {
require.NoError(t, err)
require.True(t, ret.MsgRct.ExitCode.IsSuccess())
}
}
}
+
+func TestForkPreMigration(t *testing.T) {
+ logging.SetAllLoggers(logging.LevelInfo)
+
+ cg, err := gen.NewGenerator()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fooCid, err := abi.CidBuilder.Sum([]byte("foo"))
+ require.NoError(t, err)
+
+ barCid, err := abi.CidBuilder.Sum([]byte("bar"))
+ require.NoError(t, err)
+
+ failCid, err := abi.CidBuilder.Sum([]byte("fail"))
+ require.NoError(t, err)
+
+ var wait20 sync.WaitGroup
+ wait20.Add(3)
+
+ wasCanceled := make(chan struct{})
+
+ checkCache := func(t *testing.T, cache MigrationCache) {
+ found, value, err := cache.Read("foo")
+ require.NoError(t, err)
+ require.True(t, found)
+ require.Equal(t, fooCid, value)
+
+ found, value, err = cache.Read("bar")
+ require.NoError(t, err)
+ require.True(t, found)
+ require.Equal(t, barCid, value)
+
+ found, _, err = cache.Read("fail")
+ require.NoError(t, err)
+ require.False(t, found)
+ }
+
+ counter := make(chan struct{}, 10)
+
+ sm, err := NewStateManagerWithUpgradeSchedule(
+ cg.ChainStore(), UpgradeSchedule{{
+ Network: 1,
+ Height: testForkHeight,
+ Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
+ root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+
+ // Make sure the test that should be canceled, is canceled.
+ select {
+ case <-wasCanceled:
+ case <-ctx.Done():
+ return cid.Undef, ctx.Err()
+ }
+
+ // the cache should be setup correctly.
+ checkCache(t, cache)
+
+ counter <- struct{}{}
+
+ return root, nil
+ },
+ PreMigrations: []PreMigration{{
+ StartWithin: 20,
+ PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
+ _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
+ wait20.Done()
+ wait20.Wait()
+
+ err := cache.Write("foo", fooCid)
+ require.NoError(t, err)
+
+ counter <- struct{}{}
+
+ return nil
+ },
+ }, {
+ StartWithin: 20,
+ PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
+ _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
+ wait20.Done()
+ wait20.Wait()
+
+ err := cache.Write("bar", barCid)
+ require.NoError(t, err)
+
+ counter <- struct{}{}
+
+ return nil
+ },
+ }, {
+ StartWithin: 20,
+ PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
+ _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
+ wait20.Done()
+ wait20.Wait()
+
+ err := cache.Write("fail", failCid)
+ require.NoError(t, err)
+
+ counter <- struct{}{}
+
+ // Fail this migration. The cached entry should not be persisted.
+ return fmt.Errorf("failed")
+ },
+ }, {
+ StartWithin: 15,
+ StopWithin: 5,
+ PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
+ _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
+
+ <-ctx.Done()
+ close(wasCanceled)
+
+ counter <- struct{}{}
+
+ return nil
+ },
+ }, {
+ StartWithin: 10,
+ PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
+ _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
+
+ checkCache(t, cache)
+
+ counter <- struct{}{}
+
+ return nil
+ },
+ }}},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ require.NoError(t, sm.Start(context.Background()))
+ defer func() {
+ require.NoError(t, sm.Stop(context.Background()))
+ }()
+
+ inv := vm.NewActorRegistry()
+ inv.Register(nil, testActor{})
+
+ sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
+ nvm, err := vm.NewVM(ctx, vmopt)
+ if err != nil {
+ return nil, err
+ }
+ nvm.SetInvoker(inv)
+ return nvm, nil
+ })
+
+ cg.SetStateManager(sm)
+
+ for i := 0; i < 50; i++ {
+ _, err := cg.NextTipSet()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ // We have 5 pre-migration steps, and the migration. They should all have written something
+ // to this channel.
+ require.Equal(t, 6, len(counter))
+}
diff --git a/chain/stmgr/read.go b/chain/stmgr/read.go
index 9a9b8026576..3c7fb5d91e8 100644
--- a/chain/stmgr/read.go
+++ b/chain/stmgr/read.go
@@ -22,7 +22,7 @@ func (sm *StateManager) ParentStateTsk(tsk types.TipSetKey) (*state.StateTree, e
}
func (sm *StateManager) ParentState(ts *types.TipSet) (*state.StateTree, error) {
- cst := cbor.NewCborStore(sm.cs.Blockstore())
+ cst := cbor.NewCborStore(sm.cs.StateBlockstore())
state, err := state.LoadStateTree(cst, sm.parentState(ts))
if err != nil {
return nil, xerrors.Errorf("load state tree: %w", err)
@@ -32,7 +32,7 @@ func (sm *StateManager) ParentState(ts *types.TipSet) (*state.StateTree, error)
}
func (sm *StateManager) StateTree(st cid.Cid) (*state.StateTree, error) {
- cst := cbor.NewCborStore(sm.cs.Blockstore())
+ cst := cbor.NewCborStore(sm.cs.StateBlockstore())
state, err := state.LoadStateTree(cst, st)
if err != nil {
return nil, xerrors.Errorf("load state tree: %w", err)
diff --git a/node/modules/rpcstatemanager.go b/chain/stmgr/rpc/rpcstatemanager.go
similarity index 88%
rename from node/modules/rpcstatemanager.go
rename to chain/stmgr/rpc/rpcstatemanager.go
index 7d7b9243798..dc719eb55e3 100644
--- a/node/modules/rpcstatemanager.go
+++ b/chain/stmgr/rpc/rpcstatemanager.go
@@ -1,4 +1,4 @@
-package modules
+package rpcstmgr
import (
"context"
@@ -7,7 +7,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/api/apibstore"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/stmgr"
@@ -16,12 +16,12 @@ import (
)
type RPCStateManager struct {
- gapi api.GatewayAPI
+ gapi api.Gateway
cstore *cbor.BasicIpldStore
}
-func NewRPCStateManager(api api.GatewayAPI) *RPCStateManager {
- cstore := cbor.NewCborStore(apibstore.NewAPIBlockstore(api))
+func NewRPCStateManager(api api.Gateway) *RPCStateManager {
+ cstore := cbor.NewCborStore(blockstore.NewAPIBlockstore(api))
return &RPCStateManager{gapi: api, cstore: cstore}
}
diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go
index 7e5809a840c..4f1351d2c58 100644
--- a/chain/stmgr/stmgr.go
+++ b/chain/stmgr/stmgr.go
@@ -5,11 +5,15 @@ import (
"errors"
"fmt"
"sync"
+ "sync/atomic"
+
+ "github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
cbg "github.com/whyrusleeping/cbor-gen"
+ "go.opencensus.io/stats"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
@@ -20,6 +24,10 @@ import (
// Used for genesis.
msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+ "github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"
+
+ // we use the same adt for all receipts
+ blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
@@ -39,9 +47,11 @@ import (
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
+ "github.com/filecoin-project/lotus/metrics"
)
-const LookbackNoLimit = abi.ChainEpoch(-1)
+const LookbackNoLimit = api.LookbackNoLimit
+const ReceiptAmtBitwidth = 3
var log = logging.Logger("statemgr")
@@ -58,27 +68,49 @@ type versionSpec struct {
atOrBelow abi.ChainEpoch
}
+type migration struct {
+ upgrade MigrationFunc
+ preMigrations []PreMigration
+ cache *nv10.MemMigrationCache
+}
+
type StateManager struct {
cs *store.ChainStore
+ cancel context.CancelFunc
+ shutdown chan struct{}
+
// Determines the network version at any given epoch.
networkVersions []versionSpec
latestVersion network.Version
- // Maps chain epochs to upgrade functions.
- stateMigrations map[abi.ChainEpoch]UpgradeFunc
+ // Maps chain epochs to migrations.
+ stateMigrations map[abi.ChainEpoch]*migration
// A set of potentially expensive/time consuming upgrades. Explicit
// calls for, e.g., gas estimation fail against this epoch with
// ErrExpensiveFork.
expensiveUpgrades map[abi.ChainEpoch]struct{}
- stCache map[string][]cid.Cid
- compWait map[string]chan struct{}
- stlk sync.Mutex
- genesisMsigLk sync.Mutex
- newVM func(context.Context, *vm.VMOpts) (*vm.VM, error)
- preIgnitionGenInfos *genesisInfo
- postIgnitionGenInfos *genesisInfo
+ stCache map[string][]cid.Cid
+ tCache treeCache
+ compWait map[string]chan struct{}
+ stlk sync.Mutex
+ genesisMsigLk sync.Mutex
+ newVM func(context.Context, *vm.VMOpts) (*vm.VM, error)
+ preIgnitionVesting []msig0.State
+ postIgnitionVesting []msig0.State
+ postCalicoVesting []msig0.State
+
+ genesisPledge abi.TokenAmount
+ genesisMarketFunds abi.TokenAmount
+
+ tsExecMonitor ExecMonitor
+}
+
+// Caches a single state tree
+type treeCache struct {
+ root cid.Cid
+ tree *state.StateTree
}
func NewStateManager(cs *store.ChainStore) *StateManager {
@@ -95,7 +127,7 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule
return nil, err
}
- stateMigrations := make(map[abi.ChainEpoch]UpgradeFunc, len(us))
+ stateMigrations := make(map[abi.ChainEpoch]*migration, len(us))
expensiveUpgrades := make(map[abi.ChainEpoch]struct{}, len(us))
var networkVersions []versionSpec
lastVersion := network.Version0
@@ -103,8 +135,13 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule
// If we have any upgrades, process them and create a version
// schedule.
for _, upgrade := range us {
- if upgrade.Migration != nil {
- stateMigrations[upgrade.Height] = upgrade.Migration
+ if upgrade.Migration != nil || upgrade.PreMigrations != nil {
+ migration := &migration{
+ upgrade: upgrade.Migration,
+ preMigrations: upgrade.PreMigrations,
+ cache: nv10.NewMemMigrationCache(),
+ }
+ stateMigrations[upgrade.Height] = migration
}
if upgrade.Expensive {
expensiveUpgrades[upgrade.Height] = struct{}{}
@@ -128,10 +165,23 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule
newVM: vm.NewVM,
cs: cs,
stCache: make(map[string][]cid.Cid),
- compWait: make(map[string]chan struct{}),
+ tCache: treeCache{
+ root: cid.Undef,
+ tree: nil,
+ },
+ compWait: make(map[string]chan struct{}),
}, nil
}
+func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, us UpgradeSchedule, em ExecMonitor) (*StateManager, error) {
+ sm, err := NewStateManagerWithUpgradeSchedule(cs, us)
+ if err != nil {
+ return nil, err
+ }
+ sm.tsExecMonitor = em
+ return sm, nil
+}
+
func cidsToKey(cids []cid.Cid) string {
var out string
for _, c := range cids {
@@ -140,6 +190,33 @@ func cidsToKey(cids []cid.Cid) string {
return out
}
+// Start starts the state manager's optional background processes. At the moment, this schedules
+// pre-migration functions to run ahead of network upgrades.
+//
+// This method is not safe to invoke from multiple threads or concurrently with Stop.
+func (sm *StateManager) Start(context.Context) error {
+ var ctx context.Context
+ ctx, sm.cancel = context.WithCancel(context.Background())
+ sm.shutdown = make(chan struct{})
+ go sm.preMigrationWorker(ctx)
+ return nil
+}
+
+// Stop stops the state manager's background processes.
+//
+// This method is not safe to invoke concurrently with Start.
+func (sm *StateManager) Stop(ctx context.Context) error {
+ if sm.cancel != nil {
+ sm.cancel()
+ select {
+ case <-sm.shutdown:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ return nil
+}
+
func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st cid.Cid, rec cid.Cid, err error) {
ctx, span := trace.StartSpan(ctx, "tipSetState")
defer span.End()
@@ -189,7 +266,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil
}
- st, rec, err = sm.computeTipSetState(ctx, ts, nil)
+ st, rec, err = sm.computeTipSetState(ctx, ts, sm.tsExecMonitor)
if err != nil {
return cid.Undef, cid.Undef, err
}
@@ -197,46 +274,35 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
return st, rec, nil
}
-func traceFunc(trace *[]*api.InvocResult) func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
- return func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
- ir := &api.InvocResult{
- MsgCid: mcid,
- Msg: msg,
- MsgRct: &ret.MessageReceipt,
- ExecutionTrace: ret.ExecutionTrace,
- Duration: ret.Duration,
- }
- if ret.ActorErr != nil {
- ir.Error = ret.ActorErr.Error()
- }
- if ret.GasCosts != nil {
- ir.GasCost = MakeMsgGasCost(msg, ret)
- }
- *trace = append(*trace, ir)
- return nil
- }
+func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, error) {
+ st, _, err := sm.computeTipSetState(ctx, ts, em)
+ return st, err
}
func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) {
- var trace []*api.InvocResult
- st, _, err := sm.computeTipSetState(ctx, ts, traceFunc(&trace))
+ var invocTrace []*api.InvocResult
+ st, err := sm.ExecutionTraceWithMonitor(ctx, ts, &InvocationTracer{trace: &invocTrace})
if err != nil {
return cid.Undef, nil, err
}
-
- return st, trace, nil
+ return st, invocTrace, nil
}
-type ExecCallback func(cid.Cid, *types.Message, *vm.ApplyRet) error
+func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, em ExecMonitor, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
+ done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal)
+ defer done()
-func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
+ partDone := metrics.Timer(ctx, metrics.VMApplyEarly)
+ defer func() {
+ partDone()
+ }()
makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) {
vmopt := &vm.VMOpts{
StateBase: base,
Epoch: epoch,
Rand: r,
- Bstore: sm.cs.Blockstore(),
+ Bstore: sm.cs.StateBlockstore(),
Syscalls: sm.cs.VMSys(),
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
@@ -253,7 +319,6 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
}
runCron := func(epoch abi.ChainEpoch) error {
-
cronMsg := &types.Message{
To: cron.Address,
From: builtin.SystemActorAddr,
@@ -269,8 +334,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
if err != nil {
return err
}
- if cb != nil {
- if err := cb(cronMsg.Cid(), cronMsg, ret); err != nil {
+ if em != nil {
+ if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil {
return xerrors.Errorf("callback failed on cron message: %w", err)
}
}
@@ -296,7 +361,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
// handle state forks
// XXX: The state tree
- newState, err := sm.handleStateForks(ctx, pstate, i, cb, ts)
+ newState, err := sm.handleStateForks(ctx, pstate, i, em, ts)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err)
}
@@ -312,6 +377,9 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
pstate = newState
}
+ partDone()
+ partDone = metrics.Timer(ctx, metrics.VMApplyMessages)
+
var receipts []cbg.CBORMarshaler
processedMsgs := make(map[cid.Cid]struct{})
for _, b := range bms {
@@ -332,8 +400,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
gasReward = big.Add(gasReward, r.GasCosts.MinerTip)
penalty = big.Add(penalty, r.GasCosts.MinerPenalty)
- if cb != nil {
- if err := cb(cm.Cid(), m, r); err != nil {
+ if em != nil {
+ if err := em.MessageApplied(ctx, ts, cm.Cid(), m, r, false); err != nil {
return cid.Undef, cid.Undef, err
}
}
@@ -365,8 +433,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
if actErr != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr)
}
- if cb != nil {
- if err := cb(rwMsg.Cid(), rwMsg, ret); err != nil {
+ if em != nil {
+ if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err)
}
}
@@ -376,15 +444,17 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
}
}
+ partDone()
+ partDone = metrics.Timer(ctx, metrics.VMApplyCron)
+
if err := runCron(epoch); err != nil {
return cid.Cid{}, cid.Cid{}, err
}
- // XXX: Is the height correct? Or should it be epoch-1?
- rectarr, err := adt.NewArray(sm.cs.Store(ctx), actors.VersionForNetwork(sm.GetNtwkVersion(ctx, epoch)))
- if err != nil {
- return cid.Undef, cid.Undef, xerrors.Errorf("failed to create receipts amt: %w", err)
- }
+ partDone()
+ partDone = metrics.Timer(ctx, metrics.VMApplyFlush)
+
+ rectarr := blockadt.MakeEmptyArray(sm.cs.ActorStore(ctx))
for i, receipt := range receipts {
if err := rectarr.Set(uint64(i), receipt); err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err)
@@ -400,10 +470,13 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
return cid.Undef, cid.Undef, xerrors.Errorf("vm flush failed: %w", err)
}
+ stats.Record(ctx, metrics.VMSends.M(int64(atomic.LoadUint64(&vm.StatSends))),
+ metrics.VMApplied.M(int64(atomic.LoadUint64(&vm.StatApplied))))
+
return st, rectroot, nil
}
-func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet, cb ExecCallback) (cid.Cid, cid.Cid, error) {
+func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, cid.Cid, error) {
ctx, span := trace.StartSpan(ctx, "computeTipSetState")
defer span.End()
@@ -439,7 +512,7 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet
baseFee := blks[0].ParentBaseFee
- return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, cb, baseFee, ts)
+ return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, em, baseFee, ts)
}
func (sm *StateManager) parentState(ts *types.TipSet) cid.Cid {
@@ -469,13 +542,26 @@ func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Ad
ts = sm.cs.GetHeaviestTipSet()
}
+ cst := cbor.NewCborStore(sm.cs.StateBlockstore())
+
+ // First try to resolve the actor in the parent state, so we don't have to compute anything.
+ tree, err := state.LoadStateTree(cst, ts.ParentState())
+ if err != nil {
+ return address.Undef, xerrors.Errorf("failed to load parent state tree: %w", err)
+ }
+
+ resolved, err := vm.ResolveToKeyAddr(tree, cst, addr)
+ if err == nil {
+ return resolved, nil
+ }
+
+ // If that fails, compute the tip-set and try again.
st, _, err := sm.TipSetState(ctx, ts)
if err != nil {
return address.Undef, xerrors.Errorf("resolve address failed to get tipset state: %w", err)
}
- cst := cbor.NewCborStore(sm.cs.Blockstore())
- tree, err := state.LoadStateTree(cst, st)
+ tree, err = state.LoadStateTree(cst, st)
if err != nil {
return address.Undef, xerrors.Errorf("failed to load state tree")
}
@@ -483,6 +569,52 @@ func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Ad
return vm.ResolveToKeyAddr(tree, cst, addr)
}
+// ResolveToKeyAddressAtFinality is similar to stmgr.ResolveToKeyAddress but fails if the ID address being resolved isn't reorg-stable yet.
+// It should not be used for consensus-critical subsystems.
+func (sm *StateManager) ResolveToKeyAddressAtFinality(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
+ switch addr.Protocol() {
+ case address.BLS, address.SECP256K1:
+ return addr, nil
+ case address.Actor:
+ return address.Undef, xerrors.New("cannot resolve actor address to key address")
+ default:
+ }
+
+ if ts == nil {
+ ts = sm.cs.GetHeaviestTipSet()
+ }
+
+ var err error
+ if ts.Height() > policy.ChainFinality {
+ ts, err = sm.ChainStore().GetTipsetByHeight(ctx, ts.Height()-policy.ChainFinality, ts, true)
+ if err != nil {
+ return address.Undef, xerrors.Errorf("failed to load lookback tipset: %w", err)
+ }
+ }
+
+ cst := cbor.NewCborStore(sm.cs.StateBlockstore())
+ tree := sm.tCache.tree
+
+ if tree == nil || sm.tCache.root != ts.ParentState() {
+ tree, err = state.LoadStateTree(cst, ts.ParentState())
+ if err != nil {
+ return address.Undef, xerrors.Errorf("failed to load parent state tree: %w", err)
+ }
+
+ sm.tCache = treeCache{
+ root: ts.ParentState(),
+ tree: tree,
+ }
+ }
+
+ resolved, err := vm.ResolveToKeyAddr(tree, cst, addr)
+ if err == nil {
+ return resolved, nil
+ }
+
+ return address.Undef, xerrors.New("ID address not found in lookback state")
+}
+
func (sm *StateManager) GetBlsPublicKey(ctx context.Context, addr address.Address, ts *types.TipSet) (pubk []byte, err error) {
kaddr, err := sm.ResolveToKeyAddress(ctx, addr, ts)
if err != nil {
@@ -497,7 +629,7 @@ func (sm *StateManager) GetBlsPublicKey(ctx context.Context, addr address.Addres
}
func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
- cst := cbor.NewCborStore(sm.cs.Blockstore())
+ cst := cbor.NewCborStore(sm.cs.StateBlockstore())
state, err := state.LoadStateTree(cst, sm.parentState(ts))
if err != nil {
return address.Undef, xerrors.Errorf("load state tree: %w", err)
@@ -505,24 +637,10 @@ func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts *
return state.LookupID(addr)
}
-func (sm *StateManager) GetReceipt(ctx context.Context, msg cid.Cid, ts *types.TipSet) (*types.MessageReceipt, error) {
- m, err := sm.cs.GetCMessage(msg)
- if err != nil {
- return nil, fmt.Errorf("failed to load message: %w", err)
- }
-
- _, r, _, err := sm.searchBackForMsg(ctx, ts, m, LookbackNoLimit)
- if err != nil {
- return nil, fmt.Errorf("failed to look back through chain for message: %w", err)
- }
-
- return r, nil
-}
-
// WaitForMessage blocks until a message appears on chain. It looks backwards in the chain to see if this has already
// happened, with an optional limit to how many epochs it will search. It guarantees that the message has been on
// chain for at least confidence epochs without being reverted before returning.
-func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confidence uint64, lookbackLimit abi.ChainEpoch) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
+func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confidence uint64, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@@ -546,7 +664,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
return nil, nil, cid.Undef, fmt.Errorf("expected current head on SHC stream (got %s)", head[0].Type)
}
- r, foundMsg, err := sm.tipsetExecutedMessage(head[0].Val, mcid, msg.VMMessage())
+ r, foundMsg, err := sm.tipsetExecutedMessage(head[0].Val, mcid, msg.VMMessage(), allowReplaced)
if err != nil {
return nil, nil, cid.Undef, err
}
@@ -560,9 +678,9 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
var backFm cid.Cid
backSearchWait := make(chan struct{})
go func() {
- fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head[0].Val, msg, lookbackLimit)
+ fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head[0].Val, msg, lookbackLimit, allowReplaced)
if err != nil {
- log.Warnf("failed to look back through chain for message: %w", err)
+ log.Warnf("failed to look back through chain for message: %v", err)
return
}
@@ -599,7 +717,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
if candidateTs != nil && val.Val.Height() >= candidateTs.Height()+abi.ChainEpoch(confidence) {
return candidateTs, candidateRcp, candidateFm, nil
}
- r, foundMsg, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage())
+ r, foundMsg, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage(), allowReplaced)
if err != nil {
return nil, nil, cid.Undef, err
}
@@ -635,15 +753,13 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
}
}
-func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
+func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet, mcid cid.Cid, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
msg, err := sm.cs.GetCMessage(mcid)
if err != nil {
return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err)
}
- head := sm.cs.GetHeaviestTipSet()
-
- r, foundMsg, err := sm.tipsetExecutedMessage(head, mcid, msg.VMMessage())
+ r, foundMsg, err := sm.tipsetExecutedMessage(head, mcid, msg.VMMessage(), allowReplaced)
if err != nil {
return nil, nil, cid.Undef, err
}
@@ -652,7 +768,7 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*ty
return head, r, foundMsg, nil
}
- fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, LookbackNoLimit)
+ fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, lookbackLimit, allowReplaced)
if err != nil {
log.Warnf("failed to look back through chain for message %s", mcid)
@@ -672,7 +788,7 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*ty
// - 0 then no tipsets are searched
// - 5 then five tipset are searched
// - LookbackNoLimit then there is no limit
-func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet, m types.ChainMsg, limit abi.ChainEpoch) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
+func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet, m types.ChainMsg, limit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
limitHeight := from.Height() - limit
noLimit := limit == LookbackNoLimit
@@ -722,7 +838,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet
// check that between cur and parent tipset the nonce fell into range of our message
if actorNoExist || (curActor.Nonce > mNonce && act.Nonce <= mNonce) {
- r, foundMsg, err := sm.tipsetExecutedMessage(cur, m.Cid(), m.VMMessage())
+ r, foundMsg, err := sm.tipsetExecutedMessage(cur, m.Cid(), m.VMMessage(), allowReplaced)
if err != nil {
return nil, nil, cid.Undef, xerrors.Errorf("checking for message execution during lookback: %w", err)
}
@@ -737,7 +853,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet
}
}
-func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm *types.Message) (*types.MessageReceipt, cid.Cid, error) {
+func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm *types.Message, allowReplaced bool) (*types.MessageReceipt, cid.Cid, error) {
// The genesis block did not execute any messages
if ts.Height() == 0 {
return nil, cid.Undef, nil
@@ -760,7 +876,7 @@ func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm
if m.VMMessage().From == vmm.From { // cheaper to just check origin first
if m.VMMessage().Nonce == vmm.Nonce {
- if m.VMMessage().EqualCall(vmm) {
+ if allowReplaced && m.VMMessage().EqualCall(vmm) {
if m.Cid() != msg {
log.Warnw("found message with equal nonce and call params but different CID",
"wanted", msg, "found", m.Cid(), "nonce", vmm.Nonce, "from", vmm.From)
@@ -790,10 +906,7 @@ func (sm *StateManager) ListAllActors(ctx context.Context, ts *types.TipSet) ([]
if ts == nil {
ts = sm.cs.GetHeaviestTipSet()
}
- st, _, err := sm.TipSetState(ctx, ts)
- if err != nil {
- return nil, err
- }
+ st := ts.ParentState()
stateTree, err := sm.StateTree(st)
if err != nil {
@@ -823,7 +936,7 @@ func (sm *StateManager) MarketBalance(ctx context.Context, addr address.Address,
return api.MarketBalance{}, err
}
- mstate, err := market.Load(sm.cs.Store(ctx), act)
+ mstate, err := market.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return api.MarketBalance{}, err
}
@@ -889,23 +1002,8 @@ func (sm *StateManager) SetVMConstructor(nvm func(context.Context, *vm.VMOpts) (
sm.newVM = nvm
}
-type genesisInfo struct {
- genesisMsigs []msig0.State
- // info about the Accounts in the genesis state
- genesisActors []genesisActor
- genesisPledge abi.TokenAmount
- genesisMarketFunds abi.TokenAmount
-}
-
-type genesisActor struct {
- addr address.Address
- initBal abi.TokenAmount
-}
-
-// sets up information about the actors in the genesis state
-func (sm *StateManager) setupGenesisActors(ctx context.Context) error {
-
- gi := genesisInfo{}
+// sets up information about the vesting schedule
+func (sm *StateManager) setupGenesisVestingSchedule(ctx context.Context) error {
gb, err := sm.cs.GetGenesis()
if err != nil {
@@ -922,133 +1020,64 @@ func (sm *StateManager) setupGenesisActors(ctx context.Context) error {
return xerrors.Errorf("getting genesis tipset state: %w", err)
}
- cst := cbor.NewCborStore(sm.cs.Blockstore())
+ cst := cbor.NewCborStore(sm.cs.StateBlockstore())
sTree, err := state.LoadStateTree(cst, st)
if err != nil {
return xerrors.Errorf("loading state tree: %w", err)
}
- gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree)
+ gmf, err := getFilMarketLocked(ctx, sTree)
if err != nil {
return xerrors.Errorf("setting up genesis market funds: %w", err)
}
- gi.genesisPledge, err = getFilPowerLocked(ctx, sTree)
+ gp, err := getFilPowerLocked(ctx, sTree)
if err != nil {
return xerrors.Errorf("setting up genesis pledge: %w", err)
}
- totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)
- err = sTree.ForEach(func(kaddr address.Address, act *types.Actor) error {
- if builtin.IsMultisigActor(act.Code) {
- s, err := multisig.Load(sm.cs.Store(ctx), act)
- if err != nil {
- return err
- }
-
- se, err := s.StartEpoch()
- if err != nil {
- return err
- }
-
- if se != 0 {
- return xerrors.New("genesis multisig doesn't start vesting at epoch 0!")
- }
-
- ud, err := s.UnlockDuration()
- if err != nil {
- return err
- }
+ sm.genesisMarketFunds = gmf
+ sm.genesisPledge = gp
- ib, err := s.InitialBalance()
- if err != nil {
- return err
- }
+ totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)
- ot, f := totalsByEpoch[ud]
- if f {
- totalsByEpoch[ud] = big.Add(ot, ib)
- } else {
- totalsByEpoch[ud] = ib
- }
+ // 6 months
+ sixMonths := abi.ChainEpoch(183 * builtin.EpochsInDay)
+ totalsByEpoch[sixMonths] = big.NewInt(49_929_341)
+ totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700))
- } else if builtin.IsAccountActor(act.Code) {
- // should exclude burnt funds actor and "remainder account actor"
- // should only ever be "faucet" accounts in testnets
- if kaddr == builtin.BurntFundsActorAddr {
- return nil
- }
+ // 1 year
+ oneYear := abi.ChainEpoch(365 * builtin.EpochsInDay)
+ totalsByEpoch[oneYear] = big.NewInt(22_421_712)
- kid, err := sTree.LookupID(kaddr)
- if err != nil {
- return xerrors.Errorf("resolving address: %w", err)
- }
+ // 2 years
+ twoYears := abi.ChainEpoch(2 * 365 * builtin.EpochsInDay)
+ totalsByEpoch[twoYears] = big.NewInt(7_223_364)
- gi.genesisActors = append(gi.genesisActors, genesisActor{
- addr: kid,
- initBal: act.Balance,
- })
- }
- return nil
- })
+ // 3 years
+ threeYears := abi.ChainEpoch(3 * 365 * builtin.EpochsInDay)
+ totalsByEpoch[threeYears] = big.NewInt(87_637_883)
- if err != nil {
- return xerrors.Errorf("error setting up genesis infos: %w", err)
- }
+ // 6 years
+ sixYears := abi.ChainEpoch(6 * 365 * builtin.EpochsInDay)
+ totalsByEpoch[sixYears] = big.NewInt(100_000_000)
+ totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000))
- // TODO: use network upgrade abstractions or always start at actors v0?
- gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch))
+ sm.preIgnitionVesting = make([]msig0.State, 0, len(totalsByEpoch))
for k, v := range totalsByEpoch {
ns := msig0.State{
InitialBalance: v,
UnlockDuration: k,
PendingTxns: cid.Undef,
}
- gi.genesisMsigs = append(gi.genesisMsigs, ns)
+ sm.preIgnitionVesting = append(sm.preIgnitionVesting, ns)
}
- sm.preIgnitionGenInfos = &gi
-
return nil
}
-// sets up information about the actors in the genesis state
-// For testnet we use a hardcoded set of multisig states, instead of what's actually in the genesis multisigs
-// We also do not consider ANY account actors (including the faucet)
-func (sm *StateManager) setupPreIgnitionGenesisActorsTestnet(ctx context.Context) error {
-
- gi := genesisInfo{}
-
- gb, err := sm.cs.GetGenesis()
- if err != nil {
- return xerrors.Errorf("getting genesis block: %w", err)
- }
-
- gts, err := types.NewTipSet([]*types.BlockHeader{gb})
- if err != nil {
- return xerrors.Errorf("getting genesis tipset: %w", err)
- }
-
- st, _, err := sm.TipSetState(ctx, gts)
- if err != nil {
- return xerrors.Errorf("getting genesis tipset state: %w", err)
- }
-
- cst := cbor.NewCborStore(sm.cs.Blockstore())
- sTree, err := state.LoadStateTree(cst, st)
- if err != nil {
- return xerrors.Errorf("loading state tree: %w", err)
- }
-
- gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree)
- if err != nil {
- return xerrors.Errorf("setting up genesis market funds: %w", err)
- }
-
- gi.genesisPledge, err = getFilPowerLocked(ctx, sTree)
- if err != nil {
- return xerrors.Errorf("setting up genesis pledge: %w", err)
- }
+// sets up information about the vesting schedule post the ignition upgrade
+func (sm *StateManager) setupPostIgnitionVesting(ctx context.Context) error {
totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)
@@ -1074,69 +1103,40 @@ func (sm *StateManager) setupPreIgnitionGenesisActorsTestnet(ctx context.Context
totalsByEpoch[sixYears] = big.NewInt(100_000_000)
totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000))
- gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch))
+ sm.postIgnitionVesting = make([]msig0.State, 0, len(totalsByEpoch))
for k, v := range totalsByEpoch {
ns := msig0.State{
- InitialBalance: v,
+ // In the pre-ignition logic, we incorrectly set this value in Fil, not attoFil, an off-by-10^18 error
+ InitialBalance: big.Mul(v, big.NewInt(int64(build.FilecoinPrecision))),
UnlockDuration: k,
PendingTxns: cid.Undef,
+ // In the pre-ignition logic, the start epoch was 0. This changes in the fork logic of the Ignition upgrade itself.
+ StartEpoch: build.UpgradeLiftoffHeight,
}
- gi.genesisMsigs = append(gi.genesisMsigs, ns)
+ sm.postIgnitionVesting = append(sm.postIgnitionVesting, ns)
}
- sm.preIgnitionGenInfos = &gi
-
return nil
}
-// sets up information about the actors in the genesis state, post the ignition fork
-func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) error {
-
- gi := genesisInfo{}
-
- gb, err := sm.cs.GetGenesis()
- if err != nil {
- return xerrors.Errorf("getting genesis block: %w", err)
- }
-
- gts, err := types.NewTipSet([]*types.BlockHeader{gb})
- if err != nil {
- return xerrors.Errorf("getting genesis tipset: %w", err)
- }
-
- st, _, err := sm.TipSetState(ctx, gts)
- if err != nil {
- return xerrors.Errorf("getting genesis tipset state: %w", err)
- }
-
- cst := cbor.NewCborStore(sm.cs.Blockstore())
- sTree, err := state.LoadStateTree(cst, st)
- if err != nil {
- return xerrors.Errorf("loading state tree: %w", err)
- }
-
- // Unnecessary, should be removed
- gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree)
- if err != nil {
- return xerrors.Errorf("setting up genesis market funds: %w", err)
- }
-
- // Unnecessary, should be removed
- gi.genesisPledge, err = getFilPowerLocked(ctx, sTree)
- if err != nil {
- return xerrors.Errorf("setting up genesis pledge: %w", err)
- }
+// sets up information about the vesting schedule post the calico upgrade
+func (sm *StateManager) setupPostCalicoVesting(ctx context.Context) error {
totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)
+ // 0 days
+ zeroDays := abi.ChainEpoch(0)
+ totalsByEpoch[zeroDays] = big.NewInt(10_632_000)
+
// 6 months
sixMonths := abi.ChainEpoch(183 * builtin.EpochsInDay)
- totalsByEpoch[sixMonths] = big.NewInt(49_929_341)
+ totalsByEpoch[sixMonths] = big.NewInt(19_015_887)
totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700))
// 1 year
oneYear := abi.ChainEpoch(365 * builtin.EpochsInDay)
totalsByEpoch[oneYear] = big.NewInt(22_421_712)
+ totalsByEpoch[oneYear] = big.Add(totalsByEpoch[oneYear], big.NewInt(9_400_000))
// 2 years
twoYears := abi.ChainEpoch(2 * 365 * builtin.EpochsInDay)
@@ -1145,27 +1145,25 @@ func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) erro
// 3 years
threeYears := abi.ChainEpoch(3 * 365 * builtin.EpochsInDay)
totalsByEpoch[threeYears] = big.NewInt(87_637_883)
+ totalsByEpoch[threeYears] = big.Add(totalsByEpoch[threeYears], big.NewInt(898_958))
// 6 years
sixYears := abi.ChainEpoch(6 * 365 * builtin.EpochsInDay)
totalsByEpoch[sixYears] = big.NewInt(100_000_000)
totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000))
+ totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(9_805_053))
- gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch))
+ sm.postCalicoVesting = make([]msig0.State, 0, len(totalsByEpoch))
for k, v := range totalsByEpoch {
ns := msig0.State{
- // In the pre-ignition logic, we incorrectly set this value in Fil, not attoFil, an off-by-10^18 error
InitialBalance: big.Mul(v, big.NewInt(int64(build.FilecoinPrecision))),
UnlockDuration: k,
PendingTxns: cid.Undef,
- // In the pre-ignition logic, the start epoch was 0. This changes in the fork logic of the Ignition upgrade itself.
- StartEpoch: build.UpgradeLiftoffHeight,
+ StartEpoch: build.UpgradeLiftoffHeight,
}
- gi.genesisMsigs = append(gi.genesisMsigs, ns)
+ sm.postCalicoVesting = append(sm.postCalicoVesting, ns)
}
- sm.postIgnitionGenInfos = &gi
-
return nil
}
@@ -1175,39 +1173,32 @@ func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) erro
func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) {
vf := big.Zero()
if height <= build.UpgradeIgnitionHeight {
- for _, v := range sm.preIgnitionGenInfos.genesisMsigs {
+ for _, v := range sm.preIgnitionVesting {
au := big.Sub(v.InitialBalance, v.AmountLocked(height))
vf = big.Add(vf, au)
}
- } else {
- for _, v := range sm.postIgnitionGenInfos.genesisMsigs {
+ } else if height <= build.UpgradeCalicoHeight {
+ for _, v := range sm.postIgnitionVesting {
// In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0.
// The start epoch changed in the Ignition upgrade.
au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch))
vf = big.Add(vf, au)
}
- }
-
- // there should not be any such accounts in testnet (and also none in mainnet?)
- // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
- for _, v := range sm.preIgnitionGenInfos.genesisActors {
- act, err := st.GetActor(v.addr)
- if err != nil {
- return big.Zero(), xerrors.Errorf("failed to get actor: %w", err)
- }
-
- diff := big.Sub(v.initBal, act.Balance)
- if diff.GreaterThan(big.Zero()) {
- vf = big.Add(vf, diff)
+ } else {
+ for _, v := range sm.postCalicoVesting {
+ // In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0.
+ // The start epoch changed in the Ignition upgrade.
+ au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch))
+ vf = big.Add(vf, au)
}
}
- // After UpgradeActorsV2Height these funds are accounted for in GetFilReserveDisbursed
- if height <= build.UpgradeActorsV2Height {
+ // After UpgradeAssemblyHeight these funds are accounted for in GetFilReserveDisbursed
+ if height <= build.UpgradeAssemblyHeight {
// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
- vf = big.Add(vf, sm.preIgnitionGenInfos.genesisPledge)
+ vf = big.Add(vf, sm.genesisPledge)
// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
- vf = big.Add(vf, sm.preIgnitionGenInfos.genesisMarketFunds)
+ vf = big.Add(vf, sm.genesisMarketFunds)
}
return vf, nil
@@ -1301,16 +1292,22 @@ func (sm *StateManager) GetVMCirculatingSupply(ctx context.Context, height abi.C
func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) {
sm.genesisMsigLk.Lock()
defer sm.genesisMsigLk.Unlock()
- if sm.preIgnitionGenInfos == nil {
- err := sm.setupPreIgnitionGenesisActorsTestnet(ctx)
+ if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() || sm.genesisMarketFunds.IsZero() {
+ err := sm.setupGenesisVestingSchedule(ctx)
+ if err != nil {
+ return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition vesting schedule: %w", err)
+ }
+ }
+ if sm.postIgnitionVesting == nil {
+ err := sm.setupPostIgnitionVesting(ctx)
if err != nil {
- return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition genesis information: %w", err)
+ return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition vesting schedule: %w", err)
}
}
- if sm.postIgnitionGenInfos == nil {
- err := sm.setupPostIgnitionGenesisActors(ctx)
+ if sm.postCalicoVesting == nil {
+ err := sm.setupPostCalicoVesting(ctx)
if err != nil {
- return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition genesis information: %w", err)
+ return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-calico vesting schedule: %w", err)
}
}
@@ -1320,7 +1317,7 @@ func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, heig
}
filReserveDisbursed := big.Zero()
- if height > build.UpgradeActorsV2Height {
+ if height > build.UpgradeAssemblyHeight {
filReserveDisbursed, err = GetFilReserveDisbursed(ctx, st)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filReserveDisbursed: %w", err)
@@ -1352,11 +1349,12 @@ func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, heig
}
return api.CirculatingSupply{
- FilVested: filVested,
- FilMined: filMined,
- FilBurnt: filBurnt,
- FilLocked: filLocked,
- FilCirculating: ret,
+ FilVested: filVested,
+ FilMined: filMined,
+ FilBurnt: filBurnt,
+ FilLocked: filLocked,
+ FilCirculating: ret,
+ FilReserveDisbursed: filReserveDisbursed,
}, nil
}
@@ -1382,7 +1380,7 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha
unCirc = big.Add(unCirc, actor.Balance)
case a == market.Address:
- mst, err := market.Load(sm.cs.Store(ctx), actor)
+ mst, err := market.Load(sm.cs.ActorStore(ctx), actor)
if err != nil {
return err
}
@@ -1399,7 +1397,7 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha
circ = big.Add(circ, actor.Balance)
case builtin.IsStorageMinerActor(actor.Code):
- mst, err := miner.Load(sm.cs.Store(ctx), actor)
+ mst, err := miner.Load(sm.cs.ActorStore(ctx), actor)
if err != nil {
return err
}
@@ -1416,7 +1414,7 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha
}
case builtin.IsMultisigActor(actor.Code):
- mst, err := multisig.Load(sm.cs.Store(ctx), actor)
+ mst, err := multisig.Load(sm.cs.ActorStore(ctx), actor)
if err != nil {
return err
}
@@ -1470,7 +1468,7 @@ func (sm *StateManager) GetPaychState(ctx context.Context, addr address.Address,
return nil, nil, err
}
- actState, err := paych.Load(sm.cs.Store(ctx), act)
+ actState, err := paych.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return nil, nil, err
}
@@ -1488,7 +1486,7 @@ func (sm *StateManager) GetMarketState(ctx context.Context, ts *types.TipSet) (m
return nil, err
}
- actState, err := market.Load(sm.cs.Store(ctx), act)
+ actState, err := market.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return nil, err
}
diff --git a/chain/stmgr/tracers.go b/chain/stmgr/tracers.go
new file mode 100644
index 00000000000..6bcd7bc1595
--- /dev/null
+++ b/chain/stmgr/tracers.go
@@ -0,0 +1,56 @@
+package stmgr
+
+import (
+ "context"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
+ "github.com/ipfs/go-cid"
+)
+
+type ExecMonitor interface {
+ // MessageApplied is called after a message has been applied. Returning an error will halt execution of any further messages.
+ MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error
+}
+
+var _ ExecMonitor = (*InvocationTracer)(nil)
+
+type InvocationTracer struct {
+ trace *[]*api.InvocResult
+}
+
+func (i *InvocationTracer) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error {
+ ir := &api.InvocResult{
+ MsgCid: mcid,
+ Msg: msg,
+ MsgRct: &ret.MessageReceipt,
+ ExecutionTrace: ret.ExecutionTrace,
+ Duration: ret.Duration,
+ }
+ if ret.ActorErr != nil {
+ ir.Error = ret.ActorErr.Error()
+ }
+ if ret.GasCosts != nil {
+ ir.GasCost = MakeMsgGasCost(msg, ret)
+ }
+ *i.trace = append(*i.trace, ir)
+ return nil
+}
+
+var _ ExecMonitor = (*messageFinder)(nil)
+
+type messageFinder struct {
+ mcid cid.Cid // the message cid to find
+ outm *types.Message
+ outr *vm.ApplyRet
+}
+
+func (m *messageFinder) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error {
+ if m.mcid == mcid {
+ m.outm = msg
+ m.outr = ret
+ return errHaltExecution // message was found, no need to continue
+ }
+ return nil
+}
diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go
index 5b144281d53..d2a2c6e604c 100644
--- a/chain/stmgr/utils.go
+++ b/chain/stmgr/utils.go
@@ -9,6 +9,8 @@ import (
"runtime"
"strings"
+ exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported"
+
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
@@ -25,6 +27,8 @@ import (
exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
+ exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
+ exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin"
@@ -47,7 +51,7 @@ func GetNetworkName(ctx context.Context, sm *StateManager, st cid.Cid) (dtypes.N
if err != nil {
return "", err
}
- ias, err := init_.Load(sm.cs.Store(ctx), act)
+ ias, err := init_.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return "", err
}
@@ -64,7 +68,7 @@ func GetMinerWorkerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr
if err != nil {
return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
- mas, err := miner.Load(sm.cs.Store(ctx), act)
+ mas, err := miner.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
@@ -74,7 +78,7 @@ func GetMinerWorkerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr
return address.Undef, xerrors.Errorf("failed to load actor info: %w", err)
}
- return vm.ResolveToKeyAddr(state, sm.cs.Store(ctx), info.Worker)
+ return vm.ResolveToKeyAddr(state, sm.cs.ActorStore(ctx), info.Worker)
}
func GetPower(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (power.Claim, power.Claim, bool, error) {
@@ -87,7 +91,7 @@ func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr addres
return power.Claim{}, power.Claim{}, false, xerrors.Errorf("(get sset) failed to load power actor state: %w", err)
}
- pas, err := power.Load(sm.cs.Store(ctx), act)
+ pas, err := power.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return power.Claim{}, power.Claim{}, false, err
}
@@ -103,8 +107,7 @@ func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr addres
var found bool
mpow, found, err = pas.MinerPower(maddr)
if err != nil || !found {
- // TODO: return an error when not found?
- return power.Claim{}, power.Claim{}, false, err
+ return power.Claim{}, tpow, false, err
}
minpow, err = pas.MinerNominalPowerMeetsConsensusMinimum(maddr)
@@ -122,7 +125,7 @@ func PreCommitInfo(ctx context.Context, sm *StateManager, maddr address.Address,
return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
- mas, err := miner.Load(sm.cs.Store(ctx), act)
+ mas, err := miner.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
@@ -136,7 +139,7 @@ func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Addres
return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
- mas, err := miner.Load(sm.cs.Store(ctx), act)
+ mas, err := miner.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
@@ -144,46 +147,38 @@ func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Addres
return mas.GetSector(sid)
}
-func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address, snos *bitfield.BitField) ([]*miner.SectorOnChainInfo, error) {
- act, err := sm.LoadActor(ctx, maddr, ts)
- if err != nil {
- return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
- }
-
- mas, err := miner.Load(sm.cs.Store(ctx), act)
- if err != nil {
- return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
- }
-
- return mas.LoadSectors(snos)
-}
-
-func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) {
+func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) {
act, err := sm.LoadActorRaw(ctx, maddr, st)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor: %w", err)
}
- mas, err := miner.Load(sm.cs.Store(ctx), act)
+ mas, err := miner.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
}
- // TODO (!!): Actor Update: Make this active sectors
-
- allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors)
- if err != nil {
- return nil, xerrors.Errorf("get all sectors: %w", err)
- }
+ var provingSectors bitfield.BitField
+ if nv < network.Version7 {
+ allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors)
+ if err != nil {
+ return nil, xerrors.Errorf("get all sectors: %w", err)
+ }
- faultySectors, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors)
- if err != nil {
- return nil, xerrors.Errorf("get faulty sectors: %w", err)
- }
+ faultySectors, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors)
+ if err != nil {
+ return nil, xerrors.Errorf("get faulty sectors: %w", err)
+ }
- provingSectors, err := bitfield.SubtractBitField(allSectors, faultySectors) // TODO: This is wrong, as it can contain faaults, change to just ActiveSectors in an upgrade
- if err != nil {
- return nil, xerrors.Errorf("calc proving sectors: %w", err)
+ provingSectors, err = bitfield.SubtractBitField(allSectors, faultySectors)
+ if err != nil {
+ return nil, xerrors.Errorf("calc proving sectors: %w", err)
+ }
+ } else {
+ provingSectors, err = miner.AllPartSectors(mas, miner.Partition.ActiveSectors)
+ if err != nil {
+ return nil, xerrors.Errorf("get active sectors sectors: %w", err)
+ }
}
numProvSect, err := provingSectors.Count()
@@ -201,22 +196,17 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
return nil, xerrors.Errorf("getting miner info: %w", err)
}
- spt, err := ffiwrapper.SealProofTypeFromSectorSize(info.SectorSize)
- if err != nil {
- return nil, xerrors.Errorf("getting seal proof type: %w", err)
- }
-
- wpt, err := spt.RegisteredWinningPoStProof()
+ mid, err := address.IDFromAddress(maddr)
if err != nil {
- return nil, xerrors.Errorf("getting window proof type: %w", err)
+ return nil, xerrors.Errorf("getting miner ID: %w", err)
}
- mid, err := address.IDFromAddress(maddr)
+ proofType, err := miner.WinningPoStProofTypeFromWindowPoStProofType(nv, info.WindowPoStProofType)
if err != nil {
- return nil, xerrors.Errorf("getting miner ID: %w", err)
+ return nil, xerrors.Errorf("determining winning post proof type: %w", err)
}
- ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, wpt, abi.ActorID(mid), rand, numProvSect)
+ ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, proofType, abi.ActorID(mid), rand, numProvSect)
if err != nil {
return nil, xerrors.Errorf("generating winning post challenges: %w", err)
}
@@ -246,7 +236,7 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
out := make([]builtin.SectorInfo, len(sectors))
for i, sinfo := range sectors {
out[i] = builtin.SectorInfo{
- SealProof: spt,
+ SealProof: sinfo.SealProof,
SectorNumber: sinfo.SectorNumber,
SealedCID: sinfo.SealedCID,
}
@@ -261,7 +251,7 @@ func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, ma
return false, xerrors.Errorf("failed to load power actor: %w", err)
}
- spas, err := power.Load(sm.cs.Store(ctx), act)
+ spas, err := power.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return false, xerrors.Errorf("failed to load power actor state: %w", err)
}
@@ -284,7 +274,7 @@ func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts
return nil, xerrors.Errorf("failed to load market actor: %w", err)
}
- state, err := market.Load(sm.cs.Store(ctx), act)
+ state, err := market.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return nil, xerrors.Errorf("failed to load market actor state: %w", err)
}
@@ -299,7 +289,11 @@ func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts
if err != nil {
return nil, err
} else if !found {
- return nil, xerrors.Errorf("deal %d not found", dealID)
+ return nil, xerrors.Errorf(
+ "deal %d not found "+
+ "- deal may not have completed sealing before deal proposal "+
+ "start epoch, or deal may have been slashed",
+ dealID)
}
states, err := state.States()
@@ -328,7 +322,7 @@ func ListMinerActors(ctx context.Context, sm *StateManager, ts *types.TipSet) ([
return nil, xerrors.Errorf("failed to load power actor: %w", err)
}
- powState, err := power.Load(sm.cs.Store(ctx), act)
+ powState, err := power.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return nil, xerrors.Errorf("failed to load power actor state: %w", err)
}
@@ -348,7 +342,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
for i := ts.Height(); i < height; i++ {
// handle state forks
- base, err = sm.handleStateForks(ctx, base, i, traceFunc(&trace), ts)
+ base, err = sm.handleStateForks(ctx, base, i, &InvocationTracer{trace: &trace}, ts)
if err != nil {
return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err)
}
@@ -361,7 +355,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
StateBase: base,
Epoch: height,
Rand: r,
- Bstore: sm.cs.Blockstore(),
+ Bstore: sm.cs.StateBlockstore(),
Syscalls: sm.cs.VMSys(),
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
@@ -482,7 +476,7 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule
return nil, xerrors.Errorf("failed to load miner actor: %w", err)
}
- mas, err := miner.Load(sm.cs.Store(ctx), act)
+ mas, err := miner.Load(sm.cs.ActorStore(ctx), act)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
}
@@ -497,7 +491,9 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule
return nil, xerrors.Errorf("failed to get randomness for winning post: %w", err)
}
- sectors, err := GetSectorsForWinningPoSt(ctx, pv, sm, lbst, maddr, prand)
+ nv := sm.GetNtwkVersion(ctx, ts.Height())
+
+ sectors, err := GetSectorsForWinningPoSt(ctx, nv, pv, sm, lbst, maddr, prand)
if err != nil {
return nil, xerrors.Errorf("getting winning post proving set: %w", err)
}
@@ -553,6 +549,9 @@ func init() {
var actors []rt.VMActor
actors = append(actors, exported0.BuiltinActors()...)
actors = append(actors, exported2.BuiltinActors()...)
+ actors = append(actors, exported3.BuiltinActors()...)
+ actors = append(actors, exported4.BuiltinActors()...)
+ actors = append(actors, exported5.BuiltinActors()...)
for _, actor := range actors {
exports := actor.Exports()
@@ -614,13 +613,21 @@ func GetReturnType(ctx context.Context, sm *StateManager, to address.Address, me
return reflect.New(m.Ret.Elem()).Interface().(cbg.CBORUnmarshaler), nil
}
+func GetParamType(actCode cid.Cid, method abi.MethodNum) (cbg.CBORUnmarshaler, error) {
+ m, found := MethodsMap[actCode][method]
+ if !found {
+ return nil, fmt.Errorf("unknown method %d for actor %s", method, actCode)
+ }
+ return reflect.New(m.Params.Elem()).Interface().(cbg.CBORUnmarshaler), nil
+}
+
func minerHasMinPower(ctx context.Context, sm *StateManager, addr address.Address, ts *types.TipSet) (bool, error) {
pact, err := sm.LoadActor(ctx, power.Address, ts)
if err != nil {
return false, xerrors.Errorf("loading power actor state: %w", err)
}
- ps, err := power.Load(sm.cs.Store(ctx), pact)
+ ps, err := power.Load(sm.cs.ActorStore(ctx), pact)
if err != nil {
return false, err
}
@@ -651,7 +658,7 @@ func MinerEligibleToMine(ctx context.Context, sm *StateManager, addr address.Add
return false, xerrors.Errorf("loading power actor state: %w", err)
}
- pstate, err := power.Load(sm.cs.Store(ctx), pact)
+ pstate, err := power.Load(sm.cs.ActorStore(ctx), pact)
if err != nil {
return false, err
}
@@ -661,7 +668,7 @@ func MinerEligibleToMine(ctx context.Context, sm *StateManager, addr address.Add
return false, xerrors.Errorf("loading miner actor state: %w", err)
}
- mstate, err := miner.Load(sm.cs.Store(ctx), mact)
+ mstate, err := miner.Load(sm.cs.ActorStore(ctx), mact)
if err != nil {
return false, err
}
@@ -693,7 +700,7 @@ func MinerEligibleToMine(ctx context.Context, sm *StateManager, addr address.Add
}
func CheckTotalFIL(ctx context.Context, sm *StateManager, ts *types.TipSet) (abi.TokenAmount, error) {
- str, err := state.LoadStateTree(sm.ChainStore().Store(ctx), ts.ParentState())
+ str, err := state.LoadStateTree(sm.ChainStore().ActorStore(ctx), ts.ParentState())
if err != nil {
return abi.TokenAmount{}, err
}
diff --git a/chain/store/checkpoint_test.go b/chain/store/checkpoint_test.go
new file mode 100644
index 00000000000..81bbab6ea43
--- /dev/null
+++ b/chain/store/checkpoint_test.go
@@ -0,0 +1,89 @@
+package store_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/filecoin-project/lotus/chain/gen"
+)
+
+func TestChainCheckpoint(t *testing.T) {
+ cg, err := gen.NewGenerator()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Let the first miner mine some blocks.
+ last := cg.CurTipset.TipSet()
+ for i := 0; i < 4; i++ {
+ ts, err := cg.NextTipSetFromMiners(last, cg.Miners[:1], 0)
+ require.NoError(t, err)
+
+ last = ts.TipSet.TipSet()
+ }
+
+ cs := cg.ChainStore()
+
+ checkpoint := last
+ checkpointParents, err := cs.GetTipSetFromKey(checkpoint.Parents())
+ require.NoError(t, err)
+
+ // Set the head to the block before the checkpoint.
+ err = cs.SetHead(checkpointParents)
+ require.NoError(t, err)
+
+ // Verify it worked.
+ head := cs.GetHeaviestTipSet()
+ require.True(t, head.Equals(checkpointParents))
+
+ // Try to set the checkpoint in the future, it should fail.
+ err = cs.SetCheckpoint(checkpoint)
+ require.Error(t, err)
+
+ // Then move the head back.
+ err = cs.SetHead(checkpoint)
+ require.NoError(t, err)
+
+ // Verify it worked.
+ head = cs.GetHeaviestTipSet()
+ require.True(t, head.Equals(checkpoint))
+
+ // And checkpoint it.
+ err = cs.SetCheckpoint(checkpoint)
+ require.NoError(t, err)
+
+ // Let the second miner miner mine a fork
+ last = checkpointParents
+ for i := 0; i < 4; i++ {
+ ts, err := cg.NextTipSetFromMiners(last, cg.Miners[1:], 0)
+ require.NoError(t, err)
+
+ last = ts.TipSet.TipSet()
+ }
+
+ // See if the chain will take the fork, it shouldn't.
+ err = cs.MaybeTakeHeavierTipSet(context.Background(), last)
+ require.NoError(t, err)
+ head = cs.GetHeaviestTipSet()
+ require.True(t, head.Equals(checkpoint))
+
+ // Remove the checkpoint.
+ err = cs.RemoveCheckpoint()
+ require.NoError(t, err)
+
+ // Now switch to the other fork.
+ err = cs.MaybeTakeHeavierTipSet(context.Background(), last)
+ require.NoError(t, err)
+ head = cs.GetHeaviestTipSet()
+ require.True(t, head.Equals(last))
+
+ // Setting a checkpoint on the other fork should fail.
+ err = cs.SetCheckpoint(checkpoint)
+ require.Error(t, err)
+
+ // Setting a checkpoint on this fork should succeed.
+ err = cs.SetCheckpoint(checkpointParents)
+ require.NoError(t, err)
+}
diff --git a/chain/store/coalescer.go b/chain/store/coalescer.go
new file mode 100644
index 00000000000..a6d066bcaab
--- /dev/null
+++ b/chain/store/coalescer.go
@@ -0,0 +1,213 @@
+package store
+
+import (
+ "context"
+ "time"
+
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+// WrapHeadChangeCoalescer wraps a ReorgNotifee with a head change coalescer.
+// minDelay is the minimum coalesce delay; when a head change is first received, the coalescer will
+// wait for that long to coalesce more head changes.
+// maxDelay is the maximum coalesce delay; the coalescer will not delay delivery of a head change
+// more than that.
+// mergeInterval is the interval that triggers additional coalesce delay; if the last head change was
+// within the merge interval when the coalesce timer fires, then the coalesce time is extended
+// by min delay and up to max delay total.
+func WrapHeadChangeCoalescer(fn ReorgNotifee, minDelay, maxDelay, mergeInterval time.Duration) ReorgNotifee {
+ c := NewHeadChangeCoalescer(fn, minDelay, maxDelay, mergeInterval)
+ return c.HeadChange
+}
+
+// HeadChangeCoalescer is a stateful reorg notifee which coalesces incoming head changes
+// with pending head changes to reduce state computations from head change notifications.
+type HeadChangeCoalescer struct {
+ notify ReorgNotifee
+
+ ctx context.Context
+ cancel func()
+
+ eventq chan headChange
+
+ revert []*types.TipSet
+ apply []*types.TipSet
+}
+
+type headChange struct {
+ revert, apply []*types.TipSet
+}
+
+// NewHeadChangeCoalescer creates a HeadChangeCoalescer.
+func NewHeadChangeCoalescer(fn ReorgNotifee, minDelay, maxDelay, mergeInterval time.Duration) *HeadChangeCoalescer {
+ ctx, cancel := context.WithCancel(context.Background())
+ c := &HeadChangeCoalescer{
+ notify: fn,
+ ctx: ctx,
+ cancel: cancel,
+ eventq: make(chan headChange),
+ }
+
+ go c.background(minDelay, maxDelay, mergeInterval)
+
+ return c
+}
+
+// HeadChange is the ReorgNotifee callback for the stateful coalescer; it receives an incoming
+// head change and schedules dispatch of a coalesced head change in the background.
+func (c *HeadChangeCoalescer) HeadChange(revert, apply []*types.TipSet) error {
+ select {
+ case c.eventq <- headChange{revert: revert, apply: apply}:
+ return nil
+ case <-c.ctx.Done():
+ return c.ctx.Err()
+ }
+}
+
+// Close closes the coalescer and cancels the background dispatch goroutine.
+// Any further notification will result in an error.
+func (c *HeadChangeCoalescer) Close() error {
+ select {
+ case <-c.ctx.Done():
+ default:
+ c.cancel()
+ }
+
+ return nil
+}
+
+// Implementation details
+
+func (c *HeadChangeCoalescer) background(minDelay, maxDelay, mergeInterval time.Duration) {
+ var timerC <-chan time.Time
+ var first, last time.Time
+
+ for {
+ select {
+ case evt := <-c.eventq:
+ c.coalesce(evt.revert, evt.apply)
+
+ now := time.Now()
+ last = now
+ if first.IsZero() {
+ first = now
+ }
+
+ if timerC == nil {
+ timerC = time.After(minDelay)
+ }
+
+ case now := <-timerC:
+ sinceFirst := now.Sub(first)
+ sinceLast := now.Sub(last)
+
+ if sinceLast < mergeInterval && sinceFirst < maxDelay {
+ // coalesce some more
+ maxWait := maxDelay - sinceFirst
+ wait := minDelay
+ if maxWait < wait {
+ wait = maxWait
+ }
+
+ timerC = time.After(wait)
+ } else {
+ // dispatch
+ c.dispatch()
+
+ first = time.Time{}
+ last = time.Time{}
+ timerC = nil
+ }
+
+ case <-c.ctx.Done():
+ if c.revert != nil || c.apply != nil {
+ c.dispatch()
+ }
+ return
+ }
+ }
+}
+
+func (c *HeadChangeCoalescer) coalesce(revert, apply []*types.TipSet) {
+ // newly reverted tipsets cancel out with pending applys.
+ // similarly, newly applied tipsets cancel out with pending reverts.
+
+ // pending tipsets
+ pendRevert := make(map[types.TipSetKey]struct{}, len(c.revert))
+ for _, ts := range c.revert {
+ pendRevert[ts.Key()] = struct{}{}
+ }
+
+ pendApply := make(map[types.TipSetKey]struct{}, len(c.apply))
+ for _, ts := range c.apply {
+ pendApply[ts.Key()] = struct{}{}
+ }
+
+ // incoming tipsets
+ reverting := make(map[types.TipSetKey]struct{}, len(revert))
+ for _, ts := range revert {
+ reverting[ts.Key()] = struct{}{}
+ }
+
+ applying := make(map[types.TipSetKey]struct{}, len(apply))
+ for _, ts := range apply {
+ applying[ts.Key()] = struct{}{}
+ }
+
+ // coalesced revert set
+ // - pending reverts are cancelled by incoming applys
+ // - incoming reverts are cancelled by pending applys
+ newRevert := c.merge(c.revert, revert, pendApply, applying)
+
+ // coalesced apply set
+ // - pending applys are cancelled by incoming reverts
+ // - incoming applys are cancelled by pending reverts
+ newApply := c.merge(c.apply, apply, pendRevert, reverting)
+
+ // commit the coalesced sets
+ c.revert = newRevert
+ c.apply = newApply
+}
+
+func (c *HeadChangeCoalescer) merge(pend, incoming []*types.TipSet, cancel1, cancel2 map[types.TipSetKey]struct{}) []*types.TipSet {
+ result := make([]*types.TipSet, 0, len(pend)+len(incoming))
+ for _, ts := range pend {
+ _, cancel := cancel1[ts.Key()]
+ if cancel {
+ continue
+ }
+
+ _, cancel = cancel2[ts.Key()]
+ if cancel {
+ continue
+ }
+
+ result = append(result, ts)
+ }
+
+ for _, ts := range incoming {
+ _, cancel := cancel1[ts.Key()]
+ if cancel {
+ continue
+ }
+
+ _, cancel = cancel2[ts.Key()]
+ if cancel {
+ continue
+ }
+
+ result = append(result, ts)
+ }
+
+ return result
+}
+
+func (c *HeadChangeCoalescer) dispatch() {
+ err := c.notify(c.revert, c.apply)
+ if err != nil {
+ log.Errorf("error dispatching coalesced head change notification: %s", err)
+ }
+
+ c.revert = nil
+ c.apply = nil
+}
diff --git a/chain/store/coalescer_test.go b/chain/store/coalescer_test.go
new file mode 100644
index 00000000000..d462851086e
--- /dev/null
+++ b/chain/store/coalescer_test.go
@@ -0,0 +1,72 @@
+package store
+
+import (
+ "testing"
+ "time"
+
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/types/mock"
+)
+
+func TestHeadChangeCoalescer(t *testing.T) {
+ notif := make(chan headChange, 1)
+ c := NewHeadChangeCoalescer(func(revert, apply []*types.TipSet) error {
+ notif <- headChange{apply: apply, revert: revert}
+ return nil
+ },
+ 100*time.Millisecond,
+ 200*time.Millisecond,
+ 10*time.Millisecond,
+ )
+ defer c.Close() //nolint
+
+ b0 := mock.MkBlock(nil, 0, 0)
+ root := mock.TipSet(b0)
+ bA := mock.MkBlock(root, 1, 1)
+ tA := mock.TipSet(bA)
+ bB := mock.MkBlock(root, 1, 2)
+ tB := mock.TipSet(bB)
+ tAB := mock.TipSet(bA, bB)
+ bC := mock.MkBlock(root, 1, 3)
+ tABC := mock.TipSet(bA, bB, bC)
+ bD := mock.MkBlock(root, 1, 4)
+ tABCD := mock.TipSet(bA, bB, bC, bD)
+ bE := mock.MkBlock(root, 1, 5)
+ tABCDE := mock.TipSet(bA, bB, bC, bD, bE)
+
+ c.HeadChange(nil, []*types.TipSet{tA}) //nolint
+ c.HeadChange(nil, []*types.TipSet{tB}) //nolint
+ c.HeadChange([]*types.TipSet{tA, tB}, []*types.TipSet{tAB}) //nolint
+ c.HeadChange([]*types.TipSet{tAB}, []*types.TipSet{tABC}) //nolint
+
+ change := <-notif
+
+ if len(change.revert) != 0 {
+ t.Fatalf("expected empty revert set but got %d elements", len(change.revert))
+ }
+ if len(change.apply) != 1 {
+ t.Fatalf("expected single element apply set but got %d elements", len(change.apply))
+ }
+ if change.apply[0] != tABC {
+ t.Fatalf("expected to apply tABC")
+ }
+
+ c.HeadChange([]*types.TipSet{tABC}, []*types.TipSet{tABCD}) //nolint
+ c.HeadChange([]*types.TipSet{tABCD}, []*types.TipSet{tABCDE}) //nolint
+
+ change = <-notif
+
+ if len(change.revert) != 1 {
+ t.Fatalf("expected single element revert set but got %d elements", len(change.revert))
+ }
+ if change.revert[0] != tABC {
+ t.Fatalf("expected to revert tABC")
+ }
+ if len(change.apply) != 1 {
+ t.Fatalf("expected single element apply set but got %d elements", len(change.apply))
+ }
+ if change.apply[0] != tABCDE {
+ t.Fatalf("expected to apply tABCDE")
+ }
+
+}
diff --git a/chain/store/index.go b/chain/store/index.go
index a9da994af9d..324fb7a633a 100644
--- a/chain/store/index.go
+++ b/chain/store/index.go
@@ -107,6 +107,9 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) {
}
rheight -= ci.skipLength
+ if rheight < 0 {
+ rheight = 0
+ }
var skipTarget *types.TipSet
if parent.Height() < rheight {
diff --git a/chain/store/index_test.go b/chain/store/index_test.go
index 5283d10dc3a..4470719016c 100644
--- a/chain/store/index_test.go
+++ b/chain/store/index_test.go
@@ -6,10 +6,10 @@ import (
"testing"
"github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types/mock"
- "github.com/filecoin-project/lotus/lib/blockstore"
datastore "github.com/ipfs/go-datastore"
syncds "github.com/ipfs/go-datastore/sync"
"github.com/stretchr/testify/assert"
@@ -30,8 +30,9 @@ func TestIndexSeeks(t *testing.T) {
ctx := context.TODO()
- nbs := blockstore.NewTemporarySync()
- cs := store.NewChainStore(nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil, nil)
+ nbs := blockstore.NewMemorySync()
+ cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil, nil)
+ defer cs.Close() //nolint:errcheck
_, err = cs.Import(bytes.NewReader(gencar))
if err != nil {
diff --git a/chain/store/store.go b/chain/store/store.go
index 00a78500ef9..523726863f1 100644
--- a/chain/store/store.go
+++ b/chain/store/store.go
@@ -5,11 +5,15 @@ import (
"context"
"encoding/binary"
"encoding/json"
+ "errors"
"io"
"os"
"strconv"
"strings"
"sync"
+ "time"
+
+ "github.com/filecoin-project/lotus/chain/state"
"golang.org/x/sync/errgroup"
@@ -22,12 +26,12 @@ import (
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/api"
+ bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/journal"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/metrics"
"go.opencensus.io/stats"
@@ -44,21 +48,26 @@ import (
"github.com/ipfs/go-datastore/query"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
- car "github.com/ipld/go-car"
+ "github.com/ipld/go-car"
carutil "github.com/ipld/go-car/util"
cbg "github.com/whyrusleeping/cbor-gen"
- pubsub "github.com/whyrusleeping/pubsub"
+ "github.com/whyrusleeping/pubsub"
"golang.org/x/xerrors"
)
var log = logging.Logger("chainstore")
-var chainHeadKey = dstore.NewKey("head")
-var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
+var (
+ chainHeadKey = dstore.NewKey("head")
+ checkpointKey = dstore.NewKey("/chain/checks")
+ blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
+)
var DefaultTipSetCacheSize = 8192
var DefaultMsgMetaCacheSize = 2048
+var ErrNotifeeDone = errors.New("notifee is done and should be removed")
+
func init() {
if s := os.Getenv("LOTUS_CHAIN_TIPSET_CACHE"); s != "" {
tscs, err := strconv.Atoi(s)
@@ -78,7 +87,7 @@ func init() {
}
// ReorgNotifee represents a callback that gets called upon reorgs.
-type ReorgNotifee func(rev, app []*types.TipSet) error
+type ReorgNotifee = func(rev, app []*types.TipSet) error
// Journal event types.
const (
@@ -104,11 +113,15 @@ type HeadChangeEvt struct {
// 1. a tipset cache
// 2. a block => messages references cache.
type ChainStore struct {
- bs bstore.Blockstore
- ds dstore.Batching
+ chainBlockstore bstore.Blockstore
+ stateBlockstore bstore.Blockstore
+ metadataDs dstore.Batching
- heaviestLk sync.Mutex
+ chainLocalBlockstore bstore.Blockstore
+
+ heaviestLk sync.RWMutex
heaviest *types.TipSet
+ checkpoint *types.TipSet
bestTips *pubsub.PubSub
pubLk sync.Mutex
@@ -128,23 +141,34 @@ type ChainStore struct {
evtTypes [1]journal.EventType
journal journal.Journal
+
+ cancelFn context.CancelFunc
+ wg sync.WaitGroup
}
-func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder, j journal.Journal) *ChainStore {
+func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder, j journal.Journal) *ChainStore {
c, _ := lru.NewARC(DefaultMsgMetaCacheSize)
tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
if j == nil {
j = journal.NilJournal()
}
+
+ ctx, cancel := context.WithCancel(context.Background())
+ // unwraps the fallback store in case one is configured.
+ // some methods _need_ to operate on a local blockstore only.
+ localbs, _ := bstore.UnwrapFallbackStore(chainBs)
cs := &ChainStore{
- bs: bs,
- ds: ds,
- bestTips: pubsub.New(64),
- tipsets: make(map[abi.ChainEpoch][]cid.Cid),
- mmCache: c,
- tsCache: tsc,
- vmcalls: vmcalls,
- journal: j,
+ chainBlockstore: chainBs,
+ stateBlockstore: stateBs,
+ chainLocalBlockstore: localbs,
+ metadataDs: ds,
+ bestTips: pubsub.New(64),
+ tipsets: make(map[abi.ChainEpoch][]cid.Cid),
+ mmCache: c,
+ tsCache: tsc,
+ vmcalls: vmcalls,
+ cancelFn: cancel,
+ journal: j,
}
cs.evtTypes = [1]journal.EventType{
@@ -179,21 +203,35 @@ func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallB
}
hcmetric := func(rev, app []*types.TipSet) error {
- ctx := context.Background()
for _, r := range app {
- stats.Record(ctx, metrics.ChainNodeHeight.M(int64(r.Height())))
+ stats.Record(context.Background(), metrics.ChainNodeHeight.M(int64(r.Height())))
}
return nil
}
cs.reorgNotifeeCh = make(chan ReorgNotifee)
- cs.reorgCh = cs.reorgWorker(context.TODO(), []ReorgNotifee{hcnf, hcmetric})
+ cs.reorgCh = cs.reorgWorker(ctx, []ReorgNotifee{hcnf, hcmetric})
return cs
}
+func (cs *ChainStore) Close() error {
+ cs.cancelFn()
+ cs.wg.Wait()
+ return nil
+}
+
func (cs *ChainStore) Load() error {
- head, err := cs.ds.Get(chainHeadKey)
+ if err := cs.loadHead(); err != nil {
+ return err
+ }
+ if err := cs.loadCheckpoint(); err != nil {
+ return err
+ }
+ return nil
+}
+func (cs *ChainStore) loadHead() error {
+ head, err := cs.metadataDs.Get(chainHeadKey)
if err == dstore.ErrNotFound {
log.Warn("no previous chain state found")
return nil
@@ -217,13 +255,38 @@ func (cs *ChainStore) Load() error {
return nil
}
+func (cs *ChainStore) loadCheckpoint() error {
+ tskBytes, err := cs.metadataDs.Get(checkpointKey)
+ if err == dstore.ErrNotFound {
+ return nil
+ }
+ if err != nil {
+ return xerrors.Errorf("failed to load checkpoint from datastore: %w", err)
+ }
+
+ var tsk types.TipSetKey
+ err = json.Unmarshal(tskBytes, &tsk)
+ if err != nil {
+ return err
+ }
+
+ ts, err := cs.LoadTipSet(tsk)
+ if err != nil {
+ return xerrors.Errorf("loading tipset: %w", err)
+ }
+
+ cs.checkpoint = ts
+
+ return nil
+}
+
func (cs *ChainStore) writeHead(ts *types.TipSet) error {
data, err := json.Marshal(ts.Cids())
if err != nil {
return xerrors.Errorf("failed to marshal tipset: %w", err)
}
- if err := cs.ds.Put(chainHeadKey, data); err != nil {
+ if err := cs.metadataDs.Put(chainHeadKey, data); err != nil {
return xerrors.Errorf("failed to write chain head to datastore: %w", err)
}
@@ -259,7 +322,7 @@ func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange
log.Warn("chain head sub exit loop")
return
}
- if len(out) > 0 {
+ if len(out) > 5 {
log.Warnf("head change sub is slow, has %d buffered entries", len(out))
}
select {
@@ -283,13 +346,13 @@ func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) {
func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
- return cs.ds.Has(key)
+ return cs.metadataDs.Has(key)
}
func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
- if err := cs.ds.Put(key, []byte{0}); err != nil {
+ if err := cs.metadataDs.Put(key, []byte{0}); err != nil {
return xerrors.Errorf("cache block validation: %w", err)
}
@@ -299,7 +362,7 @@ func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) e
func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
- if err := cs.ds.Delete(key); err != nil {
+ if err := cs.metadataDs.Delete(key); err != nil {
return xerrors.Errorf("removing from valid block cache: %w", err)
}
@@ -316,7 +379,7 @@ func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error {
return err
}
- return cs.ds.Put(dstore.NewKey("0"), b.Cid().Bytes())
+ return cs.metadataDs.Put(dstore.NewKey("0"), b.Cid().Bytes())
}
func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
@@ -340,9 +403,22 @@ func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
// internal state as our new head, if and only if it is heavier than the current
-// head.
+// head and does not exceed the maximum fork length.
func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
- cs.heaviestLk.Lock()
+ for {
+ cs.heaviestLk.Lock()
+ if len(cs.reorgCh) < reorgChBuf/2 {
+ break
+ }
+ cs.heaviestLk.Unlock()
+ log.Errorf("reorg channel is heavily backlogged, waiting a bit before trying to process new tipsets")
+ select {
+ case <-time.After(time.Second / 2):
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+
defer cs.heaviestLk.Unlock()
w, err := cs.Weight(ctx, ts)
if err != nil {
@@ -357,22 +433,126 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
// TODO: don't do this for initial sync. Now that we don't have a
// difference between 'bootstrap sync' and 'caught up' sync, we need
// some other heuristic.
+
+ exceeds, err := cs.exceedsForkLength(cs.heaviest, ts)
+ if err != nil {
+ return err
+ }
+ if exceeds {
+ return nil
+ }
+
return cs.takeHeaviestTipSet(ctx, ts)
+ } else if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) {
+ log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts)
}
return nil
}
+// Check if the two tipsets have a fork length above `ForkLengthThreshold`.
+// `synced` is the head of the chain we are currently synced to and `external`
+// is the incoming tipset potentially belonging to a forked chain. It assumes
+// the external chain has already been validated and available in the ChainStore.
+// The "fast forward" case is covered in this logic as a valid fork of length 0.
+//
+// FIXME: We may want to replace some of the logic in `syncFork()` with this.
+// `syncFork()` counts the length on both sides of the fork at the moment (we
+// need to settle on that) but here we just enforce it on the `synced` side.
+func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, error) {
+ if synced == nil || external == nil {
+ // FIXME: If `cs.heaviest` is nil we should just bypass the entire
+ // `MaybeTakeHeavierTipSet` logic (instead of each of the called
+ // functions having to handle the nil case on their own).
+ return false, nil
+ }
+
+ var err error
+ // `forkLength`: number of tipsets we need to walk back from our `synced`
+ // chain to the common ancestor with the new `external` head in order to
+ // adopt the fork.
+ for forkLength := 0; forkLength < int(build.ForkLengthThreshold); forkLength++ {
+ // First walk back as many tipsets in the external chain to match the
+ // `synced` height to compare them. If we go past the `synced` height
+ // the subsequent match will fail but it will still be useful to get
+ // closer to the `synced` head parent's height in the next loop.
+ for external.Height() > synced.Height() {
+ if external.Height() == 0 {
+ // We reached the genesis of the external chain without a match;
+ // this is considered a fork outside the allowed limit (of "infinite"
+ // length).
+ return true, nil
+ }
+ external, err = cs.LoadTipSet(external.Parents())
+ if err != nil {
+ return false, xerrors.Errorf("failed to load parent tipset in external chain: %w", err)
+ }
+ }
+
+ // Now check if we arrived at the common ancestor.
+ if synced.Equals(external) {
+ return false, nil
+ }
+
+ // Now check to see if we've walked back to the checkpoint.
+ if synced.Equals(cs.checkpoint) {
+ return true, nil
+ }
+
+ // If we didn't, go back *one* tipset on the `synced` side (incrementing
+ // the `forkLength`).
+ if synced.Height() == 0 {
+ // Same check as the `external` side, if we reach the start (genesis)
+ // there is no common ancestor.
+ return true, nil
+ }
+ synced, err = cs.LoadTipSet(synced.Parents())
+ if err != nil {
+ return false, xerrors.Errorf("failed to load parent tipset in synced chain: %w", err)
+ }
+ }
+
+ // We traversed the fork length allowed without finding a common ancestor.
+ return true, nil
+}
+
+// ForceHeadSilent forces a chain head tipset without triggering a reorg
+// operation.
+//
+// CAUTION: Use it only for testing, such as to teleport the chain to a
+// particular tipset to carry out a benchmark, verification, etc. on a chain
+// segment.
+func (cs *ChainStore) ForceHeadSilent(_ context.Context, ts *types.TipSet) error {
+ log.Warnf("(!!!) forcing a new head silently; new head: %s", ts)
+
+ cs.heaviestLk.Lock()
+ defer cs.heaviestLk.Unlock()
+ if err := cs.removeCheckpoint(); err != nil {
+ return err
+ }
+ cs.heaviest = ts
+
+ err := cs.writeHead(ts)
+ if err != nil {
+ err = xerrors.Errorf("failed to write chain head: %s", err)
+ }
+ return err
+}
+
type reorg struct {
old *types.TipSet
new *types.TipSet
}
+const reorgChBuf = 32
+
func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNotifee) chan<- reorg {
- out := make(chan reorg, 32)
+ out := make(chan reorg, reorgChBuf)
notifees := make([]ReorgNotifee, len(initialNotifees))
copy(notifees, initialNotifees)
+ cs.wg.Add(1)
go func() {
+ defer cs.wg.Done()
defer log.Warn("reorgWorker quit")
for {
@@ -404,11 +584,36 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo
apply[i], apply[opp] = apply[opp], apply[i]
}
- for _, hcf := range notifees {
- if err := hcf(revert, apply); err != nil {
+ var toremove map[int]struct{}
+ for i, hcf := range notifees {
+ err := hcf(revert, apply)
+
+ switch err {
+ case nil:
+
+ case ErrNotifeeDone:
+ if toremove == nil {
+ toremove = make(map[int]struct{})
+ }
+ toremove[i] = struct{}{}
+
+ default:
log.Error("head change func errored (BAD): ", err)
}
}
+
+ if len(toremove) > 0 {
+ newNotifees := make([]ReorgNotifee, 0, len(notifees)-len(toremove))
+ for i, hcf := range notifees {
+ _, remove := toremove[i]
+ if remove {
+ continue
+ }
+ newNotifees = append(newNotifees, hcf)
+ }
+ notifees = newNotifees
+ }
+
case <-ctx.Done():
return
}
@@ -452,9 +657,13 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet)
// FlushValidationCache removes all results of block validation from the
// chain metadata store. Usually the first step after a new chain import.
func (cs *ChainStore) FlushValidationCache() error {
+ return FlushValidationCache(cs.metadataDs)
+}
+
+func FlushValidationCache(ds datastore.Batching) error {
log.Infof("clearing block validation cache...")
- dsWalk, err := cs.ds.Query(query.Query{
+ dsWalk, err := ds.Query(query.Query{
// Potential TODO: the validation cache is not a namespace on its own
// but is rather constructed as prefixed-key `foo:bar` via .Instance(), which
// in turn does not work with the filter, which can match only on `foo/bar`
@@ -474,7 +683,7 @@ func (cs *ChainStore) FlushValidationCache() error {
return xerrors.Errorf("failed to run key listing query: %w", err)
}
- batch, err := cs.ds.Batch()
+ batch, err := ds.Batch()
if err != nil {
return xerrors.Errorf("failed to open a DS batch: %w", err)
}
@@ -497,17 +706,84 @@ func (cs *ChainStore) FlushValidationCache() error {
}
// SetHead sets the chainstores current 'best' head node.
-// This should only be called if something is broken and needs fixing
+// This should only be called if something is broken and needs fixing.
+//
+// This function will bypass and remove any checkpoints.
func (cs *ChainStore) SetHead(ts *types.TipSet) error {
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
+ if err := cs.removeCheckpoint(); err != nil {
+ return err
+ }
return cs.takeHeaviestTipSet(context.TODO(), ts)
}
+// RemoveCheckpoint removes the current checkpoint.
+func (cs *ChainStore) RemoveCheckpoint() error {
+ cs.heaviestLk.Lock()
+ defer cs.heaviestLk.Unlock()
+ return cs.removeCheckpoint()
+}
+
+func (cs *ChainStore) removeCheckpoint() error {
+ if err := cs.metadataDs.Delete(checkpointKey); err != nil {
+ return err
+ }
+ cs.checkpoint = nil
+ return nil
+}
+
+// SetCheckpoint will set a checkpoint past which the chainstore will not allow forks.
+//
+// NOTE: Checkpoints cannot be set beyond ForkLengthThreshold epochs in the past.
+func (cs *ChainStore) SetCheckpoint(ts *types.TipSet) error {
+ tskBytes, err := json.Marshal(ts.Key())
+ if err != nil {
+ return err
+ }
+
+ cs.heaviestLk.Lock()
+ defer cs.heaviestLk.Unlock()
+
+ if ts.Height() > cs.heaviest.Height() {
+ return xerrors.Errorf("cannot set a checkpoint in the future")
+ }
+
+ // Otherwise, this operation could get _very_ expensive.
+ if cs.heaviest.Height()-ts.Height() > build.ForkLengthThreshold {
+ return xerrors.Errorf("cannot set a checkpoint before the fork threshold")
+ }
+
+ if !ts.Equals(cs.heaviest) {
+ anc, err := cs.IsAncestorOf(ts, cs.heaviest)
+ if err != nil {
+ return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err)
+ }
+
+ if !anc {
+ return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err)
+ }
+ }
+ err = cs.metadataDs.Put(checkpointKey, tskBytes)
+ if err != nil {
+ return err
+ }
+
+ cs.checkpoint = ts
+ return nil
+}
+
+func (cs *ChainStore) GetCheckpoint() *types.TipSet {
+ cs.heaviestLk.RLock()
+ chkpt := cs.checkpoint
+ cs.heaviestLk.RUnlock()
+ return chkpt
+}
+
// Contains returns whether our BlockStore has all blocks in the supplied TipSet.
func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) {
for _, c := range ts.Cids() {
- has, err := cs.bs.Has(c)
+ has, err := cs.chainBlockstore.Has(c)
if err != nil {
return false, err
}
@@ -522,12 +798,12 @@ func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) {
// GetBlock fetches a BlockHeader with the supplied CID. It returns
// blockstore.ErrNotFound if the block was not found in the BlockStore.
func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) {
- sb, err := cs.bs.Get(c)
- if err != nil {
- return nil, err
- }
-
- return types.DecodeBlock(sb.RawData())
+ var blk *types.BlockHeader
+ err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) {
+ blk, err = types.DecodeBlock(b)
+ return err
+ })
+ return blk, err
}
func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) {
@@ -595,6 +871,14 @@ func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet,
return cs.LoadTipSet(l[len(l)-1].Parents())
}
+// ReorgOps takes two tipsets (which can be at different heights), and walks
+// their corresponding chains backwards one step at a time until we find
+// a common ancestor. It then returns the respective chain segments that fork
+// from the identified ancestor, in reverse order, where the first element of
+// each slice is the supplied tipset, and the last element is the common
+// ancestor.
+//
+// If an error happens along the way, we return the error with nil slices.
func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
return ReorgOps(cs.LoadTipSet, a, b)
}
@@ -630,10 +914,11 @@ func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipS
}
// GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head).
-func (cs *ChainStore) GetHeaviestTipSet() *types.TipSet {
- cs.heaviestLk.Lock()
- defer cs.heaviestLk.Unlock()
- return cs.heaviest
+func (cs *ChainStore) GetHeaviestTipSet() (ts *types.TipSet) {
+ cs.heaviestLk.RLock()
+ ts = cs.heaviest
+ cs.heaviestLk.RUnlock()
+ return
}
func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error {
@@ -646,12 +931,32 @@ func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error {
log.Debug("tried to add block to tipset tracker that was already there")
return nil
}
+ h, err := cs.GetBlock(oc)
+ if err == nil && h != nil {
+ if h.Miner == b.Miner {
+ log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", b.Miner, b.Height, b.Cid(), h.Cid())
+ }
+ }
+ }
+ // This function is called 5 times per epoch on average
+ // It is also called with tipsets that are done with initial validation
+ // so they cannot be from the future.
+ // We are guaranteed not to use tipsets older than 900 epochs (fork limit)
+ // This means that we ideally want to keep only most recent 900 epochs in here
+ // Golang's map iteration starts at a random point in a map.
+ // With 5 tries per epoch, and 900 entries to keep, on average we will have
+ // ~136 garbage entries in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
+ // Seems good enough to me
+
+ for height := range cs.tipsets {
+ if height < b.Height-build.Finality {
+ delete(cs.tipsets, height)
+ }
+ break
}
cs.tipsets[b.Height] = append(tss, b.Cid())
- // TODO: do we want to look for slashable submissions here? might as well...
-
return nil
}
@@ -677,7 +982,7 @@ func (cs *ChainStore) PersistBlockHeaders(b ...*types.BlockHeader) error {
end = len(b)
}
- err = multierr.Append(err, cs.bs.PutMany(sbs[start:end]))
+ err = multierr.Append(err, cs.chainLocalBlockstore.PutMany(sbs[start:end]))
}
return err
@@ -701,7 +1006,7 @@ func PutMessage(bs bstore.Blockstore, m storable) (cid.Cid, error) {
}
func (cs *ChainStore) PutMessage(m storable) (cid.Cid, error) {
- return PutMessage(cs.bs, m)
+ return PutMessage(cs.chainBlockstore, m)
}
func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) {
@@ -717,7 +1022,7 @@ func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error)
return types.NewTipSet(all)
}
- inclMiners := map[address.Address]bool{b.Miner: true}
+ inclMiners := map[address.Address]cid.Cid{b.Miner: b.Cid()}
for _, bhc := range tsets {
if bhc == b.Cid() {
continue
@@ -728,14 +1033,14 @@ func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error)
return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
}
- if inclMiners[h.Miner] {
- log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache", h.Miner, h.Height)
+ if cid, found := inclMiners[h.Miner]; found {
+ log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", h.Miner, h.Height, h.Cid(), cid)
continue
}
if types.CidArrsEqual(h.Parents, b.Parents) {
all = append(all, h)
- inclMiners[h.Miner] = true
+ inclMiners[h.Miner] = bhc
}
}
@@ -762,7 +1067,7 @@ func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error
}
func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) {
- data, err := cs.ds.Get(dstore.NewKey("0"))
+ data, err := cs.metadataDs.Get(dstore.NewKey("0"))
if err != nil {
return nil, err
}
@@ -772,12 +1077,7 @@ func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) {
return nil, err
}
- genb, err := cs.bs.Get(c)
- if err != nil {
- return nil, err
- }
-
- return types.DecodeBlock(genb.RawData())
+ return cs.GetBlock(c)
}
func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) {
@@ -793,29 +1093,27 @@ func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) {
}
func (cs *ChainStore) GetMessage(c cid.Cid) (*types.Message, error) {
- sb, err := cs.bs.Get(c)
- if err != nil {
- log.Errorf("get message get failed: %s: %s", c, err)
- return nil, err
- }
-
- return types.DecodeMessage(sb.RawData())
+ var msg *types.Message
+ err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) {
+ msg, err = types.DecodeMessage(b)
+ return err
+ })
+ return msg, err
}
func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error) {
- sb, err := cs.bs.Get(c)
- if err != nil {
- log.Errorf("get message get failed: %s: %s", c, err)
- return nil, err
- }
-
- return types.DecodeSignedMessage(sb.RawData())
+ var msg *types.SignedMessage
+ err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) {
+ msg, err = types.DecodeSignedMessage(b)
+ return err
+ })
+ return msg, err
}
func (cs *ChainStore) readAMTCids(root cid.Cid) ([]cid.Cid, error) {
ctx := context.TODO()
// block headers use adt0, for now.
- a, err := blockadt.AsArray(cs.Store(ctx), root)
+ a, err := blockadt.AsArray(cs.ActorStore(ctx), root)
if err != nil {
return nil, xerrors.Errorf("amt load: %w", err)
}
@@ -849,17 +1147,33 @@ type BlockMessages struct {
func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) {
applied := make(map[address.Address]uint64)
+ cst := cbor.NewCborStore(cs.stateBlockstore)
+ st, err := state.LoadStateTree(cst, ts.Blocks()[0].ParentStateRoot)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load state tree")
+ }
+
selectMsg := func(m *types.Message) (bool, error) {
+ var sender address.Address
+ if ts.Height() >= build.UpgradeHyperdriveHeight {
+ sender, err = st.LookupID(m.From)
+ if err != nil {
+ return false, err
+ }
+ } else {
+ sender = m.From
+ }
+
// The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise
- if _, ok := applied[m.From]; !ok {
- applied[m.From] = m.Nonce
+ if _, ok := applied[sender]; !ok {
+ applied[sender] = m.Nonce
}
- if applied[m.From] != m.Nonce {
+ if applied[sender] != m.Nonce {
return false, nil
}
- applied[m.From]++
+ applied[sender]++
return true, nil
}
@@ -939,7 +1253,7 @@ func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error)
return mmcids.bls, mmcids.secpk, nil
}
- cst := cbor.NewCborStore(cs.bs)
+ cst := cbor.NewCborStore(cs.chainLocalBlockstore)
var msgmeta types.MsgMeta
if err := cst.Get(context.TODO(), mmc, &msgmeta); err != nil {
return nil, nil, xerrors.Errorf("failed to load msgmeta (%s): %w", mmc, err)
@@ -963,6 +1277,9 @@ func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error)
return blscids, secpkcids, nil
}
+// GetPath returns the sequence of atomic head change operations that
+// need to be applied in order to switch the head of the chain from the `from`
+// tipset to the `to` tipset.
func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) {
fts, err := cs.LoadTipSet(from)
if err != nil {
@@ -1009,7 +1326,7 @@ func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message,
func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.MessageReceipt, error) {
ctx := context.TODO()
// block headers use adt0, for now.
- a, err := blockadt.AsArray(cs.Store(ctx), b.ParentMessageReceipts)
+ a, err := blockadt.AsArray(cs.ActorStore(ctx), b.ParentMessageReceipts)
if err != nil {
return nil, xerrors.Errorf("amt load: %w", err)
}
@@ -1052,16 +1369,26 @@ func (cs *ChainStore) LoadSignedMessagesFromCids(cids []cid.Cid) ([]*types.Signe
return msgs, nil
}
-func (cs *ChainStore) Blockstore() bstore.Blockstore {
- return cs.bs
+// ChainBlockstore returns the chain blockstore. Currently the chain and state
+// stores are both backed by the same physical store, albeit with different
+// caching policies, but in the future they will segregate.
+func (cs *ChainStore) ChainBlockstore() bstore.Blockstore {
+ return cs.chainBlockstore
+}
+
+// StateBlockstore returns the state blockstore. Currently the chain and state
+// stores are both backed by the same physical store, albeit with different
+// caching policies, but in the future they will segregate.
+func (cs *ChainStore) StateBlockstore() bstore.Blockstore {
+ return cs.stateBlockstore
}
func ActorStore(ctx context.Context, bs bstore.Blockstore) adt.Store {
return adt.WrapStore(ctx, cbor.NewCborStore(bs))
}
-func (cs *ChainStore) Store(ctx context.Context) adt.Store {
- return ActorStore(ctx, cs.bs)
+func (cs *ChainStore) ActorStore(ctx context.Context) adt.Store {
+ return ActorStore(ctx, cs.stateBlockstore)
}
func (cs *ChainStore) VMSys() vm.SyscallBuilder {
@@ -1111,7 +1438,15 @@ func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.Cha
return h.Sum(nil), nil
}
-func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (cs *ChainStore) GetBeaconRandomnessLookingBack(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return cs.GetBeaconRandomness(ctx, blks, pers, round, entropy, true)
+}
+
+func (cs *ChainStore) GetBeaconRandomnessLookingForward(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return cs.GetBeaconRandomness(ctx, blks, pers, round, entropy, false)
+}
+
+func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) {
_, span := trace.StartSpan(ctx, "store.GetBeaconRandomness")
defer span.End()
span.AddAttributes(trace.Int64Attribute("round", int64(round)))
@@ -1130,7 +1465,7 @@ func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, p
searchHeight = 0
}
- randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true)
+ randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, lookback)
if err != nil {
return nil, err
}
@@ -1145,7 +1480,15 @@ func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, p
return DrawRandomness(be.Data, pers, round, entropy)
}
-func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (cs *ChainStore) GetChainRandomnessLookingBack(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return cs.GetChainRandomness(ctx, blks, pers, round, entropy, true)
+}
+
+func (cs *ChainStore) GetChainRandomnessLookingForward(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return cs.GetChainRandomness(ctx, blks, pers, round, entropy, false)
+}
+
+func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) {
_, span := trace.StartSpan(ctx, "store.GetChainRandomness")
defer span.End()
span.AddAttributes(trace.Int64Attribute("round", int64(round)))
@@ -1164,7 +1507,7 @@ func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pe
searchHeight = 0
}
- randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true)
+ randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, lookback)
if err != nil {
return nil, err
}
@@ -1259,8 +1602,9 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo
return xerrors.Errorf("failed to write car header: %s", err)
}
- return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, func(c cid.Cid) error {
- blk, err := cs.bs.Get(c)
+ unionBs := bstore.Union(cs.stateBlockstore, cs.chainBlockstore)
+ return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, true, func(c cid.Cid) error {
+ blk, err := unionBs.Get(c)
if err != nil {
return xerrors.Errorf("writing object to car, bs.Get: %w", err)
}
@@ -1273,7 +1617,7 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo
})
}
-func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, cb func(cid.Cid) error) error {
+func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs, skipMsgReceipts bool, cb func(cid.Cid) error) error {
if ts == nil {
ts = cs.GetHeaviestTipSet()
}
@@ -1293,7 +1637,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe
return err
}
- data, err := cs.bs.Get(blk)
+ data, err := cs.chainBlockstore.Get(blk)
if err != nil {
return xerrors.Errorf("getting block: %w", err)
}
@@ -1313,7 +1657,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe
var cids []cid.Cid
if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots {
if walked.Visit(b.Messages) {
- mcids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages})
+ mcids, err := recurseLinks(cs.chainBlockstore, walked, b.Messages, []cid.Cid{b.Messages})
if err != nil {
return xerrors.Errorf("recursing messages failed: %w", err)
}
@@ -1334,13 +1678,17 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe
if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots {
if walked.Visit(b.ParentStateRoot) {
- cids, err := recurseLinks(cs.bs, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot})
+ cids, err := recurseLinks(cs.stateBlockstore, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot})
if err != nil {
return xerrors.Errorf("recursing genesis state failed: %w", err)
}
out = append(out, cids...)
}
+
+ if !skipMsgReceipts && walked.Visit(b.ParentMessageReceipts) {
+ out = append(out, b.ParentMessageReceipts)
+ }
}
for _, c := range out {
@@ -1376,7 +1724,12 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe
}
func (cs *ChainStore) Import(r io.Reader) (*types.TipSet, error) {
- header, err := car.LoadCar(cs.Blockstore(), r)
+ // TODO: writing only to the state blockstore is incorrect.
+ // At this time, both the state and chain blockstores are backed by the
+ // universal store. When we physically segregate the stores, we will need
+ // to route state objects to the state blockstore, and chain objects to
+ // the chain blockstore.
+ header, err := car.LoadCar(cs.StateBlockstore(), r)
if err != nil {
return nil, xerrors.Errorf("loadcar failed: %w", err)
}
@@ -1429,12 +1782,20 @@ func NewChainRand(cs *ChainStore, blks []cid.Cid) vm.Rand {
}
}
-func (cr *chainRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
- return cr.cs.GetChainRandomness(ctx, cr.blks, pers, round, entropy)
+func (cr *chainRand) GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return cr.cs.GetChainRandomnessLookingBack(ctx, cr.blks, pers, round, entropy)
+}
+
+func (cr *chainRand) GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return cr.cs.GetChainRandomnessLookingForward(ctx, cr.blks, pers, round, entropy)
+}
+
+func (cr *chainRand) GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return cr.cs.GetBeaconRandomnessLookingBack(ctx, cr.blks, pers, round, entropy)
}
-func (cr *chainRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
- return cr.cs.GetBeaconRandomness(ctx, cr.blks, pers, round, entropy)
+func (cr *chainRand) GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return cr.cs.GetBeaconRandomnessLookingForward(ctx, cr.blks, pers, round, entropy)
}
func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) {
diff --git a/chain/store/store_test.go b/chain/store/store_test.go
index 16052710446..62a0430e301 100644
--- a/chain/store/store_test.go
+++ b/chain/store/store_test.go
@@ -3,6 +3,7 @@ package store_test
import (
"bytes"
"context"
+ "io"
"testing"
datastore "github.com/ipfs/go-datastore"
@@ -10,12 +11,12 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/node/repo"
)
@@ -51,24 +52,31 @@ func BenchmarkGetRandomness(b *testing.B) {
b.Fatal(err)
}
- bds, err := lr.Datastore("/chain")
+ bs, err := lr.Blockstore(context.TODO(), repo.UniversalBlockstore)
if err != nil {
b.Fatal(err)
}
- mds, err := lr.Datastore("/metadata")
+ defer func() {
+ if c, ok := bs.(io.Closer); ok {
+ if err := c.Close(); err != nil {
+ b.Logf("WARN: failed to close blockstore: %s", err)
+ }
+ }
+ }()
+
+ mds, err := lr.Datastore(context.Background(), "/metadata")
if err != nil {
b.Fatal(err)
}
- bs := blockstore.NewBlockstore(bds)
-
- cs := store.NewChainStore(bs, mds, nil, nil)
+ cs := store.NewChainStore(bs, bs, mds, nil, nil)
+ defer cs.Close() //nolint:errcheck
b.ResetTimer()
for i := 0; i < b.N; i++ {
- _, err := cs.GetChainRandomness(context.TODO(), last.Cids(), crypto.DomainSeparationTag_SealRandomness, 500, nil)
+ _, err := cs.GetChainRandomnessLookingBack(context.TODO(), last.Cids(), crypto.DomainSeparationTag_SealRandomness, 500, nil)
if err != nil {
b.Fatal(err)
}
@@ -96,8 +104,9 @@ func TestChainExportImport(t *testing.T) {
t.Fatal(err)
}
- nbs := blockstore.NewTemporary()
- cs := store.NewChainStore(nbs, datastore.NewMapDatastore(), nil, nil)
+ nbs := blockstore.NewMemory()
+ cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil, nil)
+ defer cs.Close() //nolint:errcheck
root, err := cs.Import(buf)
if err != nil {
@@ -130,8 +139,10 @@ func TestChainExportImportFull(t *testing.T) {
t.Fatal(err)
}
- nbs := blockstore.NewTemporary()
- cs := store.NewChainStore(nbs, datastore.NewMapDatastore(), nil, nil)
+ nbs := blockstore.NewMemory()
+ cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil, nil)
+ defer cs.Close() //nolint:errcheck
+
root, err := cs.Import(buf)
if err != nil {
t.Fatal(err)
diff --git a/chain/store/weight.go b/chain/store/weight.go
index 9100df31547..42546d5e3d9 100644
--- a/chain/store/weight.go
+++ b/chain/store/weight.go
@@ -28,7 +28,7 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn
tpow := big2.Zero()
{
- cst := cbor.NewCborStore(cs.Blockstore())
+ cst := cbor.NewCborStore(cs.StateBlockstore())
state, err := state.LoadStateTree(cst, ts.ParentState())
if err != nil {
return types.NewInt(0), xerrors.Errorf("load state tree: %w", err)
@@ -39,7 +39,7 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn
return types.NewInt(0), xerrors.Errorf("get power actor: %w", err)
}
- powState, err := power.Load(cs.Store(ctx), act)
+ powState, err := power.Load(cs.ActorStore(ctx), act)
if err != nil {
return types.NewInt(0), xerrors.Errorf("failed to load power actor state: %w", err)
}
diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go
index 1701866eb6f..115c3326193 100644
--- a/chain/sub/incoming.go
+++ b/chain/sub/incoming.go
@@ -6,9 +6,18 @@ import (
"fmt"
"time"
- "golang.org/x/xerrors"
-
address "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain"
+ "github.com/filecoin-project/lotus/chain/messagepool"
+ "github.com/filecoin-project/lotus/chain/stmgr"
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/lib/sigs"
+ "github.com/filecoin-project/lotus/metrics"
+ "github.com/filecoin-project/lotus/node/impl/client"
+ blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
lru "github.com/hashicorp/golang-lru"
blocks "github.com/ipfs/go-block-format"
bserv "github.com/ipfs/go-blockservice"
@@ -21,19 +30,7 @@ import (
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
-
- blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
-
- "github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/chain"
- "github.com/filecoin-project/lotus/chain/messagepool"
- "github.com/filecoin-project/lotus/chain/stmgr"
- "github.com/filecoin-project/lotus/chain/store"
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/lib/blockstore"
- "github.com/filecoin-project/lotus/lib/sigs"
- "github.com/filecoin-project/lotus/metrics"
- "github.com/filecoin-project/lotus/node/impl/client"
+ "golang.org/x/xerrors"
)
var log = logging.Logger("sub")
@@ -84,20 +81,27 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha
log.Debug("about to fetch messages for block from pubsub")
bmsgs, err := FetchMessagesByCids(ctx, ses, blk.BlsMessages)
if err != nil {
- log.Errorf("failed to fetch all bls messages for block received over pubusb: %s; source: %s", err, src)
+ log.Errorf("failed to fetch all bls messages for block received over pubsub: %s; source: %s", err, src)
return
}
smsgs, err := FetchSignedMessagesByCids(ctx, ses, blk.SecpkMessages)
if err != nil {
- log.Errorf("failed to fetch all secpk messages for block received over pubusb: %s; source: %s", err, src)
+ log.Errorf("failed to fetch all secpk messages for block received over pubsub: %s; source: %s", err, src)
return
}
took := build.Clock.Since(start)
- log.Infow("new block over pubsub", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took)
+ log.Debugw("new block over pubsub", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took)
+ if took > 3*time.Second {
+ log.Warnw("Slow msg fetch", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took)
+ }
if delay := build.Clock.Now().Unix() - int64(blk.Header.Timestamp); delay > 5 {
- log.Warnf("Received block with large delay %d from miner %s", delay, blk.Header.Miner)
+ _ = stats.RecordWithTags(ctx,
+ []tag.Mutator{tag.Insert(metrics.MinerID, blk.Header.Miner.String())},
+ metrics.BlockDelay.M(delay),
+ )
+ log.Warnw("received block with large delay from miner", "block", blk.Cid(), "delay", delay, "miner", blk.Header.Miner)
}
if s.InformNewBlock(msg.ReceivedFrom, &types.FullBlock{
@@ -337,11 +341,16 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
func (bv *BlockValidator) validateLocalBlock(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult {
stats.Record(ctx, metrics.BlockPublished.M(1))
+ if size := msg.Size(); size > 1<<20-1<<15 {
+ log.Errorf("ignoring oversize block (%dB)", size)
+ recordFailure(ctx, metrics.BlockValidationFailure, "oversize_block")
+ return pubsub.ValidationIgnore
+ }
+
blk, what, err := bv.decodeAndCheckBlock(msg)
if err != nil {
log.Errorf("got invalid local block: %s", err)
- ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, what))
- stats.Record(ctx, metrics.BlockValidationFailure.M(1))
+ recordFailure(ctx, metrics.BlockValidationFailure, what)
return pubsub.ValidationIgnore
}
@@ -383,7 +392,7 @@ func (bv *BlockValidator) isChainNearSynced() bool {
func (bv *BlockValidator) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error {
// TODO there has to be a simpler way to do this without the blockstore dance
// block headers use adt0
- store := blockadt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewTemporary()))
+ store := blockadt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewMemory()))
bmArr := blockadt.MakeEmptyArray(store)
smArr := blockadt.MakeEmptyArray(store)
@@ -498,6 +507,12 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
return mv.validateLocalMessage(ctx, msg)
}
+ start := time.Now()
+ defer func() {
+ ms := time.Now().Sub(start).Microseconds()
+ stats.Record(ctx, metrics.MessageValidationDuration.M(float64(ms)/1000))
+ }()
+
stats.Record(ctx, metrics.MessageReceived.M(1))
m, err := types.DecodeSignedMessage(msg.Message.GetData())
if err != nil {
@@ -507,7 +522,7 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
return pubsub.ValidationReject
}
- if err := mv.mpool.Add(m); err != nil {
+ if err := mv.mpool.Add(ctx, m); err != nil {
log.Debugf("failed to add message from network to message pool (From: %s, To: %s, Nonce: %d, Value: %s): %s", m.Message.From, m.Message.To, m.Message.Nonce, types.FIL(m.Message.Value), err)
ctx, _ = tag.New(
ctx,
@@ -529,6 +544,12 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
return pubsub.ValidationReject
}
}
+
+ ctx, _ = tag.New(
+ ctx,
+ tag.Upsert(metrics.MsgValid, "true"),
+ )
+
stats.Record(ctx, metrics.MessageValidationSuccess.M(1))
return pubsub.ValidationAccept
}
@@ -538,6 +559,13 @@ func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsu
ctx,
tag.Upsert(metrics.Local, "true"),
)
+
+ start := time.Now()
+ defer func() {
+ ms := time.Now().Sub(start).Microseconds()
+ stats.Record(ctx, metrics.MessageValidationDuration.M(float64(ms)/1000))
+ }()
+
// do some lightweight validation
stats.Record(ctx, metrics.MessagePublished.M(1))
@@ -548,7 +576,7 @@ func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsu
return pubsub.ValidationIgnore
}
- if m.Size() > 32*1024 {
+ if m.Size() > messagepool.MaxMessageSize {
log.Warnf("local message is too large! (%dB)", m.Size())
recordFailure(ctx, metrics.MessageValidationFailure, "oversize")
return pubsub.ValidationIgnore
@@ -572,6 +600,11 @@ func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsu
return pubsub.ValidationIgnore
}
+ ctx, _ = tag.New(
+ ctx,
+ tag.Upsert(metrics.MsgValid, "true"),
+ )
+
stats.Record(ctx, metrics.MessageValidationSuccess.M(1))
return pubsub.ValidationAccept
}
diff --git a/chain/sync.go b/chain/sync.go
index 1410dd2a707..167856927f3 100644
--- a/chain/sync.go
+++ b/chain/sync.go
@@ -32,8 +32,10 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
- blst "github.com/supranational/blst/bindings/go"
+
+ ffi "github.com/filecoin-project/filecoin-ffi"
// named msgarray here to make it clear that these are the types used by
// messages, regardless of specs-actors version.
@@ -42,6 +44,7 @@ import (
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/filecoin-project/lotus/api"
+ bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/beacon"
@@ -52,9 +55,7 @@ import (
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/lib/sigs"
- "github.com/filecoin-project/lotus/lib/sigs/bls"
"github.com/filecoin-project/lotus/metrics"
)
@@ -130,10 +131,6 @@ type Syncer struct {
tickerCtxCancel context.CancelFunc
- checkptLk sync.Mutex
-
- checkpt types.TipSetKey
-
ds dtypes.MetadataDS
}
@@ -151,14 +148,8 @@ func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.C
return nil, err
}
- cp, err := loadCheckpoint(ds)
- if err != nil {
- return nil, xerrors.Errorf("error loading mpool config: %w", err)
- }
-
s := &Syncer{
ds: ds,
- checkpt: cp,
beacon: beacon,
bad: NewBadBlockCache(),
Genesis: gent,
@@ -249,18 +240,6 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
syncer.incoming.Pub(fts.TipSet().Blocks(), LocalIncoming)
- if from == syncer.self {
- // TODO: this is kindof a hack...
- log.Debug("got block from ourselves")
-
- if err := syncer.Sync(ctx, fts.TipSet()); err != nil {
- log.Errorf("failed to sync our own block %s: %+v", fts.TipSet().Cids(), err)
- return false
- }
-
- return true
- }
-
// TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of
// the blockstore
if err := syncer.store.PersistBlockHeaders(fts.TipSet().Blocks()...); err != nil {
@@ -278,7 +257,7 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
for _, blk := range fts.TipSet().Blocks() {
miners = append(miners, blk.Miner.String())
}
- log.Infow("incoming tipset does not appear to be better than our best chain, ignoring for now", "miners", miners, "bestPweight", bestPweight, "bestTS", hts.Cids(), "incomingWeight", targetWeight, "incomingTS", fts.TipSet().Cids())
+ log.Debugw("incoming tipset does not appear to be better than our best chain, ignoring for now", "miners", miners, "bestPweight", bestPweight, "bestTS", hts.Cids(), "incomingWeight", targetWeight, "incomingTS", fts.TipSet().Cids())
return false
}
@@ -332,7 +311,7 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error {
// We use a temporary bstore here to avoid writing intermediate pieces
// into the blockstore.
- blockstore := bstore.NewTemporary()
+ blockstore := bstore.NewMemory()
cst := cbor.NewCborStore(blockstore)
var bcids, scids []cid.Cid
@@ -365,7 +344,7 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error {
}
// Finally, flush.
- return vm.Copy(context.TODO(), blockstore, syncer.store.Blockstore(), smroot)
+ return vm.Copy(context.TODO(), blockstore, syncer.store.ChainBlockstore(), smroot)
}
func (syncer *Syncer) LocalPeer() peer.ID {
@@ -563,15 +542,16 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
)
}
- if syncer.store.GetHeaviestTipSet().ParentWeight().GreaterThan(maybeHead.ParentWeight()) {
+ hts := syncer.store.GetHeaviestTipSet()
+
+ if hts.ParentWeight().GreaterThan(maybeHead.ParentWeight()) {
return nil
}
-
- if syncer.Genesis.Equals(maybeHead) || syncer.store.GetHeaviestTipSet().Equals(maybeHead) {
+ if syncer.Genesis.Equals(maybeHead) || hts.Equals(maybeHead) {
return nil
}
- if err := syncer.collectChain(ctx, maybeHead); err != nil {
+ if err := syncer.collectChain(ctx, maybeHead, hts, false); err != nil {
span.AddAttributes(trace.StringAttribute("col_error", err.Error()))
span.SetStatus(trace.Status{
Code: 13,
@@ -650,7 +630,7 @@ func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, b
return xerrors.Errorf("failed to load power actor: %w", err)
}
- powState, err := power.Load(syncer.store.Store(ctx), act)
+ powState, err := power.Load(syncer.store.ActorStore(ctx), act)
if err != nil {
return xerrors.Errorf("failed to load power actor state: %w", err)
}
@@ -686,6 +666,10 @@ func blockSanityChecks(h *types.BlockHeader) error {
return xerrors.Errorf("block had nil bls aggregate signature")
}
+ if h.Miner.Protocol() != address.ID {
+ return xerrors.Errorf("block had non-ID miner address")
+ }
+
return nil
}
@@ -730,6 +714,8 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err)
}
+ winPoStNv := syncer.sm.GetNtwkVersion(ctx, baseTs.Height())
+
lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height)
if err != nil {
return xerrors.Errorf("failed to get lookback tipset for block: %w", err)
@@ -755,6 +741,10 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
}
msgsCheck := async.Err(func() error {
+ if b.Cid() == build.WhitelistedBlock {
+ return nil
+ }
+
if err := syncer.checkBlockMessages(ctx, b, baseTs); err != nil {
return xerrors.Errorf("block had invalid messages: %w", err)
}
@@ -923,7 +913,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
})
wproofCheck := async.Err(func() error {
- if err := syncer.VerifyWinningPoStProof(ctx, h, *prevBeacon, lbst, waddr); err != nil {
+ if err := syncer.VerifyWinningPoStProof(ctx, winPoStNv, h, *prevBeacon, lbst, waddr); err != nil {
return xerrors.Errorf("invalid election post: %w", err)
}
return nil
@@ -975,7 +965,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
return nil
}
-func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
+func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, nv network.Version, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
if build.InsecurePoStValidation {
if len(h.WinPoStProof) == 0 {
return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given")
@@ -1007,7 +997,7 @@ func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.Block
return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err)
}
- sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, syncer.verifier, syncer.sm, lbst, h.Miner, rand)
+ sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, syncer.verifier, syncer.sm, lbst, h.Miner, rand)
if err != nil {
return xerrors.Errorf("getting winning post sector set: %w", err)
}
@@ -1059,7 +1049,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock
return err
}
- st, err := state.LoadStateTree(syncer.store.Store(ctx), stateroot)
+ st, err := state.LoadStateTree(syncer.store.ActorStore(ctx), stateroot)
if err != nil {
return xerrors.Errorf("failed to load base state tree: %w", err)
}
@@ -1071,7 +1061,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock
// Phase 1: syntactic validation, as defined in the spec
minGas := pl.OnChainMessage(msg.ChainLength())
- if err := m.ValidForBlockInclusion(minGas.Total()); err != nil {
+ if err := m.ValidForBlockInclusion(minGas.Total(), syncer.sm.GetNtwkVersion(ctx, b.Header.Height)); err != nil {
return err
}
@@ -1084,9 +1074,19 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock
// Phase 2: (Partial) semantic validation:
// the sender exists and is an account actor, and the nonces make sense
- if _, ok := nonces[m.From]; !ok {
+ var sender address.Address
+ if syncer.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version13 {
+ sender, err = st.LookupID(m.From)
+ if err != nil {
+ return err
+ }
+ } else {
+ sender = m.From
+ }
+
+ if _, ok := nonces[sender]; !ok {
// `GetActor` does not validate that this is an account actor.
- act, err := st.GetActor(m.From)
+ act, err := st.GetActor(sender)
if err != nil {
return xerrors.Errorf("failed to get actor: %w", err)
}
@@ -1094,19 +1094,19 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock
if !builtin.IsAccountActor(act.Code) {
return xerrors.New("Sender must be an account actor")
}
- nonces[m.From] = act.Nonce
+ nonces[sender] = act.Nonce
}
- if nonces[m.From] != m.Nonce {
- return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[m.From], m.Nonce)
+ if nonces[sender] != m.Nonce {
+ return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce)
}
- nonces[m.From]++
+ nonces[sender]++
return nil
}
// Validate message arrays in a temporary blockstore.
- tmpbs := bstore.NewTemporary()
+ tmpbs := bstore.NewMemory()
tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs))
bmArr := blockadt.MakeEmptyArray(tmpstore)
@@ -1176,7 +1176,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock
}
// Finally, flush.
- return vm.Copy(ctx, tmpbs, syncer.store.Blockstore(), mrcid)
+ return vm.Copy(ctx, tmpbs, syncer.store.ChainBlockstore(), mrcid)
}
func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks [][]byte) error {
@@ -1186,17 +1186,21 @@ func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signat
trace.Int64Attribute("msgCount", int64(len(msgs))),
)
- msgsS := make([]blst.Message, len(msgs))
+ msgsS := make([]ffi.Message, len(msgs))
+ pubksS := make([]ffi.PublicKey, len(msgs))
for i := 0; i < len(msgs); i++ {
msgsS[i] = msgs[i].Bytes()
+ copy(pubksS[i][:], pubks[i][:ffi.PublicKeyBytes])
}
+ sigS := new(ffi.Signature)
+ copy(sigS[:], sig.Data[:ffi.SignatureBytes])
+
if len(msgs) == 0 {
return nil
}
- valid := new(bls.Signature).AggregateVerifyCompressed(sig.Data, pubks,
- msgsS, []byte(bls.DST))
+ valid := ffi.HashVerify(sigS, msgsS, pubksS)
if !valid {
return xerrors.New("bls aggregate signature failed to verify")
}
@@ -1243,7 +1247,7 @@ func extractSyncState(ctx context.Context) *SyncerState {
//
// All throughout the process, we keep checking if the received blocks are in
// the deny list, and short-circuit the process if so.
-func (syncer *Syncer) collectHeaders(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) {
+func (syncer *Syncer) collectHeaders(ctx context.Context, incoming *types.TipSet, known *types.TipSet, ignoreCheckpoint bool) ([]*types.TipSet, error) {
ctx, span := trace.StartSpan(ctx, "collectHeaders")
defer span.End()
ss := extractSyncState(ctx)
@@ -1327,7 +1331,7 @@ loop:
continue
}
if !xerrors.Is(err, bstore.ErrNotFound) {
- log.Warn("loading local tipset: %s", err)
+ log.Warnf("loading local tipset: %s", err)
}
// NB: GetBlocks validates that the blocks are in-fact the ones we
@@ -1412,7 +1416,7 @@ loop:
// We have now ascertained that this is *not* a 'fast forward'
log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", incoming.Cids(), incoming.Height(), known.Cids(), known.Height())
- fork, err := syncer.syncFork(ctx, base, known)
+ fork, err := syncer.syncFork(ctx, base, known, ignoreCheckpoint)
if err != nil {
if xerrors.Is(err, ErrForkTooLong) || xerrors.Is(err, ErrForkCheckpoint) {
// TODO: we're marking this block bad in the same way that we mark invalid blocks bad. Maybe distinguish?
@@ -1438,14 +1442,17 @@ var ErrForkCheckpoint = fmt.Errorf("fork would require us to diverge from checkp
// If the fork is too long (build.ForkLengthThreshold), or would cause us to diverge from the checkpoint (ErrForkCheckpoint),
// we add the entire subchain to the denylist. Else, we find the common ancestor, and add the missing chain
// fragment until the fork point to the returned []TipSet.
-func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) {
+func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, known *types.TipSet, ignoreCheckpoint bool) ([]*types.TipSet, error) {
- chkpt := syncer.GetCheckpoint()
- if known.Key() == chkpt {
- return nil, ErrForkCheckpoint
+ var chkpt *types.TipSet
+ if !ignoreCheckpoint {
+ chkpt = syncer.store.GetCheckpoint()
+ if known.Equals(chkpt) {
+ return nil, ErrForkCheckpoint
+ }
}
- // TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2?
+ // TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2? Yes.
// Would it not be better to ask in smaller chunks, given that an ~ForkLengthThreshold is very rare?
tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold))
if err != nil {
@@ -1456,6 +1463,10 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know
if err != nil {
return nil, xerrors.Errorf("failed to load next local tipset: %w", err)
}
+ // Track the fork length on our side of the synced chain to enforce
+ // `ForkLengthThreshold`. Initialized to 1 because we already walked back
+ // one tipset from `known` (our synced head).
+ forkLengthInHead := 1
for cur := 0; cur < len(tips); {
if nts.Height() == 0 {
@@ -1472,8 +1483,15 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know
if nts.Height() < tips[cur].Height() {
cur++
} else {
+ // Walk back one block in our synced chain to try to meet the fork's
+ // height.
+ forkLengthInHead++
+ if forkLengthInHead > int(build.ForkLengthThreshold) {
+ return nil, ErrForkTooLong
+ }
+
// We will be forking away from nts, check that it isn't checkpointed
- if nts.Key() == chkpt {
+ if nts.Equals(chkpt) {
return nil, ErrForkCheckpoint
}
@@ -1542,7 +1560,7 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS
for bsi := 0; bsi < len(bstout); bsi++ {
// temp storage so we don't persist data we dont want to
- bs := bstore.NewTemporary()
+ bs := bstore.NewMemory()
blks := cbor.NewCborStore(bs)
this := headers[i-bsi]
@@ -1563,7 +1581,7 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS
return err
}
- if err := copyBlockstore(ctx, bs, syncer.store.Blockstore()); err != nil {
+ if err := copyBlockstore(ctx, bs, syncer.store.ChainBlockstore()); err != nil {
return xerrors.Errorf("message processing failed: %w", err)
}
}
@@ -1684,14 +1702,14 @@ func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.Co
//
// 3. StageMessages: having acquired the headers and found a common tipset,
// we then move forward, requesting the full blocks, including the messages.
-func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error {
+func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *types.TipSet, ignoreCheckpoint bool) error {
ctx, span := trace.StartSpan(ctx, "collectChain")
defer span.End()
ss := extractSyncState(ctx)
- ss.Init(syncer.store.GetHeaviestTipSet(), ts)
+ ss.Init(hts, ts)
- headers, err := syncer.collectHeaders(ctx, ts, syncer.store.GetHeaviestTipSet())
+ headers, err := syncer.collectHeaders(ctx, ts, hts, ignoreCheckpoint)
if err != nil {
ss.Error(err)
return err
@@ -1780,11 +1798,10 @@ func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet)
}
func (syncer *Syncer) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
- g, err := syncer.store.GetGenesis()
- if err != nil {
+ if syncer.Genesis == nil {
return false
}
now := uint64(build.Clock.Now().Unix())
- return epoch > (abi.ChainEpoch((now-g.Timestamp)/build.BlockDelaySecs) + MaxHeightDrift)
+ return epoch > (abi.ChainEpoch((now-syncer.Genesis.MinTimestamp())/build.BlockDelaySecs) + MaxHeightDrift)
}
diff --git a/chain/sync_manager.go b/chain/sync_manager.go
index c25068f60c2..685e05df6ca 100644
--- a/chain/sync_manager.go
+++ b/chain/sync_manager.go
@@ -4,30 +4,43 @@ import (
"context"
"os"
"sort"
+ "strconv"
"strings"
"sync"
+ "time"
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
+
peer "github.com/libp2p/go-libp2p-core/peer"
)
-const BootstrapPeerThreshold = 2
+var (
+ BootstrapPeerThreshold = build.BootstrapPeerThreshold
+
+ RecentSyncBufferSize = 10
+ MaxSyncWorkers = 5
+ SyncWorkerHistory = 3
-var coalesceForksParents = false
+ InitialSyncTimeThreshold = 15 * time.Minute
+
+ coalesceTipsets = false
+)
func init() {
- if os.Getenv("LOTUS_SYNC_REL_PARENT") == "yes" {
- coalesceForksParents = true
+ coalesceTipsets = os.Getenv("LOTUS_SYNC_FORMTS_PEND") == "yes"
+
+ if bootstrapPeerThreshold := os.Getenv("LOTUS_SYNC_BOOTSTRAP_PEERS"); bootstrapPeerThreshold != "" {
+ threshold, err := strconv.Atoi(bootstrapPeerThreshold)
+ if err != nil {
+ log.Errorf("failed to parse 'LOTUS_SYNC_BOOTSTRAP_PEERS' env var: %s", err)
+ } else {
+ BootstrapPeerThreshold = threshold
+ }
}
}
-const (
- BSStateInit = 0
- BSStateSelected = 1
- BSStateScheduled = 2
- BSStateComplete = 3
-)
-
type SyncFunc func(context.Context, *types.TipSet) error
// SyncManager manages the chain synchronization process, both at bootstrap time
@@ -52,108 +65,468 @@ type SyncManager interface {
}
type syncManager struct {
- lk sync.Mutex
- peerHeads map[peer.ID]*types.TipSet
+ ctx context.Context
+ cancel func()
- bssLk sync.Mutex
- bootstrapState int
+ workq chan peerHead
+ statusq chan workerStatus
- bspThresh int
+ nextWorker uint64
+ pend syncBucketSet
+ deferred syncBucketSet
+ heads map[peer.ID]*types.TipSet
+ recent *syncBuffer
- incomingTipSets chan *types.TipSet
- syncTargets chan *types.TipSet
- syncResults chan *syncResult
+ initialSyncDone bool
- syncStates []*SyncerState
-
- // Normally this handler is set to `(*Syncer).Sync()`.
- doSync func(context.Context, *types.TipSet) error
+ mx sync.Mutex
+ state map[uint64]*workerState
- stop chan struct{}
+ history []*workerState
+ historyI int
- // Sync Scheduler fields
- activeSyncs map[types.TipSetKey]*types.TipSet
- syncQueue syncBucketSet
- activeSyncTips syncBucketSet
- nextSyncTarget *syncTargetBucket
- workerChan chan *types.TipSet
+ doSync func(context.Context, *types.TipSet) error
}
var _ SyncManager = (*syncManager)(nil)
-type syncResult struct {
- ts *types.TipSet
- success bool
+type peerHead struct {
+ p peer.ID
+ ts *types.TipSet
}
-const syncWorkerCount = 3
+type workerState struct {
+ id uint64
+ ts *types.TipSet
+ ss *SyncerState
+ dt time.Duration
+}
+type workerStatus struct {
+ id uint64
+ err error
+}
+
+// sync manager interface
func NewSyncManager(sync SyncFunc) SyncManager {
- sm := &syncManager{
- bspThresh: 1,
- peerHeads: make(map[peer.ID]*types.TipSet),
- syncTargets: make(chan *types.TipSet),
- syncResults: make(chan *syncResult),
- syncStates: make([]*SyncerState, syncWorkerCount),
- incomingTipSets: make(chan *types.TipSet),
- activeSyncs: make(map[types.TipSetKey]*types.TipSet),
- doSync: sync,
- stop: make(chan struct{}),
- }
- for i := range sm.syncStates {
- sm.syncStates[i] = new(SyncerState)
+ ctx, cancel := context.WithCancel(context.Background())
+ return &syncManager{
+ ctx: ctx,
+ cancel: cancel,
+
+ workq: make(chan peerHead),
+ statusq: make(chan workerStatus),
+
+ heads: make(map[peer.ID]*types.TipSet),
+ state: make(map[uint64]*workerState),
+ recent: newSyncBuffer(RecentSyncBufferSize),
+ history: make([]*workerState, SyncWorkerHistory),
+
+ doSync: sync,
}
- return sm
}
func (sm *syncManager) Start() {
- go sm.syncScheduler()
- for i := 0; i < syncWorkerCount; i++ {
- go sm.syncWorker(i)
- }
+ go sm.scheduler()
}
func (sm *syncManager) Stop() {
- close(sm.stop)
+ select {
+ case <-sm.ctx.Done():
+ default:
+ sm.cancel()
+ }
}
func (sm *syncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) {
- sm.lk.Lock()
- defer sm.lk.Unlock()
- sm.peerHeads[p] = ts
-
- if sm.getBootstrapState() == BSStateInit {
- spc := sm.syncedPeerCount()
- if spc >= sm.bspThresh {
- // Its go time!
- target, err := sm.selectSyncTarget()
- if err != nil {
- log.Error("failed to select sync target: ", err)
- return
+ select {
+ case sm.workq <- peerHead{p: p, ts: ts}:
+ case <-sm.ctx.Done():
+ case <-ctx.Done():
+ }
+}
+
+func (sm *syncManager) State() []SyncerStateSnapshot {
+ sm.mx.Lock()
+ workerStates := make([]*workerState, 0, len(sm.state)+len(sm.history))
+ for _, ws := range sm.state {
+ workerStates = append(workerStates, ws)
+ }
+ for _, ws := range sm.history {
+ if ws != nil {
+ workerStates = append(workerStates, ws)
+ }
+ }
+ sm.mx.Unlock()
+
+ sort.Slice(workerStates, func(i, j int) bool {
+ return workerStates[i].id < workerStates[j].id
+ })
+
+ result := make([]SyncerStateSnapshot, 0, len(workerStates))
+ for _, ws := range workerStates {
+ result = append(result, ws.ss.Snapshot())
+ }
+
+ return result
+}
+
+// sync manager internals
+func (sm *syncManager) scheduler() {
+ ticker := time.NewTicker(time.Minute)
+ tickerC := ticker.C
+ for {
+ select {
+ case head := <-sm.workq:
+ sm.handlePeerHead(head)
+ case status := <-sm.statusq:
+ sm.handleWorkerStatus(status)
+ case <-tickerC:
+ if sm.initialSyncDone {
+ ticker.Stop()
+ tickerC = nil
+ sm.handleInitialSyncDone()
}
- sm.setBootstrapState(BSStateSelected)
+ case <-sm.ctx.Done():
+ return
+ }
+ }
+}
+
+func (sm *syncManager) handlePeerHead(head peerHead) {
+ log.Debugf("new peer head: %s %s", head.p, head.ts)
+
+ // have we started syncing yet?
+ if sm.nextWorker == 0 {
+ // track the peer head until we start syncing
+ sm.heads[head.p] = head.ts
+
+ // not yet; do we have enough peers?
+ if len(sm.heads) < BootstrapPeerThreshold {
+ log.Debugw("not tracking enough peers to start sync worker", "have", len(sm.heads), "need", BootstrapPeerThreshold)
+ // not enough peers; track it and wait
+ return
+ }
- sm.incomingTipSets <- target
+ // we are ready to start syncing; select the sync target and spawn a worker
+ target, err := sm.selectInitialSyncTarget()
+ if err != nil {
+ log.Errorf("failed to select initial sync target: %s", err)
+ return
}
- log.Infof("sync bootstrap has %d peers", spc)
+
+ log.Infof("selected initial sync target: %s", target)
+ sm.spawnWorker(target)
+ return
+ }
+
+ // we have started syncing, add peer head to the queue if applicable and maybe spawn a worker
+ // if there is work to do (possibly in a fork)
+ target, work, err := sm.addSyncTarget(head.ts)
+ if err != nil {
+ log.Warnf("failed to add sync target: %s", err)
return
}
- sm.incomingTipSets <- ts
+ if work {
+ log.Infof("selected sync target: %s", target)
+ sm.spawnWorker(target)
+ }
}
-func (sm *syncManager) State() []SyncerStateSnapshot {
- ret := make([]SyncerStateSnapshot, 0, len(sm.syncStates))
- for _, s := range sm.syncStates {
- ret = append(ret, s.Snapshot())
+func (sm *syncManager) handleWorkerStatus(status workerStatus) {
+ log.Debugf("worker %d done; status error: %s", status.id, status.err)
+
+ sm.mx.Lock()
+ ws := sm.state[status.id]
+ delete(sm.state, status.id)
+
+ // we track the last few workers for debug purposes
+ sm.history[sm.historyI] = ws
+ sm.historyI++
+ sm.historyI %= len(sm.history)
+ sm.mx.Unlock()
+
+ if status.err != nil {
+ // we failed to sync this target -- log it and try to work on an extended chain
+ // if there is nothing related to be worked on, we stop working on this chain.
+ log.Errorf("error during sync in %s: %s", ws.ts, status.err)
+ } else {
+ // add to the recently synced buffer
+ sm.recent.Push(ws.ts)
+ // if we are still in initial sync and this was fast enough, mark the end of the initial sync
+ if !sm.initialSyncDone && ws.dt < InitialSyncTimeThreshold {
+ sm.initialSyncDone = true
+ }
+ }
+
+ // we are done with this target, select the next sync target and spawn a worker if there is work
+ // to do, because of an extension of this chain.
+ target, work, err := sm.selectSyncTarget(ws.ts)
+ if err != nil {
+ log.Warnf("failed to select sync target: %s", err)
+ return
+ }
+
+ if work {
+ log.Infof("selected sync target: %s", target)
+ sm.spawnWorker(target)
+ }
+}
+
+func (sm *syncManager) handleInitialSyncDone() {
+ // we have just finished the initial sync; spawn some additional workers in deferred syncs
+ // as needed (and up to MaxSyncWorkers) to ramp up chain sync
+ for len(sm.state) < MaxSyncWorkers {
+ target, work, err := sm.selectDeferredSyncTarget()
+ if err != nil {
+ log.Errorf("error selecting deferred sync target: %s", err)
+ return
+ }
+
+ if !work {
+ return
+ }
+
+ log.Infof("selected deferred sync target: %s", target)
+ sm.spawnWorker(target)
+ }
+}
+
+func (sm *syncManager) spawnWorker(target *types.TipSet) {
+ id := sm.nextWorker
+ sm.nextWorker++
+ ws := &workerState{
+ id: id,
+ ts: target,
+ ss: new(SyncerState),
+ }
+ ws.ss.data.WorkerID = id
+
+ sm.mx.Lock()
+ sm.state[id] = ws
+ sm.mx.Unlock()
+
+ go sm.worker(ws)
+}
+
+func (sm *syncManager) worker(ws *workerState) {
+ log.Infof("worker %d syncing in %s", ws.id, ws.ts)
+
+ start := build.Clock.Now()
+
+ ctx := context.WithValue(sm.ctx, syncStateKey{}, ws.ss)
+ err := sm.doSync(ctx, ws.ts)
+
+ ws.dt = build.Clock.Since(start)
+ log.Infof("worker %d done; took %s", ws.id, ws.dt)
+ select {
+ case sm.statusq <- workerStatus{id: ws.id, err: err}:
+ case <-sm.ctx.Done():
+ }
+}
+
+// selects the initial sync target by examining known peer heads; only called once for the initial
+// sync.
+func (sm *syncManager) selectInitialSyncTarget() (*types.TipSet, error) {
+ var buckets syncBucketSet
+
+ var peerHeads []*types.TipSet
+ for _, ts := range sm.heads {
+ peerHeads = append(peerHeads, ts)
+ }
+ // clear the map, we don't use it any longer
+ sm.heads = nil
+
+ sort.Slice(peerHeads, func(i, j int) bool {
+ return peerHeads[i].Height() < peerHeads[j].Height()
+ })
+
+ for _, ts := range peerHeads {
+ buckets.Insert(ts)
+ }
+
+ if len(buckets.buckets) > 1 {
+ log.Warn("caution, multiple distinct chains seen during head selections")
+ // TODO: we *could* refuse to sync here without user intervention.
+ // For now, just select the best cluster
+ }
+
+ return buckets.Heaviest(), nil
+}
+
+// adds a tipset to the potential sync targets; returns true if there is a tipset to work on.
+// this could be either a restart, eg because there is no currently scheduled sync work or a worker
+// failed or a potential fork.
+func (sm *syncManager) addSyncTarget(ts *types.TipSet) (*types.TipSet, bool, error) {
+ // Note: we don't need the state lock here to access the active worker states, as the only
+ // competing threads that may access it do so through State() which is read only.
+
+ // if we have recently synced this or any heavier tipset we just ignore it; this can happen
+ // with an empty worker set after we just finished syncing to a target
+ if sm.recent.Synced(ts) {
+ return nil, false, nil
+ }
+
+ // if the worker set is empty, we have finished syncing and were waiting for the next tipset
+ // in this case, we just return the tipset as work to be done
+ if len(sm.state) == 0 {
+ return ts, true, nil
+ }
+
+ // check if it is related to any active sync; if so insert into the pending sync queue
+ for _, ws := range sm.state {
+ if ts.Equals(ws.ts) {
+ // ignore it, we are already syncing it
+ return nil, false, nil
+ }
+
+ if ts.Parents() == ws.ts.Key() {
+ // schedule for syncing next; it's an extension of an active sync
+ sm.pend.Insert(ts)
+ return nil, false, nil
+ }
+ }
+
+ // check to see if it is related to any pending sync; if so insert it into the pending sync queue
+ if sm.pend.RelatedToAny(ts) {
+ sm.pend.Insert(ts)
+ return nil, false, nil
+ }
+
+ // it's not related to any active or pending sync; this could be a fork in which case we
+ // start a new worker to sync it, if it is *heavier* than any active or pending set;
+ // if it is not, we ignore it.
+ for _, ws := range sm.state {
+ if isHeavier(ws.ts, ts) {
+ return nil, false, nil
+ }
+ }
+
+ pendHeaviest := sm.pend.Heaviest()
+ if pendHeaviest != nil && isHeavier(pendHeaviest, ts) {
+ return nil, false, nil
+ }
+
+ // if we have not finished the initial sync or have too many workers, add it to the deferred queue;
+ // it will be processed once a worker is freed from syncing a chain (or the initial sync finishes)
+ if !sm.initialSyncDone || len(sm.state) >= MaxSyncWorkers {
+ log.Debugf("deferring sync on %s", ts)
+ sm.deferred.Insert(ts)
+ return nil, false, nil
+ }
+
+ // start a new worker, seems heavy enough and unrelated to active or pending syncs
+ return ts, true, nil
+}
+
+// selects the next sync target after a worker sync has finished; returns true and a target
+// TipSet if this chain should continue to sync because there is a heavier related tipset.
+func (sm *syncManager) selectSyncTarget(done *types.TipSet) (*types.TipSet, bool, error) {
+ // we pop the related bucket and if there is any related tipset, we work on the heaviest one next
+ // if we are not already working on a heavier tipset
+ related := sm.pend.PopRelated(done)
+ if related == nil {
+ return sm.selectDeferredSyncTarget()
+ }
+
+ heaviest := related.heaviestTipSet()
+ if isHeavier(done, heaviest) {
+ return sm.selectDeferredSyncTarget()
+ }
+
+ for _, ws := range sm.state {
+ if isHeavier(ws.ts, heaviest) {
+ return sm.selectDeferredSyncTarget()
+ }
+ }
+
+ if sm.recent.Synced(heaviest) {
+ return sm.selectDeferredSyncTarget()
+ }
+
+ return heaviest, true, nil
+}
+
+// selects a deferred sync target if there is any; these are sync targets that were not related to
+// active syncs and were deferred because there were too many workers running
+func (sm *syncManager) selectDeferredSyncTarget() (*types.TipSet, bool, error) {
+deferredLoop:
+ for !sm.deferred.Empty() {
+ bucket := sm.deferred.Pop()
+ heaviest := bucket.heaviestTipSet()
+
+ if sm.recent.Synced(heaviest) {
+ // we have synced it or something heavier recently, skip it
+ continue deferredLoop
+ }
+
+ if sm.pend.RelatedToAny(heaviest) {
+ // this has converged to a pending sync, insert it to the pending queue
+ sm.pend.Insert(heaviest)
+ continue deferredLoop
+ }
+
+ for _, ws := range sm.state {
+ if ws.ts.Equals(heaviest) || isHeavier(ws.ts, heaviest) {
+ // we have converged and are already syncing it or we are syncing on something heavier
+ // ignore it and pop the next deferred bucket
+ continue deferredLoop
+ }
+
+ if heaviest.Parents() == ws.ts.Key() {
+ // we have converged and we are syncing its parent; insert it to the pending queue
+ sm.pend.Insert(heaviest)
+ continue deferredLoop
+ }
+
+ // it's not related to any active or pending sync and this worker is free, so sync it!
+ return heaviest, true, nil
+ }
}
- return ret
+
+ return nil, false, nil
}
+func isHeavier(a, b *types.TipSet) bool {
+ return a.ParentWeight().GreaterThan(b.ParentWeight())
+}
+
+// sync buffer -- this is a circular buffer of recently synced tipsets
+type syncBuffer struct {
+ buf []*types.TipSet
+ next int
+}
+
+func newSyncBuffer(size int) *syncBuffer {
+ return &syncBuffer{buf: make([]*types.TipSet, size)}
+}
+
+func (sb *syncBuffer) Push(ts *types.TipSet) {
+ sb.buf[sb.next] = ts
+ sb.next++
+ sb.next %= len(sb.buf)
+}
+
+func (sb *syncBuffer) Synced(ts *types.TipSet) bool {
+ for _, rts := range sb.buf {
+ if rts != nil && (rts.Equals(ts) || isHeavier(rts, ts)) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// sync buckets and related utilities
type syncBucketSet struct {
buckets []*syncTargetBucket
}
+type syncTargetBucket struct {
+ tips []*types.TipSet
+}
+
func newSyncTargetBucket(tipsets ...*types.TipSet) *syncTargetBucket {
var stb syncTargetBucket
for _, ts := range tipsets {
@@ -250,10 +623,6 @@ func (sbs *syncBucketSet) Empty() bool {
return len(sbs.buckets) == 0
}
-type syncTargetBucket struct {
- tips []*types.TipSet
-}
-
func (stb *syncTargetBucket) sameChainAs(ts *types.TipSet) bool {
for _, t := range stb.tips {
if ts.Equals(t) {
@@ -265,19 +634,43 @@ func (stb *syncTargetBucket) sameChainAs(ts *types.TipSet) bool {
if ts.Parents() == t.Key() {
return true
}
- if coalesceForksParents && ts.Parents() == t.Parents() {
- return true
- }
}
return false
}
func (stb *syncTargetBucket) add(ts *types.TipSet) {
-
- for _, t := range stb.tips {
+ for i, t := range stb.tips {
if t.Equals(ts) {
return
}
+ if coalesceTipsets && t.Height() == ts.Height() &&
+ types.CidArrsEqual(t.Blocks()[0].Parents, ts.Blocks()[0].Parents) {
+ miners := make(map[address.Address]struct{})
+ newTs := []*types.BlockHeader{}
+ for _, b := range t.Blocks() {
+ _, have := miners[b.Miner]
+ if !have {
+ newTs = append(newTs, b)
+ miners[b.Miner] = struct{}{}
+ }
+ }
+ for _, b := range ts.Blocks() {
+ _, have := miners[b.Miner]
+ if !have {
+ newTs = append(newTs, b)
+ miners[b.Miner] = struct{}{}
+ }
+ }
+
+ ts2, err := types.NewTipSet(newTs)
+ if err != nil {
+ log.Warnf("error while trying to recombine a tipset in a bucket: %+v", err)
+ continue
+ }
+ stb.tips[i] = ts2
+ return
+ }
+
}
stb.tips = append(stb.tips, ts)
@@ -296,196 +689,3 @@ func (stb *syncTargetBucket) heaviestTipSet() *types.TipSet {
}
return best
}
-
-func (sm *syncManager) selectSyncTarget() (*types.TipSet, error) {
- var buckets syncBucketSet
-
- var peerHeads []*types.TipSet
- for _, ts := range sm.peerHeads {
- peerHeads = append(peerHeads, ts)
- }
- sort.Slice(peerHeads, func(i, j int) bool {
- return peerHeads[i].Height() < peerHeads[j].Height()
- })
-
- for _, ts := range peerHeads {
- buckets.Insert(ts)
- }
-
- if len(buckets.buckets) > 1 {
- log.Warn("caution, multiple distinct chains seen during head selections")
- // TODO: we *could* refuse to sync here without user intervention.
- // For now, just select the best cluster
- }
-
- return buckets.Heaviest(), nil
-}
-
-func (sm *syncManager) syncScheduler() {
- for {
- select {
- case ts, ok := <-sm.incomingTipSets:
- if !ok {
- log.Info("shutting down sync scheduler")
- return
- }
-
- sm.scheduleIncoming(ts)
- case res := <-sm.syncResults:
- sm.scheduleProcessResult(res)
- case sm.workerChan <- sm.nextSyncTarget.heaviestTipSet():
- sm.scheduleWorkSent()
- case <-sm.stop:
- log.Info("sync scheduler shutting down")
- return
- }
- }
-}
-
-func (sm *syncManager) scheduleIncoming(ts *types.TipSet) {
- log.Debug("scheduling incoming tipset sync: ", ts.Cids())
- if sm.getBootstrapState() == BSStateSelected {
- sm.setBootstrapState(BSStateScheduled)
- sm.syncTargets <- ts
- return
- }
-
- var relatedToActiveSync bool
- for _, acts := range sm.activeSyncs {
- if ts.Equals(acts) {
- // ignore, we are already syncing it
- return
- }
-
- if ts.Parents() == acts.Key() {
- // sync this next, after that sync process finishes
- relatedToActiveSync = true
- }
- }
-
- if !relatedToActiveSync && sm.activeSyncTips.RelatedToAny(ts) {
- relatedToActiveSync = true
- }
-
- // if this is related to an active sync process, immediately bucket it
- // we don't want to start a parallel sync process that duplicates work
- if relatedToActiveSync {
- sm.activeSyncTips.Insert(ts)
- return
- }
-
- if sm.getBootstrapState() == BSStateScheduled {
- sm.syncQueue.Insert(ts)
- return
- }
-
- if sm.nextSyncTarget != nil && sm.nextSyncTarget.sameChainAs(ts) {
- sm.nextSyncTarget.add(ts)
- } else {
- sm.syncQueue.Insert(ts)
-
- if sm.nextSyncTarget == nil {
- sm.nextSyncTarget = sm.syncQueue.Pop()
- sm.workerChan = sm.syncTargets
- }
- }
-}
-
-func (sm *syncManager) scheduleProcessResult(res *syncResult) {
- if res.success && sm.getBootstrapState() != BSStateComplete {
- sm.setBootstrapState(BSStateComplete)
- }
-
- delete(sm.activeSyncs, res.ts.Key())
- relbucket := sm.activeSyncTips.PopRelated(res.ts)
- if relbucket != nil {
- if res.success {
- if sm.nextSyncTarget == nil {
- sm.nextSyncTarget = relbucket
- sm.workerChan = sm.syncTargets
- } else {
- for _, t := range relbucket.tips {
- sm.syncQueue.Insert(t)
- }
- }
- return
- }
- // TODO: this is the case where we try to sync a chain, and
- // fail, and we have more blocks on top of that chain that
- // have come in since. The question is, should we try to
- // sync these? or just drop them?
- log.Error("failed to sync chain but have new unconnected blocks from chain")
- }
-
- if sm.nextSyncTarget == nil && !sm.syncQueue.Empty() {
- next := sm.syncQueue.Pop()
- if next != nil {
- sm.nextSyncTarget = next
- sm.workerChan = sm.syncTargets
- }
- }
-}
-
-func (sm *syncManager) scheduleWorkSent() {
- hts := sm.nextSyncTarget.heaviestTipSet()
- sm.activeSyncs[hts.Key()] = hts
-
- if !sm.syncQueue.Empty() {
- sm.nextSyncTarget = sm.syncQueue.Pop()
- } else {
- sm.nextSyncTarget = nil
- sm.workerChan = nil
- }
-}
-
-func (sm *syncManager) syncWorker(id int) {
- ss := sm.syncStates[id]
- for {
- select {
- case ts, ok := <-sm.syncTargets:
- if !ok {
- log.Info("sync manager worker shutting down")
- return
- }
-
- ctx := context.WithValue(context.TODO(), syncStateKey{}, ss)
- err := sm.doSync(ctx, ts)
- if err != nil {
- log.Errorf("sync error: %+v", err)
- }
-
- sm.syncResults <- &syncResult{
- ts: ts,
- success: err == nil,
- }
- }
- }
-}
-
-func (sm *syncManager) syncedPeerCount() int {
- var count int
- for _, ts := range sm.peerHeads {
- if ts.Height() > 0 {
- count++
- }
- }
- return count
-}
-
-func (sm *syncManager) getBootstrapState() int {
- sm.bssLk.Lock()
- defer sm.bssLk.Unlock()
- return sm.bootstrapState
-}
-
-func (sm *syncManager) setBootstrapState(v int) {
- sm.bssLk.Lock()
- defer sm.bssLk.Unlock()
- sm.bootstrapState = v
-}
-
-func (sm *syncManager) IsBootstrapped() bool {
- sm.bssLk.Lock()
- defer sm.bssLk.Unlock()
- return sm.bootstrapState == BSStateComplete
-}
diff --git a/chain/sync_manager_test.go b/chain/sync_manager_test.go
index 709e03a4108..5f23e67c071 100644
--- a/chain/sync_manager_test.go
+++ b/chain/sync_manager_test.go
@@ -10,6 +10,10 @@ import (
"github.com/filecoin-project/lotus/chain/types/mock"
)
+func init() {
+ BootstrapPeerThreshold = 1
+}
+
var genTs = mock.TipSet(mock.MkBlock(nil, 0, 0))
type syncOp struct {
@@ -28,7 +32,12 @@ func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T,
<-ch
return nil
}).(*syncManager)
- sm.bspThresh = thresh
+
+ oldBootstrapPeerThreshold := BootstrapPeerThreshold
+ BootstrapPeerThreshold = thresh
+ defer func() {
+ BootstrapPeerThreshold = oldBootstrapPeerThreshold
+ }()
sm.Start()
defer sm.Stop()
@@ -87,47 +96,67 @@ func TestSyncManagerEdgeCase(t *testing.T) {
runSyncMgrTest(t, "edgeCase", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
sm.SetPeerHead(ctx, "peer1", a)
- assertGetSyncOp(t, stc, a)
sm.SetPeerHead(ctx, "peer1", b1)
sm.SetPeerHead(ctx, "peer1", b2)
- // b1 and b2 are being processed
- b1op := <-stc
- b2op := <-stc
- if !b1op.ts.Equals(b1) {
- b1op, b2op = b2op, b1op
+ assertGetSyncOp(t, stc, a)
+
+ // b1 and b2 are in queue after a; the sync manager should pick the heaviest one which is b2
+ bop := <-stc
+ if !bop.ts.Equals(b2) {
+ t.Fatalf("Expected tipset %s to sync, but got %s", b2, bop.ts)
}
- sm.SetPeerHead(ctx, "peer2", c2) // c2 is put into activeSyncTips at index 0
- sm.SetPeerHead(ctx, "peer2", c1) // c1 is put into activeSyncTips at index 1
- sm.SetPeerHead(ctx, "peer3", b2) // b2 is related to c2 and even though it is actively synced it is put into activeSyncTips index 0
- sm.SetPeerHead(ctx, "peer1", a) // a is related to b2 and is put into activeSyncTips index 0
+ sm.SetPeerHead(ctx, "peer2", c2)
+ sm.SetPeerHead(ctx, "peer2", c1)
+ sm.SetPeerHead(ctx, "peer3", b2)
+ sm.SetPeerHead(ctx, "peer1", a)
- b1op.done() // b1 completes first, is related to a, so it pops activeSyncTips index 0
- // even though correct one is index 1
+ bop.done()
- b2op.done()
- // b2 completes and is not related to c1, so it leaves activeSyncTips as it is
+ // get the next sync target; it should be c1 as the heaviest tipset but added last (same weight as c2)
+ bop = <-stc
+ if bop.ts.Equals(c2) {
+ // there's a small race and we might get c2 first.
+ // But we should still end on c1.
+ bop.done()
+ bop = <-stc
+ }
- waitUntilAllWorkersAreDone(stc)
+ if !bop.ts.Equals(c1) {
+ t.Fatalf("Expected tipset %s to sync, but got %s", c1, bop.ts)
+ }
- if len(sm.activeSyncTips.buckets) != 0 {
- t.Errorf("activeSyncTips expected empty but got: %s", sm.activeSyncTips.String())
+ sm.SetPeerHead(ctx, "peer4", d1)
+ sm.SetPeerHead(ctx, "peer5", e1)
+ bop.done()
+
+ // get the last sync target; it should be e1
+ var last *types.TipSet
+ for i := 0; i < 10; {
+ select {
+ case bop = <-stc:
+ bop.done()
+ if last == nil || bop.ts.Height() > last.Height() {
+ last = bop.ts
+ }
+ default:
+ i++
+ time.Sleep(10 * time.Millisecond)
+ }
+ }
+ if !last.Equals(e1) {
+ t.Fatalf("Expected tipset %s to sync, but got %s", e1, last)
}
- })
-}
-func waitUntilAllWorkersAreDone(stc chan *syncOp) {
- for i := 0; i < 10; {
- select {
- case so := <-stc:
- so.done()
- default:
- i++
- time.Sleep(10 * time.Millisecond)
+ sm.mx.Lock()
+ activeSyncs := len(sm.state)
+ sm.mx.Unlock()
+ if activeSyncs != 0 {
+ t.Errorf("active syncs expected empty but got: %d", activeSyncs)
}
- }
+ })
}
func TestSyncManager(t *testing.T) {
diff --git a/chain/sync_test.go b/chain/sync_test.go
index 559a73bf525..5312dff0bed 100644
--- a/chain/sync_test.go
+++ b/chain/sync_test.go
@@ -7,6 +7,11 @@ import (
"testing"
"time"
+ "github.com/filecoin-project/go-state-types/crypto"
+
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/filecoin-project/lotus/chain/stmgr"
+
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
@@ -80,6 +85,7 @@ type syncTestUtil struct {
blocks []*store.FullTipSet
nds []api.FullNode
+ us stmgr.UpgradeSchedule
}
func prepSyncTest(t testing.TB, h int) *syncTestUtil {
@@ -99,9 +105,11 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil {
mn: mocknet.New(ctx),
g: g,
+ us: stmgr.DefaultUpgradeSchedule(),
}
tu.addSourceNode(h)
+
//tu.checkHeight("source", source, h)
// separate logs
@@ -110,6 +118,54 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil {
return tu
}
+func prepSyncTestWithV5Height(t testing.TB, h int, v5height abi.ChainEpoch) *syncTestUtil {
+ logging.SetLogLevel("*", "INFO")
+
+ sched := stmgr.UpgradeSchedule{{
+ // prepare for upgrade.
+ Network: network.Version9,
+ Height: 1,
+ Migration: stmgr.UpgradeActorsV2,
+ }, {
+ Network: network.Version10,
+ Height: 2,
+ Migration: stmgr.UpgradeActorsV3,
+ }, {
+ Network: network.Version12,
+ Height: 3,
+ Migration: stmgr.UpgradeActorsV4,
+ }, {
+ Network: network.Version13,
+ Height: v5height,
+ Migration: stmgr.UpgradeActorsV5,
+ }}
+
+ g, err := gen.NewGeneratorWithUpgradeSchedule(sched)
+
+ if err != nil {
+ t.Fatalf("%+v", err)
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ tu := &syncTestUtil{
+ t: t,
+ ctx: ctx,
+ cancel: cancel,
+
+ mn: mocknet.New(ctx),
+ g: g,
+ us: sched,
+ }
+
+ tu.addSourceNode(h)
+ //tu.checkHeight("source", source, h)
+
+ // separate logs
+ fmt.Println("\x1b[31m///////////////////////////////////////////////////\x1b[39b")
+ return tu
+}
+
func (tu *syncTestUtil) Shutdown() {
tu.cancel()
}
@@ -174,7 +230,7 @@ func (tu *syncTestUtil) pushTsExpectErr(to int, fts *store.FullTipSet, experr bo
}
}
-func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, wait, fail bool, msgs [][]*types.SignedMessage) *store.FullTipSet {
+func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, wait, fail bool, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch) *store.FullTipSet {
if miners == nil {
for i := range tu.g.Miners {
miners = append(miners, i)
@@ -191,10 +247,10 @@ func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int,
var nts *store.FullTipSet
var err error
if msgs != nil {
- nts, err = tu.g.NextTipSetFromMinersWithMessages(blk.TipSet(), maddrs, msgs)
+ nts, err = tu.g.NextTipSetFromMinersWithMessagesAndNulls(blk.TipSet(), maddrs, msgs, 0)
require.NoError(tu.t, err)
} else {
- mt, err := tu.g.NextTipSetFromMiners(blk.TipSet(), maddrs)
+ mt, err := tu.g.NextTipSetFromMiners(blk.TipSet(), maddrs, nulls)
require.NoError(tu.t, err)
nts = mt.TipSet
}
@@ -209,7 +265,7 @@ func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int,
}
func (tu *syncTestUtil) mineNewBlock(src int, miners []int) {
- mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false, nil)
+ mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false, nil, 0)
tu.g.CurTipset = mts
}
@@ -223,12 +279,13 @@ func (tu *syncTestUtil) addSourceNode(gen int) {
stop, err := node.New(tu.ctx,
node.FullAPI(&out),
- node.Online(),
+ node.Base(),
node.Repo(sourceRepo),
node.MockHost(tu.mn),
node.Test(),
node.Override(new(modules.Genesis), modules.LoadGenesis(genesis)),
+ node.Override(new(stmgr.UpgradeSchedule), tu.us),
)
require.NoError(tu.t, err)
tu.t.Cleanup(func() { _ = stop(context.Background()) })
@@ -253,14 +310,16 @@ func (tu *syncTestUtil) addClientNode() int {
var out api.FullNode
+ r := repo.NewMemory(nil)
stop, err := node.New(tu.ctx,
node.FullAPI(&out),
- node.Online(),
- node.Repo(repo.NewMemory(nil)),
+ node.Base(),
+ node.Repo(r),
node.MockHost(tu.mn),
node.Test(),
node.Override(new(modules.Genesis), modules.LoadGenesis(tu.genesis)),
+ node.Override(new(stmgr.UpgradeSchedule), tu.us),
)
require.NoError(tu.t, err)
tu.t.Cleanup(func() { _ = stop(context.Background()) })
@@ -346,12 +405,15 @@ func (tu *syncTestUtil) checkpointTs(node int, tsk types.TipSetKey) {
require.NoError(tu.t, tu.nds[node].SyncCheckpoint(context.TODO(), tsk))
}
+func (tu *syncTestUtil) nodeHasTs(node int, tsk types.TipSetKey) bool {
+ _, err := tu.nds[node].ChainGetTipSet(context.TODO(), tsk)
+ return err == nil
+}
+
func (tu *syncTestUtil) waitUntilNodeHasTs(node int, tsk types.TipSetKey) {
- for {
- _, err := tu.nds[node].ChainGetTipSet(context.TODO(), tsk)
- if err != nil {
- break
- }
+ for !tu.nodeHasTs(node, tsk) {
+ // Time to allow for syncing and validation
+ time.Sleep(10 * time.Millisecond)
}
// Time to allow for syncing and validation
@@ -376,12 +438,18 @@ func (tu *syncTestUtil) waitUntilSyncTarget(to int, target *types.TipSet) {
tu.t.Fatal(err)
}
- // TODO: some sort of timeout?
- for n := range hc {
- for _, c := range n {
- if c.Val.Equals(target) {
- return
+ timeout := time.After(5 * time.Second)
+
+ for {
+ select {
+ case n := <-hc:
+ for _, c := range n {
+ if c.Val.Equals(target) {
+ return
+ }
}
+ case <-timeout:
+ tu.t.Fatal("waitUntilSyncTarget timeout")
}
}
}
@@ -442,7 +510,7 @@ func TestSyncBadTimestamp(t *testing.T) {
fmt.Println("BASE: ", base.Cids())
tu.printHeads()
- a1 := tu.mineOnBlock(base, 0, nil, false, true, nil)
+ a1 := tu.mineOnBlock(base, 0, nil, false, true, nil, 0)
tu.g.Timestamper = nil
require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
@@ -451,7 +519,7 @@ func TestSyncBadTimestamp(t *testing.T) {
fmt.Println("After mine bad block!")
tu.printHeads()
- a2 := tu.mineOnBlock(base, 0, nil, true, false, nil)
+ a2 := tu.mineOnBlock(base, 0, nil, true, false, nil, 0)
tu.waitUntilSync(0, client)
@@ -495,7 +563,7 @@ func TestSyncBadWinningPoSt(t *testing.T) {
tu.g.SetWinningPoStProver(tu.g.Miners[1], &badWpp{})
// now ensure that new blocks are not accepted
- tu.mineOnBlock(base, client, nil, false, true, nil)
+ tu.mineOnBlock(base, client, nil, false, true, nil, 0)
}
func (tu *syncTestUtil) loadChainToNode(to int) {
@@ -518,15 +586,20 @@ func TestSyncFork(t *testing.T) {
tu.loadChainToNode(p1)
tu.loadChainToNode(p2)
- phead := func() {
+ printHead := func() {
h1, err := tu.nds[1].ChainHead(tu.ctx)
require.NoError(tu.t, err)
h2, err := tu.nds[2].ChainHead(tu.ctx)
require.NoError(tu.t, err)
- fmt.Println("Node 1: ", h1.Cids(), h1.Parents(), h1.Height())
- fmt.Println("Node 2: ", h2.Cids(), h1.Parents(), h2.Height())
+ w1, err := tu.nds[1].(*impl.FullNodeAPI).ChainAPI.Chain.Weight(tu.ctx, h1)
+ require.NoError(tu.t, err)
+ w2, err := tu.nds[2].(*impl.FullNodeAPI).ChainAPI.Chain.Weight(tu.ctx, h2)
+ require.NoError(tu.t, err)
+
+ fmt.Println("Node 1: ", h1.Cids(), h1.Parents(), h1.Height(), w1)
+ fmt.Println("Node 2: ", h2.Cids(), h2.Parents(), h2.Height(), w2)
//time.Sleep(time.Second * 2)
fmt.Println()
fmt.Println()
@@ -534,26 +607,28 @@ func TestSyncFork(t *testing.T) {
fmt.Println()
}
- phead()
+ printHead()
base := tu.g.CurTipset
fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
// The two nodes fork at this point into 'a' and 'b'
- a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil)
- a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil)
- a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil)
+ a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0)
+ a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0)
+ a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0)
require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
// chain B will now be heaviest
- b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil)
- b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
- b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
- b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+ b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
fmt.Println("A: ", a.Cids(), a.TipSet().Height())
fmt.Println("B: ", b.Cids(), b.TipSet().Height())
+ printHead()
+
// Now for the fun part!!
require.NoError(t, tu.mn.LinkAll())
@@ -561,7 +636,7 @@ func TestSyncFork(t *testing.T) {
tu.waitUntilSyncTarget(p1, b.TipSet())
tu.waitUntilSyncTarget(p2, b.TipSet())
- phead()
+ printHead()
}
// This test crafts a tipset with 2 blocks, A and B.
@@ -573,11 +648,14 @@ func TestDuplicateNonce(t *testing.T) {
base := tu.g.CurTipset
+ // Get the banker from computed tipset state, not the parent.
+ st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet())
+ require.NoError(t, err)
+ ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st)
+ require.NoError(t, err)
+
// Produce a message from the banker to the rcvr
makeMsg := func(rcvr address.Address) *types.SignedMessage {
-
- ba, err := tu.nds[0].StateGetActor(context.TODO(), tu.g.Banker(), base.TipSet().Key())
- require.NoError(t, err)
msg := types.Message{
To: rcvr,
From: tu.g.Banker(),
@@ -608,28 +686,28 @@ func TestDuplicateNonce(t *testing.T) {
msgs[k] = []*types.SignedMessage{makeMsg(tu.g.Miners[k])}
}
- ts1 := tu.mineOnBlock(base, 0, []int{0, 1}, true, false, msgs)
+ ts1 := tu.mineOnBlock(base, 0, []int{0, 1}, true, false, msgs, 0)
tu.waitUntilSyncTarget(0, ts1.TipSet())
// mine another tipset
- ts2 := tu.mineOnBlock(ts1, 0, []int{0, 1}, true, false, make([][]*types.SignedMessage, 2))
+ ts2 := tu.mineOnBlock(ts1, 0, []int{0, 1}, true, false, make([][]*types.SignedMessage, 2), 0)
tu.waitUntilSyncTarget(0, ts2.TipSet())
var includedMsg cid.Cid
var skippedMsg cid.Cid
- r0, err0 := tu.nds[0].StateGetReceipt(context.TODO(), msgs[0][0].Cid(), ts2.TipSet().Key())
- r1, err1 := tu.nds[0].StateGetReceipt(context.TODO(), msgs[1][0].Cid(), ts2.TipSet().Key())
+ r0, err0 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[0][0].Cid(), api.LookbackNoLimit, true)
+ r1, err1 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[1][0].Cid(), api.LookbackNoLimit, true)
if err0 == nil {
require.Error(t, err1, "at least one of the StateGetReceipt calls should fail")
- require.True(t, r0.ExitCode.IsSuccess())
+ require.True(t, r0.Receipt.ExitCode.IsSuccess())
includedMsg = msgs[0][0].Message.Cid()
skippedMsg = msgs[1][0].Message.Cid()
} else {
require.NoError(t, err1, "both the StateGetReceipt calls should not fail")
- require.True(t, r1.ExitCode.IsSuccess())
+ require.True(t, r1.Receipt.ExitCode.IsSuccess())
includedMsg = msgs[1][0].Message.Cid()
skippedMsg = msgs[0][0].Message.Cid()
}
@@ -665,11 +743,14 @@ func TestBadNonce(t *testing.T) {
base := tu.g.CurTipset
+ // Get the banker from computed tipset state, not the parent.
+ st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet())
+ require.NoError(t, err)
+ ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st)
+ require.NoError(t, err)
+
// Produce a message from the banker with a bad nonce
makeBadMsg := func() *types.SignedMessage {
-
- ba, err := tu.nds[0].StateGetActor(context.TODO(), tu.g.Banker(), base.TipSet().Key())
- require.NoError(t, err)
msg := types.Message{
To: tu.g.Banker(),
From: tu.g.Banker(),
@@ -697,7 +778,115 @@ func TestBadNonce(t *testing.T) {
msgs := make([][]*types.SignedMessage, 1)
msgs[0] = []*types.SignedMessage{makeBadMsg()}
- tu.mineOnBlock(base, 0, []int{0}, true, true, msgs)
+ tu.mineOnBlock(base, 0, []int{0}, true, true, msgs, 0)
+}
+
+// This test introduces a block that has 2 messages, with the same sender, and same nonce.
+// One of the messages uses the sender's robust address, the other uses the ID address.
+// Such a block is invalid and should not sync.
+func TestMismatchedNoncesRobustID(t *testing.T) {
+ v5h := abi.ChainEpoch(4)
+ tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h)
+
+ base := tu.g.CurTipset
+
+ // Get the banker from computed tipset state, not the parent.
+ st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet())
+ require.NoError(t, err)
+ ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st)
+ require.NoError(t, err)
+
+ // Produce a message from the banker
+ makeMsg := func(id bool) *types.SignedMessage {
+ sender := tu.g.Banker()
+ if id {
+ s, err := tu.nds[0].StateLookupID(context.TODO(), sender, base.TipSet().Key())
+ require.NoError(t, err)
+ sender = s
+ }
+
+ msg := types.Message{
+ To: tu.g.Banker(),
+ From: sender,
+
+ Nonce: ba.Nonce,
+
+ Value: types.NewInt(1),
+
+ Method: 0,
+
+ GasLimit: 100_000_000,
+ GasFeeCap: types.NewInt(0),
+ GasPremium: types.NewInt(0),
+ }
+
+ sig, err := tu.g.Wallet().WalletSign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes(), api.MsgMeta{})
+ require.NoError(t, err)
+
+ return &types.SignedMessage{
+ Message: msg,
+ Signature: *sig,
+ }
+ }
+
+ msgs := make([][]*types.SignedMessage, 1)
+ msgs[0] = []*types.SignedMessage{makeMsg(false), makeMsg(true)}
+
+ tu.mineOnBlock(base, 0, []int{0}, true, true, msgs, 0)
+}
+
+// This test introduces a block that has 2 messages, with the same sender, and nonces N and N+1 (so both can be included in a block)
+// One of the messages uses the sender's robust address, the other uses the ID address.
+// Such a block is valid and should sync.
+func TestMatchedNoncesRobustID(t *testing.T) {
+ v5h := abi.ChainEpoch(4)
+ tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h)
+
+ base := tu.g.CurTipset
+
+ // Get the banker from computed tipset state, not the parent.
+ st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet())
+ require.NoError(t, err)
+ ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st)
+ require.NoError(t, err)
+
+ // Produce a message from the banker with specified nonce
+ makeMsg := func(n uint64, id bool) *types.SignedMessage {
+ sender := tu.g.Banker()
+ if id {
+ s, err := tu.nds[0].StateLookupID(context.TODO(), sender, base.TipSet().Key())
+ require.NoError(t, err)
+ sender = s
+ }
+
+ msg := types.Message{
+ To: tu.g.Banker(),
+ From: sender,
+
+ Nonce: n,
+
+ Value: types.NewInt(1),
+
+ Method: 0,
+
+ GasLimit: 100_000_000,
+ GasFeeCap: types.NewInt(0),
+ GasPremium: types.NewInt(0),
+ }
+
+ sig, err := tu.g.Wallet().WalletSign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes(), api.MsgMeta{})
+ require.NoError(t, err)
+
+ return &types.SignedMessage{
+ Message: msg,
+ Signature: *sig,
+ }
+ }
+
+ msgs := make([][]*types.SignedMessage, 1)
+ msgs[0] = []*types.SignedMessage{makeMsg(ba.Nonce, false), makeMsg(ba.Nonce+1, true)}
+
+ tu.mineOnBlock(base, 0, []int{0}, true, false, msgs, 0)
}
func BenchmarkSyncBasic(b *testing.B) {
@@ -762,19 +951,19 @@ func TestSyncCheckpointHead(t *testing.T) {
fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
// The two nodes fork at this point into 'a' and 'b'
- a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil)
- a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil)
- a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil)
+ a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0)
+ a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0)
+ a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0)
tu.waitUntilSyncTarget(p1, a.TipSet())
tu.checkpointTs(p1, a.TipSet().Key())
require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
// chain B will now be heaviest
- b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil)
- b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
- b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
- b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+ b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
fmt.Println("A: ", a.Cids(), a.TipSet().Height())
fmt.Println("B: ", b.Cids(), b.TipSet().Height())
@@ -785,8 +974,13 @@ func TestSyncCheckpointHead(t *testing.T) {
tu.connect(p1, p2)
tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
p1Head := tu.getHead(p1)
- require.Equal(tu.t, p1Head, a.TipSet())
+ require.True(tu.t, p1Head.Equals(a.TipSet()))
tu.assertBad(p1, b.TipSet())
+
+ // Should be able to switch forks.
+ tu.checkpointTs(p1, b.TipSet().Key())
+ p1Head = tu.getHead(p1)
+ require.True(tu.t, p1Head.Equals(b.TipSet()))
}
func TestSyncCheckpointEarlierThanHead(t *testing.T) {
@@ -804,19 +998,19 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) {
fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
// The two nodes fork at this point into 'a' and 'b'
- a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil)
- a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil)
- a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil)
+ a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0)
+ a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0)
+ a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0)
tu.waitUntilSyncTarget(p1, a.TipSet())
tu.checkpointTs(p1, a1.TipSet().Key())
require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
// chain B will now be heaviest
- b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil)
- b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
- b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
- b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
+ b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
+ b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
fmt.Println("A: ", a.Cids(), a.TipSet().Height())
fmt.Println("B: ", b.Cids(), b.TipSet().Height())
@@ -827,6 +1021,86 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) {
tu.connect(p1, p2)
tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
p1Head := tu.getHead(p1)
- require.Equal(tu.t, p1Head, a.TipSet())
+ require.True(tu.t, p1Head.Equals(a.TipSet()))
tu.assertBad(p1, b.TipSet())
+
+ // Should be able to switch forks.
+ tu.checkpointTs(p1, b.TipSet().Key())
+ p1Head = tu.getHead(p1)
+ require.True(tu.t, p1Head.Equals(b.TipSet()))
+}
+
+func TestDrandNull(t *testing.T) {
+ H := 10
+ v5h := abi.ChainEpoch(50)
+ ov5h := build.UpgradeHyperdriveHeight
+ build.UpgradeHyperdriveHeight = v5h
+ tu := prepSyncTestWithV5Height(t, H, v5h)
+
+ p0 := tu.addClientNode()
+ p1 := tu.addClientNode()
+
+ tu.loadChainToNode(p0)
+ tu.loadChainToNode(p1)
+
+ entropy := []byte{0, 2, 3, 4}
+ // arbitrarily chosen
+ pers := crypto.DomainSeparationTag_WinningPoStChallengeSeed
+
+ beforeNull := tu.g.CurTipset
+ afterNull := tu.mineOnBlock(beforeNull, p0, nil, false, false, nil, 2)
+ nullHeight := beforeNull.TipSet().Height() + 1
+ if afterNull.TipSet().Height() == nullHeight {
+ t.Fatal("didn't inject nulls as expected")
+ }
+
+ rand, err := tu.nds[p0].ChainGetRandomnessFromBeacon(tu.ctx, afterNull.TipSet().Key(), pers, nullHeight, entropy)
+ require.NoError(t, err)
+
+ // calculate the expected randomness based on the beacon BEFORE the null
+ expectedBE := beforeNull.Blocks[0].Header.BeaconEntries
+ expectedRand, err := store.DrawRandomness(expectedBE[len(expectedBE)-1].Data, pers, nullHeight, entropy)
+ require.NoError(t, err)
+
+ require.Equal(t, []byte(rand), expectedRand)
+
+ // zoom zoom to past the v5 upgrade by injecting many many nulls
+ postUpgrade := tu.mineOnBlock(afterNull, p0, nil, false, false, nil, v5h)
+ nv, err := tu.nds[p0].StateNetworkVersion(tu.ctx, postUpgrade.TipSet().Key())
+ require.NoError(t, err)
+ if nv != network.Version13 {
+ t.Fatal("expect to be v13 by now")
+ }
+
+ afterNull = tu.mineOnBlock(postUpgrade, p0, nil, false, false, nil, 2)
+ nullHeight = postUpgrade.TipSet().Height() + 1
+ if afterNull.TipSet().Height() == nullHeight {
+ t.Fatal("didn't inject nulls as expected")
+ }
+
+ rand0, err := tu.nds[p0].ChainGetRandomnessFromBeacon(tu.ctx, afterNull.TipSet().Key(), pers, nullHeight, entropy)
+ require.NoError(t, err)
+
+ // calculate the expected randomness based on the beacon AFTER the null
+ expectedBE = afterNull.Blocks[0].Header.BeaconEntries
+ expectedRand, err = store.DrawRandomness(expectedBE[len(expectedBE)-1].Data, pers, nullHeight, entropy)
+ require.NoError(t, err)
+
+ require.Equal(t, []byte(rand0), expectedRand)
+
+ // Introduce p1 to friendly p0 who has all the blocks
+ require.NoError(t, tu.mn.LinkAll())
+ tu.connect(p0, p1)
+ tu.waitUntilNodeHasTs(p1, afterNull.TipSet().Key())
+ p1Head := tu.getHead(p1)
+
+ // Yes, p1 syncs well to p0's chain
+ require.Equal(tu.t, p1Head.Key(), afterNull.TipSet().Key())
+
+ // Yes, p1 sources the same randomness as p0
+ rand1, err := tu.nds[p1].ChainGetRandomnessFromBeacon(tu.ctx, afterNull.TipSet().Key(), pers, nullHeight, entropy)
+ require.NoError(t, err)
+ require.Equal(t, rand0, rand1)
+
+ build.UpgradeHyperdriveHeight = ov5h
}
diff --git a/chain/syncstate.go b/chain/syncstate.go
index 26f9f1c39f0..527d6be4832 100644
--- a/chain/syncstate.go
+++ b/chain/syncstate.go
@@ -12,13 +12,14 @@ import (
)
type SyncerStateSnapshot struct {
- Target *types.TipSet
- Base *types.TipSet
- Stage api.SyncStateStage
- Height abi.ChainEpoch
- Message string
- Start time.Time
- End time.Time
+ WorkerID uint64
+ Target *types.TipSet
+ Base *types.TipSet
+ Stage api.SyncStateStage
+ Height abi.ChainEpoch
+ Message string
+ Start time.Time
+ End time.Time
}
type SyncerState struct {
diff --git a/chain/types/bigint.go b/chain/types/bigint.go
index da4857d5b4d..72ef5212862 100644
--- a/chain/types/bigint.go
+++ b/chain/types/bigint.go
@@ -47,6 +47,11 @@ func BigDiv(a, b BigInt) BigInt {
return BigInt{Int: big.NewInt(0).Div(a.Int, b.Int)}
}
+func BigDivFloat(num, den BigInt) float64 {
+ res, _ := new(big.Rat).SetFrac(num.Int, den.Int).Float64()
+ return res
+}
+
func BigMod(a, b BigInt) BigInt {
return BigInt{Int: big.NewInt(0).Mod(a.Int, b.Int)}
}
diff --git a/chain/types/blockheader.go b/chain/types/blockheader.go
index 4db6788e1b8..66e711cabe9 100644
--- a/chain/types/blockheader.go
+++ b/chain/types/blockheader.go
@@ -47,41 +47,24 @@ func NewBeaconEntry(round uint64, data []byte) BeaconEntry {
}
type BlockHeader struct {
- Miner address.Address // 0
-
- Ticket *Ticket // 1
-
- ElectionProof *ElectionProof // 2
-
- BeaconEntries []BeaconEntry // 3
-
- WinPoStProof []proof2.PoStProof // 4
-
- Parents []cid.Cid // 5
-
- ParentWeight BigInt // 6
-
- Height abi.ChainEpoch // 7
-
- ParentStateRoot cid.Cid // 8
-
- ParentMessageReceipts cid.Cid // 8
-
- Messages cid.Cid // 10
-
- BLSAggregate *crypto.Signature // 11
-
- Timestamp uint64 // 12
-
- BlockSig *crypto.Signature // 13
-
- ForkSignaling uint64 // 14
-
- // ParentBaseFee is the base fee after executing parent tipset
- ParentBaseFee abi.TokenAmount // 15
-
- // internal
- validated bool // true if the signature has been validated
+ Miner address.Address // 0 unique per block/miner
+ Ticket *Ticket // 1 unique per block/miner: should be a valid VRF
+ ElectionProof *ElectionProof // 2 unique per block/miner: should be a valid VRF
+ BeaconEntries []BeaconEntry // 3 identical for all blocks in same tipset
+ WinPoStProof []proof2.PoStProof // 4 unique per block/miner
+ Parents []cid.Cid // 5 identical for all blocks in same tipset
+ ParentWeight BigInt // 6 identical for all blocks in same tipset
+ Height abi.ChainEpoch // 7 identical for all blocks in same tipset
+ ParentStateRoot cid.Cid // 8 identical for all blocks in same tipset
+ ParentMessageReceipts cid.Cid // 9 identical for all blocks in same tipset
+ Messages cid.Cid // 10 unique per block
+ BLSAggregate *crypto.Signature // 11 unique per block: aggregate of BLS messages from above
+ Timestamp uint64 // 12 identical for all blocks in same tipset / hard-tied to the value of Height above
+ BlockSig *crypto.Signature // 13 unique per block/miner: miner signature
+ ForkSignaling uint64 // 14 currently unused/undefined
+ ParentBaseFee abi.TokenAmount // 15 identical for all blocks in same tipset: the base fee after executing parent tipset
+
+ validated bool // internal, true if the signature has been validated
}
func (blk *BlockHeader) ToStorageBlock() (block.Block, error) {
diff --git a/chain/types/cbor_gen.go b/chain/types/cbor_gen.go
index d063ce8c9f7..db1f3cdb238 100644
--- a/chain/types/cbor_gen.go
+++ b/chain/types/cbor_gen.go
@@ -5,6 +5,7 @@ package types
import (
"fmt"
"io"
+ "sort"
abi "github.com/filecoin-project/go-state-types/abi"
crypto "github.com/filecoin-project/go-state-types/crypto"
@@ -16,6 +17,8 @@ import (
)
var _ = xerrors.Errorf
+var _ = cid.Undef
+var _ = sort.Sort
var lengthBufBlockHeader = []byte{144}
diff --git a/chain/types/fil.go b/chain/types/fil.go
index 0ea77660c3a..21125e6d617 100644
--- a/chain/types/fil.go
+++ b/chain/types/fil.go
@@ -23,6 +23,43 @@ func (f FIL) Unitless() string {
return strings.TrimRight(strings.TrimRight(r.FloatString(18), "0"), ".")
}
+var AttoFil = NewInt(1)
+var FemtoFil = BigMul(AttoFil, NewInt(1000))
+var PicoFil = BigMul(FemtoFil, NewInt(1000))
+var NanoFil = BigMul(PicoFil, NewInt(1000))
+
+var unitPrefixes = []string{"a", "f", "p", "n", "μ", "m"}
+
+func (f FIL) Short() string {
+ n := BigInt(f).Abs()
+
+ dn := uint64(1)
+ var prefix string
+ for _, p := range unitPrefixes {
+ if n.LessThan(NewInt(dn * 1000)) {
+ prefix = p
+ break
+ }
+ dn *= 1000
+ }
+
+ r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(dn)))
+ if r.Sign() == 0 {
+ return "0"
+ }
+
+ return strings.TrimRight(strings.TrimRight(r.FloatString(3), "0"), ".") + " " + prefix + "FIL"
+}
+
+func (f FIL) Nano() string {
+ r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(1e9)))
+ if r.Sign() == 0 {
+ return "0"
+ }
+
+ return strings.TrimRight(strings.TrimRight(r.FloatString(9), "0"), ".") + " nFIL"
+}
+
func (f FIL) Format(s fmt.State, ch rune) {
switch ch {
case 's', 'v':
@@ -47,7 +84,7 @@ func (f FIL) UnmarshalText(text []byte) error {
}
func ParseFIL(s string) (FIL, error) {
- suffix := strings.TrimLeft(s, ".1234567890")
+ suffix := strings.TrimLeft(s, "-.1234567890")
s = s[:len(s)-len(suffix)]
var attofil bool
if suffix != "" {
@@ -61,6 +98,10 @@ func ParseFIL(s string) (FIL, error) {
}
}
+ if len(s) > 50 {
+ return FIL{}, fmt.Errorf("string length too large: %d", len(s))
+ }
+
r, ok := new(big.Rat).SetString(s)
if !ok {
return FIL{}, fmt.Errorf("failed to parse %q as a decimal number", s)
diff --git a/chain/types/fil_test.go b/chain/types/fil_test.go
new file mode 100644
index 00000000000..7bf2a802ede
--- /dev/null
+++ b/chain/types/fil_test.go
@@ -0,0 +1,114 @@
+package types
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestFilShort(t *testing.T) {
+ for _, s := range []struct {
+ fil string
+ expect string
+ }{
+
+ {fil: "1", expect: "1 FIL"},
+ {fil: "1.1", expect: "1.1 FIL"},
+ {fil: "12", expect: "12 FIL"},
+ {fil: "123", expect: "123 FIL"},
+ {fil: "123456", expect: "123456 FIL"},
+ {fil: "123.23", expect: "123.23 FIL"},
+ {fil: "123456.234", expect: "123456.234 FIL"},
+ {fil: "123456.2341234", expect: "123456.234 FIL"},
+ {fil: "123456.234123445", expect: "123456.234 FIL"},
+
+ {fil: "0.1", expect: "100 mFIL"},
+ {fil: "0.01", expect: "10 mFIL"},
+ {fil: "0.001", expect: "1 mFIL"},
+
+ {fil: "0.0001", expect: "100 μFIL"},
+ {fil: "0.00001", expect: "10 μFIL"},
+ {fil: "0.000001", expect: "1 μFIL"},
+
+ {fil: "0.0000001", expect: "100 nFIL"},
+ {fil: "0.00000001", expect: "10 nFIL"},
+ {fil: "0.000000001", expect: "1 nFIL"},
+
+ {fil: "0.0000000001", expect: "100 pFIL"},
+ {fil: "0.00000000001", expect: "10 pFIL"},
+ {fil: "0.000000000001", expect: "1 pFIL"},
+
+ {fil: "0.0000000000001", expect: "100 fFIL"},
+ {fil: "0.00000000000001", expect: "10 fFIL"},
+ {fil: "0.000000000000001", expect: "1 fFIL"},
+
+ {fil: "0.0000000000000001", expect: "100 aFIL"},
+ {fil: "0.00000000000000001", expect: "10 aFIL"},
+ {fil: "0.000000000000000001", expect: "1 aFIL"},
+
+ {fil: "0.0000012", expect: "1.2 μFIL"},
+ {fil: "0.00000123", expect: "1.23 μFIL"},
+ {fil: "0.000001234", expect: "1.234 μFIL"},
+ {fil: "0.0000012344", expect: "1.234 μFIL"},
+ {fil: "0.00000123444", expect: "1.234 μFIL"},
+
+ {fil: "0.0002212", expect: "221.2 μFIL"},
+ {fil: "0.00022123", expect: "221.23 μFIL"},
+ {fil: "0.000221234", expect: "221.234 μFIL"},
+ {fil: "0.0002212344", expect: "221.234 μFIL"},
+ {fil: "0.00022123444", expect: "221.234 μFIL"},
+
+ {fil: "-1", expect: "-1 FIL"},
+ {fil: "-1.1", expect: "-1.1 FIL"},
+ {fil: "-12", expect: "-12 FIL"},
+ {fil: "-123", expect: "-123 FIL"},
+ {fil: "-123456", expect: "-123456 FIL"},
+ {fil: "-123.23", expect: "-123.23 FIL"},
+ {fil: "-123456.234", expect: "-123456.234 FIL"},
+ {fil: "-123456.2341234", expect: "-123456.234 FIL"},
+ {fil: "-123456.234123445", expect: "-123456.234 FIL"},
+
+ {fil: "-0.1", expect: "-100 mFIL"},
+ {fil: "-0.01", expect: "-10 mFIL"},
+ {fil: "-0.001", expect: "-1 mFIL"},
+
+ {fil: "-0.0001", expect: "-100 μFIL"},
+ {fil: "-0.00001", expect: "-10 μFIL"},
+ {fil: "-0.000001", expect: "-1 μFIL"},
+
+ {fil: "-0.0000001", expect: "-100 nFIL"},
+ {fil: "-0.00000001", expect: "-10 nFIL"},
+ {fil: "-0.000000001", expect: "-1 nFIL"},
+
+ {fil: "-0.0000000001", expect: "-100 pFIL"},
+ {fil: "-0.00000000001", expect: "-10 pFIL"},
+ {fil: "-0.000000000001", expect: "-1 pFIL"},
+
+ {fil: "-0.0000000000001", expect: "-100 fFIL"},
+ {fil: "-0.00000000000001", expect: "-10 fFIL"},
+ {fil: "-0.000000000000001", expect: "-1 fFIL"},
+
+ {fil: "-0.0000000000000001", expect: "-100 aFIL"},
+ {fil: "-0.00000000000000001", expect: "-10 aFIL"},
+ {fil: "-0.000000000000000001", expect: "-1 aFIL"},
+
+ {fil: "-0.0000012", expect: "-1.2 μFIL"},
+ {fil: "-0.00000123", expect: "-1.23 μFIL"},
+ {fil: "-0.000001234", expect: "-1.234 μFIL"},
+ {fil: "-0.0000012344", expect: "-1.234 μFIL"},
+ {fil: "-0.00000123444", expect: "-1.234 μFIL"},
+
+ {fil: "-0.0002212", expect: "-221.2 μFIL"},
+ {fil: "-0.00022123", expect: "-221.23 μFIL"},
+ {fil: "-0.000221234", expect: "-221.234 μFIL"},
+ {fil: "-0.0002212344", expect: "-221.234 μFIL"},
+ {fil: "-0.00022123444", expect: "-221.234 μFIL"},
+ } {
+ s := s
+ t.Run(s.fil, func(t *testing.T) {
+ f, err := ParseFIL(s.fil)
+ require.NoError(t, err)
+ require.Equal(t, s.expect, f.Short())
+ })
+ }
+}
diff --git a/chain/types/message.go b/chain/types/message.go
index c53ecc7c160..4f6bb78224b 100644
--- a/chain/types/message.go
+++ b/chain/types/message.go
@@ -5,6 +5,8 @@ import (
"encoding/json"
"fmt"
+ "github.com/filecoin-project/go-state-types/network"
+
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
@@ -144,7 +146,7 @@ func (m *Message) EqualCall(o *Message) bool {
return (&m1).Equals(&m2)
}
-func (m *Message) ValidForBlockInclusion(minGas int64) error {
+func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version) error {
if m.Version != 0 {
return xerrors.New("'Version' unsupported")
}
@@ -153,6 +155,10 @@ func (m *Message) ValidForBlockInclusion(minGas int64) error {
return xerrors.New("'To' address cannot be empty")
}
+ if m.To == build.ZeroAddress && version >= network.Version7 {
+ return xerrors.New("invalid 'To' address")
+ }
+
if m.From == address.Undef {
return xerrors.New("'From' address cannot be empty")
}
diff --git a/chain/types/state.go b/chain/types/state.go
index a96883604be..c8f8f1cd984 100644
--- a/chain/types/state.go
+++ b/chain/types/state.go
@@ -9,8 +9,14 @@ type StateTreeVersion uint64
const (
// StateTreeVersion0 corresponds to actors < v2.
StateTreeVersion0 StateTreeVersion = iota
- // StateTreeVersion1 corresponds to actors >= v2.
+ // StateTreeVersion1 corresponds to actors v2
StateTreeVersion1
+ // StateTreeVersion2 corresponds to actors v3.
+ StateTreeVersion2
+ // StateTreeVersion3 corresponds to actors v4.
+ StateTreeVersion3
+ // StateTreeVersion4 corresponds to actors v5.
+ StateTreeVersion4
)
type StateRoot struct {
diff --git a/chain/types/tipset_key.go b/chain/types/tipset_key.go
index e5bc7750de3..9f98877964b 100644
--- a/chain/types/tipset_key.go
+++ b/chain/types/tipset_key.go
@@ -47,7 +47,7 @@ func NewTipSetKey(cids ...cid.Cid) TipSetKey {
func TipSetKeyFromBytes(encoded []byte) (TipSetKey, error) {
_, err := decodeKey(encoded)
if err != nil {
- return TipSetKey{}, err
+ return EmptyTSK, err
}
return TipSetKey{string(encoded)}, nil
}
diff --git a/chain/types/tipset_key_test.go b/chain/types/tipset_key_test.go
index 7b3ce439db9..73c1ca9df43 100644
--- a/chain/types/tipset_key_test.go
+++ b/chain/types/tipset_key_test.go
@@ -19,7 +19,7 @@ func TestTipSetKey(t *testing.T) {
fmt.Println(len(c1.Bytes()))
t.Run("zero value", func(t *testing.T) {
- assert.Equal(t, TipSetKey{}, NewTipSetKey())
+ assert.Equal(t, EmptyTSK, NewTipSetKey())
})
t.Run("CID extraction", func(t *testing.T) {
diff --git a/chain/vm/burn.go b/chain/vm/burn.go
index 9f9b95755b7..a214d198b66 100644
--- a/chain/vm/burn.go
+++ b/chain/vm/burn.go
@@ -67,7 +67,7 @@ func ComputeGasOverestimationBurn(gasUsed, gasLimit int64) (int64, int64) {
return gasLimit - gasUsed - gasToBurn.Int64(), gasToBurn.Int64()
}
-func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi.TokenAmount) GasOutputs {
+func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi.TokenAmount, chargeNetworkFee bool) GasOutputs {
gasUsedBig := big.NewInt(gasUsed)
out := ZeroGasOutputs()
@@ -76,7 +76,12 @@ func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi.
baseFeeToPay = feeCap
out.MinerPenalty = big.Mul(big.Sub(baseFee, feeCap), gasUsedBig)
}
- out.BaseFeeBurn = big.Mul(baseFeeToPay, gasUsedBig)
+
+ // If chargeNetworkFee is disabled, just skip computing the BaseFeeBurn. However,
+ // we charge all the other fees regardless.
+ if chargeNetworkFee {
+ out.BaseFeeBurn = big.Mul(baseFeeToPay, gasUsedBig)
+ }
minerTip := gasPremium
if big.Cmp(big.Add(baseFeeToPay, minerTip), feeCap) > 0 {
diff --git a/chain/vm/burn_test.go b/chain/vm/burn_test.go
index 58e1336057b..e4fc69affd6 100644
--- a/chain/vm/burn_test.go
+++ b/chain/vm/burn_test.go
@@ -63,7 +63,7 @@ func TestGasOutputs(t *testing.T) {
for _, test := range tests {
test := test
t.Run(fmt.Sprintf("%v", test), func(t *testing.T) {
- output := ComputeGasOutputs(test.used, test.limit, baseFee, types.NewInt(test.feeCap), types.NewInt(test.premium))
+ output := ComputeGasOutputs(test.used, test.limit, baseFee, types.NewInt(test.feeCap), types.NewInt(test.premium), true)
i2s := func(i uint64) string {
return fmt.Sprintf("%d", i)
}
diff --git a/chain/vm/gas.go b/chain/vm/gas.go
index cbe5bab13e1..c860ce9a0c2 100644
--- a/chain/vm/gas.go
+++ b/chain/vm/gas.go
@@ -3,21 +3,17 @@ package vm
import (
"fmt"
- vmr2 "github.com/filecoin-project/specs-actors/v2/actors/runtime"
- proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
+ "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/go-address"
addr "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
+ vmr5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
+ proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/ipfs/go-cid"
)
-const (
- GasStorageMulti = 1000
- GasComputeMulti = 1
-)
-
type GasCharge struct {
Name string
Extra interface{}
@@ -30,7 +26,7 @@ type GasCharge struct {
}
func (g GasCharge) Total() int64 {
- return g.ComputeGas*GasComputeMulti + g.StorageGas*GasStorageMulti
+ return g.ComputeGas + g.StorageGas
}
func (g GasCharge) WithVirtual(compute, storage int64) GasCharge {
out := g
@@ -78,13 +74,17 @@ type Pricelist interface {
OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error)
OnHashing(dataSize int) GasCharge
OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge
- OnVerifySeal(info proof2.SealVerifyInfo) GasCharge
- OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge
+ OnVerifySeal(info proof5.SealVerifyInfo) GasCharge
+ OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge
+ OnVerifyPost(info proof5.WindowPoStVerifyInfo) GasCharge
OnVerifyConsensusFault() GasCharge
}
var prices = map[abi.ChainEpoch]Pricelist{
abi.ChainEpoch(0): &pricelistV0{
+ computeGasMulti: 1,
+ storageGasMulti: 1000,
+
onChainMessageComputeBase: 38863,
onChainMessageStorageBase: 36,
onChainMessageStoragePerByte: 1,
@@ -112,6 +112,7 @@ var prices = map[abi.ChainEpoch]Pricelist{
hashingBase: 31355,
computeUnsealedSectorCidBase: 98647,
verifySealBase: 2000, // TODO gas , it VerifySeal syscall is not used
+ verifyAggregateSealBase: 0,
verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
flat: 123861062,
@@ -126,6 +127,83 @@ var prices = map[abi.ChainEpoch]Pricelist{
scale: 85639,
},
},
+ verifyPostDiscount: true,
+ verifyConsensusFault: 495422,
+ },
+ abi.ChainEpoch(build.UpgradeCalicoHeight): &pricelistV0{
+ computeGasMulti: 1,
+ storageGasMulti: 1300,
+
+ onChainMessageComputeBase: 38863,
+ onChainMessageStorageBase: 36,
+ onChainMessageStoragePerByte: 1,
+
+ onChainReturnValuePerByte: 1,
+
+ sendBase: 29233,
+ sendTransferFunds: 27500,
+ sendTransferOnlyPremium: 159672,
+ sendInvokeMethod: -5377,
+
+ ipldGetBase: 114617,
+ ipldPutBase: 353640,
+ ipldPutPerByte: 1,
+
+ createActorCompute: 1108454,
+ createActorStorage: 36 + 40,
+ deleteActor: -(36 + 40), // -createActorStorage
+
+ verifySignature: map[crypto.SigType]int64{
+ crypto.SigTypeBLS: 16598605,
+ crypto.SigTypeSecp256k1: 1637292,
+ },
+
+ hashingBase: 31355,
+ computeUnsealedSectorCidBase: 98647,
+ verifySealBase: 2000, // TODO gas: the VerifySeal syscall is not used
+
+ verifyAggregateSealPer: map[abi.RegisteredSealProof]int64{
+ abi.RegisteredSealProof_StackedDrg32GiBV1_1: 449900,
+ abi.RegisteredSealProof_StackedDrg64GiBV1_1: 359272,
+ },
+ verifyAggregateSealSteps: map[abi.RegisteredSealProof]stepCost{
+ abi.RegisteredSealProof_StackedDrg32GiBV1_1: {
+ {4, 103994170},
+ {7, 112356810},
+ {13, 122912610},
+ {26, 137559930},
+ {52, 162039100},
+ {103, 210960780},
+ {205, 318351180},
+ {410, 528274980},
+ },
+ abi.RegisteredSealProof_StackedDrg64GiBV1_1: {
+ {4, 102581240},
+ {7, 110803030},
+ {13, 120803700},
+ {26, 134642130},
+ {52, 157357890},
+ {103, 203017690},
+ {205, 304253590},
+ {410, 509880640},
+ },
+ },
+
+ verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
+ abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
+ flat: 117680921,
+ scale: 43780,
+ },
+ abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {
+ flat: 117680921,
+ scale: 43780,
+ },
+ abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {
+ flat: 117680921,
+ scale: 43780,
+ },
+ },
+ verifyPostDiscount: false,
verifyConsensusFault: 495422,
},
}
@@ -150,7 +228,7 @@ func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist {
}
type pricedSyscalls struct {
- under vmr2.Syscalls
+ under vmr5.Syscalls
pl Pricelist
chargeGas func(GasCharge)
}
@@ -184,7 +262,7 @@ func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, p
}
// Verifies a sector seal proof.
-func (ps pricedSyscalls) VerifySeal(vi proof2.SealVerifyInfo) error {
+func (ps pricedSyscalls) VerifySeal(vi proof5.SealVerifyInfo) error {
ps.chargeGas(ps.pl.OnVerifySeal(vi))
defer ps.chargeGas(gasOnActorExec)
@@ -192,7 +270,7 @@ func (ps pricedSyscalls) VerifySeal(vi proof2.SealVerifyInfo) error {
}
// Verifies a proof of spacetime.
-func (ps pricedSyscalls) VerifyPoSt(vi proof2.WindowPoStVerifyInfo) error {
+func (ps pricedSyscalls) VerifyPoSt(vi proof5.WindowPoStVerifyInfo) error {
ps.chargeGas(ps.pl.OnVerifyPost(vi))
defer ps.chargeGas(gasOnActorExec)
@@ -209,14 +287,14 @@ func (ps pricedSyscalls) VerifyPoSt(vi proof2.WindowPoStVerifyInfo) error {
// the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the
// blocks in the parent of h2 (i.e. h2's grandparent).
// Returns nil and an error if the headers don't prove a fault.
-func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr2.ConsensusFault, error) {
+func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr5.ConsensusFault, error) {
ps.chargeGas(ps.pl.OnVerifyConsensusFault())
defer ps.chargeGas(gasOnActorExec)
return ps.under.VerifyConsensusFault(h1, h2, extra)
}
-func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof2.SealVerifyInfo) (map[address.Address][]bool, error) {
+func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof5.SealVerifyInfo) (map[address.Address][]bool, error) {
count := int64(0)
for _, svis := range inp {
count += int64(len(svis))
@@ -229,3 +307,10 @@ func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof2.SealV
return ps.under.BatchVerifySeals(inp)
}
+
+func (ps pricedSyscalls) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) error {
+ ps.chargeGas(ps.pl.OnVerifyAggregateSeals(aggregate))
+ defer ps.chargeGas(gasOnActorExec)
+
+ return ps.under.VerifyAggregateSeals(aggregate)
+}
diff --git a/chain/vm/gas_v0.go b/chain/vm/gas_v0.go
index 7a7fb364d18..13c5fdd86ad 100644
--- a/chain/vm/gas_v0.go
+++ b/chain/vm/gas_v0.go
@@ -4,6 +4,7 @@ import (
"fmt"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
+ proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
@@ -17,7 +18,31 @@ type scalingCost struct {
scale int64
}
+type stepCost []step
+
+type step struct {
+ start int64
+ cost int64
+}
+
+func (sc stepCost) Lookup(x int64) int64 {
+ i := 0
+ for ; i < len(sc); i++ {
+ if sc[i].start > x {
+ break
+ }
+ }
+ i-- // look at previous item
+ if i < 0 {
+ return 0
+ }
+
+ return sc[i].cost
+}
+
type pricelistV0 struct {
+ computeGasMulti int64
+ storageGasMulti int64
///////////////////////////////////////////////////////////////////////////
// System operations
///////////////////////////////////////////////////////////////////////////
@@ -89,8 +114,13 @@ type pricelistV0 struct {
computeUnsealedSectorCidBase int64
verifySealBase int64
- verifyPostLookup map[abi.RegisteredPoStProof]scalingCost
- verifyConsensusFault int64
+ verifyAggregateSealBase int64
+ verifyAggregateSealPer map[abi.RegisteredSealProof]int64
+ verifyAggregateSealSteps map[abi.RegisteredSealProof]stepCost
+
+ verifyPostLookup map[abi.RegisteredPoStProof]scalingCost
+ verifyPostDiscount bool
+ verifyConsensusFault int64
}
var _ Pricelist = (*pricelistV0)(nil)
@@ -98,12 +128,12 @@ var _ Pricelist = (*pricelistV0)(nil)
// OnChainMessage returns the gas used for storing a message of a given size in the chain.
func (pl *pricelistV0) OnChainMessage(msgSize int) GasCharge {
return newGasCharge("OnChainMessage", pl.onChainMessageComputeBase,
- pl.onChainMessageStorageBase+pl.onChainMessageStoragePerByte*int64(msgSize))
+ (pl.onChainMessageStorageBase+pl.onChainMessageStoragePerByte*int64(msgSize))*pl.storageGasMulti)
}
// OnChainReturnValue returns the gas used for storing the response of a message in the chain.
func (pl *pricelistV0) OnChainReturnValue(dataSize int) GasCharge {
- return newGasCharge("OnChainReturnValue", 0, int64(dataSize)*pl.onChainReturnValuePerByte)
+ return newGasCharge("OnChainReturnValue", 0, int64(dataSize)*pl.onChainReturnValuePerByte*pl.storageGasMulti)
}
// OnMethodInvocation returns the gas used when invoking a method.
@@ -130,23 +160,23 @@ func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.M
// OnIpldGet returns the gas used for storing an object
func (pl *pricelistV0) OnIpldGet() GasCharge {
- return newGasCharge("OnIpldGet", pl.ipldGetBase, 0)
+ return newGasCharge("OnIpldGet", pl.ipldGetBase, 0).WithVirtual(114617, 0)
}
// OnIpldPut returns the gas used for storing an object
func (pl *pricelistV0) OnIpldPut(dataSize int) GasCharge {
- return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte).
- WithExtra(dataSize)
+ return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte*pl.storageGasMulti).
+ WithExtra(dataSize).WithVirtual(400000, int64(dataSize)*1300)
}
// OnCreateActor returns the gas used for creating an actor
func (pl *pricelistV0) OnCreateActor() GasCharge {
- return newGasCharge("OnCreateActor", pl.createActorCompute, pl.createActorStorage)
+ return newGasCharge("OnCreateActor", pl.createActorCompute, pl.createActorStorage*pl.storageGasMulti)
}
// OnDeleteActor returns the gas used for deleting an actor
func (pl *pricelistV0) OnDeleteActor() GasCharge {
- return newGasCharge("OnDeleteActor", 0, pl.deleteActor)
+ return newGasCharge("OnDeleteActor", 0, pl.deleteActor*pl.storageGasMulti)
}
// OnVerifySignature
@@ -182,6 +212,22 @@ func (pl *pricelistV0) OnVerifySeal(info proof2.SealVerifyInfo) GasCharge {
return newGasCharge("OnVerifySeal", pl.verifySealBase, 0)
}
+// OnVerifyAggregateSeals
+func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge {
+ proofType := aggregate.SealProof
+ perProof, ok := pl.verifyAggregateSealPer[proofType]
+ if !ok {
+ perProof = pl.verifyAggregateSealPer[abi.RegisteredSealProof_StackedDrg32GiBV1_1]
+ }
+
+ step, ok := pl.verifyAggregateSealSteps[proofType]
+ if !ok {
+ step = pl.verifyAggregateSealSteps[abi.RegisteredSealProof_StackedDrg32GiBV1_1]
+ }
+ num := int64(len(aggregate.Infos))
+ return newGasCharge("OnVerifyAggregateSeals", perProof*num+step.Lookup(num), 0)
+}
+
// OnVerifyPost
func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge {
sectorSize := "unknown"
@@ -201,9 +247,12 @@ func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge
}
gasUsed := cost.flat + int64(len(info.ChallengedSectors))*cost.scale
- gasUsed /= 2 // XXX: this is an artificial discount
+ if pl.verifyPostDiscount {
+ gasUsed /= 2 // XXX: this is an artificial discount
+ }
return newGasCharge("OnVerifyPost", gasUsed, 0).
+ WithVirtual(117680921+43780*int64(len(info.ChallengedSectors)), 0).
WithExtra(map[string]interface{}{
"type": sectorSize,
"size": len(info.ChallengedSectors),
diff --git a/chain/vm/gas_v0_test.go b/chain/vm/gas_v0_test.go
new file mode 100644
index 00000000000..447e4f70c5c
--- /dev/null
+++ b/chain/vm/gas_v0_test.go
@@ -0,0 +1,32 @@
+package vm
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestStepGasCost(t *testing.T) {
+ s := stepCost{
+ {4, 103994170},
+ {7, 112356810},
+ {13, 122912610},
+ {26, 137559930},
+ {52, 162039100},
+ {103, 210960780},
+ {205, 318351180},
+ {410, 528274980},
+ }
+
+ assert.EqualValues(t, 0, s.Lookup(0))
+ assert.EqualValues(t, 0, s.Lookup(3))
+ assert.EqualValues(t, 103994170, s.Lookup(4))
+ assert.EqualValues(t, 103994170, s.Lookup(6))
+ assert.EqualValues(t, 112356810, s.Lookup(7))
+ assert.EqualValues(t, 210960780, s.Lookup(103))
+ assert.EqualValues(t, 210960780, s.Lookup(204))
+ assert.EqualValues(t, 318351180, s.Lookup(205))
+ assert.EqualValues(t, 318351180, s.Lookup(409))
+ assert.EqualValues(t, 528274980, s.Lookup(410))
+ assert.EqualValues(t, 528274980, s.Lookup(10000000000))
+}
diff --git a/chain/vm/invoker.go b/chain/vm/invoker.go
index 661e31178ee..e4b15403187 100644
--- a/chain/vm/invoker.go
+++ b/chain/vm/invoker.go
@@ -6,6 +6,8 @@ import (
"fmt"
"reflect"
+ "github.com/filecoin-project/go-state-types/network"
+
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/ipfs/go-cid"
@@ -14,7 +16,10 @@ import (
exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
- vmr "github.com/filecoin-project/specs-actors/v2/actors/runtime"
+ exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
+ exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported"
+ exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported"
+ vmr "github.com/filecoin-project/specs-actors/v5/actors/runtime"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/exitcode"
@@ -34,9 +39,9 @@ type ActorPredicate func(vmr.Runtime, rtt.VMActor) error
func ActorsVersionPredicate(ver actors.Version) ActorPredicate {
return func(rt vmr.Runtime, v rtt.VMActor) error {
- nver := actors.VersionForNetwork(rt.NetworkVersion())
- if nver != ver {
- return xerrors.Errorf("actor %s is a version %d actor; chain only supports actor version %d at height %d", v.Code(), ver, nver, rt.CurrEpoch())
+ aver := actors.VersionForNetwork(rt.NetworkVersion())
+ if aver != ver {
+ return xerrors.Errorf("actor %s is a version %d actor; chain only supports actor version %d at height %d and nver %d", v.Code(), ver, aver, rt.CurrEpoch(), rt.NetworkVersion())
}
return nil
}
@@ -60,6 +65,9 @@ func NewActorRegistry() *ActorRegistry {
// add builtInCode using: register(cid, singleton)
inv.Register(ActorsVersionPredicate(actors.Version0), exported0.BuiltinActors()...)
inv.Register(ActorsVersionPredicate(actors.Version2), exported2.BuiltinActors()...)
+ inv.Register(ActorsVersionPredicate(actors.Version3), exported3.BuiltinActors()...)
+ inv.Register(ActorsVersionPredicate(actors.Version4), exported4.BuiltinActors()...)
+ inv.Register(ActorsVersionPredicate(actors.Version5), exported5.BuiltinActors()...)
return inv
}
@@ -147,7 +155,7 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) {
"vmr.Runtime, ")
}
if !runtimeType.Implements(t.In(0)) {
- return nil, newErr("first arguemnt should be vmr.Runtime")
+ return nil, newErr("first argument should be vmr.Runtime")
}
if t.In(1).Kind() != reflect.Ptr {
return nil, newErr("second argument should be of kind reflect.Ptr")
@@ -173,9 +181,14 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) {
paramT := meth.Type().In(1).Elem()
param := reflect.New(paramT)
+ rt := in[0].Interface().(*Runtime)
inBytes := in[1].Interface().([]byte)
if err := DecodeParams(inBytes, param.Interface()); err != nil {
- aerr := aerrors.Absorb(err, 1, "failed to decode parameters")
+ ec := exitcode.ErrSerialization
+ if rt.NetworkVersion() < network.Version7 {
+ ec = 1
+ }
+ aerr := aerrors.Absorb(err, ec, "failed to decode parameters")
return []reflect.Value{
reflect.ValueOf([]byte{}),
// Below is a hack, fixed in Go 1.13
@@ -183,7 +196,6 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) {
reflect.ValueOf(&aerr).Elem(),
}
}
- rt := in[0].Interface().(*Runtime)
rval, aerror := rt.shimCall(func() interface{} {
ret := meth.Call([]reflect.Value{
reflect.ValueOf(rt),
diff --git a/chain/vm/invoker_test.go b/chain/vm/invoker_test.go
index bce385b02ba..6822e2371f5 100644
--- a/chain/vm/invoker_test.go
+++ b/chain/vm/invoker_test.go
@@ -1,10 +1,13 @@
package vm
import (
+ "context"
"fmt"
"io"
"testing"
+ "github.com/filecoin-project/go-state-types/network"
+
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/stretchr/testify/assert"
cbg "github.com/whyrusleeping/cbor-gen"
@@ -105,10 +108,27 @@ func TestInvokerBasic(t *testing.T) {
}
}
- _, aerr := code[1](&Runtime{}, []byte{99})
- if aerrors.IsFatal(aerr) {
- t.Fatal("err should not be fatal")
+ {
+ _, aerr := code[1](&Runtime{
+ vm: &VM{ntwkVersion: func(ctx context.Context, epoch abi.ChainEpoch) network.Version {
+ return network.Version0
+ }},
+ }, []byte{99})
+ if aerrors.IsFatal(aerr) {
+ t.Fatal("err should not be fatal")
+ }
+ assert.Equal(t, exitcode.ExitCode(1), aerrors.RetCode(aerr), "return code should be 1")
}
- assert.Equal(t, exitcode.ExitCode(1), aerrors.RetCode(aerr), "return code should be 1")
+ {
+ _, aerr := code[1](&Runtime{
+ vm: &VM{ntwkVersion: func(ctx context.Context, epoch abi.ChainEpoch) network.Version {
+ return network.Version7
+ }},
+ }, []byte{99})
+ if aerrors.IsFatal(aerr) {
+ t.Fatal("err should not be fatal")
+ }
+ assert.Equal(t, exitcode.ErrSerialization, aerrors.RetCode(aerr), "return code should be %s", 1)
+ }
}
diff --git a/chain/vm/mkactor.go b/chain/vm/mkactor.go
index 885d3c0db2b..669c1450f1a 100644
--- a/chain/vm/mkactor.go
+++ b/chain/vm/mkactor.go
@@ -3,6 +3,10 @@ package vm
import (
"context"
+ "github.com/filecoin-project/go-state-types/network"
+
+ "github.com/filecoin-project/lotus/build"
+
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/chain/actors"
@@ -12,6 +16,9 @@ import (
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+ builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
@@ -38,6 +45,10 @@ func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, add
return nil, address.Undef, err
}
+ if addr == build.ZeroAddress && rt.NetworkVersion() >= network.Version10 {
+ return nil, address.Undef, aerrors.New(exitcode.ErrIllegalArgument, "cannot create the zero bls actor")
+ }
+
addrID, err := rt.state.RegisterNewAddress(addr)
if err != nil {
return nil, address.Undef, aerrors.Escalate(err, "registering actor address")
@@ -91,6 +102,12 @@ func newAccountActor(ver actors.Version) *types.Actor {
code = builtin0.AccountActorCodeID
case actors.Version2:
code = builtin2.AccountActorCodeID
+ case actors.Version3:
+ code = builtin3.AccountActorCodeID
+ case actors.Version4:
+ code = builtin4.AccountActorCodeID
+ case actors.Version5:
+ code = builtin5.AccountActorCodeID
default:
panic("unsupported actors version")
}
diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go
index 6e36e8e8739..2845c7696ea 100644
--- a/chain/vm/runtime.go
+++ b/chain/vm/runtime.go
@@ -5,6 +5,7 @@ import (
"context"
"encoding/binary"
"fmt"
+ gruntime "runtime"
"time"
"github.com/filecoin-project/go-address"
@@ -15,7 +16,7 @@ import (
"github.com/filecoin-project/go-state-types/network"
rtt "github.com/filecoin-project/go-state-types/rt"
rt0 "github.com/filecoin-project/specs-actors/actors/runtime"
- rt2 "github.com/filecoin-project/specs-actors/v2/actors/runtime"
+ rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
"github.com/ipfs/go-cid"
ipldcbor "github.com/ipfs/go-ipld-cbor"
"go.opencensus.io/trace"
@@ -53,8 +54,8 @@ func (m *Message) ValueReceived() abi.TokenAmount {
var EnableGasTracing = false
type Runtime struct {
- rt2.Message
- rt2.Syscalls
+ rt5.Message
+ rt5.Syscalls
ctx context.Context
@@ -80,6 +81,10 @@ type Runtime struct {
lastGasCharge *types.GasTrace
}
+func (rt *Runtime) BaseFee() abi.TokenAmount {
+ return rt.vm.baseFee
+}
+
func (rt *Runtime) NetworkVersion() network.Version {
return rt.vm.GetNtwkVersion(rt.ctx, rt.CurrEpoch())
}
@@ -135,7 +140,7 @@ func (rt *Runtime) StorePut(x cbor.Marshaler) cid.Cid {
}
var _ rt0.Runtime = (*Runtime)(nil)
-var _ rt2.Runtime = (*Runtime)(nil)
+var _ rt5.Runtime = (*Runtime)(nil)
func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) {
defer func() {
@@ -207,17 +212,31 @@ func (rt *Runtime) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool)
}
func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness {
- res, err := rt.vm.rand.GetChainRandomness(rt.ctx, personalization, randEpoch, entropy)
+ var err error
+ var res []byte
+ if randEpoch > build.UpgradeHyperdriveHeight {
+ res, err = rt.vm.rand.GetChainRandomnessLookingForward(rt.ctx, personalization, randEpoch, entropy)
+ } else {
+ res, err = rt.vm.rand.GetChainRandomnessLookingBack(rt.ctx, personalization, randEpoch, entropy)
+ }
+
if err != nil {
- panic(aerrors.Fatalf("could not get randomness: %s", err))
+ panic(aerrors.Fatalf("could not get ticket randomness: %s", err))
}
return res
}
func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness {
- res, err := rt.vm.rand.GetBeaconRandomness(rt.ctx, personalization, randEpoch, entropy)
+ var err error
+ var res []byte
+ if randEpoch > build.UpgradeHyperdriveHeight {
+ res, err = rt.vm.rand.GetBeaconRandomnessLookingForward(rt.ctx, personalization, randEpoch, entropy)
+ } else {
+ res, err = rt.vm.rand.GetBeaconRandomnessLookingBack(rt.ctx, personalization, randEpoch, entropy)
+ }
+
if err != nil {
- panic(aerrors.Fatalf("could not get randomness: %s", err))
+ panic(aerrors.Fatalf("could not get beacon randomness: %s", err))
}
return res
}
@@ -244,20 +263,23 @@ func (rt *Runtime) NewActorAddress() address.Address {
return addr
}
-func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {
+func (rt *Runtime) CreateActor(codeID cid.Cid, addr address.Address) {
+ if addr == address.Undef && rt.NetworkVersion() >= network.Version7 {
+ rt.Abortf(exitcode.SysErrorIllegalArgument, "CreateActor with Undef address")
+ }
act, aerr := rt.vm.areg.Create(codeID, rt)
if aerr != nil {
rt.Abortf(aerr.RetCode(), aerr.Error())
}
- _, err := rt.state.GetActor(address)
+ _, err := rt.state.GetActor(addr)
if err == nil {
rt.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists")
}
rt.chargeGas(rt.Pricelist().OnCreateActor())
- err = rt.state.SetActor(address, act)
+ err = rt.state.SetActor(addr, act)
if err != nil {
panic(aerrors.Fatalf("creating actor entry: %v", err))
}
@@ -266,7 +288,7 @@ func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {
// DeleteActor deletes the executing actor from the state tree, transferring
// any balance to beneficiary.
-// Aborts if the beneficiary does not exist.
+// Aborts if the beneficiary does not exist or is the calling actor.
// May only be called by the actor itself.
func (rt *Runtime) DeleteActor(beneficiary address.Address) {
rt.chargeGas(rt.Pricelist().OnDeleteActor())
@@ -278,6 +300,19 @@ func (rt *Runtime) DeleteActor(beneficiary address.Address) {
panic(aerrors.Fatalf("failed to get actor: %s", err))
}
if !act.Balance.IsZero() {
+ // TODO: Should be safe to drop the version-check,
+ // since only the paych actor called this pre-version 7, but let's leave it for now
+ if rt.NetworkVersion() >= network.Version7 {
+ beneficiaryId, found := rt.ResolveAddress(beneficiary)
+ if !found {
+ rt.Abortf(exitcode.SysErrorIllegalArgument, "beneficiary doesn't exist")
+ }
+
+ if beneficiaryId == rt.Receiver() {
+ rt.Abortf(exitcode.SysErrorIllegalArgument, "benefactor cannot be beneficiary")
+ }
+ }
+
// Transfer the executing actor's balance to the beneficiary
if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance); err != nil {
panic(aerrors.Fatalf("failed to transfer balance to beneficiary actor: %s", err))
@@ -518,7 +553,7 @@ func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError
if EnableGasTracing {
var callers [10]uintptr
- cout := 0 //gruntime.Callers(2+skip, callers[:])
+ cout := gruntime.Callers(2+skip, callers[:])
now := build.Clock.Now()
if rt.lastGasCharge != nil {
@@ -533,12 +568,19 @@ func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError
ComputeGas: gas.ComputeGas,
StorageGas: gas.StorageGas,
- TotalVirtualGas: gas.VirtualCompute*GasComputeMulti + gas.VirtualStorage*GasStorageMulti,
VirtualComputeGas: gas.VirtualCompute,
VirtualStorageGas: gas.VirtualStorage,
Callers: callers[:cout],
}
+ if gasTrace.VirtualStorageGas == 0 {
+ gasTrace.VirtualStorageGas = gasTrace.StorageGas
+ }
+ if gasTrace.VirtualComputeGas == 0 {
+ gasTrace.VirtualComputeGas = gasTrace.ComputeGas
+ }
+ gasTrace.TotalVirtualGas = gasTrace.VirtualComputeGas + gasTrace.VirtualStorageGas
+
rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace)
rt.lastGasChargeTime = now
rt.lastGasCharge = &gasTrace
@@ -546,9 +588,10 @@ func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError
// overflow safe
if rt.gasUsed > rt.gasAvailable-toUse {
+ gasUsed := rt.gasUsed
rt.gasUsed = rt.gasAvailable
- return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gas: used=%d, available=%d",
- rt.gasUsed, rt.gasAvailable)
+ return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gas: used=%d, available=%d, use=%d",
+ gasUsed, rt.gasAvailable, toUse)
}
rt.gasUsed += toUse
return nil
diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go
index d2f1f77d314..0cbefd1fd7f 100644
--- a/chain/vm/syscalls.go
+++ b/chain/vm/syscalls.go
@@ -7,25 +7,27 @@ import (
goruntime "runtime"
"sync"
- "github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/minio/blake2b-simd"
mh "github.com/multiformats/go-multihash"
"golang.org/x/xerrors"
+ "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/lib/sigs"
- runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime"
- proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
-
- "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
+ runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
+ proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
)
func init() {
@@ -34,13 +36,15 @@ func init() {
// Actual type is defined in chain/types/vmcontext.go because the VMContext interface is there
-type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime2.Syscalls
+type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime5.Syscalls
func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder {
- return func(ctx context.Context, rt *Runtime) runtime2.Syscalls {
+ return func(ctx context.Context, rt *Runtime) runtime5.Syscalls {
return &syscallShim{
- ctx: ctx,
+ ctx: ctx,
+ epoch: rt.CurrEpoch(),
+ networkVersion: rt.NetworkVersion(),
actor: rt.Receiver(),
cstate: rt.state,
@@ -55,11 +59,13 @@ func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder {
type syscallShim struct {
ctx context.Context
- lbState LookbackStateGetter
- actor address.Address
- cstate *state.StateTree
- cst cbor.IpldStore
- verifier ffiwrapper.Verifier
+ epoch abi.ChainEpoch
+ networkVersion network.Version
+ lbState LookbackStateGetter
+ actor address.Address
+ cstate *state.StateTree
+ cst cbor.IpldStore
+ verifier ffiwrapper.Verifier
}
func (ss *syscallShim) ComputeUnsealedSectorCID(st abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
@@ -84,7 +90,7 @@ func (ss *syscallShim) HashBlake2b(data []byte) [32]byte {
// Checks validity of the submitted consensus fault with the two block headers needed to prove the fault
// and an optional extra one to check common ancestry (as needed).
// Note that the blocks are ordered: the method requires a.Epoch() <= b.Epoch().
-func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.ConsensusFault, error) {
+func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.ConsensusFault, error) {
// Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions.
// Whether or not it could ever have been accepted in a chain is not checked/does not matter here.
// for that reason when checking block parent relationships, rather than instantiating a Tipset to do so
@@ -102,11 +108,18 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse
return nil, xerrors.Errorf("cannot decode second block header: %f", decodeErr)
}
+ // workaround chain halt
+ if build.IsNearUpgrade(blockA.Height, build.UpgradeOrangeHeight) {
+ return nil, xerrors.Errorf("consensus reporting disabled around Upgrade Orange")
+ }
+ if build.IsNearUpgrade(blockB.Height, build.UpgradeOrangeHeight) {
+ return nil, xerrors.Errorf("consensus reporting disabled around Upgrade Orange")
+ }
+
// are blocks the same?
if blockA.Cid().Equals(blockB.Cid()) {
return nil, fmt.Errorf("no consensus fault: submitted blocks are the same")
}
-
// (1) check conditions necessary to any consensus fault
// were blocks mined by same miner?
@@ -120,14 +133,14 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse
}
// (2) check for the consensus faults themselves
- var consensusFault *runtime2.ConsensusFault
+ var consensusFault *runtime5.ConsensusFault
// (a) double-fork mining fault
if blockA.Height == blockB.Height {
- consensusFault = &runtime2.ConsensusFault{
+ consensusFault = &runtime5.ConsensusFault{
Target: blockA.Miner,
Epoch: blockB.Height,
- Type: runtime2.ConsensusFaultDoubleForkMining,
+ Type: runtime5.ConsensusFaultDoubleForkMining,
}
}
@@ -135,10 +148,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse
// strictly speaking no need to compare heights based on double fork mining check above,
// but at same height this would be a different fault.
if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height {
- consensusFault = &runtime2.ConsensusFault{
+ consensusFault = &runtime5.ConsensusFault{
Target: blockA.Miner,
Epoch: blockB.Height,
- Type: runtime2.ConsensusFaultTimeOffsetMining,
+ Type: runtime5.ConsensusFaultTimeOffsetMining,
}
}
@@ -158,10 +171,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse
if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height &&
types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) {
- consensusFault = &runtime2.ConsensusFault{
+ consensusFault = &runtime5.ConsensusFault{
Target: blockA.Miner,
Epoch: blockB.Height,
- Type: runtime2.ConsensusFaultParentGrinding,
+ Type: runtime5.ConsensusFaultParentGrinding,
}
}
}
@@ -202,6 +215,10 @@ func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error {
}
func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Address, error) {
+ if ss.networkVersion >= network.Version7 && height < ss.epoch-policy.ChainFinality {
+ return address.Undef, xerrors.Errorf("cannot get worker key (currEpoch %d, height %d)", ss.epoch, height)
+ }
+
lbState, err := ss.lbState(ss.ctx, height)
if err != nil {
return address.Undef, err
@@ -226,7 +243,7 @@ func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Addre
return ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker)
}
-func (ss *syscallShim) VerifyPoSt(proof proof2.WindowPoStVerifyInfo) error {
+func (ss *syscallShim) VerifyPoSt(proof proof5.WindowPoStVerifyInfo) error {
ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof)
if err != nil {
return err
@@ -237,7 +254,7 @@ func (ss *syscallShim) VerifyPoSt(proof proof2.WindowPoStVerifyInfo) error {
return nil
}
-func (ss *syscallShim) VerifySeal(info proof2.SealVerifyInfo) error {
+func (ss *syscallShim) VerifySeal(info proof5.SealVerifyInfo) error {
//_, span := trace.StartSpan(ctx, "ValidatePoRep")
//defer span.End()
@@ -250,7 +267,7 @@ func (ss *syscallShim) VerifySeal(info proof2.SealVerifyInfo) error {
proof := info.Proof
seed := []byte(info.InteractiveRandomness)
- log.Debugf("Verif r:%x; d:%x; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof)
+ log.Debugf("Verif r:%s; d:%s; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof)
//func(ctx context.Context, maddr address.Address, ssize abi.SectorSize, commD, commR, ticket, proof, seed []byte, sectorID abi.SectorNumber)
ok, err := ss.verifier.VerifySeal(info)
@@ -264,6 +281,18 @@ func (ss *syscallShim) VerifySeal(info proof2.SealVerifyInfo) error {
return nil
}
+func (ss *syscallShim) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) error {
+ ok, err := ss.verifier.VerifyAggregateSeals(aggregate)
+ if err != nil {
+ return xerrors.Errorf("failed to verify aggregated PoRep: %w", err)
+ }
+ if !ok {
+ return fmt.Errorf("invalid aggregate proof")
+ }
+
+ return nil
+}
+
func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Address, input []byte) error {
// TODO: in genesis setup, we are currently faking signatures
@@ -277,7 +306,7 @@ func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Addres
var BatchSealVerifyParallelism = goruntime.NumCPU()
-func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof2.SealVerifyInfo) (map[address.Address][]bool, error) {
+func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof5.SealVerifyInfo) (map[address.Address][]bool, error) {
out := make(map[address.Address][]bool)
sema := make(chan struct{}, BatchSealVerifyParallelism)
@@ -289,12 +318,12 @@ func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof2.SealVer
for i, s := range seals {
wg.Add(1)
- go func(ma address.Address, ix int, svi proof2.SealVerifyInfo, res []bool) {
+ go func(ma address.Address, ix int, svi proof5.SealVerifyInfo, res []bool) {
defer wg.Done()
sema <- struct{}{}
if err := ss.VerifySeal(svi); err != nil {
- log.Warnw("seal verify in batch failed", "miner", ma, "index", ix, "err", err)
+ log.Warnw("seal verify in batch failed", "miner", ma, "sectorNumber", svi.SectorID.Number, "err", err)
res[ix] = false
} else {
res[ix] = true
diff --git a/chain/vm/vm.go b/chain/vm/vm.go
index 8b7f78074a0..5a31187b7b9 100644
--- a/chain/vm/vm.go
+++ b/chain/vm/vm.go
@@ -4,11 +4,11 @@ import (
"bytes"
"context"
"fmt"
- "reflect"
"sync/atomic"
"time"
"github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/metrics"
block "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
@@ -16,6 +16,7 @@ import (
logging "github.com/ipfs/go-log/v2"
mh "github.com/multiformats/go-multihash"
cbg "github.com/whyrusleeping/cbor-gen"
+ "go.opencensus.io/stats"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
@@ -26,23 +27,24 @@ import (
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-state-types/network"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
"github.com/filecoin-project/lotus/chain/actors/builtin/account"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/lib/blockstore"
- bstore "github.com/filecoin-project/lotus/lib/blockstore"
- "github.com/filecoin-project/lotus/lib/bufbstore"
)
const MaxCallDepth = 4096
-var log = logging.Logger("vm")
-var actorLog = logging.Logger("actors")
-var gasOnActorExec = newGasCharge("OnActorExec", 0, 0)
+var (
+ log = logging.Logger("vm")
+ actorLog = logging.Logger("actors")
+ gasOnActorExec = newGasCharge("OnActorExec", 0, 0)
+)
// stat counters
var (
@@ -69,7 +71,10 @@ func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Ad
return aast.PubkeyAddress()
}
-var _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil)
+var (
+ _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil)
+ _ blockstore.Viewer = (*gasChargingBlocks)(nil)
+)
type gasChargingBlocks struct {
chargeGas func(GasCharge)
@@ -77,6 +82,24 @@ type gasChargingBlocks struct {
under cbor.IpldBlockstore
}
+func (bs *gasChargingBlocks) View(c cid.Cid, cb func([]byte) error) error {
+ if v, ok := bs.under.(blockstore.Viewer); ok {
+ bs.chargeGas(bs.pricelist.OnIpldGet())
+ return v.View(c, func(b []byte) error {
+ // we have successfully retrieved the value; charge for it, even if the user-provided function fails.
+ bs.chargeGas(newGasCharge("OnIpldViewEnd", 0, 0).WithExtra(len(b)))
+ bs.chargeGas(gasOnActorExec)
+ return cb(b)
+ })
+ }
+ // the underlying blockstore doesn't implement the viewer interface, fall back to normal Get behaviour.
+ blk, err := bs.Get(c)
+ if err == nil && blk != nil {
+ return cb(blk.RawData())
+ }
+ return err
+}
+
func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) {
bs.chargeGas(bs.pricelist.OnIpldGet())
blk, err := bs.under.Get(c)
@@ -119,6 +142,10 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti
}
if parent != nil {
+ // TODO: The version check here should be unnecessary, but we can wait to take it out
+ if !parent.allowInternal && rt.NetworkVersion() >= network.Version7 {
+ rt.Abortf(exitcode.SysErrForbidden, "internal calls currently disabled")
+ }
rt.gasUsed = parent.gasUsed
rt.origin = parent.origin
rt.originNonce = parent.originNonce
@@ -130,10 +157,10 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti
rt.Abortf(exitcode.SysErrForbidden, "message execution exceeds call depth")
}
- rt.cst = &cbor.BasicIpldStore{
- Blocks: &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks},
- Atlas: vm.cst.Atlas,
- }
+ cbb := &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks}
+ cst := cbor.NewCborStore(cbb)
+ cst.Atlas = vm.cst.Atlas // associate the atlas.
+ rt.cst = cst
vmm := *msg
resF, ok := rt.ResolveAddress(msg.From)
@@ -168,15 +195,18 @@ func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message) *Runtim
return vm.VM.makeRuntime(ctx, msg, nil)
}
-type CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error)
-type NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version
-type LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error)
+type (
+ CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error)
+ NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version
+ LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error)
+)
type VM struct {
- cstate *state.StateTree
+ cstate *state.StateTree
+ // TODO: Is base actually used? Can we delete it?
base cid.Cid
cst *cbor.BasicIpldStore
- buf *bufbstore.BufferedBS
+ buf *blockstore.BufferedBlockstore
blockHeight abi.ChainEpoch
areg *ActorRegistry
rand Rand
@@ -192,7 +222,7 @@ type VMOpts struct {
StateBase cid.Cid
Epoch abi.ChainEpoch
Rand Rand
- Bstore bstore.Blockstore
+ Bstore blockstore.Blockstore
Syscalls SyscallBuilder
CircSupplyCalc CircSupplyCalculator
NtwkVersion NtwkVersionGetter // TODO: stebalien: In what cases do we actually need this? It seems like even when creating new networks we want to use the 'global'/build-default version getter
@@ -201,7 +231,7 @@ type VMOpts struct {
}
func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) {
- buf := bufbstore.NewBufferedBstore(opts.Bstore)
+ buf := blockstore.NewBuffered(opts.Bstore)
cst := cbor.NewCborStore(buf)
state, err := state.LoadStateTree(cst, opts.StateBase)
if err != nil {
@@ -225,8 +255,10 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) {
}
type Rand interface {
- GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
- GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
+ GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
+ GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
+ GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
+ GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
}
type ApplyRet struct {
@@ -239,7 +271,6 @@ type ApplyRet struct {
func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
gasCharge *GasCharge, start time.Time) ([]byte, aerrors.ActorError, *Runtime) {
-
defer atomic.AddUint64(&StatSends, 1)
st := vm.cstate
@@ -408,6 +439,8 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
},
GasCosts: &gasOutputs,
Duration: time.Since(start),
+ ActorErr: aerrors.Newf(exitcode.SysErrOutOfGas,
+ "message gas limit does not cover on-chain gas costs"),
}, nil
}
@@ -536,7 +569,13 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
if gasUsed < 0 {
gasUsed = 0
}
- gasOutputs := ComputeGasOutputs(gasUsed, msg.GasLimit, vm.baseFee, msg.GasFeeCap, msg.GasPremium)
+
+ burn, err := vm.ShouldBurn(ctx, st, msg, errcode)
+ if err != nil {
+ return nil, xerrors.Errorf("deciding whether should burn failed: %w", err)
+ }
+
+ gasOutputs := ComputeGasOutputs(gasUsed, msg.GasLimit, vm.baseFee, msg.GasFeeCap, msg.GasPremium, burn)
if err := vm.transferFromGasHolder(builtin.BurntFundsActorAddr, gasHolder,
gasOutputs.BaseFeeBurn); err != nil {
@@ -574,6 +613,34 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
}, nil
}
+func (vm *VM) ShouldBurn(ctx context.Context, st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) {
+ if vm.ntwkVersion(ctx, vm.blockHeight) <= network.Version12 {
+ // Check to see if we should burn funds. We avoid burning on successful
+ // window post. This won't catch _indirect_ window post calls, but this
+ // is the best we can get for now.
+ if vm.blockHeight > build.UpgradeClausHeight && errcode == exitcode.Ok && msg.Method == miner.Methods.SubmitWindowedPoSt {
+ // Ok, we've checked the _method_, but we still need to check
+ // the target actor. It would be nice if we could just look at
+ // the trace, but I'm not sure if that's safe?
+ if toActor, err := st.GetActor(msg.To); err != nil {
+ // If the actor wasn't found, we probably deleted it or something. Move on.
+ if !xerrors.Is(err, types.ErrActorNotFound) {
+ // Otherwise, this should never fail and something is very wrong.
+ return false, xerrors.Errorf("failed to lookup target actor: %w", err)
+ }
+ } else if builtin.IsStorageMinerActor(toActor.Code) {
+ // Ok, this is a storage miner and we've processed a window post. Remove the burn.
+ return false, nil
+ }
+ }
+
+ return true, nil
+ }
+
+ // Any "don't burn" rules from Network v13 onwards go here, for now we always return true
+ return true, nil
+}
+
func (vm *VM) ActorBalance(addr address.Address) (types.BigInt, aerrors.ActorError) {
act, err := vm.cstate.GetActor(addr)
if err != nil {
@@ -583,6 +650,8 @@ func (vm *VM) ActorBalance(addr address.Address) (types.BigInt, aerrors.ActorErr
return act.Balance, nil
}
+type vmFlushKey struct{}
+
func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) {
_, span := trace.StartSpan(ctx, "vm.Flush")
defer span.End()
@@ -595,42 +664,17 @@ func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) {
return cid.Undef, xerrors.Errorf("flushing vm: %w", err)
}
- if err := Copy(ctx, from, to, root); err != nil {
+ if err := Copy(context.WithValue(ctx, vmFlushKey{}, true), from, to, root); err != nil {
return cid.Undef, xerrors.Errorf("copying tree: %w", err)
}
return root, nil
}
-// MutateState usage: MutateState(ctx, idAddr, func(cst cbor.IpldStore, st *ActorStateType) error {...})
-func (vm *VM) MutateState(ctx context.Context, addr address.Address, fn interface{}) error {
- act, err := vm.cstate.GetActor(addr)
- if err != nil {
- return xerrors.Errorf("actor not found: %w", err)
- }
-
- st := reflect.New(reflect.TypeOf(fn).In(1).Elem())
- if err := vm.cst.Get(ctx, act.Head, st.Interface()); err != nil {
- return xerrors.Errorf("read actor head: %w", err)
- }
-
- out := reflect.ValueOf(fn).Call([]reflect.Value{reflect.ValueOf(vm.cst), st})
- if !out[0].IsNil() && out[0].Interface().(error) != nil {
- return out[0].Interface().(error)
- }
-
- head, err := vm.cst.Put(ctx, st.Interface())
- if err != nil {
- return xerrors.Errorf("put new actor head: %w", err)
- }
-
- act.Head = head
-
- if err := vm.cstate.SetActor(addr, act); err != nil {
- return xerrors.Errorf("set actor: %w", err)
- }
-
- return nil
+// Get the buffered blockstore associated with the VM. This includes any temporary blocks produced
+// during this VM's execution.
+func (vm *VM) ActorStore(ctx context.Context) adt.Store {
+ return adt.WrapStore(ctx, vm.cst)
}
func linksForObj(blk block.Block, cb func(cid.Cid)) error {
@@ -652,21 +696,48 @@ func linksForObj(blk block.Block, cb func(cid.Cid)) error {
func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) error {
ctx, span := trace.StartSpan(ctx, "vm.Copy") // nolint
defer span.End()
+ start := time.Now()
var numBlocks int
var totalCopySize int
- var batch []block.Block
+ const batchSize = 128
+ const bufCount = 3
+ freeBufs := make(chan []block.Block, bufCount)
+ toFlush := make(chan []block.Block, bufCount)
+ for i := 0; i < bufCount; i++ {
+ freeBufs <- make([]block.Block, 0, batchSize)
+ }
+
+ errFlushChan := make(chan error)
+
+ go func() {
+ for b := range toFlush {
+ if err := to.PutMany(b); err != nil {
+ close(freeBufs)
+ errFlushChan <- xerrors.Errorf("batch put in copy: %w", err)
+ return
+ }
+ freeBufs <- b[:0]
+ }
+ close(errFlushChan)
+ close(freeBufs)
+ }()
+
+ batch := <-freeBufs
batchCp := func(blk block.Block) error {
numBlocks++
totalCopySize += len(blk.RawData())
batch = append(batch, blk)
- if len(batch) > 100 {
- if err := to.PutMany(batch); err != nil {
- return xerrors.Errorf("batch put in copy: %w", err)
+
+ if len(batch) >= batchSize {
+ toFlush <- batch
+ var ok bool
+ batch, ok = <-freeBufs
+ if !ok {
+ return <-errFlushChan
}
- batch = batch[:0]
}
return nil
}
@@ -676,15 +747,22 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err
}
if len(batch) > 0 {
- if err := to.PutMany(batch); err != nil {
- return xerrors.Errorf("batch put in copy: %w", err)
- }
+ toFlush <- batch
+ }
+ close(toFlush) // close the toFlush triggering the loop to end
+ err := <-errFlushChan // get error out or get nil if it was closed
+ if err != nil {
+ return err
}
span.AddAttributes(
trace.Int64Attribute("numBlocks", int64(numBlocks)),
trace.Int64Attribute("copySize", int64(totalCopySize)),
)
+ if yes, ok := ctx.Value(vmFlushKey{}).(bool); yes && ok {
+ took := metrics.SinceInMilliseconds(start)
+ stats.Record(ctx, metrics.VMFlushCopyCount.M(int64(numBlocks)), metrics.VMFlushCopyDuration.M(took))
+ }
return nil
}
diff --git a/chain/wallet/ledger/ledger.go b/chain/wallet/ledger/ledger.go
index 07f92e7ff77..eb16f646036 100644
--- a/chain/wallet/ledger/ledger.go
+++ b/chain/wallet/ledger/ledger.go
@@ -9,7 +9,7 @@ import (
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
- logging "github.com/ipfs/go-log"
+ logging "github.com/ipfs/go-log/v2"
ledgerfil "github.com/whyrusleeping/ledger-filecoin-go"
"golang.org/x/xerrors"
@@ -36,7 +36,7 @@ type LedgerKeyInfo struct {
Path []uint32
}
-var _ api.WalletAPI = (*LedgerWallet)(nil)
+var _ api.Wallet = (*LedgerWallet)(nil)
func (lw LedgerWallet) WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta api.MsgMeta) (*crypto.Signature, error) {
ki, err := lw.getKeyInfo(signer)
@@ -227,7 +227,7 @@ func (lw LedgerWallet) WalletNew(ctx context.Context, t types.KeyType) (address.
return lw.importKey(lki)
}
-func (lw *LedgerWallet) Get() api.WalletAPI {
+func (lw *LedgerWallet) Get() api.Wallet {
if lw == nil {
return nil
}
diff --git a/chain/wallet/multi.go b/chain/wallet/multi.go
index 532ad217bf8..a88475c2e3e 100644
--- a/chain/wallet/multi.go
+++ b/chain/wallet/multi.go
@@ -4,6 +4,7 @@ import (
"context"
"go.uber.org/fx"
+ "go.uber.org/multierr"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
@@ -24,13 +25,13 @@ type MultiWallet struct {
}
type getif interface {
- api.WalletAPI
+ api.Wallet
// workaround for the fact that iface(*struct(nil)) != nil
- Get() api.WalletAPI
+ Get() api.Wallet
}
-func firstNonNil(wallets ...getif) api.WalletAPI {
+func firstNonNil(wallets ...getif) api.Wallet {
for _, w := range wallets {
if w.Get() != nil {
return w
@@ -40,8 +41,8 @@ func firstNonNil(wallets ...getif) api.WalletAPI {
return nil
}
-func nonNil(wallets ...getif) []api.WalletAPI {
- var out []api.WalletAPI
+func nonNil(wallets ...getif) []api.Wallet {
+ var out []api.Wallet
for _, w := range wallets {
if w.Get() == nil {
continue
@@ -53,21 +54,21 @@ func nonNil(wallets ...getif) []api.WalletAPI {
return out
}
-func (m MultiWallet) find(ctx context.Context, address address.Address, wallets ...getif) (api.WalletAPI, error) {
+func (m MultiWallet) find(ctx context.Context, address address.Address, wallets ...getif) (api.Wallet, error) {
ws := nonNil(wallets...)
+ var merr error
+
for _, w := range ws {
have, err := w.WalletHas(ctx, address)
- if err != nil {
- return nil, err
- }
+ merr = multierr.Append(merr, err)
- if have {
+ if err == nil && have {
return w, nil
}
}
- return nil, nil
+ return nil, merr
}
func (m MultiWallet) WalletNew(ctx context.Context, keyType types.KeyType) (address.Address, error) {
@@ -90,7 +91,7 @@ func (m MultiWallet) WalletHas(ctx context.Context, address address.Address) (bo
}
func (m MultiWallet) WalletList(ctx context.Context) ([]address.Address, error) {
- var out []address.Address
+ out := make([]address.Address, 0)
seen := map[address.Address]struct{}{}
ws := nonNil(m.Remote, m.Ledger, m.Local)
@@ -167,4 +168,4 @@ func (m MultiWallet) WalletDelete(ctx context.Context, address address.Address)
}
}
-var _ api.WalletAPI = MultiWallet{}
+var _ api.Wallet = MultiWallet{}
diff --git a/chain/wallet/remotewallet/remote.go b/chain/wallet/remotewallet/remote.go
index aa44271326f..d1734518e1b 100644
--- a/chain/wallet/remotewallet/remote.go
+++ b/chain/wallet/remotewallet/remote.go
@@ -13,19 +13,19 @@ import (
)
type RemoteWallet struct {
- api.WalletAPI
+ api.Wallet
}
func SetupRemoteWallet(info string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (*RemoteWallet, error) {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (*RemoteWallet, error) {
ai := cliutil.ParseApiInfo(info)
- url, err := ai.DialArgs()
+ url, err := ai.DialArgs("v0")
if err != nil {
return nil, err
}
- wapi, closer, err := client.NewWalletRPC(mctx, url, ai.AuthHeader())
+ wapi, closer, err := client.NewWalletRPCV0(mctx, url, ai.AuthHeader())
if err != nil {
return nil, xerrors.Errorf("creating jsonrpc client: %w", err)
}
@@ -41,7 +41,7 @@ func SetupRemoteWallet(info string) func(mctx helpers.MetricsCtx, lc fx.Lifecycl
}
}
-func (w *RemoteWallet) Get() api.WalletAPI {
+func (w *RemoteWallet) Get() api.Wallet {
if w == nil {
return nil
}
diff --git a/chain/wallet/wallet.go b/chain/wallet/wallet.go
index 33fa3135e3b..cbe78a9e8fa 100644
--- a/chain/wallet/wallet.go
+++ b/chain/wallet/wallet.go
@@ -6,18 +6,16 @@ import (
"strings"
"sync"
+ "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/crypto"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
- "github.com/filecoin-project/go-address"
-
"github.com/filecoin-project/lotus/api"
- _ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures
- _ "github.com/filecoin-project/lotus/lib/sigs/secp" // enable secp signatures
-
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/sigs"
+ _ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures
+ _ "github.com/filecoin-project/lotus/lib/sigs/secp" // enable secp signatures
)
var log = logging.Logger("wallet")
@@ -270,7 +268,7 @@ func (w *LocalWallet) WalletHas(ctx context.Context, addr address.Address) (bool
return k != nil, nil
}
-func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) error {
+func (w *LocalWallet) walletDelete(ctx context.Context, addr address.Address) error {
k, err := w.findKey(addr)
if err != nil {
@@ -308,7 +306,30 @@ func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) er
return nil
}
-func (w *LocalWallet) Get() api.WalletAPI {
+func (w *LocalWallet) deleteDefault() {
+ w.lk.Lock()
+ defer w.lk.Unlock()
+ if err := w.keystore.Delete(KDefault); err != nil {
+ if !xerrors.Is(err, types.ErrKeyInfoNotFound) {
+ log.Warnf("failed to unregister current default key: %s", err)
+ }
+ }
+}
+
+func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) error {
+ if err := w.walletDelete(ctx, addr); err != nil {
+ return xerrors.Errorf("wallet delete: %w", err)
+ }
+
+ if def, err := w.GetDefault(); err == nil {
+ if def == addr {
+ w.deleteDefault()
+ }
+ }
+ return nil
+}
+
+func (w *LocalWallet) Get() api.Wallet {
if w == nil {
return nil
}
@@ -316,7 +337,7 @@ func (w *LocalWallet) Get() api.WalletAPI {
return w
}
-var _ api.WalletAPI = &LocalWallet{}
+var _ api.Wallet = &LocalWallet{}
func swapMainnetForTestnetPrefix(addr string) (string, error) {
aChars := []rune(addr)
diff --git a/cli/auth.go b/cli/auth.go
index ba20b2bccf6..20b9bb39428 100644
--- a/cli/auth.go
+++ b/cli/auth.go
@@ -8,20 +8,21 @@ import (
"github.com/filecoin-project/go-jsonrpc/auth"
- "github.com/filecoin-project/lotus/api/apistruct"
+ "github.com/filecoin-project/lotus/api"
+ cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/node/repo"
)
-var authCmd = &cli.Command{
+var AuthCmd = &cli.Command{
Name: "auth",
Usage: "Manage RPC permissions",
Subcommands: []*cli.Command{
- authCreateAdminToken,
- authApiInfoToken,
+ AuthCreateAdminToken,
+ AuthApiInfoToken,
},
}
-var authCreateAdminToken = &cli.Command{
+var AuthCreateAdminToken = &cli.Command{
Name: "create-token",
Usage: "Create token",
Flags: []cli.Flag{
@@ -46,18 +47,18 @@ var authCreateAdminToken = &cli.Command{
perm := cctx.String("perm")
idx := 0
- for i, p := range apistruct.AllPermissions {
+ for i, p := range api.AllPermissions {
if auth.Permission(perm) == p {
idx = i + 1
}
}
if idx == 0 {
- return fmt.Errorf("--perm flag has to be one of: %s", apistruct.AllPermissions)
+ return fmt.Errorf("--perm flag has to be one of: %s", api.AllPermissions)
}
// slice on [:idx] so for example: 'sign' gives you [read, write, sign]
- token, err := napi.AuthNew(ctx, apistruct.AllPermissions[:idx])
+ token, err := napi.AuthNew(ctx, api.AllPermissions[:idx])
if err != nil {
return err
}
@@ -69,7 +70,7 @@ var authCreateAdminToken = &cli.Command{
},
}
-var authApiInfoToken = &cli.Command{
+var AuthApiInfoToken = &cli.Command{
Name: "api-info",
Usage: "Get token with API info required to connect to this node",
Flags: []cli.Flag{
@@ -89,23 +90,23 @@ var authApiInfoToken = &cli.Command{
ctx := ReqContext(cctx)
if !cctx.IsSet("perm") {
- return xerrors.New("--perm flag not set")
+ return xerrors.New("--perm flag not set, use with one of: read, write, sign, admin")
}
perm := cctx.String("perm")
idx := 0
- for i, p := range apistruct.AllPermissions {
+ for i, p := range api.AllPermissions {
if auth.Permission(perm) == p {
idx = i + 1
}
}
if idx == 0 {
- return fmt.Errorf("--perm flag has to be one of: %s", apistruct.AllPermissions)
+ return fmt.Errorf("--perm flag has to be one of: %s", api.AllPermissions)
}
// slice on [:idx] so for example: 'sign' gives you [read, write, sign]
- token, err := napi.AuthNew(ctx, apistruct.AllPermissions[:idx])
+ token, err := napi.AuthNew(ctx, api.AllPermissions[:idx])
if err != nil {
return err
}
@@ -127,7 +128,7 @@ var authApiInfoToken = &cli.Command{
// TODO: Log in audit log when it is implemented
- fmt.Printf("%s=%s:%s\n", envForRepo(t), string(token), ainfo.Addr)
+ fmt.Printf("%s=%s:%s\n", cliutil.EnvForRepo(t), string(token), ainfo.Addr)
return nil
},
}
diff --git a/cli/backup.go b/cli/backup.go
index c748e47c438..856e098dd36 100644
--- a/cli/backup.go
+++ b/cli/backup.go
@@ -46,12 +46,15 @@ func BackupCmd(repoFlag string, rt repo.RepoType, getApi BackupApiFn) *cli.Comma
}
defer lr.Close() // nolint:errcheck
- mds, err := lr.Datastore("/metadata")
+ mds, err := lr.Datastore(context.TODO(), "/metadata")
if err != nil {
return xerrors.Errorf("getting metadata datastore: %w", err)
}
- bds := backupds.Wrap(mds)
+ bds, err := backupds.Wrap(mds, backupds.NoLogdir)
+ if err != nil {
+ return err
+ }
fpath, err := homedir.Expand(cctx.Args().First())
if err != nil {
diff --git a/cli/chain.go b/cli/chain.go
index e2d0ebb4ad6..e30a685dd84 100644
--- a/cli/chain.go
+++ b/cli/chain.go
@@ -3,12 +3,14 @@ package cli
import (
"bytes"
"context"
+ "encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"os"
"os/exec"
"path"
+ "reflect"
"sort"
"strconv"
"strings"
@@ -29,36 +31,40 @@ import (
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
+ "github.com/filecoin-project/lotus/api"
lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/stmgr"
types "github.com/filecoin-project/lotus/chain/types"
)
-var chainCmd = &cli.Command{
+var ChainCmd = &cli.Command{
Name: "chain",
Usage: "Interact with filecoin blockchain",
Subcommands: []*cli.Command{
- chainHeadCmd,
- chainGetBlock,
- chainReadObjCmd,
- chainDeleteObjCmd,
- chainStatObjCmd,
- chainGetMsgCmd,
- chainSetHeadCmd,
- chainListCmd,
- chainGetCmd,
- chainBisectCmd,
- chainExportCmd,
- slashConsensusFault,
- chainGasPriceCmd,
- chainInspectUsage,
- chainDecodeCmd,
+ ChainHeadCmd,
+ ChainGetBlock,
+ ChainReadObjCmd,
+ ChainDeleteObjCmd,
+ ChainStatObjCmd,
+ ChainGetMsgCmd,
+ ChainSetHeadCmd,
+ ChainListCmd,
+ ChainGetCmd,
+ ChainBisectCmd,
+ ChainExportCmd,
+ SlashConsensusFault,
+ ChainGasPriceCmd,
+ ChainInspectUsage,
+ ChainDecodeCmd,
+ ChainEncodeCmd,
+ ChainDisputeSetCmd,
},
}
-var chainHeadCmd = &cli.Command{
+var ChainHeadCmd = &cli.Command{
Name: "head",
Usage: "Print chain head",
Action: func(cctx *cli.Context) error {
@@ -81,7 +87,7 @@ var chainHeadCmd = &cli.Command{
},
}
-var chainGetBlock = &cli.Command{
+var ChainGetBlock = &cli.Command{
Name: "getblock",
Usage: "Get a block and print its details",
ArgsUsage: "[blockCid]",
@@ -172,7 +178,7 @@ func apiMsgCids(in []lapi.Message) []cid.Cid {
return out
}
-var chainReadObjCmd = &cli.Command{
+var ChainReadObjCmd = &cli.Command{
Name: "read-obj",
Usage: "Read the raw bytes of an object",
ArgsUsage: "[objectCid]",
@@ -199,7 +205,7 @@ var chainReadObjCmd = &cli.Command{
},
}
-var chainDeleteObjCmd = &cli.Command{
+var ChainDeleteObjCmd = &cli.Command{
Name: "delete-obj",
Usage: "Delete an object from the chain blockstore",
Description: "WARNING: Removing wrong objects from the chain blockstore may lead to sync issues",
@@ -236,7 +242,7 @@ var chainDeleteObjCmd = &cli.Command{
},
}
-var chainStatObjCmd = &cli.Command{
+var ChainStatObjCmd = &cli.Command{
Name: "stat-obj",
Usage: "Collect size and ipld link counts for objs",
ArgsUsage: "[cid]",
@@ -283,7 +289,7 @@ var chainStatObjCmd = &cli.Command{
},
}
-var chainGetMsgCmd = &cli.Command{
+var ChainGetMsgCmd = &cli.Command{
Name: "getmessage",
Usage: "Get and print a message by its cid",
ArgsUsage: "[messageCid]",
@@ -331,7 +337,7 @@ var chainGetMsgCmd = &cli.Command{
},
}
-var chainSetHeadCmd = &cli.Command{
+var ChainSetHeadCmd = &cli.Command{
Name: "sethead",
Usage: "manually set the local nodes head tipset (Caution: normally only used for recovery)",
ArgsUsage: "[tipsetkey]",
@@ -380,7 +386,7 @@ var chainSetHeadCmd = &cli.Command{
},
}
-var chainInspectUsage = &cli.Command{
+var ChainInspectUsage = &cli.Command{
Name: "inspect-usage",
Usage: "Inspect block space usage of a given tipset",
Flags: []cli.Flag{
@@ -465,6 +471,9 @@ var chainInspectUsage = &cli.Command{
code, err := lookupActorCode(m.Message.To)
if err != nil {
+ if strings.Contains(err.Error(), types.ErrActorNotFound.Error()) {
+ continue
+ }
return err
}
@@ -522,12 +531,12 @@ var chainInspectUsage = &cli.Command{
},
}
-var chainListCmd = &cli.Command{
+var ChainListCmd = &cli.Command{
Name: "list",
Aliases: []string{"love"},
Usage: "View a segment of the chain",
Flags: []cli.Flag{
- &cli.Uint64Flag{Name: "height"},
+ &cli.Uint64Flag{Name: "height", DefaultText: "current head"},
&cli.IntFlag{Name: "count", Value: 30},
&cli.StringFlag{
Name: "format",
@@ -635,7 +644,10 @@ var chainListCmd = &cli.Command{
gasUsed += r.GasUsed
}
- fmt.Printf("\ttipset: \t%d msgs, %d / %d (%0.2f%%)\n", len(msgs), gasUsed, limitSum, 100*float64(gasUsed)/float64(limitSum))
+ gasEfficiency := 100 * float64(gasUsed) / float64(limitSum)
+ gasCapacity := 100 * float64(limitSum) / float64(build.BlockGasLimit)
+
+ fmt.Printf("\ttipset: \t%d msgs, %d (%0.2f%%) / %d (%0.2f%%)\n", len(msgs), gasUsed, gasEfficiency, limitSum, gasCapacity)
}
fmt.Println()
}
@@ -648,7 +660,7 @@ var chainListCmd = &cli.Command{
},
}
-var chainGetCmd = &cli.Command{
+var ChainGetCmd = &cli.Command{
Name: "get",
Usage: "Get chain DAG node by path",
ArgsUsage: "[path]",
@@ -712,12 +724,6 @@ var chainGetCmd = &cli.Command{
return err
}
- if ts == nil {
- ts, err = api.ChainHead(ctx)
- if err != nil {
- return err
- }
- }
p = "/ipfs/" + ts.ParentState().String() + p
if cctx.Bool("verbose") {
fmt.Println(p)
@@ -796,7 +802,7 @@ var chainGetCmd = &cli.Command{
type apiIpldStore struct {
ctx context.Context
- api lapi.FullNode
+ api v0api.FullNode
}
func (ht *apiIpldStore) Context() context.Context {
@@ -824,7 +830,7 @@ func (ht *apiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error)
panic("No mutations allowed")
}
-func handleAmt(ctx context.Context, api lapi.FullNode, r cid.Cid) error {
+func handleAmt(ctx context.Context, api v0api.FullNode, r cid.Cid) error {
s := &apiIpldStore{ctx, api}
mp, err := adt.AsArray(s, r)
if err != nil {
@@ -837,7 +843,7 @@ func handleAmt(ctx context.Context, api lapi.FullNode, r cid.Cid) error {
})
}
-func handleHamtEpoch(ctx context.Context, api lapi.FullNode, r cid.Cid) error {
+func handleHamtEpoch(ctx context.Context, api v0api.FullNode, r cid.Cid) error {
s := &apiIpldStore{ctx, api}
mp, err := adt.AsMap(s, r)
if err != nil {
@@ -855,7 +861,7 @@ func handleHamtEpoch(ctx context.Context, api lapi.FullNode, r cid.Cid) error {
})
}
-func handleHamtAddress(ctx context.Context, api lapi.FullNode, r cid.Cid) error {
+func handleHamtAddress(ctx context.Context, api v0api.FullNode, r cid.Cid) error {
s := &apiIpldStore{ctx, api}
mp, err := adt.AsMap(s, r)
if err != nil {
@@ -895,7 +901,7 @@ func printTipSet(format string, ts *types.TipSet) {
fmt.Println(format)
}
-var chainBisectCmd = &cli.Command{
+var ChainBisectCmd = &cli.Command{
Name: "bisect",
Usage: "bisect chain for an event",
ArgsUsage: "[minHeight maxHeight path shellCommand ]",
@@ -1018,13 +1024,15 @@ var chainBisectCmd = &cli.Command{
},
}
-var chainExportCmd = &cli.Command{
+var ChainExportCmd = &cli.Command{
Name: "export",
Usage: "export chain to a car file",
ArgsUsage: "[outputPath]",
Flags: []cli.Flag{
&cli.StringFlag{
- Name: "tipset",
+ Name: "tipset",
+ Usage: "specify tipset to start the export from",
+ Value: "@head",
},
&cli.Int64Flag{
Name: "recent-stateroots",
@@ -1096,14 +1104,14 @@ var chainExportCmd = &cli.Command{
},
}
-var slashConsensusFault = &cli.Command{
+var SlashConsensusFault = &cli.Command{
Name: "slash-consensus",
Usage: "Report consensus fault",
ArgsUsage: "[blockCid1 blockCid2]",
Flags: []cli.Flag{
&cli.StringFlag{
- Name: "miner",
- Usage: "Miner address",
+ Name: "from",
+ Usage: "optionally specify the account to report consensus from",
},
&cli.StringFlag{
Name: "extra",
@@ -1111,11 +1119,13 @@ var slashConsensusFault = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ a := srv.FullNodeAPI()
ctx := ReqContext(cctx)
c1, err := cid.Parse(cctx.Args().Get(0))
@@ -1123,7 +1133,7 @@ var slashConsensusFault = &cli.Command{
return xerrors.Errorf("parsing cid 1: %w", err)
}
- b1, err := api.ChainGetBlock(ctx, c1)
+ b1, err := a.ChainGetBlock(ctx, c1)
if err != nil {
return xerrors.Errorf("getting block 1: %w", err)
}
@@ -1133,14 +1143,30 @@ var slashConsensusFault = &cli.Command{
return xerrors.Errorf("parsing cid 2: %w", err)
}
- b2, err := api.ChainGetBlock(ctx, c2)
+ b2, err := a.ChainGetBlock(ctx, c2)
if err != nil {
return xerrors.Errorf("getting block 2: %w", err)
}
- def, err := api.WalletDefaultAddress(ctx)
- if err != nil {
- return err
+ if b1.Miner != b2.Miner {
+ return xerrors.Errorf("block1.miner:%s block2.miner:%s", b1.Miner, b2.Miner)
+ }
+
+ var fromAddr address.Address
+ if from := cctx.String("from"); from == "" {
+ defaddr, err := a.WalletDefaultAddress(ctx)
+ if err != nil {
+ return err
+ }
+
+ fromAddr = defaddr
+ } else {
+ addr, err := address.NewFromString(from)
+ if err != nil {
+ return err
+ }
+
+ fromAddr = addr
}
bh1, err := cborutil.Dump(b1)
@@ -1164,7 +1190,7 @@ var slashConsensusFault = &cli.Command{
return xerrors.Errorf("parsing cid extra: %w", err)
}
- bExtra, err := api.ChainGetBlock(ctx, cExtra)
+ bExtra, err := a.ChainGetBlock(ctx, cExtra)
if err != nil {
return xerrors.Errorf("getting block extra: %w", err)
}
@@ -1182,24 +1208,17 @@ var slashConsensusFault = &cli.Command{
return err
}
- if cctx.String("miner") == "" {
- return xerrors.Errorf("--miner flag is required")
- }
-
- maddr, err := address.NewFromString(cctx.String("miner"))
- if err != nil {
- return err
- }
-
- msg := &types.Message{
- To: maddr,
- From: def,
- Value: types.NewInt(0),
- Method: builtin.MethodsMiner.ReportConsensusFault,
- Params: enc,
+ proto := &api.MessagePrototype{
+ Message: types.Message{
+ To: b2.Miner,
+ From: fromAddr,
+ Value: types.NewInt(0),
+ Method: builtin.MethodsMiner.ReportConsensusFault,
+ Params: enc,
+ },
}
- smsg, err := api.MpoolPushMessage(ctx, msg, nil)
+ smsg, err := InteractiveSend(ctx, cctx, srv, proto)
if err != nil {
return err
}
@@ -1210,7 +1229,7 @@ var slashConsensusFault = &cli.Command{
},
}
-var chainGasPriceCmd = &cli.Command{
+var ChainGasPriceCmd = &cli.Command{
Name: "gas-price",
Usage: "Estimate gas prices",
Action: func(cctx *cli.Context) error {
@@ -1237,7 +1256,7 @@ var chainGasPriceCmd = &cli.Command{
},
}
-var chainDecodeCmd = &cli.Command{
+var ChainDecodeCmd = &cli.Command{
Name: "decode",
Usage: "decode various types",
Subcommands: []*cli.Command{
@@ -1246,14 +1265,19 @@ var chainDecodeCmd = &cli.Command{
}
var chainDecodeParamsCmd = &cli.Command{
- Name: "params",
- Usage: "Decode message params",
+ Name: "params",
+ Usage: "Decode message params",
+ ArgsUsage: "[toAddr method params]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "tipset",
},
+ &cli.StringFlag{
+ Name: "encoding",
+ Value: "base64",
+ Usage: "specify input encoding to parse",
+ },
},
- ArgsUsage: "[toAddr method hexParams]",
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
@@ -1276,11 +1300,21 @@ var chainDecodeParamsCmd = &cli.Command{
return xerrors.Errorf("parsing method id: %w", err)
}
- params, err := hex.DecodeString(cctx.Args().Get(2))
- if err != nil {
- return xerrors.Errorf("parsing hex params: %w", err)
+ var params []byte
+ switch cctx.String("encoding") {
+ case "base64":
+ params, err = base64.StdEncoding.DecodeString(cctx.Args().Get(2))
+ if err != nil {
+ return xerrors.Errorf("decoding base64 value: %w", err)
+ }
+ case "hex":
+ params, err = hex.DecodeString(cctx.Args().Get(2))
+ if err != nil {
+ return xerrors.Errorf("decoding hex value: %w", err)
+ }
+ default:
+ return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding"))
}
-
ts, err := LoadTipSet(ctx, cctx, api)
if err != nil {
return err
@@ -1301,3 +1335,86 @@ var chainDecodeParamsCmd = &cli.Command{
return nil
},
}
+
+var ChainEncodeCmd = &cli.Command{
+ Name: "encode",
+ Usage: "encode various types",
+ Subcommands: []*cli.Command{
+ chainEncodeParamsCmd,
+ },
+}
+
+var chainEncodeParamsCmd = &cli.Command{
+ Name: "params",
+ Usage: "Encodes the given JSON params",
+ ArgsUsage: "[toAddr method params]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "tipset",
+ },
+ &cli.StringFlag{
+ Name: "encoding",
+ Value: "base64",
+ Usage: "specify input encoding to parse",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ if cctx.Args().Len() != 3 {
+ return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments"))
+ }
+
+ to, err := address.NewFromString(cctx.Args().First())
+ if err != nil {
+ return xerrors.Errorf("parsing toAddr: %w", err)
+ }
+
+ method, err := strconv.ParseInt(cctx.Args().Get(1), 10, 64)
+ if err != nil {
+ return xerrors.Errorf("parsing method id: %w", err)
+ }
+
+ ts, err := LoadTipSet(ctx, cctx, api)
+ if err != nil {
+ return err
+ }
+
+ act, err := api.StateGetActor(ctx, to, ts.Key())
+ if err != nil {
+ return xerrors.Errorf("getting actor: %w", err)
+ }
+
+ methodMeta, found := stmgr.MethodsMap[act.Code][abi.MethodNum(method)]
+ if !found {
+ return fmt.Errorf("method %d not found on actor %s", method, act.Code)
+ }
+
+ p := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORMarshaler)
+
+ if err := json.Unmarshal([]byte(cctx.Args().Get(2)), p); err != nil {
+ return fmt.Errorf("unmarshaling input into params type: %w", err)
+ }
+
+ buf := new(bytes.Buffer)
+ if err := p.MarshalCBOR(buf); err != nil {
+ return err
+ }
+
+ switch cctx.String("encoding") {
+ case "base64":
+ fmt.Println(base64.StdEncoding.EncodeToString(buf.Bytes()))
+ case "hex":
+ fmt.Println(hex.EncodeToString(buf.Bytes()))
+ default:
+ return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding"))
+ }
+
+ return nil
+ },
+}
diff --git a/cli/client.go b/cli/client.go
index 07e3cb2c877..774d9aa5ff9 100644
--- a/cli/client.go
+++ b/cli/client.go
@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io"
+ "math"
"math/rand"
"os"
"path/filepath"
@@ -39,6 +40,7 @@ import (
"github.com/filecoin-project/lotus/api"
lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
@@ -81,15 +83,19 @@ var clientCmd = &cli.Command{
WithCategory("storage", clientListDeals),
WithCategory("storage", clientGetDealCmd),
WithCategory("storage", clientListAsksCmd),
+ WithCategory("storage", clientDealStatsCmd),
+ WithCategory("storage", clientInspectDealCmd),
WithCategory("data", clientImportCmd),
WithCategory("data", clientDropCmd),
WithCategory("data", clientLocalCmd),
WithCategory("data", clientStat),
WithCategory("retrieval", clientFindCmd),
WithCategory("retrieval", clientRetrieveCmd),
+ WithCategory("retrieval", clientCancelRetrievalDealCmd),
+ WithCategory("retrieval", clientListRetrievalsCmd),
WithCategory("util", clientCommPCmd),
WithCategory("util", clientCarGenCmd),
- WithCategory("util", clientInfoCmd),
+ WithCategory("util", clientBalancesCmd),
WithCategory("util", clientListTransfers),
WithCategory("util", clientRestartTransfer),
WithCategory("util", clientCancelTransfer),
@@ -297,8 +303,16 @@ var clientLocalCmd = &cli.Command{
}
var clientDealCmd = &cli.Command{
- Name: "deal",
- Usage: "Initialize storage deal with a miner",
+ Name: "deal",
+ Usage: "Initialize storage deal with a miner",
+ Description: `Make a deal with a miner.
+dataCid comes from running 'lotus client import'.
+miner is the address of the miner you wish to make a deal with.
+price is measured in FIL/Epoch. Miners usually don't accept a bid
+lower than their advertised ask (which is in FIL/GiB/Epoch). You can check a miners listed price
+with 'lotus client query-ask '.
+duration is how long the miner should store the data for, in blocks.
+The minimum value is 518400 (6 months).`,
ArgsUsage: "[dataCid miner price duration]",
Flags: []cli.Flag{
&cli.StringFlag{
@@ -309,6 +323,10 @@ var clientDealCmd = &cli.Command{
Name: "manual-piece-size",
Usage: "if manually specifying piece cid, used to specify size (dataCid must be to a car file)",
},
+ &cli.BoolFlag{
+ Name: "manual-stateless-deal",
+ Usage: "instructs the node to send an offline deal without registering it with the deallist/fsm",
+ },
&cli.StringFlag{
Name: "from",
Usage: "specify address to fund the deal with",
@@ -324,9 +342,9 @@ var clientDealCmd = &cli.Command{
Value: true,
},
&cli.BoolFlag{
- Name: "verified-deal",
- Usage: "indicate that the deal counts towards verified client total",
- Value: false,
+ Name: "verified-deal",
+ Usage: "indicate that the deal counts towards verified client total",
+ DefaultText: "true if client is verified, false otherwise",
},
&cli.StringFlag{
Name: "provider-collateral",
@@ -385,6 +403,9 @@ var clientDealCmd = &cli.Command{
if abi.ChainEpoch(dur) < build.MinDealDuration {
return xerrors.Errorf("minimum deal duration is %d blocks", build.MinDealDuration)
}
+ if abi.ChainEpoch(dur) > build.MaxDealDuration {
+ return xerrors.Errorf("maximum deal duration is %d blocks", build.MaxDealDuration)
+ }
var a address.Address
if from := cctx.String("from"); from != "" {
@@ -445,7 +466,7 @@ var clientDealCmd = &cli.Command{
isVerified = verifiedDealParam
}
- proposal, err := api.ClientStartDeal(ctx, &lapi.StartDealParams{
+ sdParams := &lapi.StartDealParams{
Data: ref,
Wallet: a,
Miner: miner,
@@ -455,7 +476,18 @@ var clientDealCmd = &cli.Command{
FastRetrieval: cctx.Bool("fast-retrieval"),
VerifiedDeal: isVerified,
ProviderCollateral: provCol,
- })
+ }
+
+ var proposal *cid.Cid
+ if cctx.Bool("manual-stateless-deal") {
+ if ref.TransferType != storagemarket.TTManual || price.Int64() != 0 {
+ return xerrors.New("when manual-stateless-deal is enabled, you must also provide a 'price' of 0 and specify 'manual-piece-cid' and 'manual-piece-size'")
+ }
+ proposal, err = api.ClientStatelessDeal(ctx, sdParams)
+ } else {
+ proposal, err = api.ClientStartDeal(ctx, sdParams)
+ }
+
if err != nil {
return err
}
@@ -496,9 +528,10 @@ func interactiveDeal(cctx *cli.Context) error {
var ds lapi.DataCIDSize
// find
- var candidateAsks []*storagemarket.StorageAsk
+ var candidateAsks []QueriedAsk
var budget types.FIL
var dealCount int64
+ var medianPing, maxAcceptablePing time.Duration
var a address.Address
if from := cctx.String("from"); from != "" {
@@ -653,22 +686,72 @@ uiLoop:
state = "find"
}
case "find":
- asks, err := getAsks(ctx, api)
+ asks, err := GetAsks(ctx, api)
if err != nil {
return err
}
+ if len(asks) == 0 {
+ printErr(xerrors.Errorf("no asks found"))
+ continue uiLoop
+ }
+
+ medianPing = asks[len(asks)/2].Ping
+ var avgPing time.Duration
+ for _, ask := range asks {
+ avgPing += ask.Ping
+ }
+ avgPing /= time.Duration(len(asks))
+
for _, ask := range asks {
- if ask.MinPieceSize > ds.PieceSize {
+ if ask.Ask.MinPieceSize > ds.PieceSize {
continue
}
- if ask.MaxPieceSize < ds.PieceSize {
+ if ask.Ask.MaxPieceSize < ds.PieceSize {
continue
}
candidateAsks = append(candidateAsks, ask)
}
afmt.Printf("Found %d candidate asks\n", len(candidateAsks))
+ afmt.Printf("Average network latency: %s; Median latency: %s\n", avgPing.Truncate(time.Millisecond), medianPing.Truncate(time.Millisecond))
+ state = "max-ping"
+ case "max-ping":
+ maxAcceptablePing = medianPing
+
+ afmt.Printf("Maximum network latency (default: %s) (ms): ", maxAcceptablePing.Truncate(time.Millisecond))
+ _latStr, _, err := rl.ReadLine()
+ latStr := string(_latStr)
+ if err != nil {
+ printErr(xerrors.Errorf("reading maximum latency: %w", err))
+ continue
+ }
+
+ if latStr != "" {
+ maxMs, err := strconv.ParseInt(latStr, 10, 64)
+ if err != nil {
+ printErr(xerrors.Errorf("parsing FIL: %w", err))
+ continue uiLoop
+ }
+
+ maxAcceptablePing = time.Millisecond * time.Duration(maxMs)
+ }
+
+ var goodAsks []QueriedAsk
+ for _, candidateAsk := range candidateAsks {
+ if candidateAsk.Ping < maxAcceptablePing {
+ goodAsks = append(goodAsks, candidateAsk)
+ }
+ }
+
+ if len(goodAsks) == 0 {
+ afmt.Printf("no asks left after filtering for network latency\n")
+ continue uiLoop
+ }
+
+ afmt.Printf("%d asks left after filtering for network latency\n", len(goodAsks))
+ candidateAsks = goodAsks
+
state = "find-budget"
case "find-budget":
afmt.Printf("Proposing from %s, Current Balance: %s\n", a, types.FIL(fromBal))
@@ -687,11 +770,11 @@ uiLoop:
continue uiLoop
}
- var goodAsks []*storagemarket.StorageAsk
+ var goodAsks []QueriedAsk
for _, ask := range candidateAsks {
- p := ask.Price
+ p := ask.Ask.Price
if verified {
- p = ask.VerifiedPrice
+ p = ask.Ask.VerifiedPrice
}
epochPrice := types.BigDiv(types.BigMul(p, types.NewInt(uint64(ds.PieceSize))), gib)
@@ -731,9 +814,9 @@ uiLoop:
pickedAsks = []*storagemarket.StorageAsk{}
for _, ask := range candidateAsks {
- p := ask.Price
+ p := ask.Ask.Price
if verified {
- p = ask.VerifiedPrice
+ p = ask.Ask.VerifiedPrice
}
epochPrice := types.BigDiv(types.BigMul(p, types.NewInt(uint64(ds.PieceSize))), gib)
@@ -743,7 +826,7 @@ uiLoop:
continue
}
- pickedAsks = append(pickedAsks, ask)
+ pickedAsks = append(pickedAsks, ask.Ask)
remainingBudget = big.Sub(remainingBudget, totalPrice)
if len(pickedAsks) == int(dealCount) {
@@ -943,7 +1026,7 @@ var clientFindCmd = &cli.Command{
},
}
-const DefaultMaxRetrievePrice = 1
+const DefaultMaxRetrievePrice = "0.01"
var clientRetrieveCmd = &cli.Command{
Name: "retrieve",
@@ -964,12 +1047,15 @@ var clientRetrieveCmd = &cli.Command{
},
&cli.StringFlag{
Name: "maxPrice",
- Usage: fmt.Sprintf("maximum price the client is willing to consider (default: %d FIL)", DefaultMaxRetrievePrice),
+ Usage: fmt.Sprintf("maximum price the client is willing to consider (default: %s FIL)", DefaultMaxRetrievePrice),
},
&cli.StringFlag{
Name: "pieceCid",
Usage: "require data to be retrieved from a specific Piece CID",
},
+ &cli.BoolFlag{
+ Name: "allow-local",
+ },
},
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 2 {
@@ -999,18 +1085,6 @@ var clientRetrieveCmd = &cli.Command{
return err
}
- // Check if we already have this data locally
-
- /*has, err := api.ClientHasLocal(ctx, file)
- if err != nil {
- return err
- }
-
- if has {
- fmt.Println("Success: Already in local storage")
- return nil
- }*/ // TODO: fix
-
var pieceCid *cid.Cid
if cctx.String("pieceCid") != "" {
parsed, err := cid.Parse(cctx.String("pieceCid"))
@@ -1020,73 +1094,99 @@ var clientRetrieveCmd = &cli.Command{
pieceCid = &parsed
}
- var offer api.QueryOffer
- minerStrAddr := cctx.String("miner")
- if minerStrAddr == "" { // Local discovery
- offers, err := fapi.ClientFindData(ctx, file, pieceCid)
+ var order *lapi.RetrievalOrder
+ if cctx.Bool("allow-local") {
+ imports, err := fapi.ClientListImports(ctx)
+ if err != nil {
+ return err
+ }
+
+ for _, i := range imports {
+ if i.Root != nil && i.Root.Equals(file) {
+ order = &lapi.RetrievalOrder{
+ Root: file,
+ LocalStore: &i.Key,
- var cleaned []api.QueryOffer
- // filter out offers that errored
- for _, o := range offers {
- if o.Err == "" {
- cleaned = append(cleaned, o)
+ Total: big.Zero(),
+ UnsealPrice: big.Zero(),
+ }
+ break
}
}
+ }
- offers = cleaned
+ if order == nil {
+ var offer api.QueryOffer
+ minerStrAddr := cctx.String("miner")
+ if minerStrAddr == "" { // Local discovery
+ offers, err := fapi.ClientFindData(ctx, file, pieceCid)
- // sort by price low to high
- sort.Slice(offers, func(i, j int) bool {
- return offers[i].MinPrice.LessThan(offers[j].MinPrice)
- })
- if err != nil {
- return err
- }
+ var cleaned []api.QueryOffer
+ // filter out offers that errored
+ for _, o := range offers {
+ if o.Err == "" {
+ cleaned = append(cleaned, o)
+ }
+ }
- // TODO: parse offer strings from `client find`, make this smarter
- if len(offers) < 1 {
- fmt.Println("Failed to find file")
- return nil
- }
- offer = offers[0]
- } else { // Directed retrieval
- minerAddr, err := address.NewFromString(minerStrAddr)
- if err != nil {
- return err
+ offers = cleaned
+
+ // sort by price low to high
+ sort.Slice(offers, func(i, j int) bool {
+ return offers[i].MinPrice.LessThan(offers[j].MinPrice)
+ })
+ if err != nil {
+ return err
+ }
+
+ // TODO: parse offer strings from `client find`, make this smarter
+ if len(offers) < 1 {
+ fmt.Println("Failed to find file")
+ return nil
+ }
+ offer = offers[0]
+ } else { // Directed retrieval
+ minerAddr, err := address.NewFromString(minerStrAddr)
+ if err != nil {
+ return err
+ }
+ offer, err = fapi.ClientMinerQueryOffer(ctx, minerAddr, file, pieceCid)
+ if err != nil {
+ return err
+ }
}
- offer, err = fapi.ClientMinerQueryOffer(ctx, minerAddr, file, pieceCid)
- if err != nil {
- return err
+ if offer.Err != "" {
+ return fmt.Errorf("The received offer errored: %s", offer.Err)
}
- }
- if offer.Err != "" {
- return fmt.Errorf("The received offer errored: %s", offer.Err)
- }
- maxPrice := types.FromFil(DefaultMaxRetrievePrice)
+ maxPrice := types.MustParseFIL(DefaultMaxRetrievePrice)
- if cctx.String("maxPrice") != "" {
- maxPriceFil, err := types.ParseFIL(cctx.String("maxPrice"))
- if err != nil {
- return xerrors.Errorf("parsing maxPrice: %w", err)
+ if cctx.String("maxPrice") != "" {
+ maxPrice, err = types.ParseFIL(cctx.String("maxPrice"))
+ if err != nil {
+ return xerrors.Errorf("parsing maxPrice: %w", err)
+ }
}
- maxPrice = types.BigInt(maxPriceFil)
- }
+ if offer.MinPrice.GreaterThan(big.Int(maxPrice)) {
+ return xerrors.Errorf("failed to find offer satisfying maxPrice: %s", maxPrice)
+ }
- if offer.MinPrice.GreaterThan(maxPrice) {
- return xerrors.Errorf("failed to find offer satisfying maxPrice: %s", maxPrice)
+ o := offer.Order(payer)
+ order = &o
}
-
ref := &lapi.FileRef{
Path: cctx.Args().Get(1),
IsCAR: cctx.Bool("car"),
}
- updates, err := fapi.ClientRetrieveWithEvents(ctx, offer.Order(payer), ref)
+
+ updates, err := fapi.ClientRetrieveWithEvents(ctx, *order, ref)
if err != nil {
return xerrors.Errorf("error setting up retrieval: %w", err)
}
+ var prevStatus retrievalmarket.DealStatus
+
for {
select {
case evt, ok := <-updates:
@@ -1097,14 +1197,23 @@ var clientRetrieveCmd = &cli.Command{
retrievalmarket.ClientEvents[evt.Event],
retrievalmarket.DealStatuses[evt.Status],
)
- } else {
- afmt.Println("Success")
- return nil
+ prevStatus = evt.Status
}
if evt.Err != "" {
return xerrors.Errorf("retrieval failed: %s", evt.Err)
}
+
+ if !ok {
+ if prevStatus == retrievalmarket.DealStatusCompleted {
+ afmt.Println("Success")
+ } else {
+ afmt.Printf("saw final deal state %s instead of expected success state DealStatusCompleted\n",
+ retrievalmarket.DealStatuses[prevStatus])
+ }
+ return nil
+ }
+
case <-ctx.Done():
return xerrors.Errorf("retrieval timed out")
}
@@ -1112,9 +1221,309 @@ var clientRetrieveCmd = &cli.Command{
},
}
+var clientListRetrievalsCmd = &cli.Command{
+ Name: "list-retrievals",
+ Usage: "List retrieval market deals",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "verbose",
+ Aliases: []string{"v"},
+ Usage: "print verbose deal details",
+ },
+ &cli.BoolFlag{
+ Name: "color",
+ Usage: "use color in display output",
+ DefaultText: "depends on output being a TTY",
+ },
+ &cli.BoolFlag{
+ Name: "show-failed",
+ Usage: "show failed/failing deals",
+ Value: true,
+ },
+ &cli.BoolFlag{
+ Name: "completed",
+ Usage: "show completed retrievals",
+ },
+ &cli.BoolFlag{
+ Name: "watch",
+ Usage: "watch deal updates in real-time, rather than a one time list",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.IsSet("color") {
+ color.NoColor = !cctx.Bool("color")
+ }
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ verbose := cctx.Bool("verbose")
+ watch := cctx.Bool("watch")
+ showFailed := cctx.Bool("show-failed")
+ completed := cctx.Bool("completed")
+
+ localDeals, err := api.ClientListRetrievals(ctx)
+ if err != nil {
+ return err
+ }
+
+ if watch {
+ updates, err := api.ClientGetRetrievalUpdates(ctx)
+ if err != nil {
+ return err
+ }
+
+ for {
+ tm.Clear()
+ tm.MoveCursor(1, 1)
+
+ err = outputRetrievalDeals(ctx, tm.Screen, localDeals, verbose, showFailed, completed)
+ if err != nil {
+ return err
+ }
+
+ tm.Flush()
+
+ select {
+ case <-ctx.Done():
+ return nil
+ case updated := <-updates:
+ var found bool
+ for i, existing := range localDeals {
+ if existing.ID == updated.ID {
+ localDeals[i] = updated
+ found = true
+ break
+ }
+ }
+ if !found {
+ localDeals = append(localDeals, updated)
+ }
+ }
+ }
+ }
+
+ return outputRetrievalDeals(ctx, cctx.App.Writer, localDeals, verbose, showFailed, completed)
+ },
+}
+
+func isTerminalError(status retrievalmarket.DealStatus) bool {
+ // should patch this in go-fil-markets but to solve the problem immediate and not have buggy output
+ return retrievalmarket.IsTerminalError(status) || status == retrievalmarket.DealStatusErrored || status == retrievalmarket.DealStatusCancelled
+}
+func outputRetrievalDeals(ctx context.Context, out io.Writer, localDeals []lapi.RetrievalInfo, verbose bool, showFailed bool, completed bool) error {
+ var deals []api.RetrievalInfo
+ for _, deal := range localDeals {
+ if !showFailed && isTerminalError(deal.Status) {
+ continue
+ }
+ if !completed && retrievalmarket.IsTerminalSuccess(deal.Status) {
+ continue
+ }
+ deals = append(deals, deal)
+ }
+
+ tableColumns := []tablewriter.Column{
+ tablewriter.Col("PayloadCID"),
+ tablewriter.Col("DealId"),
+ tablewriter.Col("Provider"),
+ tablewriter.Col("Status"),
+ tablewriter.Col("PricePerByte"),
+ tablewriter.Col("Received"),
+ tablewriter.Col("TotalPaid"),
+ }
+
+ if verbose {
+ tableColumns = append(tableColumns,
+ tablewriter.Col("PieceCID"),
+ tablewriter.Col("UnsealPrice"),
+ tablewriter.Col("BytesPaidFor"),
+ tablewriter.Col("TransferChannelID"),
+ tablewriter.Col("TransferStatus"),
+ )
+ }
+ tableColumns = append(tableColumns, tablewriter.NewLineCol("Message"))
+
+ w := tablewriter.New(tableColumns...)
+
+ for _, d := range deals {
+ w.Write(toRetrievalOutput(d, verbose))
+ }
+
+ return w.Flush(out)
+}
+
+func toRetrievalOutput(d api.RetrievalInfo, verbose bool) map[string]interface{} {
+
+ payloadCID := d.PayloadCID.String()
+ provider := d.Provider.String()
+ if !verbose {
+ payloadCID = ellipsis(payloadCID, 8)
+ provider = ellipsis(provider, 8)
+ }
+
+ retrievalOutput := map[string]interface{}{
+ "PayloadCID": payloadCID,
+ "DealId": d.ID,
+ "Provider": provider,
+ "Status": retrievalStatusString(d.Status),
+ "PricePerByte": types.FIL(d.PricePerByte),
+ "Received": units.BytesSize(float64(d.BytesReceived)),
+ "TotalPaid": types.FIL(d.TotalPaid),
+ "Message": d.Message,
+ }
+
+ if verbose {
+ transferChannelID := ""
+ if d.TransferChannelID != nil {
+ transferChannelID = d.TransferChannelID.String()
+ }
+ transferStatus := ""
+ if d.DataTransfer != nil {
+ transferStatus = datatransfer.Statuses[d.DataTransfer.Status]
+ }
+ pieceCID := ""
+ if d.PieceCID != nil {
+ pieceCID = d.PieceCID.String()
+ }
+
+ retrievalOutput["PieceCID"] = pieceCID
+ retrievalOutput["UnsealPrice"] = types.FIL(d.UnsealPrice)
+ retrievalOutput["BytesPaidFor"] = units.BytesSize(float64(d.BytesPaidFor))
+ retrievalOutput["TransferChannelID"] = transferChannelID
+ retrievalOutput["TransferStatus"] = transferStatus
+ }
+ return retrievalOutput
+}
+
+func retrievalStatusString(status retrievalmarket.DealStatus) string {
+ s := retrievalmarket.DealStatuses[status]
+
+ switch {
+ case isTerminalError(status):
+ return color.RedString(s)
+ case retrievalmarket.IsTerminalSuccess(status):
+ return color.GreenString(s)
+ default:
+ return s
+ }
+}
+
+var clientInspectDealCmd = &cli.Command{
+ Name: "inspect-deal",
+ Usage: "Inspect detailed information about deal's lifecycle and the various stages it goes through",
+ Flags: []cli.Flag{
+ &cli.IntFlag{
+ Name: "deal-id",
+ },
+ &cli.StringFlag{
+ Name: "proposal-cid",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := ReqContext(cctx)
+ return inspectDealCmd(ctx, api, cctx.String("proposal-cid"), cctx.Int("deal-id"))
+ },
+}
+
+var clientDealStatsCmd = &cli.Command{
+ Name: "deal-stats",
+ Usage: "Print statistics about local storage deals",
+ Flags: []cli.Flag{
+ &cli.DurationFlag{
+ Name: "newer-than",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ localDeals, err := api.ClientListDeals(ctx)
+ if err != nil {
+ return err
+ }
+
+ var totalSize uint64
+ byState := map[storagemarket.StorageDealStatus][]uint64{}
+ for _, deal := range localDeals {
+ if cctx.IsSet("newer-than") {
+ if time.Now().Sub(deal.CreationTime) > cctx.Duration("newer-than") {
+ continue
+ }
+ }
+
+ totalSize += deal.Size
+ byState[deal.State] = append(byState[deal.State], deal.Size)
+ }
+
+ fmt.Printf("Total: %d deals, %s\n", len(localDeals), types.SizeStr(types.NewInt(totalSize)))
+
+ type stateStat struct {
+ state storagemarket.StorageDealStatus
+ count int
+ bytes uint64
+ }
+
+ stateStats := make([]stateStat, 0, len(byState))
+ for state, deals := range byState {
+ if state == storagemarket.StorageDealActive {
+ state = math.MaxUint64 // for sort
+ }
+
+ st := stateStat{
+ state: state,
+ count: len(deals),
+ }
+ for _, b := range deals {
+ st.bytes += b
+ }
+
+ stateStats = append(stateStats, st)
+ }
+
+ sort.Slice(stateStats, func(i, j int) bool {
+ return int64(stateStats[i].state) < int64(stateStats[j].state)
+ })
+
+ for _, st := range stateStats {
+ if st.state == math.MaxUint64 {
+ st.state = storagemarket.StorageDealActive
+ }
+ fmt.Printf("%s: %d deals, %s\n", storagemarket.DealStates[st.state], st.count, types.SizeStr(types.NewInt(st.bytes)))
+ }
+
+ return nil
+ },
+}
+
var clientListAsksCmd = &cli.Command{
Name: "list-asks",
Usage: "List asks for top miners",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "by-ping",
+ Usage: "sort by ping",
+ },
+ &cli.StringFlag{
+ Name: "output-format",
+ Value: "text",
+ Usage: "Either 'text' or 'csv'",
+ },
+ },
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
@@ -1123,17 +1532,31 @@ var clientListAsksCmd = &cli.Command{
defer closer()
ctx := ReqContext(cctx)
- asks, err := getAsks(ctx, api)
+ asks, err := GetAsks(ctx, api)
if err != nil {
return err
}
- for _, ask := range asks {
- fmt.Printf("%s: min:%s max:%s price:%s/GiB/Epoch verifiedPrice:%s/GiB/Epoch\n", ask.Miner,
+ if cctx.Bool("by-ping") {
+ sort.Slice(asks, func(i, j int) bool {
+ return asks[i].Ping < asks[j].Ping
+ })
+ }
+ pfmt := "%s: min:%s max:%s price:%s/GiB/Epoch verifiedPrice:%s/GiB/Epoch ping:%s\n"
+ if cctx.String("output-format") == "csv" {
+ fmt.Printf("Miner,Min,Max,Price,VerifiedPrice,Ping\n")
+ pfmt = "%s,%s,%s,%s,%s,%s\n"
+ }
+
+ for _, a := range asks {
+ ask := a.Ask
+
+ fmt.Printf(pfmt, ask.Miner,
types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))),
types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))),
types.FIL(ask.Price),
types.FIL(ask.VerifiedPrice),
+ a.Ping,
)
}
@@ -1141,8 +1564,19 @@ var clientListAsksCmd = &cli.Command{
},
}
-func getAsks(ctx context.Context, api lapi.FullNode) ([]*storagemarket.StorageAsk, error) {
- color.Blue(".. getting miner list")
+type QueriedAsk struct {
+ Ask *storagemarket.StorageAsk
+ Ping time.Duration
+}
+
+func GetAsks(ctx context.Context, api v0api.FullNode) ([]QueriedAsk, error) {
+ isTTY := true
+ if fileInfo, _ := os.Stdout.Stat(); (fileInfo.Mode() & os.ModeCharDevice) == 0 {
+ isTTY = false
+ }
+ if isTTY {
+ color.Blue(".. getting miner list")
+ }
miners, err := api.StateListMiners(ctx, types.EmptyTSK)
if err != nil {
return nil, xerrors.Errorf("getting miner list: %w", err)
@@ -1187,16 +1621,20 @@ loop:
for {
select {
case <-time.After(150 * time.Millisecond):
- fmt.Printf("\r* Found %d miners with power", atomic.LoadInt64(&found))
+ if isTTY {
+ fmt.Printf("\r* Found %d miners with power", atomic.LoadInt64(&found))
+ }
case <-done:
break loop
}
}
- fmt.Printf("\r* Found %d miners with power\n", atomic.LoadInt64(&found))
+ if isTTY {
+ fmt.Printf("\r* Found %d miners with power\n", atomic.LoadInt64(&found))
- color.Blue(".. querying asks")
+ color.Blue(".. querying asks")
+ }
- var asks []*storagemarket.StorageAsk
+ var asks []QueriedAsk
var queried, got int64
done = make(chan struct{})
@@ -1232,9 +1670,19 @@ loop:
return
}
+ rt := time.Now()
+ _, err = api.ClientQueryAsk(ctx, *mi.PeerId, miner)
+ if err != nil {
+ return
+ }
+ pingDuration := time.Now().Sub(rt)
+
atomic.AddInt64(&got, 1)
lk.Lock()
- asks = append(asks, ask)
+ asks = append(asks, QueriedAsk{
+ Ask: ask,
+ Ping: pingDuration,
+ })
lk.Unlock()
}(miner)
}
@@ -1244,15 +1692,19 @@ loop2:
for {
select {
case <-time.After(150 * time.Millisecond):
- fmt.Printf("\r* Queried %d asks, got %d responses", atomic.LoadInt64(&queried), atomic.LoadInt64(&got))
+ if isTTY {
+ fmt.Printf("\r* Queried %d asks, got %d responses", atomic.LoadInt64(&queried), atomic.LoadInt64(&got))
+ }
case <-done:
break loop2
}
}
- fmt.Printf("\r* Queried %d asks, got %d responses\n", atomic.LoadInt64(&queried), atomic.LoadInt64(&got))
+ if isTTY {
+ fmt.Printf("\r* Queried %d asks, got %d responses\n", atomic.LoadInt64(&queried), atomic.LoadInt64(&got))
+ }
sort.Slice(asks, func(i, j int) bool {
- return asks[i].Price.LessThan(asks[j].Price)
+ return asks[i].Ask.Price.LessThan(asks[j].Ask.Price)
})
return asks, nil
@@ -1308,7 +1760,7 @@ var clientQueryAskCmd = &cli.Command{
return xerrors.Errorf("failed to get peerID for miner: %w", err)
}
- if *mi.PeerId == peer.ID("SETME") {
+ if mi.PeerId == nil || *mi.PeerId == peer.ID("SETME") {
return fmt.Errorf("the miner hasn't initialized yet")
}
@@ -1324,6 +1776,7 @@ var clientQueryAskCmd = &cli.Command{
afmt.Printf("Price per GiB: %s\n", types.FIL(ask.Price))
afmt.Printf("Verified Price per GiB: %s\n", types.FIL(ask.VerifiedPrice))
afmt.Printf("Max Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))))
+ afmt.Printf("Min Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))))
size := cctx.Int64("size")
if size == 0 {
@@ -1352,9 +1805,9 @@ var clientListDeals = &cli.Command{
Usage: "print verbose deal details",
},
&cli.BoolFlag{
- Name: "color",
- Usage: "use color in display output",
- Value: true,
+ Name: "color",
+ Usage: "use color in display output",
+ DefaultText: "depends on output being a TTY",
},
&cli.BoolFlag{
Name: "show-failed",
@@ -1366,6 +1819,10 @@ var clientListDeals = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
+ if cctx.IsSet("color") {
+ color.NoColor = !cctx.Bool("color")
+ }
+
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
@@ -1374,7 +1831,6 @@ var clientListDeals = &cli.Command{
ctx := ReqContext(cctx)
verbose := cctx.Bool("verbose")
- color := cctx.Bool("color")
watch := cctx.Bool("watch")
showFailed := cctx.Bool("show-failed")
@@ -1393,7 +1849,7 @@ var clientListDeals = &cli.Command{
tm.Clear()
tm.MoveCursor(1, 1)
- err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, color, showFailed)
+ err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, showFailed)
if err != nil {
return err
}
@@ -1419,11 +1875,11 @@ var clientListDeals = &cli.Command{
}
}
- return outputStorageDeals(ctx, cctx.App.Writer, api, localDeals, cctx.Bool("verbose"), cctx.Bool("color"), showFailed)
+ return outputStorageDeals(ctx, cctx.App.Writer, api, localDeals, verbose, showFailed)
},
}
-func dealFromDealInfo(ctx context.Context, full api.FullNode, head *types.TipSet, v api.DealInfo) deal {
+func dealFromDealInfo(ctx context.Context, full v0api.FullNode, head *types.TipSet, v api.DealInfo) deal {
if v.DealID == 0 {
return deal{
LocalDeal: v,
@@ -1442,7 +1898,7 @@ func dealFromDealInfo(ctx context.Context, full api.FullNode, head *types.TipSet
}
}
-func outputStorageDeals(ctx context.Context, out io.Writer, full lapi.FullNode, localDeals []lapi.DealInfo, verbose bool, color bool, showFailed bool) error {
+func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, localDeals []lapi.DealInfo, verbose bool, showFailed bool) error {
sort.Slice(localDeals, func(i, j int) bool {
return localDeals[i].CreationTime.Before(localDeals[j].CreationTime)
})
@@ -1461,7 +1917,7 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full lapi.FullNode,
if verbose {
w := tabwriter.NewWriter(out, 2, 4, 2, ' ', 0)
- fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tVerified\tMessage\n")
+ fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tTransferChannelID\tTransferStatus\tVerified\tMessage\n")
for _, d := range deals {
onChain := "N"
if d.OnChainDealState.SectorStartEpoch != -1 {
@@ -1474,7 +1930,37 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full lapi.FullNode,
}
price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration)))
- fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%v\t%s\n", d.LocalDeal.CreationTime.Format(time.Stamp), d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, dealStateString(color, d.LocalDeal.State), onChain, slashed, d.LocalDeal.PieceCID, types.SizeStr(types.NewInt(d.LocalDeal.Size)), price, d.LocalDeal.Duration, d.LocalDeal.Verified, d.LocalDeal.Message)
+ transferChannelID := ""
+ if d.LocalDeal.TransferChannelID != nil {
+ transferChannelID = d.LocalDeal.TransferChannelID.String()
+ }
+ transferStatus := ""
+ if d.LocalDeal.DataTransfer != nil {
+ transferStatus = datatransfer.Statuses[d.LocalDeal.DataTransfer.Status]
+ // TODO: Include the transferred percentage once this bug is fixed:
+ // https://github.com/ipfs/go-graphsync/issues/126
+ //fmt.Printf("transferred: %d / size: %d\n", d.LocalDeal.DataTransfer.Transferred, d.LocalDeal.Size)
+ //if d.LocalDeal.Size > 0 {
+ // pct := (100 * d.LocalDeal.DataTransfer.Transferred) / d.LocalDeal.Size
+ // transferPct = fmt.Sprintf("%d%%", pct)
+ //}
+ }
+ fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%v\t%s\n",
+ d.LocalDeal.CreationTime.Format(time.Stamp),
+ d.LocalDeal.ProposalCid,
+ d.LocalDeal.DealID,
+ d.LocalDeal.Provider,
+ dealStateString(d.LocalDeal.State),
+ onChain,
+ slashed,
+ d.LocalDeal.PieceCID,
+ types.SizeStr(types.NewInt(d.LocalDeal.Size)),
+ price,
+ d.LocalDeal.Duration,
+ transferChannelID,
+ transferStatus,
+ d.LocalDeal.Verified,
+ d.LocalDeal.Message)
}
return w.Flush()
}
@@ -1513,7 +1999,7 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full lapi.FullNode,
"DealCid": propcid,
"DealId": d.LocalDeal.DealID,
"Provider": d.LocalDeal.Provider,
- "State": dealStateString(color, d.LocalDeal.State),
+ "State": dealStateString(d.LocalDeal.State),
"On Chain?": onChain,
"Slashed?": slashed,
"PieceCID": piece,
@@ -1528,12 +2014,8 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full lapi.FullNode,
return w.Flush(out)
}
-func dealStateString(c bool, state storagemarket.StorageDealStatus) string {
+func dealStateString(state storagemarket.StorageDealStatus) string {
s := storagemarket.DealStates[state]
- if !c {
- return s
- }
-
switch state {
case storagemarket.StorageDealError, storagemarket.StorageDealExpired:
return color.RedString(s)
@@ -1596,9 +2078,9 @@ var clientGetDealCmd = &cli.Command{
},
}
-var clientInfoCmd = &cli.Command{
- Name: "info",
- Usage: "Print storage market client information",
+var clientBalancesCmd = &cli.Command{
+ Name: "balances",
+ Usage: "Print storage market client balances",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "client",
@@ -1615,7 +2097,7 @@ var clientInfoCmd = &cli.Command{
var addr address.Address
if clientFlag := cctx.String("client"); clientFlag != "" {
- ca, err := address.NewFromString("client")
+ ca, err := address.NewFromString(clientFlag)
if err != nil {
return err
}
@@ -1634,10 +2116,22 @@ var clientInfoCmd = &cli.Command{
return err
}
- fmt.Printf("Client Market Info:\n")
+ reserved, err := api.MarketGetReserved(ctx, addr)
+ if err != nil {
+ return err
+ }
+
+ avail := big.Sub(big.Sub(balance.Escrow, balance.Locked), reserved)
+ if avail.LessThan(big.Zero()) {
+ avail = big.Zero()
+ }
- fmt.Printf("Locked Funds:\t%s\n", types.FIL(balance.Locked))
- fmt.Printf("Escrowed Funds:\t%s\n", types.FIL(balance.Escrow))
+ fmt.Printf("Client Market Balance for address %s:\n", addr)
+
+ fmt.Printf(" Escrowed Funds: %s\n", types.FIL(balance.Escrow))
+ fmt.Printf(" Locked Funds: %s\n", types.FIL(balance.Locked))
+ fmt.Printf(" Reserved Funds: %s\n", types.FIL(reserved))
+ fmt.Printf(" Available to Withdraw: %s\n", types.FIL(avail))
return nil
},
@@ -1749,6 +2243,11 @@ var clientCancelTransfer = &cli.Command{
Usage: "specify only transfers where peer is/is not initiator",
Value: true,
},
+ &cli.DurationFlag{
+ Name: "cancel-timeout",
+ Usage: "time to wait for cancel to be sent to storage provider",
+ Value: 5 * time.Second,
+ },
},
Action: func(cctx *cli.Context) error {
if !cctx.Args().Present() {
@@ -1792,7 +2291,36 @@ var clientCancelTransfer = &cli.Command{
}
}
- return api.ClientCancelDataTransfer(ctx, transferID, other, initiator)
+ timeoutCtx, cancel := context.WithTimeout(ctx, cctx.Duration("cancel-timeout"))
+ defer cancel()
+ return api.ClientCancelDataTransfer(timeoutCtx, transferID, other, initiator)
+ },
+}
+
+var clientCancelRetrievalDealCmd = &cli.Command{
+ Name: "cancel-retrieval",
+ Usage: "Cancel a retrieval deal by deal ID; this also cancels the associated transfer",
+ Flags: []cli.Flag{
+ &cli.Int64Flag{
+ Name: "deal-id",
+ Usage: "specify retrieval deal by deal ID",
+ Required: true,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ id := cctx.Int64("deal-id")
+ if id < 0 {
+ return errors.New("deal id cannot be negative")
+ }
+
+ return api.ClientCancelRetrievalDeal(ctx, retrievalmarket.DealID(id))
},
}
@@ -1801,9 +2329,14 @@ var clientListTransfers = &cli.Command{
Usage: "List ongoing data transfers for deals",
Flags: []cli.Flag{
&cli.BoolFlag{
- Name: "color",
- Usage: "use color in display output",
- Value: true,
+ Name: "verbose",
+ Aliases: []string{"v"},
+ Usage: "print verbose transfer details",
+ },
+ &cli.BoolFlag{
+ Name: "color",
+ Usage: "use color in display output",
+ DefaultText: "depends on output being a TTY",
},
&cli.BoolFlag{
Name: "completed",
@@ -1819,6 +2352,10 @@ var clientListTransfers = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
+ if cctx.IsSet("color") {
+ color.NoColor = !cctx.Bool("color")
+ }
+
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
@@ -1831,8 +2368,8 @@ var clientListTransfers = &cli.Command{
return err
}
+ verbose := cctx.Bool("verbose")
completed := cctx.Bool("completed")
- color := cctx.Bool("color")
watch := cctx.Bool("watch")
showFailed := cctx.Bool("show-failed")
if watch {
@@ -1846,7 +2383,7 @@ var clientListTransfers = &cli.Command{
tm.MoveCursor(1, 1)
- OutputDataTransferChannels(tm.Screen, channels, completed, color, showFailed)
+ OutputDataTransferChannels(tm.Screen, channels, verbose, completed, showFailed)
tm.Flush()
@@ -1871,13 +2408,13 @@ var clientListTransfers = &cli.Command{
}
}
}
- OutputDataTransferChannels(os.Stdout, channels, completed, color, showFailed)
+ OutputDataTransferChannels(os.Stdout, channels, verbose, completed, showFailed)
return nil
},
}
// OutputDataTransferChannels generates table output for a list of channels
-func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, completed bool, color bool, showFailed bool) {
+func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, verbose, completed, showFailed bool) {
sort.Slice(channels, func(i, j int) bool {
return channels[i].TransferID < channels[j].TransferID
})
@@ -1907,7 +2444,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
tablewriter.Col("Voucher"),
tablewriter.NewLineCol("Message"))
for _, channel := range sendingChannels {
- w.Write(toChannelOutput(color, "Sending To", channel))
+ w.Write(toChannelOutput("Sending To", channel, verbose))
}
w.Flush(out) //nolint:errcheck
@@ -1921,17 +2458,13 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
tablewriter.Col("Voucher"),
tablewriter.NewLineCol("Message"))
for _, channel := range receivingChannels {
- w.Write(toChannelOutput(color, "Receiving From", channel))
+ w.Write(toChannelOutput("Receiving From", channel, verbose))
}
w.Flush(out) //nolint:errcheck
}
-func channelStatusString(useColor bool, status datatransfer.Status) string {
+func channelStatusString(status datatransfer.Status) string {
s := datatransfer.Statuses[status]
- if !useColor {
- return s
- }
-
switch status {
case datatransfer.Failed, datatransfer.Cancelled:
return color.RedString(s)
@@ -1942,9 +2475,13 @@ func channelStatusString(useColor bool, status datatransfer.Status) string {
}
}
-func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTransferChannel) map[string]interface{} {
- rootCid := ellipsis(channel.BaseCID.String(), 8)
- otherParty := ellipsis(channel.OtherPeer.String(), 8)
+func toChannelOutput(otherPartyColumn string, channel lapi.DataTransferChannel, verbose bool) map[string]interface{} {
+ rootCid := channel.BaseCID.String()
+ otherParty := channel.OtherPeer.String()
+ if !verbose {
+ rootCid = ellipsis(rootCid, 8)
+ otherParty = ellipsis(otherParty, 8)
+ }
initiated := "N"
if channel.IsInitiator {
@@ -1952,13 +2489,13 @@ func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTr
}
voucher := channel.Voucher
- if len(voucher) > 40 {
+ if len(voucher) > 40 && !verbose {
voucher = ellipsis(voucher, 37)
}
return map[string]interface{}{
"ID": channel.TransferID,
- "Status": channelStatusString(useColor, channel.Status),
+ "Status": channelStatusString(channel.Status),
otherPartyColumn: otherParty,
"Root Cid": rootCid,
"Initiated?": initiated,
@@ -1974,3 +2511,77 @@ func ellipsis(s string, length int) string {
}
return s
}
+
+func inspectDealCmd(ctx context.Context, api v0api.FullNode, proposalCid string, dealId int) error {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ deals, err := api.ClientListDeals(ctx)
+ if err != nil {
+ return err
+ }
+
+ var di *lapi.DealInfo
+ for i, cdi := range deals {
+ if proposalCid != "" && cdi.ProposalCid.String() == proposalCid {
+ di = &deals[i]
+ break
+ }
+
+ if dealId != 0 && int(cdi.DealID) == dealId {
+ di = &deals[i]
+ break
+ }
+ }
+
+ if di == nil {
+ if proposalCid != "" {
+ return fmt.Errorf("cannot find deal with proposal cid: %s", proposalCid)
+ }
+ if dealId != 0 {
+ return fmt.Errorf("cannot find deal with deal id: %v", dealId)
+ }
+ return errors.New("you must specify proposal cid or deal id in order to inspect a deal")
+ }
+
+ // populate DealInfo.DealStages and DataTransfer.Stages
+ di, err = api.ClientGetDealInfo(ctx, di.ProposalCid)
+ if err != nil {
+ return fmt.Errorf("cannot get deal info for proposal cid: %v", di.ProposalCid)
+ }
+
+ renderDeal(di)
+
+ return nil
+}
+
+func renderDeal(di *lapi.DealInfo) {
+ color.Blue("Deal ID: %d\n", int(di.DealID))
+ color.Blue("Proposal CID: %s\n\n", di.ProposalCid.String())
+
+ if di.DealStages == nil {
+ color.Yellow("Deal was made with an older version of Lotus and Lotus did not collect detailed information about its stages")
+ return
+ }
+
+ for _, stg := range di.DealStages.Stages {
+ msg := fmt.Sprintf("%s %s: %s (expected duration: %s)", color.BlueString("Stage:"), color.BlueString(strings.TrimPrefix(stg.Name, "StorageDeal")), stg.Description, color.GreenString(stg.ExpectedDuration))
+ if stg.UpdatedTime.Time().IsZero() {
+ msg = color.YellowString(msg)
+ }
+ fmt.Println(msg)
+
+ for _, l := range stg.Logs {
+ fmt.Printf(" %s %s\n", color.YellowString(l.UpdatedTime.Time().UTC().Round(time.Second).Format(time.Stamp)), l.Log)
+ }
+
+ if stg.Name == "StorageDealStartDataTransfer" {
+ for _, dtStg := range di.DataTransfer.Stages.Stages {
+ fmt.Printf(" %s %s %s\n", color.YellowString(dtStg.CreatedTime.Time().UTC().Round(time.Second).Format(time.Stamp)), color.BlueString("Data transfer stage:"), color.BlueString(dtStg.Name))
+ for _, l := range dtStg.Logs {
+ fmt.Printf(" %s %s\n", color.YellowString(l.UpdatedTime.Time().UTC().Round(time.Second).Format(time.Stamp)), l.Log)
+ }
+ }
+ }
+ }
+}
diff --git a/cli/client_test.go b/cli/client_test.go
deleted file mode 100644
index f0e8efda846..00000000000
--- a/cli/client_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package cli
-
-import (
- "context"
- "os"
- "testing"
- "time"
-
- clitest "github.com/filecoin-project/lotus/cli/test"
-)
-
-// TestClient does a basic test to exercise the client CLI
-// commands
-func TestClient(t *testing.T) {
- _ = os.Setenv("BELLMAN_NO_GPU", "1")
- clitest.QuietMiningLogs()
-
- blocktime := 5 * time.Millisecond
- ctx := context.Background()
- clientNode, _ := clitest.StartOneNodeOneMiner(ctx, t, blocktime)
- clitest.RunClientTest(t, Commands, clientNode)
-}
diff --git a/cli/cmd.go b/cli/cmd.go
index 02ef06002af..630aae1bc75 100644
--- a/cli/cmd.go
+++ b/cli/cmd.go
@@ -1,34 +1,17 @@
package cli
import (
- "context"
- "fmt"
- "net/http"
- "net/url"
- "os"
- "os/signal"
"strings"
- "syscall"
logging "github.com/ipfs/go-log/v2"
- "github.com/mitchellh/go-homedir"
"github.com/urfave/cli/v2"
- "golang.org/x/xerrors"
-
- "github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/api/client"
cliutil "github.com/filecoin-project/lotus/cli/util"
- "github.com/filecoin-project/lotus/node/repo"
)
var log = logging.Logger("cli")
-const (
- metadataTraceContext = "traceContext"
-)
-
// custom CLI error
type ErrCmdFailed struct {
@@ -46,261 +29,40 @@ func NewCliError(s string) error {
// ApiConnector returns API instance
type ApiConnector func() api.FullNode
-// The flag passed on the command line with the listen address of the API
-// server (only used by the tests)
-func flagForAPI(t repo.RepoType) string {
- switch t {
- case repo.FullNode:
- return "api-url"
- case repo.StorageMiner:
- return "miner-api-url"
- case repo.Worker:
- return "worker-api-url"
- default:
- panic(fmt.Sprintf("Unknown repo type: %v", t))
- }
-}
-
-func flagForRepo(t repo.RepoType) string {
- switch t {
- case repo.FullNode:
- return "repo"
- case repo.StorageMiner:
- return "miner-repo"
- case repo.Worker:
- return "worker-repo"
- default:
- panic(fmt.Sprintf("Unknown repo type: %v", t))
- }
-}
-
-func envForRepo(t repo.RepoType) string {
- switch t {
- case repo.FullNode:
- return "FULLNODE_API_INFO"
- case repo.StorageMiner:
- return "MINER_API_INFO"
- case repo.Worker:
- return "WORKER_API_INFO"
- default:
- panic(fmt.Sprintf("Unknown repo type: %v", t))
- }
-}
-
-// TODO remove after deprecation period
-func envForRepoDeprecation(t repo.RepoType) string {
- switch t {
- case repo.FullNode:
- return "FULLNODE_API_INFO"
- case repo.StorageMiner:
- return "STORAGE_API_INFO"
- case repo.Worker:
- return "WORKER_API_INFO"
- default:
- panic(fmt.Sprintf("Unknown repo type: %v", t))
- }
-}
-
-func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (cliutil.APIInfo, error) {
- // Check if there was a flag passed with the listen address of the API
- // server (only used by the tests)
- apiFlag := flagForAPI(t)
- if ctx.IsSet(apiFlag) {
- strma := ctx.String(apiFlag)
- strma = strings.TrimSpace(strma)
-
- return cliutil.APIInfo{Addr: strma}, nil
- }
-
- envKey := envForRepo(t)
- env, ok := os.LookupEnv(envKey)
- if !ok {
- // TODO remove after deprecation period
- envKey = envForRepoDeprecation(t)
- env, ok = os.LookupEnv(envKey)
- if ok {
- log.Warnf("Use deprecation env(%s) value, please use env(%s) instead.", envKey, envForRepo(t))
- }
- }
- if ok {
- return cliutil.ParseApiInfo(env), nil
- }
-
- repoFlag := flagForRepo(t)
-
- p, err := homedir.Expand(ctx.String(repoFlag))
- if err != nil {
- return cliutil.APIInfo{}, xerrors.Errorf("could not expand home dir (%s): %w", repoFlag, err)
- }
-
- r, err := repo.NewFS(p)
- if err != nil {
- return cliutil.APIInfo{}, xerrors.Errorf("could not open repo at path: %s; %w", p, err)
- }
-
- ma, err := r.APIEndpoint()
- if err != nil {
- return cliutil.APIInfo{}, xerrors.Errorf("could not get api endpoint: %w", err)
- }
-
- token, err := r.APIToken()
- if err != nil {
- log.Warnf("Couldn't load CLI token, capabilities may be limited: %v", err)
- }
-
- return cliutil.APIInfo{
- Addr: ma.String(),
- Token: token,
- }, nil
-}
-
-func GetRawAPI(ctx *cli.Context, t repo.RepoType) (string, http.Header, error) {
- ainfo, err := GetAPIInfo(ctx, t)
- if err != nil {
- return "", nil, xerrors.Errorf("could not get API info: %w", err)
+func GetFullNodeServices(ctx *cli.Context) (ServicesAPI, error) {
+ if tn, ok := ctx.App.Metadata["test-services"]; ok {
+ return tn.(ServicesAPI), nil
}
- addr, err := ainfo.DialArgs()
+ api, c, err := GetFullNodeAPIV1(ctx)
if err != nil {
- return "", nil, xerrors.Errorf("could not get DialArgs: %w", err)
+ return nil, err
}
- return addr, ainfo.AuthHeader(), nil
+ return &ServicesImpl{api: api, closer: c}, nil
}
-func GetAPI(ctx *cli.Context) (api.Common, jsonrpc.ClientCloser, error) {
- ti, ok := ctx.App.Metadata["repoType"]
- if !ok {
- log.Errorf("unknown repo type, are you sure you want to use GetAPI?")
- ti = repo.FullNode
- }
- t, ok := ti.(repo.RepoType)
- if !ok {
- log.Errorf("repoType type does not match the type of repo.RepoType")
- }
+var GetAPIInfo = cliutil.GetAPIInfo
+var GetRawAPI = cliutil.GetRawAPI
+var GetAPI = cliutil.GetAPI
- if tn, ok := ctx.App.Metadata["testnode-storage"]; ok {
- return tn.(api.StorageMiner), func() {}, nil
- }
- if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
- return tn.(api.FullNode), func() {}, nil
- }
+var DaemonContext = cliutil.DaemonContext
+var ReqContext = cliutil.ReqContext
- addr, headers, err := GetRawAPI(ctx, t)
- if err != nil {
- return nil, nil, err
- }
+var GetFullNodeAPI = cliutil.GetFullNodeAPI
+var GetFullNodeAPIV1 = cliutil.GetFullNodeAPIV1
+var GetGatewayAPI = cliutil.GetGatewayAPI
- return client.NewCommonRPC(ctx.Context, addr, headers)
-}
-
-func GetFullNodeAPI(ctx *cli.Context) (api.FullNode, jsonrpc.ClientCloser, error) {
- if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
- return tn.(api.FullNode), func() {}, nil
- }
-
- addr, headers, err := GetRawAPI(ctx, repo.FullNode)
- if err != nil {
- return nil, nil, err
- }
-
- return client.NewFullNodeRPC(ctx.Context, addr, headers)
-}
-
-type GetStorageMinerOptions struct {
- PreferHttp bool
-}
-
-type GetStorageMinerOption func(*GetStorageMinerOptions)
-
-func StorageMinerUseHttp(opts *GetStorageMinerOptions) {
- opts.PreferHttp = true
-}
-
-func GetStorageMinerAPI(ctx *cli.Context, opts ...GetStorageMinerOption) (api.StorageMiner, jsonrpc.ClientCloser, error) {
- var options GetStorageMinerOptions
- for _, opt := range opts {
- opt(&options)
- }
-
- if tn, ok := ctx.App.Metadata["testnode-storage"]; ok {
- return tn.(api.StorageMiner), func() {}, nil
- }
-
- addr, headers, err := GetRawAPI(ctx, repo.StorageMiner)
- if err != nil {
- return nil, nil, err
- }
-
- if options.PreferHttp {
- u, err := url.Parse(addr)
- if err != nil {
- return nil, nil, xerrors.Errorf("parsing miner api URL: %w", err)
- }
-
- switch u.Scheme {
- case "ws":
- u.Scheme = "http"
- case "wss":
- u.Scheme = "https"
- }
-
- addr = u.String()
- }
-
- return client.NewStorageMinerRPC(ctx.Context, addr, headers)
-}
-
-func GetWorkerAPI(ctx *cli.Context) (api.WorkerAPI, jsonrpc.ClientCloser, error) {
- addr, headers, err := GetRawAPI(ctx, repo.Worker)
- if err != nil {
- return nil, nil, err
- }
-
- return client.NewWorkerRPC(ctx.Context, addr, headers)
-}
-
-func GetGatewayAPI(ctx *cli.Context) (api.GatewayAPI, jsonrpc.ClientCloser, error) {
- addr, headers, err := GetRawAPI(ctx, repo.FullNode)
- if err != nil {
- return nil, nil, err
- }
-
- return client.NewGatewayRPC(ctx.Context, addr, headers)
-}
-
-func DaemonContext(cctx *cli.Context) context.Context {
- if mtCtx, ok := cctx.App.Metadata[metadataTraceContext]; ok {
- return mtCtx.(context.Context)
- }
-
- return context.Background()
-}
-
-// ReqContext returns context for cli execution. Calling it for the first time
-// installs SIGTERM handler that will close returned context.
-// Not safe for concurrent execution.
-func ReqContext(cctx *cli.Context) context.Context {
- tCtx := DaemonContext(cctx)
-
- ctx, done := context.WithCancel(tCtx)
- sigChan := make(chan os.Signal, 2)
- go func() {
- <-sigChan
- done()
- }()
- signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP)
-
- return ctx
-}
+var GetStorageMinerAPI = cliutil.GetStorageMinerAPI
+var GetWorkerAPI = cliutil.GetWorkerAPI
var CommonCommands = []*cli.Command{
- netCmd,
- authCmd,
- logCmd,
- waitApiCmd,
- fetchParamCmd,
- pprofCmd,
+ NetCmd,
+ AuthCmd,
+ LogCmd,
+ WaitApiCmd,
+ FetchParamCmd,
+ PprofCmd,
VersionCmd,
}
@@ -309,21 +71,23 @@ var Commands = []*cli.Command{
WithCategory("basic", walletCmd),
WithCategory("basic", clientCmd),
WithCategory("basic", multisigCmd),
+ WithCategory("basic", filplusCmd),
WithCategory("basic", paychCmd),
- WithCategory("developer", authCmd),
- WithCategory("developer", mpoolCmd),
- WithCategory("developer", stateCmd),
- WithCategory("developer", chainCmd),
- WithCategory("developer", logCmd),
- WithCategory("developer", waitApiCmd),
- WithCategory("developer", fetchParamCmd),
- WithCategory("network", netCmd),
- WithCategory("network", syncCmd),
- pprofCmd,
+ WithCategory("developer", AuthCmd),
+ WithCategory("developer", MpoolCmd),
+ WithCategory("developer", StateCmd),
+ WithCategory("developer", ChainCmd),
+ WithCategory("developer", LogCmd),
+ WithCategory("developer", WaitApiCmd),
+ WithCategory("developer", FetchParamCmd),
+ WithCategory("network", NetCmd),
+ WithCategory("network", SyncCmd),
+ WithCategory("status", StatusCmd),
+ PprofCmd,
VersionCmd,
}
func WithCategory(cat string, cmd *cli.Command) *cli.Command {
- cmd.Category = cat
+ cmd.Category = strings.ToUpper(cat)
return cmd
}
diff --git a/cli/disputer.go b/cli/disputer.go
new file mode 100644
index 00000000000..ceebeb9397b
--- /dev/null
+++ b/cli/disputer.go
@@ -0,0 +1,446 @@
+package cli
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/filecoin-project/go-state-types/abi"
+
+ "github.com/filecoin-project/go-address"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+
+ miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
+
+ "github.com/filecoin-project/go-state-types/big"
+ lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/types"
+ builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
+ "golang.org/x/xerrors"
+
+ logging "github.com/ipfs/go-log/v2"
+
+ "github.com/filecoin-project/lotus/api/v0api"
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/urfave/cli/v2"
+)
+
+var disputeLog = logging.Logger("disputer")
+
+const Confidence = 10
+
+type minerDeadline struct {
+ miner address.Address
+ index uint64
+}
+
+var ChainDisputeSetCmd = &cli.Command{
+ Name: "disputer",
+ Usage: "interact with the window post disputer",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "max-fee",
+ Usage: "Spend up to X FIL per DisputeWindowedPoSt message",
+ },
+ &cli.StringFlag{
+ Name: "from",
+ Usage: "optionally specify the account to send messages from",
+ },
+ },
+ Subcommands: []*cli.Command{
+ disputerStartCmd,
+ disputerMsgCmd,
+ },
+}
+
+var disputerMsgCmd = &cli.Command{
+ Name: "dispute",
+ Usage: "Send a specific DisputeWindowedPoSt message",
+ ArgsUsage: "[minerAddress index postIndex]",
+ Flags: []cli.Flag{},
+ Action: func(cctx *cli.Context) error {
+ if cctx.NArg() != 3 {
+ fmt.Println("Usage: dispute [minerAddress index postIndex]")
+ return nil
+ }
+
+ ctx := ReqContext(cctx)
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ toa, err := address.NewFromString(cctx.Args().First())
+ if err != nil {
+ return fmt.Errorf("given 'miner' address %q was invalid: %w", cctx.Args().First(), err)
+ }
+
+ deadline, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ postIndex, err := strconv.ParseUint(cctx.Args().Get(2), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ fromAddr, err := getSender(ctx, api, cctx.String("from"))
+ if err != nil {
+ return err
+ }
+
+ dpp, aerr := actors.SerializeParams(&miner3.DisputeWindowedPoStParams{
+ Deadline: deadline,
+ PoStIndex: postIndex,
+ })
+
+ if aerr != nil {
+ return xerrors.Errorf("failed to serailize params: %w", aerr)
+ }
+
+ dmsg := &types.Message{
+ To: toa,
+ From: fromAddr,
+ Value: big.Zero(),
+ Method: builtin3.MethodsMiner.DisputeWindowedPoSt,
+ Params: dpp,
+ }
+
+ rslt, err := api.StateCall(ctx, dmsg, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("failed to simulate dispute: %w", err)
+ }
+
+ if rslt.MsgRct.ExitCode == 0 {
+ mss, err := getMaxFee(cctx.String("max-fee"))
+ if err != nil {
+ return err
+ }
+
+ sm, err := api.MpoolPushMessage(ctx, dmsg, mss)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("dispute message ", sm.Cid())
+ } else {
+ fmt.Println("dispute is unsuccessful")
+ }
+
+ return nil
+ },
+}
+
+var disputerStartCmd = &cli.Command{
+ Name: "start",
+ Usage: "Start the window post disputer",
+ ArgsUsage: "[minerAddress]",
+ Flags: []cli.Flag{
+ &cli.Uint64Flag{
+ Name: "start-epoch",
+ Usage: "only start disputing PoSts after this epoch ",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := ReqContext(cctx)
+
+ fromAddr, err := getSender(ctx, api, cctx.String("from"))
+ if err != nil {
+ return err
+ }
+
+ mss, err := getMaxFee(cctx.String("max-fee"))
+ if err != nil {
+ return err
+ }
+
+ startEpoch := abi.ChainEpoch(0)
+ if cctx.IsSet("height") {
+ startEpoch = abi.ChainEpoch(cctx.Uint64("height"))
+ }
+
+ disputeLog.Info("checking sync status")
+
+ if err := SyncWait(ctx, api, false); err != nil {
+ return xerrors.Errorf("sync wait: %w", err)
+ }
+
+ disputeLog.Info("setting up window post disputer")
+
+ // subscribe to head changes and validate the current value
+
+ headChanges, err := api.ChainNotify(ctx)
+ if err != nil {
+ return err
+ }
+
+ head, ok := <-headChanges
+ if !ok {
+ return xerrors.Errorf("Notify stream was invalid")
+ }
+
+ if len(head) != 1 {
+ return xerrors.Errorf("Notify first entry should have been one item")
+ }
+
+ if head[0].Type != store.HCCurrent {
+ return xerrors.Errorf("expected current head on Notify stream (got %s)", head[0].Type)
+ }
+
+ lastEpoch := head[0].Val.Height()
+ lastStatusCheckEpoch := lastEpoch
+
+ // build initial deadlineMap
+
+ minerList, err := api.StateListMiners(ctx, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ knownMiners := make(map[address.Address]struct{})
+ deadlineMap := make(map[abi.ChainEpoch][]minerDeadline)
+ for _, miner := range minerList {
+ dClose, dl, err := makeMinerDeadline(ctx, api, miner)
+ if err != nil {
+ return xerrors.Errorf("making deadline: %w", err)
+ }
+
+ deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl)
+
+ knownMiners[miner] = struct{}{}
+ }
+
+ // when this fires, check for newly created miners, and purge any "missed" epochs from deadlineMap
+ statusCheckTicker := time.NewTicker(time.Hour)
+ defer statusCheckTicker.Stop()
+
+ disputeLog.Info("starting up window post disputer")
+
+ applyTsk := func(tsk types.TipSetKey) error {
+ disputeLog.Infow("last checked epoch", "epoch", lastEpoch)
+ dls, ok := deadlineMap[lastEpoch]
+ delete(deadlineMap, lastEpoch)
+ if !ok || startEpoch >= lastEpoch {
+ // no deadlines closed at this epoch - Confidence, or we haven't reached the start cutoff yet
+ return nil
+ }
+
+ dpmsgs := make([]*types.Message, 0)
+
+ startTime := time.Now()
+ proofsChecked := uint64(0)
+
+ // TODO: Parallelizable
+ for _, dl := range dls {
+ fullDeadlines, err := api.StateMinerDeadlines(ctx, dl.miner, tsk)
+ if err != nil {
+ return xerrors.Errorf("failed to load deadlines: %w", err)
+ }
+
+ if int(dl.index) >= len(fullDeadlines) {
+ return xerrors.Errorf("deadline index %d not found in deadlines", dl.index)
+ }
+
+ disputableProofs := fullDeadlines[dl.index].DisputableProofCount
+ proofsChecked += disputableProofs
+
+ ms, err := makeDisputeWindowedPosts(ctx, api, dl, disputableProofs, fromAddr)
+ if err != nil {
+ return xerrors.Errorf("failed to check for disputes: %w", err)
+ }
+
+ dpmsgs = append(dpmsgs, ms...)
+
+ dClose, dl, err := makeMinerDeadline(ctx, api, dl.miner)
+ if err != nil {
+ return xerrors.Errorf("making deadline: %w", err)
+ }
+
+ deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl)
+ }
+
+ disputeLog.Infow("checked proofs", "count", proofsChecked, "duration", time.Since(startTime))
+
+ // TODO: Parallelizable / can be integrated into the previous deadline-iterating for loop
+ for _, dpmsg := range dpmsgs {
+ disputeLog.Infow("disputing a PoSt", "miner", dpmsg.To)
+ m, err := api.MpoolPushMessage(ctx, dpmsg, mss)
+ if err != nil {
+ disputeLog.Errorw("failed to dispute post message", "err", err.Error(), "miner", dpmsg.To)
+ } else {
+ disputeLog.Infow("submitted dispute", "mcid", m.Cid(), "miner", dpmsg.To)
+ }
+ }
+
+ return nil
+ }
+
+ disputeLoop := func() error {
+ select {
+ case notif, ok := <-headChanges:
+ if !ok {
+ return xerrors.Errorf("head change channel errored")
+ }
+
+ for _, val := range notif {
+ switch val.Type {
+ case store.HCApply:
+ for ; lastEpoch <= val.Val.Height(); lastEpoch++ {
+ err := applyTsk(val.Val.Key())
+ if err != nil {
+ return err
+ }
+ }
+ case store.HCRevert:
+ // do nothing
+ default:
+ return xerrors.Errorf("unexpected head change type %s", val.Type)
+ }
+ }
+ case <-statusCheckTicker.C:
+ disputeLog.Infof("running status check")
+
+ minerList, err = api.StateListMiners(ctx, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("getting miner list: %w", err)
+ }
+
+ for _, m := range minerList {
+ _, ok := knownMiners[m]
+ if !ok {
+ dClose, dl, err := makeMinerDeadline(ctx, api, m)
+ if err != nil {
+ return xerrors.Errorf("making deadline: %w", err)
+ }
+
+ deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl)
+
+ knownMiners[m] = struct{}{}
+ }
+ }
+
+ for ; lastStatusCheckEpoch < lastEpoch; lastStatusCheckEpoch++ {
+ // if an epoch got "skipped" from the deadlineMap somehow, just fry it now instead of letting it sit around forever
+ _, ok := deadlineMap[lastStatusCheckEpoch]
+ if ok {
+ disputeLog.Infow("epoch skipped during execution, deleting it from deadlineMap", "epoch", lastStatusCheckEpoch)
+ delete(deadlineMap, lastStatusCheckEpoch)
+ }
+ }
+
+ log.Infof("status check complete")
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+ }
+
+ for {
+ err := disputeLoop()
+ if err == context.Canceled {
+ disputeLog.Info("disputer shutting down")
+ break
+ }
+ if err != nil {
+ disputeLog.Errorw("disputer shutting down", "err", err)
+ return err
+ }
+ }
+
+ return nil
+ },
+}
+
+// for a given miner, index, and maxPostIndex, tries to dispute posts from 0...postsSnapshotted-1
+// returns a list of DisputeWindowedPoSt msgs that are expected to succeed if sent
+func makeDisputeWindowedPosts(ctx context.Context, api v0api.FullNode, dl minerDeadline, postsSnapshotted uint64, sender address.Address) ([]*types.Message, error) {
+ disputes := make([]*types.Message, 0)
+
+ for i := uint64(0); i < postsSnapshotted; i++ {
+
+ dpp, aerr := actors.SerializeParams(&miner3.DisputeWindowedPoStParams{
+ Deadline: dl.index,
+ PoStIndex: i,
+ })
+
+ if aerr != nil {
+ return nil, xerrors.Errorf("failed to serialize params: %w", aerr)
+ }
+
+ dispute := &types.Message{
+ To: dl.miner,
+ From: sender,
+ Value: big.Zero(),
+ Method: builtin3.MethodsMiner.DisputeWindowedPoSt,
+ Params: dpp,
+ }
+
+ rslt, err := api.StateCall(ctx, dispute, types.EmptyTSK)
+ if err == nil && rslt.MsgRct.ExitCode == 0 {
+ disputes = append(disputes, dispute)
+ }
+
+ }
+
+ return disputes, nil
+}
+
+func makeMinerDeadline(ctx context.Context, api v0api.FullNode, mAddr address.Address) (abi.ChainEpoch, *minerDeadline, error) {
+ dl, err := api.StateMinerProvingDeadline(ctx, mAddr, types.EmptyTSK)
+ if err != nil {
+ return -1, nil, xerrors.Errorf("getting proving deadline: %w", err)
+ }
+
+ return dl.Close, &minerDeadline{
+ miner: mAddr,
+ index: dl.Index,
+ }, nil
+}
+
+func getSender(ctx context.Context, api v0api.FullNode, fromStr string) (address.Address, error) {
+ if fromStr == "" {
+ return api.WalletDefaultAddress(ctx)
+ }
+
+ addr, err := address.NewFromString(fromStr)
+ if err != nil {
+ return address.Undef, err
+ }
+
+ has, err := api.WalletHas(ctx, addr)
+ if err != nil {
+ return address.Undef, err
+ }
+
+ if !has {
+ return address.Undef, xerrors.Errorf("wallet doesn't contain: %s", addr)
+ }
+
+ return addr, nil
+}
+
+func getMaxFee(maxStr string) (*lapi.MessageSendSpec, error) {
+ if maxStr != "" {
+ maxFee, err := types.ParseFIL(maxStr)
+ if err != nil {
+ return nil, xerrors.Errorf("parsing max-fee: %w", err)
+ }
+ return &lapi.MessageSendSpec{
+ MaxFee: types.BigInt(maxFee),
+ }, nil
+ }
+
+ return nil, nil
+}
diff --git a/cli/filplus.go b/cli/filplus.go
new file mode 100644
index 00000000000..007071ea297
--- /dev/null
+++ b/cli/filplus.go
@@ -0,0 +1,276 @@
+package cli
+
+import (
+ "context"
+ "fmt"
+
+ verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
+
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ "github.com/filecoin-project/lotus/api/v0api"
+ "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
+ "github.com/filecoin-project/lotus/chain/types"
+ cbor "github.com/ipfs/go-ipld-cbor"
+)
+
+var filplusCmd = &cli.Command{
+ Name: "filplus",
+ Usage: "Interact with the verified registry actor used by Filplus",
+ Flags: []cli.Flag{},
+ Subcommands: []*cli.Command{
+ filplusVerifyClientCmd,
+ filplusListNotariesCmd,
+ filplusListClientsCmd,
+ filplusCheckClientCmd,
+ filplusCheckNotaryCmd,
+ },
+}
+
+var filplusVerifyClientCmd = &cli.Command{
+ Name: "grant-datacap",
+ Usage: "give allowance to the specified verified client address",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "from",
+ Usage: "specify your notary address to send the message from",
+ Required: true,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ froms := cctx.String("from")
+ if froms == "" {
+ return fmt.Errorf("must specify from address with --from")
+ }
+
+ fromk, err := address.NewFromString(froms)
+ if err != nil {
+ return err
+ }
+
+ if cctx.Args().Len() != 2 {
+ return fmt.Errorf("must specify two arguments: address and allowance")
+ }
+
+ target, err := address.NewFromString(cctx.Args().Get(0))
+ if err != nil {
+ return err
+ }
+
+ allowance, err := types.BigFromString(cctx.Args().Get(1))
+ if err != nil {
+ return err
+ }
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ found, dcap, err := checkNotary(ctx, api, fromk)
+ if err != nil {
+ return err
+ }
+
+ if !found {
+ return xerrors.New("sender address must be a notary")
+ }
+
+ if dcap.Cmp(allowance.Int) < 0 {
+ return xerrors.Errorf("cannot allot more allowance than notary data cap: %s < %s", dcap, allowance)
+ }
+
+ // TODO: This should be abstracted over actor versions
+ params, err := actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: target, Allowance: allowance})
+ if err != nil {
+ return err
+ }
+
+ msg := &types.Message{
+ To: verifreg.Address,
+ From: fromk,
+ Method: verifreg.Methods.AddVerifiedClient,
+ Params: params,
+ }
+
+ smsg, err := api.MpoolPushMessage(ctx, msg, nil)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("message sent, now waiting on cid: %s\n", smsg.Cid())
+
+ mwait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ if mwait.Receipt.ExitCode != 0 {
+ return fmt.Errorf("failed to add verified client: %d", mwait.Receipt.ExitCode)
+ }
+
+ return nil
+ },
+}
+
+var filplusListNotariesCmd = &cli.Command{
+ Name: "list-notaries",
+ Usage: "list all notaries",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ act, err := api.StateGetActor(ctx, verifreg.Address, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ apibs := blockstore.NewAPIBlockstore(api)
+ store := adt.WrapStore(ctx, cbor.NewCborStore(apibs))
+
+ st, err := verifreg.Load(store, act)
+ if err != nil {
+ return err
+ }
+ return st.ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error {
+ _, err := fmt.Printf("%s: %s\n", addr, dcap)
+ return err
+ })
+ },
+}
+
+var filplusListClientsCmd = &cli.Command{
+ Name: "list-clients",
+ Usage: "list all verified clients",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ act, err := api.StateGetActor(ctx, verifreg.Address, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ apibs := blockstore.NewAPIBlockstore(api)
+ store := adt.WrapStore(ctx, cbor.NewCborStore(apibs))
+
+ st, err := verifreg.Load(store, act)
+ if err != nil {
+ return err
+ }
+ return st.ForEachClient(func(addr address.Address, dcap abi.StoragePower) error {
+ _, err := fmt.Printf("%s: %s\n", addr, dcap)
+ return err
+ })
+ },
+}
+
+var filplusCheckClientCmd = &cli.Command{
+ Name: "check-client-datacap",
+ Usage: "check verified client remaining bytes",
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Args().Present() {
+ return fmt.Errorf("must specify client address to check")
+ }
+
+ caddr, err := address.NewFromString(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ dcap, err := api.StateVerifiedClientStatus(ctx, caddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+ if dcap == nil {
+ return xerrors.Errorf("client %s is not a verified client", caddr)
+ }
+
+ fmt.Println(*dcap)
+
+ return nil
+ },
+}
+
+var filplusCheckNotaryCmd = &cli.Command{
+ Name: "check-notaries-datacap",
+ Usage: "check notaries remaining bytes",
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Args().Present() {
+ return fmt.Errorf("must specify notary address to check")
+ }
+
+ vaddr, err := address.NewFromString(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ found, dcap, err := checkNotary(ctx, api, vaddr)
+ if err != nil {
+ return err
+ }
+ if !found {
+ return fmt.Errorf("not found")
+ }
+
+ fmt.Println(dcap)
+
+ return nil
+ },
+}
+
+func checkNotary(ctx context.Context, api v0api.FullNode, vaddr address.Address) (bool, abi.StoragePower, error) {
+ vid, err := api.StateLookupID(ctx, vaddr, types.EmptyTSK)
+ if err != nil {
+ return false, big.Zero(), err
+ }
+
+ act, err := api.StateGetActor(ctx, verifreg.Address, types.EmptyTSK)
+ if err != nil {
+ return false, big.Zero(), err
+ }
+
+ apibs := blockstore.NewAPIBlockstore(api)
+ store := adt.WrapStore(ctx, cbor.NewCborStore(apibs))
+
+ st, err := verifreg.Load(store, act)
+ if err != nil {
+ return false, big.Zero(), err
+ }
+
+ return st.VerifierDataCap(vid)
+}
diff --git a/cli/init_test.go b/cli/init_test.go
new file mode 100644
index 00000000000..8c343bcfabe
--- /dev/null
+++ b/cli/init_test.go
@@ -0,0 +1,9 @@
+package cli
+
+import (
+ logging "github.com/ipfs/go-log/v2"
+)
+
+func init() {
+ logging.SetLogLevel("watchdog", "ERROR")
+}
diff --git a/cli/log.go b/cli/log.go
index ed624eb8df8..4ab6aa74813 100644
--- a/cli/log.go
+++ b/cli/log.go
@@ -7,16 +7,16 @@ import (
"golang.org/x/xerrors"
)
-var logCmd = &cli.Command{
+var LogCmd = &cli.Command{
Name: "log",
Usage: "Manage logging",
Subcommands: []*cli.Command{
- logList,
- logSetLevel,
+ LogList,
+ LogSetLevel,
},
}
-var logList = &cli.Command{
+var LogList = &cli.Command{
Name: "list",
Usage: "List log systems",
Action: func(cctx *cli.Context) error {
@@ -41,7 +41,7 @@ var logList = &cli.Command{
},
}
-var logSetLevel = &cli.Command{
+var LogSetLevel = &cli.Command{
Name: "set-level",
Usage: "Set log level",
ArgsUsage: "[level]",
@@ -93,7 +93,7 @@ var logSetLevel = &cli.Command{
for _, system := range systems {
if err := api.LogSetLevel(ctx, system, cctx.Args().First()); err != nil {
- return xerrors.Errorf("setting log level on %s: %w", system, err)
+ return xerrors.Errorf("setting log level on %s: %v", system, err)
}
}
diff --git a/cli/mpool.go b/cli/mpool.go
index 8f3e937b6ad..b128ccc159f 100644
--- a/cli/mpool.go
+++ b/cli/mpool.go
@@ -19,24 +19,26 @@ import (
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/node/config"
)
-var mpoolCmd = &cli.Command{
+var MpoolCmd = &cli.Command{
Name: "mpool",
Usage: "Manage message pool",
Subcommands: []*cli.Command{
- mpoolPending,
- mpoolClear,
- mpoolSub,
- mpoolStat,
- mpoolReplaceCmd,
- mpoolFindCmd,
- mpoolConfig,
- mpoolGasPerfCmd,
+ MpoolPending,
+ MpoolClear,
+ MpoolSub,
+ MpoolStat,
+ MpoolReplaceCmd,
+ MpoolFindCmd,
+ MpoolConfig,
+ MpoolGasPerfCmd,
+ mpoolManage,
},
}
-var mpoolPending = &cli.Command{
+var MpoolPending = &cli.Command{
Name: "pending",
Usage: "Get pending messages",
Flags: []cli.Flag{
@@ -48,6 +50,14 @@ var mpoolPending = &cli.Command{
Name: "cids",
Usage: "only print cids of messages in output",
},
+ &cli.StringFlag{
+ Name: "to",
+ Usage: "return messages to a given address",
+ },
+ &cli.StringFlag{
+ Name: "from",
+ Usage: "return messages from a given address",
+ },
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
@@ -58,6 +68,23 @@ var mpoolPending = &cli.Command{
ctx := ReqContext(cctx)
+ var toa, froma address.Address
+ if tos := cctx.String("to"); tos != "" {
+ a, err := address.NewFromString(tos)
+ if err != nil {
+ return fmt.Errorf("given 'to' address %q was invalid: %w", tos, err)
+ }
+ toa = a
+ }
+
+ if froms := cctx.String("from"); froms != "" {
+ a, err := address.NewFromString(froms)
+ if err != nil {
+ return fmt.Errorf("given 'from' address %q was invalid: %w", froms, err)
+ }
+ froma = a
+ }
+
var filter map[address.Address]struct{}
if cctx.Bool("local") {
filter = map[address.Address]struct{}{}
@@ -84,6 +111,13 @@ var mpoolPending = &cli.Command{
}
}
+ if toa != address.Undef && msg.Message.To != toa {
+ continue
+ }
+ if froma != address.Undef && msg.Message.From != froma {
+ continue
+ }
+
if cctx.Bool("cids") {
fmt.Println(msg.Cid())
} else {
@@ -99,9 +133,11 @@ var mpoolPending = &cli.Command{
},
}
-var mpoolClear = &cli.Command{
- Name: "clear",
- Usage: "Clear all pending messages from the mpool (USE WITH CARE)",
+// Deprecated: MpoolClear is now available at `lotus-shed mpool clear`
+var MpoolClear = &cli.Command{
+ Name: "clear",
+ Usage: "Clear all pending messages from the mpool (USE WITH CARE) (DEPRECATED)",
+ Hidden: true,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "local",
@@ -113,6 +149,7 @@ var mpoolClear = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
+ fmt.Println("DEPRECATED: This behavior is being moved to `lotus-shed mpool clear`")
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
@@ -132,7 +169,7 @@ var mpoolClear = &cli.Command{
},
}
-var mpoolSub = &cli.Command{
+var MpoolSub = &cli.Command{
Name: "sub",
Usage: "Subscribe to mpool changes",
Action: func(cctx *cli.Context) error {
@@ -164,7 +201,7 @@ var mpoolSub = &cli.Command{
},
}
-var mpoolStat = &cli.Command{
+var MpoolStat = &cli.Command{
Name: "stat",
Usage: "print mempool stats",
Flags: []cli.Flag{
@@ -232,6 +269,7 @@ var mpoolStat = &cli.Command{
addr string
past, cur, future uint64
belowCurr, belowPast uint64
+ gasLimit big.Int
}
buckets := map[address.Address]*statBucket{}
@@ -273,6 +311,7 @@ var mpoolStat = &cli.Command{
var s mpStat
s.addr = a.String()
+ s.gasLimit = big.Zero()
for _, m := range bkt.msgs {
if m.Message.Nonce < act.Nonce {
@@ -289,6 +328,8 @@ var mpoolStat = &cli.Command{
if m.Message.GasFeeCap.LessThan(minBF) {
s.belowPast++
}
+
+ s.gasLimit = big.Add(s.gasLimit, types.NewInt(uint64(m.Message.GasLimit)))
}
out = append(out, s)
@@ -299,6 +340,7 @@ var mpoolStat = &cli.Command{
})
var total mpStat
+ total.gasLimit = big.Zero()
for _, stat := range out {
total.past += stat.past
@@ -306,32 +348,33 @@ var mpoolStat = &cli.Command{
total.future += stat.future
total.belowCurr += stat.belowCurr
total.belowPast += stat.belowPast
+ total.gasLimit = big.Add(total.gasLimit, stat.gasLimit)
- fmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d \n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast)
+ fmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast, stat.gasLimit)
}
fmt.Println("-----")
- fmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d \n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast)
+ fmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast, total.gasLimit)
return nil
},
}
-var mpoolReplaceCmd = &cli.Command{
+var MpoolReplaceCmd = &cli.Command{
Name: "replace",
Usage: "replace a message in the mempool",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "gas-feecap",
- Usage: "gas feecap for new message",
+ Usage: "gas feecap for new message (burn and pay to miner, attoFIL/GasUnit)",
},
&cli.StringFlag{
Name: "gas-premium",
- Usage: "gas price for new message",
+ Usage: "gas price for new message (pay to miner, attoFIL/GasUnit)",
},
&cli.Int64Flag{
Name: "gas-limit",
- Usage: "gas price for new message",
+ Usage: "gas limit for new message (GasUnit)",
},
&cli.BoolFlag{
Name: "auto",
@@ -339,7 +382,7 @@ var mpoolReplaceCmd = &cli.Command{
},
&cli.StringFlag{
Name: "max-fee",
- Usage: "Spend up to X FIL for this message (applicable for auto mode)",
+ Usage: "Spend up to X attoFIL for this message (applicable for auto mode)",
},
},
ArgsUsage: " | ",
@@ -434,9 +477,16 @@ var mpoolReplaceCmd = &cli.Command{
msg.GasPremium = big.Max(retm.GasPremium, minRBF)
msg.GasFeeCap = big.Max(retm.GasFeeCap, msg.GasPremium)
- messagepool.CapGasFee(&msg, mss.Get().MaxFee)
+
+ mff := func() (abi.TokenAmount, error) {
+ return abi.TokenAmount(config.DefaultDefaultMaxFee), nil
+ }
+
+ messagepool.CapGasFee(mff, &msg, mss)
} else {
- msg.GasLimit = cctx.Int64("gas-limit")
+ if cctx.IsSet("gas-limit") {
+ msg.GasLimit = cctx.Int64("gas-limit")
+ }
msg.GasPremium, err = types.BigFromString(cctx.String("gas-premium"))
if err != nil {
return fmt.Errorf("parsing gas-premium: %w", err)
@@ -463,7 +513,7 @@ var mpoolReplaceCmd = &cli.Command{
},
}
-var mpoolFindCmd = &cli.Command{
+var MpoolFindCmd = &cli.Command{
Name: "find",
Usage: "find a message in the mempool",
Flags: []cli.Flag{
@@ -546,7 +596,7 @@ var mpoolFindCmd = &cli.Command{
},
}
-var mpoolConfig = &cli.Command{
+var MpoolConfig = &cli.Command{
Name: "config",
Usage: "get or set current mpool configuration",
ArgsUsage: "[new-config]",
@@ -591,7 +641,7 @@ var mpoolConfig = &cli.Command{
},
}
-var mpoolGasPerfCmd = &cli.Command{
+var MpoolGasPerfCmd = &cli.Command{
Name: "gas-perf",
Usage: "Check gas performance of messages in mempool",
Flags: []cli.Flag{
diff --git a/cli/mpool_manage.go b/cli/mpool_manage.go
new file mode 100644
index 00000000000..164a0584241
--- /dev/null
+++ b/cli/mpool_manage.go
@@ -0,0 +1,360 @@
+package cli
+
+import (
+ "context"
+ "fmt"
+ "sort"
+
+ "github.com/Kubuxu/imtui"
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/messagepool"
+ types "github.com/filecoin-project/lotus/chain/types"
+ "github.com/gdamore/tcell/v2"
+ cid "github.com/ipfs/go-cid"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+)
+
+var mpoolManage = &cli.Command{
+ Name: "manage",
+ Action: func(cctx *cli.Context) error {
+ srv, err := GetFullNodeServices(cctx)
+ if err != nil {
+ return err
+ }
+ defer srv.Close() //nolint:errcheck
+
+ ctx := ReqContext(cctx)
+
+ _, localAddr, err := srv.LocalAddresses(ctx)
+ if err != nil {
+ return xerrors.Errorf("getting local addresses: %w", err)
+ }
+
+ msgs, err := srv.MpoolPendingFilter(ctx, func(sm *types.SignedMessage) bool {
+ if sm.Message.From.Empty() {
+ return false
+ }
+ for _, a := range localAddr {
+ if a == sm.Message.From {
+ return true
+ }
+ }
+ return false
+ }, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ t, err := imtui.NewTui()
+ if err != nil {
+ panic(err)
+ }
+
+ mm := &mmUI{
+ ctx: ctx,
+ srv: srv,
+ addrs: localAddr,
+ messages: msgs,
+ }
+ sort.Slice(mm.addrs, func(i, j int) bool {
+ return mm.addrs[i].String() < mm.addrs[j].String()
+ })
+ t.PushScene(mm.addrSelect())
+
+ err = t.Run()
+
+ if err != nil {
+ panic(err)
+ }
+
+ return nil
+ },
+}
+
+type mmUI struct {
+ ctx context.Context
+ srv ServicesAPI
+ addrs []address.Address
+ messages []*types.SignedMessage
+}
+
+func (mm *mmUI) addrSelect() func(*imtui.Tui) error {
+ rows := [][]string{{"Address", "No. Messages"}}
+ mCount := map[address.Address]int{}
+ for _, sm := range mm.messages {
+ mCount[sm.Message.From]++
+ }
+ for _, a := range mm.addrs {
+ rows = append(rows, []string{a.String(), fmt.Sprintf("%d", mCount[a])})
+ }
+
+ flex := []int{4, 1}
+ sel := 0
+ scroll := 0
+ return func(t *imtui.Tui) error {
+ if t.CurrentKey != nil && t.CurrentKey.Key() == tcell.KeyEnter {
+ if sel > 0 {
+ t.ReplaceScene(mm.messageLising(mm.addrs[sel-1]))
+ }
+ }
+ t.FlexTable(0, 0, 0, &sel, &scroll, rows, flex, true)
+ return nil
+ }
+}
+
+func errUI(err error) func(*imtui.Tui) error {
+ return func(t *imtui.Tui) error {
+ return err
+ }
+}
+
+type msgInfo struct {
+ sm *types.SignedMessage
+ checks []api.MessageCheckStatus
+}
+
+func (mi *msgInfo) Row() []string {
+ cidStr := mi.sm.Cid().String()
+ failedChecks := 0
+ for _, c := range mi.checks {
+ if !c.OK {
+ failedChecks++
+ }
+ }
+ shortAddr := mi.sm.Message.To.String()
+ if len(shortAddr) > 16 {
+ shortAddr = "…" + shortAddr[len(shortAddr)-16:]
+ }
+ var fCk string
+ if failedChecks == 0 {
+ fCk = "[:green:]OK"
+ } else {
+ fCk = "[:orange:]" + fmt.Sprintf("%d", failedChecks)
+ }
+ return []string{"…" + cidStr[len(cidStr)-32:], shortAddr,
+ fmt.Sprintf("%d", mi.sm.Message.Nonce), types.FIL(mi.sm.Message.Value).String(),
+ fmt.Sprintf("%d", mi.sm.Message.Method), fCk}
+
+}
+
+func (mm *mmUI) messageLising(a address.Address) func(*imtui.Tui) error {
+ genMsgInfos := func() ([]msgInfo, error) {
+ msgs, err := mm.srv.MpoolPendingFilter(mm.ctx, func(sm *types.SignedMessage) bool {
+ if sm.Message.From.Empty() {
+ return false
+ }
+ if a == sm.Message.From {
+ return true
+ }
+ return false
+ }, types.EmptyTSK)
+
+ if err != nil {
+ return nil, xerrors.Errorf("getting pending: %w", err)
+ }
+
+ msgIdx := map[cid.Cid]*types.SignedMessage{}
+ for _, sm := range msgs {
+ if sm.Message.From == a {
+ msgIdx[sm.Message.Cid()] = sm
+ msgIdx[sm.Cid()] = sm
+ }
+ }
+
+ checks, err := mm.srv.MpoolCheckPendingMessages(mm.ctx, a)
+ if err != nil {
+ return nil, xerrors.Errorf("checking pending: %w", err)
+ }
+ msgInfos := make([]msgInfo, 0, len(checks))
+ for _, msgChecks := range checks {
+ failingChecks := []api.MessageCheckStatus{}
+ for _, c := range msgChecks {
+ if !c.OK {
+ failingChecks = append(failingChecks, c)
+ }
+ }
+ msgInfos = append(msgInfos, msgInfo{
+ sm: msgIdx[msgChecks[0].Cid],
+ checks: failingChecks,
+ })
+ }
+ return msgInfos, nil
+ }
+
+ sel := 0
+ scroll := 0
+
+ var msgInfos []msgInfo
+ var rows [][]string
+ flex := []int{3, 2, 1, 1, 1, 1}
+ refresh := true
+
+ return func(t *imtui.Tui) error {
+ if refresh {
+ var err error
+ msgInfos, err = genMsgInfos()
+ if err != nil {
+ return xerrors.Errorf("getting msgInfos: %w", err)
+ }
+
+ rows = [][]string{{"Message Cid", "To", "Nonce", "Value", "Method", "Checks"}}
+ for _, mi := range msgInfos {
+ rows = append(rows, mi.Row())
+ }
+ refresh = false
+ }
+
+ if t.CurrentKey != nil && t.CurrentKey.Key() == tcell.KeyEnter {
+ if sel > 0 {
+ t.PushScene(mm.messageDetail(msgInfos[sel-1]))
+ refresh = true
+ return nil
+ }
+ }
+
+ t.Label(0, 0, fmt.Sprintf("Address: %s", a), tcell.StyleDefault)
+ t.FlexTable(1, 0, 0, &sel, &scroll, rows, flex, true)
+ return nil
+ }
+}
+
+func (mm *mmUI) messageDetail(mi msgInfo) func(*imtui.Tui) error {
+ baseFee, err := mm.srv.GetBaseFee(mm.ctx)
+ if err != nil {
+ return errUI(err)
+ }
+ _ = baseFee
+
+ m := mi.sm.Message
+ maxFee := big.Mul(m.GasFeeCap, big.NewInt(m.GasLimit))
+
+ issues := [][]string{}
+ for _, c := range mi.checks {
+ issues = append(issues, []string{c.Code.String(), c.Err})
+ }
+ issuesFlex := []int{1, 3}
+ var sel, scroll int
+
+ executeReprice := false
+ executeNoop := false
+ return func(t *imtui.Tui) error {
+ if executeReprice {
+ m.GasFeeCap = big.Div(maxFee, big.NewInt(m.GasLimit))
+ m.GasPremium = messagepool.ComputeMinRBF(m.GasPremium)
+ m.GasFeeCap = big.Max(m.GasFeeCap, m.GasPremium)
+
+ _, _, err := mm.srv.PublishMessage(mm.ctx, &api.MessagePrototype{
+ Message: m,
+ ValidNonce: true,
+ }, true)
+ if err != nil {
+ return err
+ }
+ t.PopScene()
+ return nil
+ }
+ if executeNoop {
+ nop := types.Message{
+ To: builtin.BurntFundsActorAddr,
+ From: m.From,
+
+ Nonce: m.Nonce,
+ Value: big.Zero(),
+ }
+
+ nop.GasPremium = messagepool.ComputeMinRBF(m.GasPremium)
+
+ _, _, err := mm.srv.PublishMessage(mm.ctx, &api.MessagePrototype{
+ Message: nop,
+ ValidNonce: true,
+ }, true)
+
+ if err != nil {
+ return xerrors.Errorf("publishing noop message: %w", err)
+ }
+
+ t.PopScene()
+ return nil
+ }
+
+ if t.CurrentKey != nil {
+ if t.CurrentKey.Key() == tcell.KeyLeft {
+ t.PopScene()
+ return nil
+ }
+ if t.CurrentKey.Key() == tcell.KeyRune {
+ switch t.CurrentKey.Rune() {
+ case 'R', 'r':
+ t.PushScene(feeUI(baseFee, m.GasLimit, &maxFee, &executeReprice))
+ return nil
+ case 'N', 'n':
+ t.PushScene(confirmationScene(
+ &executeNoop,
+ "Are you sure you want to cancel the message by",
+ "replacing it with a message with no effects?"))
+ return nil
+ }
+ }
+ }
+
+ row := 0
+ defS := tcell.StyleDefault
+ display := func(f string, args ...interface{}) {
+ t.Label(0, row, fmt.Sprintf(f, args...), defS)
+ row++
+ }
+
+ display("Message CID: %s", m.Cid())
+ display("Signed Message CID: %s", mi.sm.Cid())
+ row++
+ display("From: %s", m.From)
+ display("To: %s", m.To)
+ row++
+ display("Nonce: %d", m.Nonce)
+ display("Value: %s", types.FIL(m.Value))
+ row++
+ display("GasLimit: %d", m.GasLimit)
+ display("GasPremium: %s", types.FIL(m.GasPremium).Short())
+ display("GasFeeCap %s", types.FIL(m.GasFeeCap).Short())
+ row++
+ display("Press R to reprice this message")
+ display("Press N to replace this message with no-operation message")
+ row++
+
+ t.FlexTable(row, 0, 0, &sel, &scroll, issues, issuesFlex, false)
+
+ return nil
+ }
+}
+
+func confirmationScene(yes *bool, ask ...string) func(*imtui.Tui) error {
+ return func(t *imtui.Tui) error {
+ row := 0
+ defS := tcell.StyleDefault
+ display := func(f string, args ...interface{}) {
+ t.Label(0, row, fmt.Sprintf(f, args...), defS)
+ row++
+ }
+
+ for _, a := range ask {
+ display(a)
+ }
+ row++
+ display("Enter to confirm")
+ display("Esc to cancel")
+
+ if t.CurrentKey != nil {
+ if t.CurrentKey.Key() == tcell.KeyEnter {
+ *yes = true
+ t.PopScene()
+ return nil
+ }
+ }
+
+ return nil
+ }
+}
diff --git a/cli/multisig.go b/cli/multisig.go
index 8abae51820b..c51677d85ca 100644
--- a/cli/multisig.go
+++ b/cli/multisig.go
@@ -29,7 +29,7 @@ import (
init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
msig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
- "github.com/filecoin-project/lotus/api/apibstore"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
@@ -95,11 +95,13 @@ var msigCreateCmd = &cli.Command{
return ShowHelp(cctx, fmt.Errorf("multisigs must have at least one signer"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
var addrs []address.Address
@@ -146,13 +148,20 @@ var msigCreateCmd = &cli.Command{
gp := types.NewInt(1)
- msgCid, err := api.MsigCreate(ctx, required, addrs, d, intVal, sendAddr, gp)
+ proto, err := api.MsigCreate(ctx, required, addrs, d, intVal, sendAddr, gp)
+ if err != nil {
+ return err
+ }
+
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
if err != nil {
return err
}
+ msgCid := sm.Cid()
+
// wait for it to get mined into a block
- wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")))
+ wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
@@ -202,7 +211,7 @@ var msigInspectCmd = &cli.Command{
defer closer()
ctx := ReqContext(cctx)
- store := adt.WrapStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(api)))
+ store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api)))
maddr, err := address.NewFromString(cctx.Args().First())
if err != nil {
@@ -364,11 +373,13 @@ var msigProposeCmd = &cli.Command{
return ShowHelp(cctx, fmt.Errorf("must either pass three or five arguments"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
msig, err := address.NewFromString(cctx.Args().Get(0))
@@ -426,14 +437,21 @@ var msigProposeCmd = &cli.Command{
return fmt.Errorf("actor %s is not a multisig actor", msig)
}
- msgCid, err := api.MsigPropose(ctx, msig, dest, types.BigInt(value), from, method, params)
+ proto, err := api.MsigPropose(ctx, msig, dest, types.BigInt(value), from, method, params)
+ if err != nil {
+ return err
+ }
+
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
if err != nil {
return err
}
+ msgCid := sm.Cid()
+
fmt.Println("send proposal in message: ", msgCid)
- wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")))
+ wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
@@ -473,19 +491,21 @@ var msigApproveCmd = &cli.Command{
return ShowHelp(cctx, fmt.Errorf("must pass at least multisig address and message ID"))
}
- if cctx.Args().Len() > 5 && cctx.Args().Len() != 7 {
- return ShowHelp(cctx, fmt.Errorf("usage: msig approve [ ]"))
+ if cctx.Args().Len() > 2 && cctx.Args().Len() < 5 {
+ return ShowHelp(cctx, fmt.Errorf("usage: msig approve "))
}
- if cctx.Args().Len() > 2 && cctx.Args().Len() != 5 {
- return ShowHelp(cctx, fmt.Errorf("usage: msig approve "))
+ if cctx.Args().Len() > 5 && cctx.Args().Len() != 7 {
+ return ShowHelp(cctx, fmt.Errorf("usage: msig approve [ ]"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
msig, err := address.NewFromString(cctx.Args().Get(0))
@@ -515,10 +535,17 @@ var msigApproveCmd = &cli.Command{
var msgCid cid.Cid
if cctx.Args().Len() == 2 {
- msgCid, err = api.MsigApprove(ctx, msig, txid, from)
+ proto, err := api.MsigApprove(ctx, msig, txid, from)
+ if err != nil {
+ return err
+ }
+
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
if err != nil {
return err
}
+
+ msgCid = sm.Cid()
} else {
proposer, err := address.NewFromString(cctx.Args().Get(2))
if err != nil {
@@ -558,15 +585,22 @@ var msigApproveCmd = &cli.Command{
params = p
}
- msgCid, err = api.MsigApproveTxnHash(ctx, msig, txid, proposer, dest, types.BigInt(value), from, method, params)
+ proto, err := api.MsigApproveTxnHash(ctx, msig, txid, proposer, dest, types.BigInt(value), from, method, params)
if err != nil {
return err
}
+
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
+ if err != nil {
+ return err
+ }
+
+ msgCid = sm.Cid()
}
fmt.Println("sent approval in message: ", msgCid)
- wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")))
+ wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
@@ -598,11 +632,13 @@ var msigRemoveProposeCmd = &cli.Command{
return ShowHelp(cctx, fmt.Errorf("must pass multisig address and signer address"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
msig, err := address.NewFromString(cctx.Args().Get(0))
@@ -630,14 +666,21 @@ var msigRemoveProposeCmd = &cli.Command{
from = defaddr
}
- msgCid, err := api.MsigRemoveSigner(ctx, msig, from, addr, cctx.Bool("decrease-threshold"))
+ proto, err := api.MsigRemoveSigner(ctx, msig, from, addr, cctx.Bool("decrease-threshold"))
+ if err != nil {
+ return err
+ }
+
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
if err != nil {
return err
}
+ msgCid := sm.Cid()
+
fmt.Println("sent remove proposal in message: ", msgCid)
- wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")))
+ wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
@@ -676,11 +719,13 @@ var msigAddProposeCmd = &cli.Command{
return ShowHelp(cctx, fmt.Errorf("must pass multisig address and signer address"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
msig, err := address.NewFromString(cctx.Args().Get(0))
@@ -708,14 +753,21 @@ var msigAddProposeCmd = &cli.Command{
from = defaddr
}
- msgCid, err := api.MsigAddPropose(ctx, msig, from, addr, cctx.Bool("increase-threshold"))
+ proto, err := api.MsigAddPropose(ctx, msig, from, addr, cctx.Bool("increase-threshold"))
if err != nil {
return err
}
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
+ if err != nil {
+ return err
+ }
+
+ msgCid := sm.Cid()
+
fmt.Fprintln(cctx.App.Writer, "sent add proposal in message: ", msgCid)
- wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")))
+ wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
@@ -743,11 +795,13 @@ var msigAddApproveCmd = &cli.Command{
return ShowHelp(cctx, fmt.Errorf("must pass multisig address, proposer address, transaction id, new signer address, whether to increase threshold"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
msig, err := address.NewFromString(cctx.Args().Get(0))
@@ -790,14 +844,21 @@ var msigAddApproveCmd = &cli.Command{
from = defaddr
}
- msgCid, err := api.MsigAddApprove(ctx, msig, from, txid, prop, newAdd, inc)
+ proto, err := api.MsigAddApprove(ctx, msig, from, txid, prop, newAdd, inc)
if err != nil {
return err
}
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
+ if err != nil {
+ return err
+ }
+
+ msgCid := sm.Cid()
+
fmt.Println("sent add approval in message: ", msgCid)
- wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")))
+ wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
@@ -825,11 +886,13 @@ var msigAddCancelCmd = &cli.Command{
return ShowHelp(cctx, fmt.Errorf("must pass multisig address, transaction id, new signer address, whether to increase threshold"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
msig, err := address.NewFromString(cctx.Args().Get(0))
@@ -867,14 +930,21 @@ var msigAddCancelCmd = &cli.Command{
from = defaddr
}
- msgCid, err := api.MsigAddCancel(ctx, msig, from, txid, newAdd, inc)
+ proto, err := api.MsigAddCancel(ctx, msig, from, txid, newAdd, inc)
if err != nil {
return err
}
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
+ if err != nil {
+ return err
+ }
+
+ msgCid := sm.Cid()
+
fmt.Println("sent add cancellation in message: ", msgCid)
- wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")))
+ wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
@@ -902,11 +972,13 @@ var msigSwapProposeCmd = &cli.Command{
return ShowHelp(cctx, fmt.Errorf("must pass multisig address, old signer address, new signer address"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
msig, err := address.NewFromString(cctx.Args().Get(0))
@@ -939,14 +1011,21 @@ var msigSwapProposeCmd = &cli.Command{
from = defaddr
}
- msgCid, err := api.MsigSwapPropose(ctx, msig, from, oldAdd, newAdd)
+ proto, err := api.MsigSwapPropose(ctx, msig, from, oldAdd, newAdd)
+ if err != nil {
+ return err
+ }
+
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
if err != nil {
return err
}
+ msgCid := sm.Cid()
+
fmt.Println("sent swap proposal in message: ", msgCid)
- wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")))
+ wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
@@ -974,11 +1053,13 @@ var msigSwapApproveCmd = &cli.Command{
return ShowHelp(cctx, fmt.Errorf("must pass multisig address, proposer address, transaction id, old signer address, new signer address"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
msig, err := address.NewFromString(cctx.Args().Get(0))
@@ -1021,14 +1102,21 @@ var msigSwapApproveCmd = &cli.Command{
from = defaddr
}
- msgCid, err := api.MsigSwapApprove(ctx, msig, from, txid, prop, oldAdd, newAdd)
+ proto, err := api.MsigSwapApprove(ctx, msig, from, txid, prop, oldAdd, newAdd)
if err != nil {
return err
}
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
+ if err != nil {
+ return err
+ }
+
+ msgCid := sm.Cid()
+
fmt.Println("sent swap approval in message: ", msgCid)
- wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")))
+ wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
@@ -1056,11 +1144,13 @@ var msigSwapCancelCmd = &cli.Command{
return ShowHelp(cctx, fmt.Errorf("must pass multisig address, transaction id, old signer address, new signer address"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
msig, err := address.NewFromString(cctx.Args().Get(0))
@@ -1098,14 +1188,21 @@ var msigSwapCancelCmd = &cli.Command{
from = defaddr
}
- msgCid, err := api.MsigSwapCancel(ctx, msig, from, txid, oldAdd, newAdd)
+ proto, err := api.MsigSwapCancel(ctx, msig, from, txid, oldAdd, newAdd)
if err != nil {
return err
}
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
+ if err != nil {
+ return err
+ }
+
+ msgCid := sm.Cid()
+
fmt.Println("sent swap cancellation in message: ", msgCid)
- wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")))
+ wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
@@ -1133,11 +1230,13 @@ var msigLockProposeCmd = &cli.Command{
return ShowHelp(cctx, fmt.Errorf("must pass multisig address, start epoch, unlock duration, and amount"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
msig, err := address.NewFromString(cctx.Args().Get(0))
@@ -1178,21 +1277,28 @@ var msigLockProposeCmd = &cli.Command{
params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{
StartEpoch: abi.ChainEpoch(start),
UnlockDuration: abi.ChainEpoch(duration),
- Amount: abi.NewTokenAmount(amount.Int64()),
+ Amount: big.Int(amount),
})
if actErr != nil {
return actErr
}
- msgCid, err := api.MsigPropose(ctx, msig, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params)
+ proto, err := api.MsigPropose(ctx, msig, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params)
if err != nil {
return err
}
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
+ if err != nil {
+ return err
+ }
+
+ msgCid := sm.Cid()
+
fmt.Println("sent lock proposal in message: ", msgCid)
- wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")))
+ wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
@@ -1220,11 +1326,13 @@ var msigLockApproveCmd = &cli.Command{
return ShowHelp(cctx, fmt.Errorf("must pass multisig address, proposer address, tx id, start epoch, unlock duration, and amount"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
msig, err := address.NewFromString(cctx.Args().Get(0))
@@ -1275,21 +1383,28 @@ var msigLockApproveCmd = &cli.Command{
params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{
StartEpoch: abi.ChainEpoch(start),
UnlockDuration: abi.ChainEpoch(duration),
- Amount: abi.NewTokenAmount(amount.Int64()),
+ Amount: big.Int(amount),
})
if actErr != nil {
return actErr
}
- msgCid, err := api.MsigApproveTxnHash(ctx, msig, txid, prop, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params)
+ proto, err := api.MsigApproveTxnHash(ctx, msig, txid, prop, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params)
if err != nil {
return err
}
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
+ if err != nil {
+ return err
+ }
+
+ msgCid := sm.Cid()
+
fmt.Println("sent lock approval in message: ", msgCid)
- wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")))
+ wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
@@ -1313,15 +1428,17 @@ var msigLockCancelCmd = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
- if cctx.Args().Len() != 6 {
+ if cctx.Args().Len() != 5 {
return ShowHelp(cctx, fmt.Errorf("must pass multisig address, tx id, start epoch, unlock duration, and amount"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
msig, err := address.NewFromString(cctx.Args().Get(0))
@@ -1367,21 +1484,28 @@ var msigLockCancelCmd = &cli.Command{
params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{
StartEpoch: abi.ChainEpoch(start),
UnlockDuration: abi.ChainEpoch(duration),
- Amount: abi.NewTokenAmount(amount.Int64()),
+ Amount: big.Int(amount),
})
if actErr != nil {
return actErr
}
- msgCid, err := api.MsigCancel(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params)
+ proto, err := api.MsigCancel(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params)
+ if err != nil {
+ return err
+ }
+
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
if err != nil {
return err
}
+ msgCid := sm.Cid()
+
fmt.Println("sent lock cancellation in message: ", msgCid)
- wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")))
+ wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
@@ -1471,11 +1595,13 @@ var msigProposeThresholdCmd = &cli.Command{
return ShowHelp(cctx, fmt.Errorf("must pass multisig address and new threshold value"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := ReqContext(cctx)
msig, err := address.NewFromString(cctx.Args().Get(0))
@@ -1511,14 +1637,21 @@ var msigProposeThresholdCmd = &cli.Command{
return actErr
}
- msgCid, err := api.MsigPropose(ctx, msig, msig, types.NewInt(0), from, uint64(multisig.Methods.ChangeNumApprovalsThreshold), params)
+ proto, err := api.MsigPropose(ctx, msig, msig, types.NewInt(0), from, uint64(multisig.Methods.ChangeNumApprovalsThreshold), params)
if err != nil {
return fmt.Errorf("failed to propose change of threshold: %w", err)
}
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
+ if err != nil {
+ return err
+ }
+
+ msgCid := sm.Cid()
+
fmt.Println("sent change threshold proposal in message: ", msgCid)
- wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")))
+ wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
diff --git a/cli/multisig_test.go b/cli/multisig_test.go
deleted file mode 100644
index 82472cd627b..00000000000
--- a/cli/multisig_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package cli
-
-import (
- "context"
- "os"
- "testing"
- "time"
-
- clitest "github.com/filecoin-project/lotus/cli/test"
-)
-
-// TestMultisig does a basic test to exercise the multisig CLI
-// commands
-func TestMultisig(t *testing.T) {
- _ = os.Setenv("BELLMAN_NO_GPU", "1")
- clitest.QuietMiningLogs()
-
- blocktime := 5 * time.Millisecond
- ctx := context.Background()
- clientNode, _ := clitest.StartOneNodeOneMiner(ctx, t, blocktime)
- clitest.RunMultisigTest(t, Commands, clientNode)
-}
diff --git a/cli/net.go b/cli/net.go
index 9c40c70c7d7..fdd0a13d656 100644
--- a/cli/net.go
+++ b/cli/net.go
@@ -18,22 +18,24 @@ import (
"github.com/filecoin-project/go-address"
+ atypes "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/addrutil"
)
-var netCmd = &cli.Command{
+var NetCmd = &cli.Command{
Name: "net",
Usage: "Manage P2P Network",
Subcommands: []*cli.Command{
NetPeers,
- netConnect,
+ NetConnect,
NetListen,
NetId,
- netFindPeer,
- netScores,
+ NetFindPeer,
+ NetScores,
NetReachability,
NetBandwidthCmd,
+ NetBlockCmd,
},
}
@@ -46,6 +48,11 @@ var NetPeers = &cli.Command{
Aliases: []string{"a"},
Usage: "Print agent name",
},
+ &cli.BoolFlag{
+ Name: "extended",
+ Aliases: []string{"x"},
+ Usage: "Print extended peer information in json",
+ },
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
@@ -63,31 +70,56 @@ var NetPeers = &cli.Command{
return strings.Compare(string(peers[i].ID), string(peers[j].ID)) > 0
})
- for _, peer := range peers {
- var agent string
- if cctx.Bool("agent") {
- agent, err = api.NetAgentVersion(ctx, peer.ID)
+ if cctx.Bool("extended") {
+ // deduplicate
+ seen := make(map[peer.ID]struct{})
+
+ for _, peer := range peers {
+ _, dup := seen[peer.ID]
+ if dup {
+ continue
+ }
+ seen[peer.ID] = struct{}{}
+
+ info, err := api.NetPeerInfo(ctx, peer.ID)
if err != nil {
- log.Warnf("getting agent version: %s", err)
+ log.Warnf("error getting extended peer info: %s", err)
} else {
- agent = ", " + agent
+ bytes, err := json.Marshal(&info)
+ if err != nil {
+ log.Warnf("error marshalling extended peer info: %s", err)
+ } else {
+ fmt.Println(string(bytes))
+ }
}
}
-
- fmt.Printf("%s, %s%s\n", peer.ID, peer.Addrs, agent)
+ } else {
+ for _, peer := range peers {
+ var agent string
+ if cctx.Bool("agent") {
+ agent, err = api.NetAgentVersion(ctx, peer.ID)
+ if err != nil {
+ log.Warnf("getting agent version: %s", err)
+ } else {
+ agent = ", " + agent
+ }
+ }
+ fmt.Printf("%s, %s%s\n", peer.ID, peer.Addrs, agent)
+ }
}
return nil
},
}
-var netScores = &cli.Command{
+var NetScores = &cli.Command{
Name: "scores",
Usage: "Print peers' pubsub scores",
Flags: []cli.Flag{
&cli.BoolFlag{
- Name: "extended",
- Usage: "print extended peer scores in json",
+ Name: "extended",
+ Aliases: []string{"x"},
+ Usage: "print extended peer scores in json",
},
},
Action: func(cctx *cli.Context) error {
@@ -143,7 +175,7 @@ var NetListen = &cli.Command{
},
}
-var netConnect = &cli.Command{
+var NetConnect = &cli.Command{
Name: "connect",
Usage: "Connect to a peer",
ArgsUsage: "[peerMultiaddr|minerActorAddress]",
@@ -232,7 +264,7 @@ var NetId = &cli.Command{
},
}
-var netFindPeer = &cli.Command{
+var NetFindPeer = &cli.Command{
Name: "findpeer",
Usage: "Find the addresses of a given peerID",
ArgsUsage: "[peerId]",
@@ -375,3 +407,202 @@ var NetBandwidthCmd = &cli.Command{
},
}
+
+var NetBlockCmd = &cli.Command{
+ Name: "block",
+ Usage: "Manage network connection gating rules",
+ Subcommands: []*cli.Command{
+ NetBlockAddCmd,
+ NetBlockRemoveCmd,
+ NetBlockListCmd,
+ },
+}
+
+var NetBlockAddCmd = &cli.Command{
+ Name: "add",
+ Usage: "Add connection gating rules",
+ Subcommands: []*cli.Command{
+ NetBlockAddPeer,
+ NetBlockAddIP,
+ NetBlockAddSubnet,
+ },
+}
+
+var NetBlockAddPeer = &cli.Command{
+ Name: "peer",
+ Usage: "Block a peer",
+ ArgsUsage: " ...",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ var peers []peer.ID
+ for _, s := range cctx.Args().Slice() {
+ p, err := peer.Decode(s)
+ if err != nil {
+ return err
+ }
+
+ peers = append(peers, p)
+ }
+
+ return api.NetBlockAdd(ctx, atypes.NetBlockList{Peers: peers})
+ },
+}
+
+var NetBlockAddIP = &cli.Command{
+ Name: "ip",
+ Usage: "Block an IP address",
+ ArgsUsage: " ...",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ return api.NetBlockAdd(ctx, atypes.NetBlockList{IPAddrs: cctx.Args().Slice()})
+ },
+}
+
+var NetBlockAddSubnet = &cli.Command{
+ Name: "subnet",
+ Usage: "Block an IP subnet",
+ ArgsUsage: " ...",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ return api.NetBlockAdd(ctx, atypes.NetBlockList{IPSubnets: cctx.Args().Slice()})
+ },
+}
+
+var NetBlockRemoveCmd = &cli.Command{
+ Name: "remove",
+ Usage: "Remove connection gating rules",
+ Subcommands: []*cli.Command{
+ NetBlockRemovePeer,
+ NetBlockRemoveIP,
+ NetBlockRemoveSubnet,
+ },
+}
+
+var NetBlockRemovePeer = &cli.Command{
+ Name: "peer",
+ Usage: "Unblock a peer",
+ ArgsUsage: " ...",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ var peers []peer.ID
+ for _, s := range cctx.Args().Slice() {
+ p, err := peer.Decode(s)
+ if err != nil {
+ return err
+ }
+
+ peers = append(peers, p)
+ }
+
+ return api.NetBlockRemove(ctx, atypes.NetBlockList{Peers: peers})
+ },
+}
+
+var NetBlockRemoveIP = &cli.Command{
+ Name: "ip",
+ Usage: "Unblock an IP address",
+ ArgsUsage: " ...",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ return api.NetBlockRemove(ctx, atypes.NetBlockList{IPAddrs: cctx.Args().Slice()})
+ },
+}
+
+var NetBlockRemoveSubnet = &cli.Command{
+ Name: "subnet",
+ Usage: "Unblock an IP subnet",
+ ArgsUsage: " ...",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ return api.NetBlockRemove(ctx, atypes.NetBlockList{IPSubnets: cctx.Args().Slice()})
+ },
+}
+
+var NetBlockListCmd = &cli.Command{
+ Name: "list",
+ Usage: "list connection gating rules",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ acl, err := api.NetBlockList(ctx)
+ if err != nil {
+ return err
+ }
+
+ if len(acl.Peers) != 0 {
+ sort.Slice(acl.Peers, func(i, j int) bool {
+ return strings.Compare(string(acl.Peers[i]), string(acl.Peers[j])) > 0
+ })
+
+ fmt.Println("Blocked Peers:")
+ for _, p := range acl.Peers {
+ fmt.Printf("\t%s\n", p)
+ }
+ }
+
+ if len(acl.IPAddrs) != 0 {
+ sort.Slice(acl.IPAddrs, func(i, j int) bool {
+ return strings.Compare(acl.IPAddrs[i], acl.IPAddrs[j]) < 0
+ })
+
+ fmt.Println("Blocked IPs:")
+ for _, a := range acl.IPAddrs {
+ fmt.Printf("\t%s\n", a)
+ }
+ }
+
+ if len(acl.IPSubnets) != 0 {
+ sort.Slice(acl.IPSubnets, func(i, j int) bool {
+ return strings.Compare(acl.IPSubnets[i], acl.IPSubnets[j]) < 0
+ })
+
+ fmt.Println("Blocked Subnets:")
+ for _, n := range acl.IPSubnets {
+ fmt.Printf("\t%s\n", n)
+ }
+ }
+
+ return nil
+ },
+}
diff --git a/cli/params.go b/cli/params.go
index 05c0a4cda7d..1aa6555c527 100644
--- a/cli/params.go
+++ b/cli/params.go
@@ -9,7 +9,7 @@ import (
"github.com/filecoin-project/lotus/build"
)
-var fetchParamCmd = &cli.Command{
+var FetchParamCmd = &cli.Command{
Name: "fetch-params",
Usage: "Fetch proving parameters",
ArgsUsage: "[sectorSize]",
@@ -23,7 +23,7 @@ var fetchParamCmd = &cli.Command{
}
sectorSize := uint64(sectorSizeInt)
- err = paramfetch.GetParams(ReqContext(cctx), build.ParametersJSON(), sectorSize)
+ err = paramfetch.GetParams(ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), sectorSize)
if err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
diff --git a/cli/pprof.go b/cli/pprof.go
index dccb97f9a9f..0da24591034 100644
--- a/cli/pprof.go
+++ b/cli/pprof.go
@@ -11,7 +11,7 @@ import (
"github.com/filecoin-project/lotus/node/repo"
)
-var pprofCmd = &cli.Command{
+var PprofCmd = &cli.Command{
Name: "pprof",
Hidden: true,
Subcommands: []*cli.Command{
diff --git a/cli/send.go b/cli/send.go
index 14c1b263b01..a5200d3b8e0 100644
--- a/cli/send.go
+++ b/cli/send.go
@@ -1,21 +1,16 @@
package cli
import (
- "bytes"
- "context"
"encoding/hex"
- "encoding/json"
"fmt"
- "reflect"
"github.com/urfave/cli/v2"
- cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/chain/stmgr"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -43,15 +38,15 @@ var sendCmd = &cli.Command{
Usage: "specify gas limit",
Value: 0,
},
- &cli.Int64Flag{
+ &cli.Uint64Flag{
Name: "nonce",
Usage: "specify the nonce to use",
- Value: -1,
+ Value: 0,
},
&cli.Uint64Flag{
Name: "method",
Usage: "specify method to invoke",
- Value: 0,
+ Value: uint64(builtin.MethodSend),
},
&cli.StringFlag{
Name: "params-json",
@@ -61,21 +56,30 @@ var sendCmd = &cli.Command{
Name: "params-hex",
Usage: "specify invocation parameters in hex",
},
+ &cli.BoolFlag{
+ Name: "force",
+ Usage: "Deprecated: use global 'force-send'",
+ },
},
Action: func(cctx *cli.Context) error {
+ if cctx.IsSet("force") {
+ fmt.Println("'force' flag is deprecated, use global flag 'force-send'")
+ }
+
if cctx.Args().Len() != 2 {
return ShowHelp(cctx, fmt.Errorf("'send' expects two arguments, target and amount"))
}
- api, closer, err := GetFullNodeAPI(cctx)
+ srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
ctx := ReqContext(cctx)
+ var params SendParams
- toAddr, err := address.NewFromString(cctx.Args().Get(0))
+ params.To, err = address.NewFromString(cctx.Args().Get(0))
if err != nil {
return ShowHelp(cctx, fmt.Errorf("failed to parse target address: %w", err))
}
@@ -84,109 +88,74 @@ var sendCmd = &cli.Command{
if err != nil {
return ShowHelp(cctx, fmt.Errorf("failed to parse amount: %w", err))
}
+ params.Val = abi.TokenAmount(val)
- var fromAddr address.Address
- if from := cctx.String("from"); from == "" {
- defaddr, err := api.WalletDefaultAddress(ctx)
+ if from := cctx.String("from"); from != "" {
+ addr, err := address.NewFromString(from)
if err != nil {
return err
}
- fromAddr = defaddr
- } else {
- addr, err := address.NewFromString(from)
+ params.From = addr
+ }
+
+ if cctx.IsSet("gas-premium") {
+ gp, err := types.BigFromString(cctx.String("gas-premium"))
if err != nil {
return err
}
-
- fromAddr = addr
+ params.GasPremium = &gp
}
- gp, err := types.BigFromString(cctx.String("gas-premium"))
- if err != nil {
- return err
+ if cctx.IsSet("gas-feecap") {
+ gfc, err := types.BigFromString(cctx.String("gas-feecap"))
+ if err != nil {
+ return err
+ }
+ params.GasFeeCap = &gfc
}
- gfc, err := types.BigFromString(cctx.String("gas-feecap"))
- if err != nil {
- return err
+
+ if cctx.IsSet("gas-limit") {
+ limit := cctx.Int64("gas-limit")
+ params.GasLimit = &limit
}
- method := abi.MethodNum(cctx.Uint64("method"))
+ params.Method = abi.MethodNum(cctx.Uint64("method"))
- var params []byte
if cctx.IsSet("params-json") {
- decparams, err := decodeTypedParams(ctx, api, toAddr, method, cctx.String("params-json"))
+ decparams, err := srv.DecodeTypedParamsFromJSON(ctx, params.To, params.Method, cctx.String("params-json"))
if err != nil {
return fmt.Errorf("failed to decode json params: %w", err)
}
- params = decparams
+ params.Params = decparams
}
if cctx.IsSet("params-hex") {
- if params != nil {
+ if params.Params != nil {
return fmt.Errorf("can only specify one of 'params-json' and 'params-hex'")
}
decparams, err := hex.DecodeString(cctx.String("params-hex"))
if err != nil {
return fmt.Errorf("failed to decode hex params: %w", err)
}
- params = decparams
+ params.Params = decparams
}
- msg := &types.Message{
- From: fromAddr,
- To: toAddr,
- Value: types.BigInt(val),
- GasPremium: gp,
- GasFeeCap: gfc,
- GasLimit: cctx.Int64("gas-limit"),
- Method: method,
- Params: params,
+ if cctx.IsSet("nonce") {
+ n := cctx.Uint64("nonce")
+ params.Nonce = &n
}
- if cctx.Int64("nonce") > 0 {
- msg.Nonce = uint64(cctx.Int64("nonce"))
- sm, err := api.WalletSignMessage(ctx, fromAddr, msg)
- if err != nil {
- return err
- }
+ proto, err := srv.MessageForSend(ctx, params)
+ if err != nil {
+ return xerrors.Errorf("creating message prototype: %w", err)
+ }
- _, err = api.MpoolPush(ctx, sm)
- if err != nil {
- return err
- }
- fmt.Println(sm.Cid())
- } else {
- sm, err := api.MpoolPushMessage(ctx, msg, nil)
- if err != nil {
- return err
- }
- fmt.Println(sm.Cid())
+ sm, err := InteractiveSend(ctx, cctx, srv, proto)
+ if err != nil {
+ return err
}
+ fmt.Fprintf(cctx.App.Writer, "%s\n", sm.Cid())
return nil
},
}
-
-func decodeTypedParams(ctx context.Context, fapi api.FullNode, to address.Address, method abi.MethodNum, paramstr string) ([]byte, error) {
- act, err := fapi.StateGetActor(ctx, to, types.EmptyTSK)
- if err != nil {
- return nil, err
- }
-
- methodMeta, found := stmgr.MethodsMap[act.Code][method]
- if !found {
- return nil, fmt.Errorf("method %d not found on actor %s", method, act.Code)
- }
-
- p := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORMarshaler)
-
- if err := json.Unmarshal([]byte(paramstr), p); err != nil {
- return nil, fmt.Errorf("unmarshaling input into params type: %w", err)
- }
-
- buf := new(bytes.Buffer)
- if err := p.MarshalCBOR(buf); err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
-}
diff --git a/cli/send_test.go b/cli/send_test.go
new file mode 100644
index 00000000000..52eafda67a7
--- /dev/null
+++ b/cli/send_test.go
@@ -0,0 +1,67 @@
+package cli
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/api"
+ types "github.com/filecoin-project/lotus/chain/types"
+ gomock "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ ucli "github.com/urfave/cli/v2"
+)
+
+func mustAddr(a address.Address, err error) address.Address {
+ if err != nil {
+ panic(err)
+ }
+ return a
+}
+
+func newMockApp(t *testing.T, cmd *ucli.Command) (*ucli.App, *MockServicesAPI, *bytes.Buffer, func()) {
+ app := ucli.NewApp()
+ app.Commands = ucli.Commands{cmd}
+ app.Setup()
+
+ mockCtrl := gomock.NewController(t)
+ mockSrvcs := NewMockServicesAPI(mockCtrl)
+ app.Metadata["test-services"] = mockSrvcs
+
+ buf := &bytes.Buffer{}
+ app.Writer = buf
+
+ return app, mockSrvcs, buf, mockCtrl.Finish
+}
+
+func TestSendCLI(t *testing.T) {
+ oneFil := abi.TokenAmount(types.MustParseFIL("1"))
+
+ t.Run("simple", func(t *testing.T) {
+ app, mockSrvcs, buf, done := newMockApp(t, sendCmd)
+ defer done()
+
+ arbtProto := &api.MessagePrototype{
+ Message: types.Message{
+ From: mustAddr(address.NewIDAddress(1)),
+ To: mustAddr(address.NewIDAddress(1)),
+ Value: oneFil,
+ },
+ }
+ sigMsg := fakeSign(&arbtProto.Message)
+
+ gomock.InOrder(
+ mockSrvcs.EXPECT().MessageForSend(gomock.Any(), SendParams{
+ To: mustAddr(address.NewIDAddress(1)),
+ Val: oneFil,
+ }).Return(arbtProto, nil),
+ mockSrvcs.EXPECT().PublishMessage(gomock.Any(), arbtProto, false).
+ Return(sigMsg, nil, nil),
+ mockSrvcs.EXPECT().Close(),
+ )
+ err := app.Run([]string{"lotus", "send", "t01", "1"})
+ assert.NoError(t, err)
+ assert.EqualValues(t, sigMsg.Cid().String()+"\n", buf.String())
+ })
+}
diff --git a/cli/sending_ui.go b/cli/sending_ui.go
new file mode 100644
index 00000000000..a70abefb906
--- /dev/null
+++ b/cli/sending_ui.go
@@ -0,0 +1,264 @@
+package cli
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/Kubuxu/imtui"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/build"
+ types "github.com/filecoin-project/lotus/chain/types"
+ "github.com/gdamore/tcell/v2"
+ cid "github.com/ipfs/go-cid"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+)
+
+func InteractiveSend(ctx context.Context, cctx *cli.Context, srv ServicesAPI,
+ proto *api.MessagePrototype) (*types.SignedMessage, error) {
+
+ msg, checks, err := srv.PublishMessage(ctx, proto, cctx.Bool("force") || cctx.Bool("force-send"))
+ printer := cctx.App.Writer
+ if xerrors.Is(err, ErrCheckFailed) {
+ if !cctx.Bool("interactive") {
+ fmt.Fprintf(printer, "Following checks have failed:\n")
+ printChecks(printer, checks, proto.Message.Cid())
+ } else {
+ proto, err = resolveChecks(ctx, srv, cctx.App.Writer, proto, checks)
+ if err != nil {
+ return nil, xerrors.Errorf("from UI: %w", err)
+ }
+
+ msg, _, err = srv.PublishMessage(ctx, proto, true)
+ }
+ }
+ if err != nil {
+ return nil, xerrors.Errorf("publishing message: %w", err)
+ }
+
+ return msg, nil
+}
+
+var interactiveSolves = map[api.CheckStatusCode]bool{
+ api.CheckStatusMessageMinBaseFee: true,
+ api.CheckStatusMessageBaseFee: true,
+ api.CheckStatusMessageBaseFeeLowerBound: true,
+ api.CheckStatusMessageBaseFeeUpperBound: true,
+}
+
+func baseFeeFromHints(hint map[string]interface{}) big.Int {
+ bHint, ok := hint["baseFee"]
+ if !ok {
+ return big.Zero()
+ }
+ bHintS, ok := bHint.(string)
+ if !ok {
+ return big.Zero()
+ }
+
+ var err error
+ baseFee, err := big.FromString(bHintS)
+ if err != nil {
+ return big.Zero()
+ }
+ return baseFee
+}
+
+func resolveChecks(ctx context.Context, s ServicesAPI, printer io.Writer,
+ proto *api.MessagePrototype, checkGroups [][]api.MessageCheckStatus,
+) (*api.MessagePrototype, error) {
+
+ fmt.Fprintf(printer, "Following checks have failed:\n")
+ printChecks(printer, checkGroups, proto.Message.Cid())
+
+ if feeCapBad, baseFee := isFeeCapProblem(checkGroups, proto.Message.Cid()); feeCapBad {
+ fmt.Fprintf(printer, "Fee of the message can be adjusted\n")
+ if askUser(printer, "Do you wish to do that? [Yes/no]: ", true) {
+ var err error
+ proto, err = runFeeCapAdjustmentUI(proto, baseFee)
+ if err != nil {
+ return nil, err
+ }
+ }
+ checks, err := s.RunChecksForPrototype(ctx, proto)
+ if err != nil {
+ return nil, err
+ }
+ fmt.Fprintf(printer, "Following checks still failed:\n")
+ printChecks(printer, checks, proto.Message.Cid())
+ }
+
+ if !askUser(printer, "Do you wish to send this message? [yes/No]: ", false) {
+ return nil, ErrAbortedByUser
+ }
+ return proto, nil
+}
+
+var ErrAbortedByUser = errors.New("aborted by user")
+
+func printChecks(printer io.Writer, checkGroups [][]api.MessageCheckStatus, protoCid cid.Cid) {
+ for _, checks := range checkGroups {
+ for _, c := range checks {
+ if c.OK {
+ continue
+ }
+ aboutProto := c.Cid.Equals(protoCid)
+ msgName := "current"
+ if !aboutProto {
+ msgName = c.Cid.String()
+ }
+ fmt.Fprintf(printer, "%s message failed a check %s: %s\n", msgName, c.Code, c.Err)
+ }
+ }
+}
+
+func askUser(printer io.Writer, q string, def bool) bool {
+ var resp string
+ fmt.Fprint(printer, q)
+ fmt.Scanln(&resp)
+ resp = strings.ToLower(resp)
+ if len(resp) == 0 {
+ return def
+ }
+ return resp[0] == 'y'
+}
+
+func isFeeCapProblem(checkGroups [][]api.MessageCheckStatus, protoCid cid.Cid) (bool, big.Int) { // reports whether failing checks are fee related, plus a base fee hint
+	baseFee := big.Zero()
+	yes := false
+	for _, checks := range checkGroups {
+		for _, c := range checks {
+			if c.OK {
+				continue
+			}
+			aboutProto := c.Cid.Equals(protoCid)
+			if aboutProto && interactiveSolves[c.Code] {
+				yes = true
+				if baseFee.IsZero() {
+					baseFee = baseFeeFromHints(c.Hint)
+				}
+			}
+		}
+	}
+	if baseFee.IsZero() {
+		// Only the case when the failing check is MessageMinBaseFee; fall back to the network minimum.
+		baseFee = big.NewInt(build.MinimumBaseFee)
+	}
+
+	return yes, baseFee
+}
+
+func runFeeCapAdjustmentUI(proto *api.MessagePrototype, baseFee abi.TokenAmount) (*api.MessagePrototype, error) {
+ t, err := imtui.NewTui()
+ if err != nil {
+ return nil, err
+ }
+
+ maxFee := big.Mul(proto.Message.GasFeeCap, big.NewInt(proto.Message.GasLimit))
+ send := false
+ t.PushScene(feeUI(baseFee, proto.Message.GasLimit, &maxFee, &send))
+
+ err = t.Run()
+ if err != nil {
+ return nil, err
+ }
+ if !send {
+ return nil, fmt.Errorf("aborted by user")
+ }
+
+ proto.Message.GasFeeCap = big.Div(maxFee, big.NewInt(proto.Message.GasLimit))
+
+ return proto, nil
+}
+
+func feeUI(baseFee abi.TokenAmount, gasLimit int64, maxFee *abi.TokenAmount, send *bool) func(*imtui.Tui) error { // interactive fee-adjustment TUI scene
+	originalMaxFee := *maxFee // remember the user-configured max fee for display
+	required := big.Mul(baseFee, big.NewInt(gasLimit))
+	safe := big.Mul(required, big.NewInt(10))
+
+	price := types.FIL(*maxFee).Unitless() // editable fee string shown in the UI
+
+	return func(t *imtui.Tui) error {
+		if t.CurrentKey != nil {
+			if t.CurrentKey.Key() == tcell.KeyRune {
+				pF, err := types.ParseFIL(price)
+				switch t.CurrentKey.Rune() {
+				case 's', 'S':
+					price = types.FIL(safe).Unitless()
+				case '+':
+					if err == nil {
+						p := big.Mul(big.Int(pF), types.NewInt(11))
+						p = big.Div(p, types.NewInt(10))
+						price = types.FIL(p).Unitless() // bump by 10%
+					}
+				case '-':
+					if err == nil {
+						p := big.Mul(big.Int(pF), types.NewInt(10))
+						p = big.Div(p, types.NewInt(11))
+						price = types.FIL(p).Unitless() // inverse of '+'
+					}
+				default:
+				}
+			}
+
+			if t.CurrentKey.Key() == tcell.KeyEnter {
+				*send = true
+				t.PopScene()
+				return nil
+			}
+		}
+
+		defS := tcell.StyleDefault
+
+		row := 0
+		t.Label(0, row, "Fee of the message is too low.", defS)
+		row++
+
+		t.Label(0, row, fmt.Sprintf("Your configured maximum fee is: %s FIL",
+			types.FIL(originalMaxFee).Unitless()), defS)
+		row++
+		t.Label(0, row, fmt.Sprintf("Required maximum fee for the message: %s FIL",
+			types.FIL(required).Unitless()), defS)
+		row++
+		w := t.Label(0, row, fmt.Sprintf("Safe maximum fee for the message: %s FIL",
+			types.FIL(safe).Unitless()), defS)
+		t.Label(w, row, " Press S to use it", defS)
+		row++
+
+		w = t.Label(0, row, "Current Maximum Fee: ", defS)
+
+		w += t.EditFieldFiltered(w, row, 14, &price, imtui.FilterDecimal, defS.Foreground(tcell.ColorWhite).Background(tcell.ColorBlack))
+
+		w += t.Label(w, row, " FIL", defS)
+
+		pF, err := types.ParseFIL(price)
+		*maxFee = abi.TokenAmount(pF)
+		if err != nil {
+			w += t.Label(w, row, " invalid price", defS.Foreground(tcell.ColorMaroon).Bold(true))
+		} else if maxFee.GreaterThanEqual(safe) {
+			w += t.Label(w, row, " SAFE", defS.Foreground(tcell.ColorDarkGreen).Bold(true))
+		} else if maxFee.GreaterThanEqual(required) {
+			w += t.Label(w, row, " low", defS.Foreground(tcell.ColorYellow).Bold(true))
+			over := big.Div(big.Mul(*maxFee, big.NewInt(100)), required)
+			w += t.Label(w, row,
+				fmt.Sprintf(" %.1fx over the minimum", float64(over.Int64())/100.0), defS)
+		} else {
+			w += t.Label(w, row, " too low", defS.Foreground(tcell.ColorRed).Bold(true))
+		}
+		row += 2
+
+		t.Label(0, row, fmt.Sprintf("Current Base Fee is: %s", types.FIL(baseFee).Nano()), defS)
+		row++
+		t.Label(0, row, fmt.Sprintf("Resulting FeeCap is: %s",
+			types.FIL(big.Div(*maxFee, big.NewInt(gasLimit))).Nano()), defS)
+		row++
+		t.Label(0, row, "You can use '+' and '-' to adjust the fee.", defS)
+
+		return nil
+	}
+}
diff --git a/cli/services.go b/cli/services.go
new file mode 100644
index 00000000000..0923680aa08
--- /dev/null
+++ b/cli/services.go
@@ -0,0 +1,276 @@
+package cli
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "reflect"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-jsonrpc"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/stmgr"
+ types "github.com/filecoin-project/lotus/chain/types"
+ cid "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+)
+
+//go:generate go run github.com/golang/mock/mockgen -destination=servicesmock_test.go -package=cli -self_package github.com/filecoin-project/lotus/cli . ServicesAPI
+
+type ServicesAPI interface {
+	FullNodeAPI() api.FullNode
+
+	GetBaseFee(ctx context.Context) (abi.TokenAmount, error)
+
+	// MessageForSend creates a prototype of a message based on the given SendParams.
+	MessageForSend(ctx context.Context, params SendParams) (*api.MessagePrototype, error)
+
+	// DecodeTypedParamsFromJSON takes in information needed to identify a method and converts
+	// JSON parameters to bytes of their CBOR encoding.
+	DecodeTypedParamsFromJSON(ctx context.Context, to address.Address, method abi.MethodNum, paramstr string) ([]byte, error)
+
+	RunChecksForPrototype(ctx context.Context, prototype *api.MessagePrototype) ([][]api.MessageCheckStatus, error)
+
+	// PublishMessage takes in a message prototype and publishes it.
+	// Before publishing the message, it runs checks on the node, message and mpool to verify
+	// that the message is valid and won't get stuck.
+	// If `force` is true, it skips the checks.
+	PublishMessage(ctx context.Context, prototype *api.MessagePrototype, force bool) (*types.SignedMessage, [][]api.MessageCheckStatus, error)
+
+	LocalAddresses(ctx context.Context) (address.Address, []address.Address, error)
+
+	MpoolPendingFilter(ctx context.Context, filter func(*types.SignedMessage) bool, tsk types.TipSetKey) ([]*types.SignedMessage, error)
+	MpoolCheckPendingMessages(ctx context.Context, a address.Address) ([][]api.MessageCheckStatus, error)
+
+	// Close ends the services session and disconnects from RPC; using Services
+	// after Close is called will most likely result in an error.
+	// Close must not be called concurrently with other methods.
+	Close() error
+}
+
+type ServicesImpl struct {
+ api api.FullNode
+ closer jsonrpc.ClientCloser
+}
+
+func (s *ServicesImpl) FullNodeAPI() api.FullNode {
+ return s.api
+}
+
+func (s *ServicesImpl) Close() error {
+ if s.closer == nil {
+ return xerrors.Errorf("Services already closed")
+ }
+ s.closer()
+ s.closer = nil
+ return nil
+}
+
+func (s *ServicesImpl) GetBaseFee(ctx context.Context) (abi.TokenAmount, error) {
+	// Currently unused by the CLI commands, but useful to keep available on the interface.
+
+	ts, err := s.api.ChainHead(ctx)
+	if err != nil {
+		return big.Zero(), xerrors.Errorf("getting head: %w", err)
+	}
+	return ts.MinTicketBlock().ParentBaseFee, nil
+}
+
+func (s *ServicesImpl) DecodeTypedParamsFromJSON(ctx context.Context, to address.Address, method abi.MethodNum, paramstr string) ([]byte, error) {
+ act, err := s.api.StateGetActor(ctx, to, types.EmptyTSK)
+ if err != nil {
+ return nil, err
+ }
+
+ methodMeta, found := stmgr.MethodsMap[act.Code][method]
+ if !found {
+ return nil, fmt.Errorf("method %d not found on actor %s", method, act.Code)
+ }
+
+ p := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORMarshaler)
+
+ if err := json.Unmarshal([]byte(paramstr), p); err != nil {
+ return nil, fmt.Errorf("unmarshaling input into params type: %w", err)
+ }
+
+ buf := new(bytes.Buffer)
+ if err := p.MarshalCBOR(buf); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+type CheckInfo struct {
+ MessageTie cid.Cid
+ CurrentMessageTie bool
+
+ Check api.MessageCheckStatus
+}
+
+var ErrCheckFailed = fmt.Errorf("check has failed")
+
+func (s *ServicesImpl) RunChecksForPrototype(ctx context.Context, prototype *api.MessagePrototype) ([][]api.MessageCheckStatus, error) {
+ var outChecks [][]api.MessageCheckStatus
+ checks, err := s.api.MpoolCheckMessages(ctx, []*api.MessagePrototype{prototype})
+ if err != nil {
+ return nil, xerrors.Errorf("message check: %w", err)
+ }
+ outChecks = append(outChecks, checks...)
+
+ checks, err = s.api.MpoolCheckPendingMessages(ctx, prototype.Message.From)
+ if err != nil {
+ return nil, xerrors.Errorf("pending mpool check: %w", err)
+ }
+ outChecks = append(outChecks, checks...)
+
+ return outChecks, nil
+}
+
+// PublishMessage modifies prototype in place to include gas estimation.
+// It errors with ErrCheckFailed if any of the checks fail.
+// The first group of returned checks relates to the message prototype itself.
+func (s *ServicesImpl) PublishMessage(ctx context.Context,
+	prototype *api.MessagePrototype, force bool) (*types.SignedMessage, [][]api.MessageCheckStatus, error) {
+
+	gasedMsg, err := s.api.GasEstimateMessageGas(ctx, &prototype.Message, nil, types.EmptyTSK)
+	if err != nil {
+		return nil, nil, xerrors.Errorf("estimating gas: %w", err)
+	}
+	prototype.Message = *gasedMsg
+
+	if !force {
+		checks, err := s.RunChecksForPrototype(ctx, prototype)
+		if err != nil {
+			return nil, nil, xerrors.Errorf("running checks: %w", err)
+		}
+		for _, chks := range checks {
+			for _, c := range chks {
+				if !c.OK {
+					return nil, checks, ErrCheckFailed
+				}
+			}
+		}
+	}
+
+	if prototype.ValidNonce { // caller supplied an explicit nonce: sign and push as-is
+		sm, err := s.api.WalletSignMessage(ctx, prototype.Message.From, &prototype.Message)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		_, err = s.api.MpoolPush(ctx, sm)
+		if err != nil {
+			return nil, nil, err
+		}
+		return sm, nil, nil
+	}
+
+	sm, err := s.api.MpoolPushMessage(ctx, &prototype.Message, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return sm, nil, nil
+}
+
+type SendParams struct {
+ To address.Address
+ From address.Address
+ Val abi.TokenAmount
+
+ GasPremium *abi.TokenAmount
+ GasFeeCap *abi.TokenAmount
+ GasLimit *int64
+
+ Nonce *uint64
+ Method abi.MethodNum
+ Params []byte
+}
+
+func (s *ServicesImpl) MessageForSend(ctx context.Context, params SendParams) (*api.MessagePrototype, error) {
+ if params.From == address.Undef {
+ defaddr, err := s.api.WalletDefaultAddress(ctx)
+ if err != nil {
+ return nil, err
+ }
+ params.From = defaddr
+ }
+
+ msg := types.Message{
+ From: params.From,
+ To: params.To,
+ Value: params.Val,
+
+ Method: params.Method,
+ Params: params.Params,
+ }
+
+ if params.GasPremium != nil {
+ msg.GasPremium = *params.GasPremium
+ } else {
+ msg.GasPremium = types.NewInt(0)
+ }
+ if params.GasFeeCap != nil {
+ msg.GasFeeCap = *params.GasFeeCap
+ } else {
+ msg.GasFeeCap = types.NewInt(0)
+ }
+ if params.GasLimit != nil {
+ msg.GasLimit = *params.GasLimit
+ } else {
+ msg.GasLimit = 0
+ }
+ validNonce := false
+ if params.Nonce != nil {
+ msg.Nonce = *params.Nonce
+ validNonce = true
+ }
+
+ prototype := &api.MessagePrototype{
+ Message: msg,
+ ValidNonce: validNonce,
+ }
+ return prototype, nil
+}
+
+func (s *ServicesImpl) MpoolPendingFilter(ctx context.Context, filter func(*types.SignedMessage) bool,
+ tsk types.TipSetKey) ([]*types.SignedMessage, error) {
+ msgs, err := s.api.MpoolPending(ctx, types.EmptyTSK)
+ if err != nil {
+ return nil, xerrors.Errorf("getting pending messages: %w", err)
+ }
+ out := []*types.SignedMessage{}
+ for _, sm := range msgs {
+ if filter(sm) {
+ out = append(out, sm)
+ }
+ }
+
+ return out, nil
+}
+
+func (s *ServicesImpl) LocalAddresses(ctx context.Context) (address.Address, []address.Address, error) {
+ def, err := s.api.WalletDefaultAddress(ctx)
+ if err != nil {
+ return address.Undef, nil, xerrors.Errorf("getting default addr: %w", err)
+ }
+
+ all, err := s.api.WalletList(ctx)
+ if err != nil {
+ return address.Undef, nil, xerrors.Errorf("getting list of addrs: %w", err)
+ }
+
+ return def, all, nil
+}
+
+func (s *ServicesImpl) MpoolCheckPendingMessages(ctx context.Context, a address.Address) ([][]api.MessageCheckStatus, error) {
+ checks, err := s.api.MpoolCheckPendingMessages(ctx, a)
+ if err != nil {
+ return nil, xerrors.Errorf("pending mpool check: %w", err)
+ }
+ return checks, nil
+}
diff --git a/cli/services_send_test.go b/cli/services_send_test.go
new file mode 100644
index 00000000000..b7ed78f80db
--- /dev/null
+++ b/cli/services_send_test.go
@@ -0,0 +1,215 @@
+package cli
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/lotus/api"
+ mocks "github.com/filecoin-project/lotus/api/mocks"
+ types "github.com/filecoin-project/lotus/chain/types"
+ gomock "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+)
+
+type markerKeyType struct{}
+
+var markerKey = markerKeyType{}
+
+type contextMatcher struct {
+ marker *int
+}
+
+// Matches returns whether x is a match.
+func (cm contextMatcher) Matches(x interface{}) bool {
+ ctx, ok := x.(context.Context)
+ if !ok {
+ return false
+ }
+ maybeMarker, ok := ctx.Value(markerKey).(*int)
+ if !ok {
+ return false
+ }
+
+ return cm.marker == maybeMarker
+}
+
+func (cm contextMatcher) String() string {
+ return fmt.Sprintf("Context with Value(%v/%T, %p)", markerKey, markerKey, cm.marker)
+}
+
+func ContextWithMarker(ctx context.Context) (context.Context, gomock.Matcher) {
+ marker := new(int)
+ outCtx := context.WithValue(ctx, markerKey, marker)
+ return outCtx, contextMatcher{marker: marker}
+
+}
+
+func setupMockSrvcs(t *testing.T) (*ServicesImpl, *mocks.MockFullNode) {
+	mockCtrl := gomock.NewController(t)
+
+	mockApi := mocks.NewMockFullNode(mockCtrl)
+
+	srvcs := &ServicesImpl{
+		api: mockApi,
+		closer: mockCtrl.Finish, // Close() will also verify all mock expectations
+	}
+	return srvcs, mockApi
+}
+
+// fakeSign wraps msg in a SignedMessage carrying a dummy secp256k1 signature; the dead-code linter is why makeMessageSigner below stays commented out.
+func fakeSign(msg *types.Message) *types.SignedMessage {
+	return &types.SignedMessage{
+		Message:   *msg,
+		Signature: crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: make([]byte, 32)},
+	}
+}
+
+//func makeMessageSigner() (*cid.Cid, interface{}) {
+//smCid := cid.Undef
+//return &smCid,
+//func(_ context.Context, msg *types.Message, _ *api.MessageSendSpec) (*types.SignedMessage, error) {
+//sm := fakeSign(msg)
+//smCid = sm.Cid()
+//return sm, nil
+//}
+//}
+
+type MessageMatcher SendParams
+
+var _ gomock.Matcher = MessageMatcher{}
+
+// Matches reports whether x is a *api.MessagePrototype consistent with every field set in mm.
+func (mm MessageMatcher) Matches(x interface{}) bool {
+	proto, ok := x.(*api.MessagePrototype)
+	if !ok {
+		return false
+	}
+
+	m := &proto.Message
+
+	if mm.From != address.Undef && mm.From != m.From {
+		return false
+	}
+	if mm.To != address.Undef && mm.To != m.To {
+		return false
+	}
+
+	if types.BigCmp(mm.Val, m.Value) != 0 {
+		return false
+	}
+
+	if mm.Nonce != nil && *mm.Nonce != m.Nonce {
+		return false
+	}
+
+	if mm.GasPremium != nil && big.Cmp(*mm.GasPremium, m.GasPremium) != 0 {
+		return false
+	}
+	if mm.GasPremium == nil && m.GasPremium.Sign() != 0 {
+		return false
+	}
+
+	if mm.GasFeeCap != nil && big.Cmp(*mm.GasFeeCap, m.GasFeeCap) != 0 {
+		return false
+	}
+	if mm.GasFeeCap == nil && m.GasFeeCap.Sign() != 0 {
+		return false
+	}
+
+	if mm.GasLimit != nil && *mm.GasLimit != m.GasLimit {
+		return false
+	}
+
+	if mm.GasLimit == nil && m.GasLimit != 0 {
+		return false
+	}
+	// TODO: also match the remaining SendParams fields (Method, Params).
+	return true
+}
+
+// String describes what the matcher matches.
+func (mm MessageMatcher) String() string {
+ return fmt.Sprintf("%#v", SendParams(mm))
+}
+
+func TestSendService(t *testing.T) {
+ addrGen := address.NewForTestGetter()
+ a1 := addrGen()
+ a2 := addrGen()
+
+ const balance = 10000
+
+ params := SendParams{
+ From: a1,
+ To: a2,
+ Val: types.NewInt(balance - 100),
+ }
+
+ ctx, ctxM := ContextWithMarker(context.Background())
+
+ t.Run("happy", func(t *testing.T) {
+ params := params
+ srvcs, _ := setupMockSrvcs(t)
+ defer srvcs.Close() //nolint:errcheck
+
+ proto, err := srvcs.MessageForSend(ctx, params)
+ assert.NoError(t, err)
+ assert.True(t, MessageMatcher(params).Matches(proto))
+ })
+
+ t.Run("default-from", func(t *testing.T) {
+ params := params
+ params.From = address.Undef
+ mm := MessageMatcher(params)
+ mm.From = a1
+
+ srvcs, mockApi := setupMockSrvcs(t)
+ defer srvcs.Close() //nolint:errcheck
+
+ gomock.InOrder(
+ mockApi.EXPECT().WalletDefaultAddress(ctxM).Return(a1, nil),
+ )
+
+ proto, err := srvcs.MessageForSend(ctx, params)
+ assert.NoError(t, err)
+ assert.True(t, mm.Matches(proto))
+ })
+
+ t.Run("set-nonce", func(t *testing.T) {
+ params := params
+ n := uint64(5)
+ params.Nonce = &n
+ mm := MessageMatcher(params)
+
+ srvcs, _ := setupMockSrvcs(t)
+ defer srvcs.Close() //nolint:errcheck
+
+ proto, err := srvcs.MessageForSend(ctx, params)
+ assert.NoError(t, err)
+ assert.True(t, mm.Matches(proto))
+ })
+
+ t.Run("gas-params", func(t *testing.T) {
+ params := params
+ limit := int64(1)
+ params.GasLimit = &limit
+ gfc := big.NewInt(100)
+ params.GasFeeCap = &gfc
+ gp := big.NewInt(10)
+ params.GasPremium = &gp
+
+ mm := MessageMatcher(params)
+
+ srvcs, _ := setupMockSrvcs(t)
+ defer srvcs.Close() //nolint:errcheck
+
+ proto, err := srvcs.MessageForSend(ctx, params)
+ assert.NoError(t, err)
+ assert.True(t, mm.Matches(proto))
+
+ })
+}
diff --git a/cli/servicesmock_test.go b/cli/servicesmock_test.go
new file mode 100644
index 00000000000..5bae52a5ebc
--- /dev/null
+++ b/cli/servicesmock_test.go
@@ -0,0 +1,190 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/filecoin-project/lotus/cli (interfaces: ServicesAPI)
+
+// Package cli is a generated GoMock package.
+package cli
+
+import (
+ context "context"
+ reflect "reflect"
+
+ go_address "github.com/filecoin-project/go-address"
+ abi "github.com/filecoin-project/go-state-types/abi"
+ big "github.com/filecoin-project/go-state-types/big"
+ api "github.com/filecoin-project/lotus/api"
+ types "github.com/filecoin-project/lotus/chain/types"
+ gomock "github.com/golang/mock/gomock"
+)
+
+// MockServicesAPI is a mock of ServicesAPI interface.
+type MockServicesAPI struct {
+ ctrl *gomock.Controller
+ recorder *MockServicesAPIMockRecorder
+}
+
+// MockServicesAPIMockRecorder is the mock recorder for MockServicesAPI.
+type MockServicesAPIMockRecorder struct {
+ mock *MockServicesAPI
+}
+
+// NewMockServicesAPI creates a new mock instance.
+func NewMockServicesAPI(ctrl *gomock.Controller) *MockServicesAPI {
+ mock := &MockServicesAPI{ctrl: ctrl}
+ mock.recorder = &MockServicesAPIMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockServicesAPI) EXPECT() *MockServicesAPIMockRecorder {
+ return m.recorder
+}
+
+// Close mocks base method.
+func (m *MockServicesAPI) Close() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Close")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Close indicates an expected call of Close.
+func (mr *MockServicesAPIMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockServicesAPI)(nil).Close))
+}
+
+// DecodeTypedParamsFromJSON mocks base method.
+func (m *MockServicesAPI) DecodeTypedParamsFromJSON(arg0 context.Context, arg1 go_address.Address, arg2 abi.MethodNum, arg3 string) ([]byte, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DecodeTypedParamsFromJSON", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].([]byte)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DecodeTypedParamsFromJSON indicates an expected call of DecodeTypedParamsFromJSON.
+func (mr *MockServicesAPIMockRecorder) DecodeTypedParamsFromJSON(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DecodeTypedParamsFromJSON", reflect.TypeOf((*MockServicesAPI)(nil).DecodeTypedParamsFromJSON), arg0, arg1, arg2, arg3)
+}
+
+// FullNodeAPI mocks base method.
+func (m *MockServicesAPI) FullNodeAPI() api.FullNode {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "FullNodeAPI")
+ ret0, _ := ret[0].(api.FullNode)
+ return ret0
+}
+
+// FullNodeAPI indicates an expected call of FullNodeAPI.
+func (mr *MockServicesAPIMockRecorder) FullNodeAPI() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FullNodeAPI", reflect.TypeOf((*MockServicesAPI)(nil).FullNodeAPI))
+}
+
+// GetBaseFee mocks base method.
+func (m *MockServicesAPI) GetBaseFee(arg0 context.Context) (big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetBaseFee", arg0)
+ ret0, _ := ret[0].(big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetBaseFee indicates an expected call of GetBaseFee.
+func (mr *MockServicesAPIMockRecorder) GetBaseFee(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBaseFee", reflect.TypeOf((*MockServicesAPI)(nil).GetBaseFee), arg0)
+}
+
+// LocalAddresses mocks base method.
+func (m *MockServicesAPI) LocalAddresses(arg0 context.Context) (go_address.Address, []go_address.Address, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "LocalAddresses", arg0)
+ ret0, _ := ret[0].(go_address.Address)
+ ret1, _ := ret[1].([]go_address.Address)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
+}
+
+// LocalAddresses indicates an expected call of LocalAddresses.
+func (mr *MockServicesAPIMockRecorder) LocalAddresses(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LocalAddresses", reflect.TypeOf((*MockServicesAPI)(nil).LocalAddresses), arg0)
+}
+
+// MessageForSend mocks base method.
+func (m *MockServicesAPI) MessageForSend(arg0 context.Context, arg1 SendParams) (*api.MessagePrototype, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MessageForSend", arg0, arg1)
+ ret0, _ := ret[0].(*api.MessagePrototype)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MessageForSend indicates an expected call of MessageForSend.
+func (mr *MockServicesAPIMockRecorder) MessageForSend(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessageForSend", reflect.TypeOf((*MockServicesAPI)(nil).MessageForSend), arg0, arg1)
+}
+
+// MpoolCheckPendingMessages mocks base method.
+func (m *MockServicesAPI) MpoolCheckPendingMessages(arg0 context.Context, arg1 go_address.Address) ([][]api.MessageCheckStatus, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolCheckPendingMessages", arg0, arg1)
+ ret0, _ := ret[0].([][]api.MessageCheckStatus)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolCheckPendingMessages indicates an expected call of MpoolCheckPendingMessages.
+func (mr *MockServicesAPIMockRecorder) MpoolCheckPendingMessages(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckPendingMessages", reflect.TypeOf((*MockServicesAPI)(nil).MpoolCheckPendingMessages), arg0, arg1)
+}
+
+// MpoolPendingFilter mocks base method.
+func (m *MockServicesAPI) MpoolPendingFilter(arg0 context.Context, arg1 func(*types.SignedMessage) bool, arg2 types.TipSetKey) ([]*types.SignedMessage, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MpoolPendingFilter", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*types.SignedMessage)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MpoolPendingFilter indicates an expected call of MpoolPendingFilter.
+func (mr *MockServicesAPIMockRecorder) MpoolPendingFilter(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPendingFilter", reflect.TypeOf((*MockServicesAPI)(nil).MpoolPendingFilter), arg0, arg1, arg2)
+}
+
+// PublishMessage mocks base method.
+func (m *MockServicesAPI) PublishMessage(arg0 context.Context, arg1 *api.MessagePrototype, arg2 bool) (*types.SignedMessage, [][]api.MessageCheckStatus, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PublishMessage", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*types.SignedMessage)
+ ret1, _ := ret[1].([][]api.MessageCheckStatus)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
+}
+
+// PublishMessage indicates an expected call of PublishMessage.
+func (mr *MockServicesAPIMockRecorder) PublishMessage(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishMessage", reflect.TypeOf((*MockServicesAPI)(nil).PublishMessage), arg0, arg1, arg2)
+}
+
+// RunChecksForPrototype mocks base method.
+func (m *MockServicesAPI) RunChecksForPrototype(arg0 context.Context, arg1 *api.MessagePrototype) ([][]api.MessageCheckStatus, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RunChecksForPrototype", arg0, arg1)
+ ret0, _ := ret[0].([][]api.MessageCheckStatus)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// RunChecksForPrototype indicates an expected call of RunChecksForPrototype.
+func (mr *MockServicesAPIMockRecorder) RunChecksForPrototype(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunChecksForPrototype", reflect.TypeOf((*MockServicesAPI)(nil).RunChecksForPrototype), arg0, arg1)
+}
diff --git a/cli/state.go b/cli/state.go
index 13aa5c39b8c..d5251fb8595 100644
--- a/cli/state.go
+++ b/cli/state.go
@@ -3,6 +3,8 @@ package cli
import (
"bytes"
"context"
+ "encoding/base64"
+ "encoding/hex"
"encoding/json"
"fmt"
"html/template"
@@ -15,13 +17,16 @@ import (
"strings"
"time"
- "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/go-state-types/big"
- "github.com/multiformats/go-multiaddr"
+ "github.com/filecoin-project/lotus/api/v0api"
+
+ "github.com/fatih/color"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
- "github.com/libp2p/go-libp2p-core/peer"
+ "github.com/multiformats/go-multiaddr"
"github.com/multiformats/go-multihash"
"github.com/urfave/cli/v2"
cbg "github.com/whyrusleeping/cbor-gen"
@@ -29,19 +34,18 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
lapi "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/api/apibstore"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
)
-var stateCmd = &cli.Command{
+var StateCmd = &cli.Command{
Name: "state",
Usage: "Interact with and query filecoin chain state",
Flags: []cli.Flag{
@@ -51,31 +55,76 @@ var stateCmd = &cli.Command{
},
},
Subcommands: []*cli.Command{
- statePowerCmd,
- stateSectorsCmd,
- stateActiveSectorsCmd,
- stateListActorsCmd,
- stateListMinersCmd,
- stateCircSupplyCmd,
- stateSectorCmd,
- stateGetActorCmd,
- stateLookupIDCmd,
- stateReplayCmd,
- stateSectorSizeCmd,
- stateReadStateCmd,
- stateListMessagesCmd,
- stateComputeStateCmd,
- stateCallCmd,
- stateGetDealSetCmd,
- stateWaitMsgCmd,
- stateSearchMsgCmd,
- stateMinerInfo,
- stateMarketCmd,
- stateExecTraceCmd,
+ StatePowerCmd,
+ StateSectorsCmd,
+ StateActiveSectorsCmd,
+ StateListActorsCmd,
+ StateListMinersCmd,
+ StateCircSupplyCmd,
+ StateSectorCmd,
+ StateGetActorCmd,
+ StateLookupIDCmd,
+ StateReplayCmd,
+ StateSectorSizeCmd,
+ StateReadStateCmd,
+ StateListMessagesCmd,
+ StateComputeStateCmd,
+ StateCallCmd,
+ StateGetDealSetCmd,
+ StateWaitMsgCmd,
+ StateSearchMsgCmd,
+ StateMinerInfo,
+ StateMarketCmd,
+ StateExecTraceCmd,
+ StateNtwkVersionCmd,
+ StateMinerProvingDeadlineCmd,
+ },
+}
+
+var StateMinerProvingDeadlineCmd = &cli.Command{
+ Name: "miner-proving-deadline",
+ Usage: "Retrieve information about a given miner's proving deadline",
+ ArgsUsage: "[minerAddress]",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := ReqContext(cctx)
+
+ if !cctx.Args().Present() {
+ return fmt.Errorf("must specify miner to get information for")
+ }
+
+ addr, err := address.NewFromString(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ ts, err := LoadTipSet(ctx, cctx, api)
+ if err != nil {
+ return err
+ }
+
+ cd, err := api.StateMinerProvingDeadline(ctx, addr, ts.Key())
+ if err != nil {
+ return xerrors.Errorf("getting miner info: %w", err)
+ }
+
+ fmt.Printf("Period Start:\t%s\n", cd.PeriodStart)
+ fmt.Printf("Index:\t\t%d\n", cd.Index)
+ fmt.Printf("Open:\t\t%s\n", cd.Open)
+ fmt.Printf("Close:\t\t%s\n", cd.Close)
+ fmt.Printf("Challenge:\t%s\n", cd.Challenge)
+ fmt.Printf("FaultCutoff:\t%s\n", cd.FaultCutoff)
+
+ return nil
},
}
-var stateMinerInfo = &cli.Command{
+var StateMinerInfo = &cli.Command{
Name: "miner-info",
Usage: "Retrieve miner information",
ArgsUsage: "[minerAddress]",
@@ -107,14 +156,19 @@ var stateMinerInfo = &cli.Command{
return err
}
+ availableBalance, err := api.StateMinerAvailableBalance(ctx, addr, ts.Key())
+ if err != nil {
+ return xerrors.Errorf("getting miner available balance: %w", err)
+ }
+ fmt.Printf("Available Balance: %s\n", types.FIL(availableBalance))
fmt.Printf("Owner:\t%s\n", mi.Owner)
fmt.Printf("Worker:\t%s\n", mi.Worker)
for i, controlAddress := range mi.ControlAddresses {
fmt.Printf("Control %d: \t%s\n", i, controlAddress)
}
+
fmt.Printf("PeerID:\t%s\n", mi.PeerId)
- fmt.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize)
- fmt.Printf("Multiaddrs: \t")
+ fmt.Printf("Multiaddrs:\t")
for _, addr := range mi.Multiaddrs {
a, err := multiaddr.NewMultiaddrBytes(addr)
if err != nil {
@@ -122,6 +176,33 @@ var stateMinerInfo = &cli.Command{
}
fmt.Printf("%s ", a)
}
+ fmt.Println()
+ fmt.Printf("Consensus Fault End:\t%d\n", mi.ConsensusFaultElapsed)
+
+ fmt.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize)
+ pow, err := api.StateMinerPower(ctx, addr, ts.Key())
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("Byte Power: %s / %s (%0.4f%%)\n",
+ color.BlueString(types.SizeStr(pow.MinerPower.RawBytePower)),
+ types.SizeStr(pow.TotalPower.RawBytePower),
+ types.BigDivFloat(
+ types.BigMul(pow.MinerPower.RawBytePower, big.NewInt(100)),
+ pow.TotalPower.RawBytePower,
+ ),
+ )
+
+ fmt.Printf("Actual Power: %s / %s (%0.4f%%)\n",
+ color.GreenString(types.DeciStr(pow.MinerPower.QualityAdjPower)),
+ types.DeciStr(pow.TotalPower.QualityAdjPower),
+ types.BigDivFloat(
+ types.BigMul(pow.MinerPower.QualityAdjPower, big.NewInt(100)),
+ pow.TotalPower.QualityAdjPower,
+ ),
+ )
+
fmt.Println()
cd, err := api.StateMinerProvingDeadline(ctx, addr, ts.Key())
@@ -150,16 +231,19 @@ func ParseTipSetString(ts string) ([]cid.Cid, error) {
return cids, nil
}
-func LoadTipSet(ctx context.Context, cctx *cli.Context, api api.FullNode) (*types.TipSet, error) {
+// LoadTipSet gets the tipset from the context, or the head from the API.
+//
+// It always gets the head from the API so commands use a consistent tipset even if time passes.
+func LoadTipSet(ctx context.Context, cctx *cli.Context, api v0api.FullNode) (*types.TipSet, error) {
tss := cctx.String("tipset")
if tss == "" {
- return nil, nil
+ return api.ChainHead(ctx)
}
return ParseTipSetRef(ctx, api, tss)
}
-func ParseTipSetRef(ctx context.Context, api api.FullNode, tss string) (*types.TipSet, error) {
+func ParseTipSetRef(ctx context.Context, api v0api.FullNode, tss string) (*types.TipSet, error) {
if tss[0] == '@' {
if tss == "@head" {
return api.ChainHead(ctx)
@@ -191,7 +275,7 @@ func ParseTipSetRef(ctx context.Context, api api.FullNode, tss string) (*types.T
return ts, nil
}
-var statePowerCmd = &cli.Command{
+var StatePowerCmd = &cli.Command{
Name: "power",
Usage: "Query network or miner power",
ArgsUsage: "[<minerAddress> (optional)]"
@@ -204,17 +288,26 @@ var statePowerCmd = &cli.Command{
ctx := ReqContext(cctx)
+ ts, err := LoadTipSet(ctx, cctx, api)
+ if err != nil {
+ return err
+ }
+
var maddr address.Address
if cctx.Args().Present() {
maddr, err = address.NewFromString(cctx.Args().First())
if err != nil {
return err
}
- }
- ts, err := LoadTipSet(ctx, cctx, api)
- if err != nil {
- return err
+ ma, err := api.StateGetActor(ctx, maddr, ts.Key())
+ if err != nil {
+ return err
+ }
+
+ if !builtin.IsStorageMinerActor(ma.Code) {
+ return xerrors.New("provided address does not correspond to a miner actor")
+ }
}
power, err := api.StateMinerPower(ctx, maddr, ts.Key())
@@ -225,8 +318,15 @@ var statePowerCmd = &cli.Command{
tp := power.TotalPower
if cctx.Args().Present() {
mp := power.MinerPower
- percI := types.BigDiv(types.BigMul(mp.QualityAdjPower, types.NewInt(1000000)), tp.QualityAdjPower)
- fmt.Printf("%s(%s) / %s(%s) ~= %0.4f%%\n", mp.QualityAdjPower.String(), types.SizeStr(mp.QualityAdjPower), tp.QualityAdjPower.String(), types.SizeStr(tp.QualityAdjPower), float64(percI.Int64())/10000)
+ fmt.Printf(
+ "%s(%s) / %s(%s) ~= %0.4f%%\n",
+ mp.QualityAdjPower.String(), types.SizeStr(mp.QualityAdjPower),
+ tp.QualityAdjPower.String(), types.SizeStr(tp.QualityAdjPower),
+ types.BigDivFloat(
+ types.BigMul(mp.QualityAdjPower, big.NewInt(100)),
+ tp.QualityAdjPower,
+ ),
+ )
} else {
fmt.Printf("%s(%s)\n", tp.QualityAdjPower.String(), types.SizeStr(tp.QualityAdjPower))
}
@@ -235,7 +335,7 @@ var statePowerCmd = &cli.Command{
},
}
-var stateSectorsCmd = &cli.Command{
+var StateSectorsCmd = &cli.Command{
Name: "sectors",
Usage: "Query the sector set of a miner",
ArgsUsage: "[minerAddress]",
@@ -268,14 +368,14 @@ var stateSectorsCmd = &cli.Command{
}
for _, s := range sectors {
- fmt.Printf("%d: %x\n", s.SectorNumber, s.SealedCID)
+ fmt.Printf("%d: %s\n", s.SectorNumber, s.SealedCID)
}
return nil
},
}
-var stateActiveSectorsCmd = &cli.Command{
+var StateActiveSectorsCmd = &cli.Command{
Name: "active-sectors",
Usage: "Query the active sector set of a miner",
ArgsUsage: "[minerAddress]",
@@ -308,14 +408,14 @@ var stateActiveSectorsCmd = &cli.Command{
}
for _, s := range sectors {
- fmt.Printf("%d: %x\n", s.SectorNumber, s.SealedCID)
+ fmt.Printf("%d: %s\n", s.SectorNumber, s.SealedCID)
}
return nil
},
}
-var stateExecTraceCmd = &cli.Command{
+var StateExecTraceCmd = &cli.Command{
Name: "exec-trace",
Usage: "Get the execution trace of a given message",
ArgsUsage: "<messageCid>",
@@ -346,6 +446,9 @@ var stateExecTraceCmd = &cli.Command{
if err != nil {
return err
}
+ if lookup == nil {
+ return fmt.Errorf("failed to find message: %s", mcid)
+ }
ts, err := capi.ChainGetTipSet(ctx, lookup.TipSet)
if err != nil {
@@ -383,7 +486,7 @@ var stateExecTraceCmd = &cli.Command{
},
}
-var stateReplayCmd = &cli.Command{
+var StateReplayCmd = &cli.Command{
Name: "replay",
Usage: "Replay a particular message",
ArgsUsage: "<messageCid>"
@@ -448,7 +551,7 @@ var stateReplayCmd = &cli.Command{
},
}
-var stateGetDealSetCmd = &cli.Command{
+var StateGetDealSetCmd = &cli.Command{
Name: "get-deal",
Usage: "View on-chain deal info",
ArgsUsage: "[dealId]",
@@ -490,7 +593,7 @@ var stateGetDealSetCmd = &cli.Command{
},
}
-var stateListMinersCmd = &cli.Command{
+var StateListMinersCmd = &cli.Command{
Name: "list-miners",
Usage: "list all miners in the network",
Flags: []cli.Flag{
@@ -546,7 +649,7 @@ var stateListMinersCmd = &cli.Command{
},
}
-func getDealsCounts(ctx context.Context, lapi api.FullNode) (map[address.Address]int, error) {
+func getDealsCounts(ctx context.Context, lapi v0api.FullNode) (map[address.Address]int, error) {
allDeals, err := lapi.StateMarketDeals(ctx, types.EmptyTSK)
if err != nil {
return nil, err
@@ -562,7 +665,7 @@ func getDealsCounts(ctx context.Context, lapi api.FullNode) (map[address.Address
return out, nil
}
-var stateListActorsCmd = &cli.Command{
+var StateListActorsCmd = &cli.Command{
Name: "list-actors",
Usage: "list all actors in the network",
Action: func(cctx *cli.Context) error {
@@ -592,10 +695,10 @@ var stateListActorsCmd = &cli.Command{
},
}
-var stateGetActorCmd = &cli.Command{
+var StateGetActorCmd = &cli.Command{
Name: "get-actor",
Usage: "Print actor information",
- ArgsUsage: "[actorrAddress]",
+ ArgsUsage: "[actorAddress]",
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
@@ -636,7 +739,7 @@ var stateGetActorCmd = &cli.Command{
},
}
-var stateLookupIDCmd = &cli.Command{
+var StateLookupIDCmd = &cli.Command{
Name: "lookup",
Usage: "Find corresponding ID address",
ArgsUsage: "[address]",
@@ -687,7 +790,7 @@ var stateLookupIDCmd = &cli.Command{
},
}
-var stateSectorSizeCmd = &cli.Command{
+var StateSectorSizeCmd = &cli.Command{
Name: "sector-size",
Usage: "Look up miners sector size",
ArgsUsage: "[minerAddress]",
@@ -724,7 +827,7 @@ var stateSectorSizeCmd = &cli.Command{
},
}
-var stateReadStateCmd = &cli.Command{
+var StateReadStateCmd = &cli.Command{
Name: "read-state",
Usage: "View a json representation of an actors state",
ArgsUsage: "[actorAddress]",
@@ -766,7 +869,7 @@ var stateReadStateCmd = &cli.Command{
},
}
-var stateListMessagesCmd = &cli.Command{
+var StateListMessagesCmd = &cli.Command{
Name: "list-messages",
Usage: "list messages on chain matching given criteria",
Flags: []cli.Flag{
@@ -820,14 +923,6 @@ var stateListMessagesCmd = &cli.Command{
return err
}
- if ts == nil {
- head, err := api.ChainHead(ctx)
- if err != nil {
- return err
- }
- ts = head
- }
-
windowSize := abi.ChainEpoch(100)
cur := ts
@@ -879,7 +974,7 @@ var stateListMessagesCmd = &cli.Command{
},
}
-var stateComputeStateCmd = &cli.Command{
+var StateComputeStateCmd = &cli.Command{
Name: "compute-state",
Usage: "Perform state computations",
Flags: []cli.Flag{
@@ -907,6 +1002,10 @@ var stateComputeStateCmd = &cli.Command{
Name: "compute-state-output",
Usage: "a json file containing pre-existing compute-state output, to generate html reports without rerunning state changes",
},
+ &cli.BoolFlag{
+ Name: "no-timing",
+ Usage: "don't show timing information in html traces",
+ },
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
@@ -923,13 +1022,6 @@ var stateComputeStateCmd = &cli.Command{
}
h := abi.ChainEpoch(cctx.Uint64("vm-height"))
- if ts == nil {
- head, err := api.ChainHead(ctx)
- if err != nil {
- return err
- }
- ts = head
- }
if h == 0 {
h = ts.Height()
}
@@ -978,7 +1070,7 @@ var stateComputeStateCmd = &cli.Command{
}
if cctx.Bool("html") {
- st, err := state.LoadStateTree(cbor.NewCborStore(apibstore.NewAPIBlockstore(api)), stout.Root)
+ st, err := state.LoadStateTree(cbor.NewCborStore(blockstore.NewAPIBlockstore(api)), stout.Root)
if err != nil {
return xerrors.Errorf("loading state tree: %w", err)
}
@@ -998,7 +1090,9 @@ var stateComputeStateCmd = &cli.Command{
return c.Code, nil
}
- return ComputeStateHTMLTempl(os.Stdout, ts, stout, getCode)
+ _, _ = fmt.Fprintln(os.Stderr, "computed state cid: ", stout.Root)
+
+ return ComputeStateHTMLTempl(os.Stdout, ts, stout, !cctx.Bool("no-timing"), getCode)
}
fmt.Println("computed state cid: ", stout.Root)
@@ -1119,8 +1213,11 @@ var compStateMsg = `
{{if gt (len .Msg.Params) 0}}
{{JsonParams ($code) (.Msg.Method) (.Msg.Params) | html}}
{{end}}
- Took {{.Duration}}, Exit: {{.MsgRct.ExitCode}}{{if gt (len .MsgRct.Return) 0}}, Return{{end}}
-
+ {{if PrintTiming}}
+ Took {{.Duration}}, Exit: {{.MsgRct.ExitCode}}{{if gt (len .MsgRct.Return) 0}}, Return{{end}}
+ {{else}}
+ Exit: {{.MsgRct.ExitCode}}{{if gt (len .MsgRct.Return) 0}}, Return{{end}}
+ {{end}}
{{if gt (len .MsgRct.Return) 0}}
{{JsonReturn ($code) (.Msg.Method) (.MsgRct.Return) | html}}
{{end}}
@@ -1146,7 +1243,7 @@ var compStateMsg = `
{{range .GasCharges}}
{{.Name}}{{if .Extra}}:{{.Extra}}{{end}} |
{{template "gasC" .}}
- {{.TimeTaken}} |
+ {{if PrintTiming}}{{.TimeTaken}}{{end}} |
{{ $fImp := FirstImportant .Location }}
{{ if $fImp }}
@@ -1185,7 +1282,7 @@ var compStateMsg = `
{{with SumGas .GasCharges}}
|
Sum |
{{template "gasC" .}}
- {{.TimeTaken}} |
+ {{if PrintTiming}}{{.TimeTaken}}{{end}} |
|
{{end}}
@@ -1206,19 +1303,20 @@ type compStateHTMLIn struct {
Comp *api.ComputeStateOutput
}
-func ComputeStateHTMLTempl(w io.Writer, ts *types.TipSet, o *api.ComputeStateOutput, getCode func(addr address.Address) (cid.Cid, error)) error {
+func ComputeStateHTMLTempl(w io.Writer, ts *types.TipSet, o *api.ComputeStateOutput, printTiming bool, getCode func(addr address.Address) (cid.Cid, error)) error {
t, err := template.New("compute_state").Funcs(map[string]interface{}{
- "GetCode": getCode,
- "GetMethod": getMethod,
- "ToFil": toFil,
- "JsonParams": JsonParams,
- "JsonReturn": jsonReturn,
- "IsSlow": isSlow,
- "IsVerySlow": isVerySlow,
- "IntExit": func(i exitcode.ExitCode) int64 { return int64(i) },
- "SumGas": sumGas,
- "CodeStr": codeStr,
- "Call": call,
+ "GetCode": getCode,
+ "GetMethod": getMethod,
+ "ToFil": toFil,
+ "JsonParams": JsonParams,
+ "JsonReturn": jsonReturn,
+ "IsSlow": isSlow,
+ "IsVerySlow": isVerySlow,
+ "IntExit": func(i exitcode.ExitCode) int64 { return int64(i) },
+ "SumGas": sumGas,
+ "CodeStr": codeStr,
+ "Call": call,
+ "PrintTiming": func() bool { return printTiming },
"FirstImportant": func(locs []types.Loc) *types.Loc {
if len(locs) != 0 {
for _, l := range locs {
@@ -1299,12 +1397,11 @@ func sumGas(changes []*types.GasTrace) types.GasTrace {
}
func JsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) {
- methodMeta, found := stmgr.MethodsMap[code][method]
- if !found {
- return "", fmt.Errorf("method %d not found on actor %s", method, code)
+ p, err := stmgr.GetParamType(code, method)
+ if err != nil {
+ return "", err
}
- re := reflect.New(methodMeta.Params.Elem())
- p := re.Interface().(cbg.CBORUnmarshaler)
+
if err := p.UnmarshalCBOR(bytes.NewReader(params)); err != nil {
return "", err
}
@@ -1328,7 +1425,7 @@ func jsonReturn(code cid.Cid, method abi.MethodNum, ret []byte) (string, error)
return string(b), err
}
-var stateWaitMsgCmd = &cli.Command{
+var StateWaitMsgCmd = &cli.Command{
Name: "wait-msg",
Usage: "Wait for a message to appear on chain",
ArgsUsage: "[messageCid]",
@@ -1366,35 +1463,11 @@ var stateWaitMsgCmd = &cli.Command{
return err
}
- fmt.Printf("message was executed in tipset: %s\n", mw.TipSet.Cids())
- fmt.Printf("Exit Code: %d\n", mw.Receipt.ExitCode)
- fmt.Printf("Gas Used: %d\n", mw.Receipt.GasUsed)
- fmt.Printf("Return: %x\n", mw.Receipt.Return)
- if err := printReceiptReturn(ctx, api, m, mw.Receipt); err != nil {
- return err
- }
-
- return nil
+ return printMsg(ctx, api, msg, mw, m)
},
}
-func printReceiptReturn(ctx context.Context, api api.FullNode, m *types.Message, r types.MessageReceipt) error {
- act, err := api.StateGetActor(ctx, m.To, types.EmptyTSK)
- if err != nil {
- return err
- }
-
- jret, err := jsonReturn(act.Code, m.Method, r.Return)
- if err != nil {
- return err
- }
-
- fmt.Println(jret)
-
- return nil
-}
-
-var stateSearchMsgCmd = &cli.Command{
+var StateSearchMsgCmd = &cli.Command{
Name: "search-msg",
Usage: "Search to see whether a message has appeared on chain",
ArgsUsage: "[messageCid]",
@@ -1421,22 +1494,64 @@ var stateSearchMsgCmd = &cli.Command{
return err
}
- if mw != nil {
- fmt.Printf("message was executed in tipset: %s", mw.TipSet.Cids())
- fmt.Printf("\nExit Code: %d", mw.Receipt.ExitCode)
- fmt.Printf("\nGas Used: %d", mw.Receipt.GasUsed)
- fmt.Printf("\nReturn: %x", mw.Receipt.Return)
- } else {
- fmt.Print("message was not found on chain")
+ if mw == nil {
+ return fmt.Errorf("failed to find message: %s", msg)
}
- return nil
+
+ m, err := api.ChainGetMessage(ctx, msg)
+ if err != nil {
+ return err
+ }
+
+ return printMsg(ctx, api, msg, mw, m)
},
}
-var stateCallCmd = &cli.Command{
+func printReceiptReturn(ctx context.Context, api v0api.FullNode, m *types.Message, r types.MessageReceipt) error {
+ if len(r.Return) == 0 {
+ return nil
+ }
+
+ act, err := api.StateGetActor(ctx, m.To, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ jret, err := jsonReturn(act.Code, m.Method, r.Return)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("Decoded return value: ", jret)
+
+ return nil
+}
+
+func printMsg(ctx context.Context, api v0api.FullNode, msg cid.Cid, mw *lapi.MsgLookup, m *types.Message) error {
+ if mw == nil {
+ fmt.Println("message was not found on chain")
+ return nil
+ }
+
+ if mw.Message != msg {
+ fmt.Printf("Message was replaced: %s\n", mw.Message)
+ }
+
+ fmt.Printf("Executed in tipset: %s\n", mw.TipSet.Cids())
+ fmt.Printf("Exit Code: %d\n", mw.Receipt.ExitCode)
+ fmt.Printf("Gas Used: %d\n", mw.Receipt.GasUsed)
+ fmt.Printf("Return: %x\n", mw.Receipt.Return)
+ if err := printReceiptReturn(ctx, api, m, mw.Receipt); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+var StateCallCmd = &cli.Command{
Name: "call",
Usage: "Invoke a method on an actor locally",
- ArgsUsage: "[toAddress methodId (optional)]",
+ ArgsUsage: "[toAddress methodId params (optional)]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "from",
@@ -1450,8 +1565,13 @@ var stateCallCmd = &cli.Command{
},
&cli.StringFlag{
Name: "ret",
- Usage: "specify how to parse output (auto, raw, addr, big)",
- Value: "auto",
+ Usage: "specify how to parse output (raw, decoded, base64, hex)",
+ Value: "decoded",
+ },
+ &cli.StringFlag{
+ Name: "encoding",
+ Value: "base64",
+ Usage: "specify params encoding to parse (base64, hex)",
},
},
Action: func(cctx *cli.Context) error {
@@ -1492,14 +1612,23 @@ var stateCallCmd = &cli.Command{
return fmt.Errorf("failed to parse 'value': %s", err)
}
- act, err := api.StateGetActor(ctx, toa, ts.Key())
- if err != nil {
- return fmt.Errorf("failed to lookup target actor: %s", err)
- }
-
- params, err := parseParamsForMethod(act.Code, method, cctx.Args().Slice()[2:])
- if err != nil {
- return fmt.Errorf("failed to parse params: %s", err)
+ var params []byte
+ // If params were passed in, decode them
+ if cctx.Args().Len() > 2 {
+ switch cctx.String("encoding") {
+ case "base64":
+ params, err = base64.StdEncoding.DecodeString(cctx.Args().Get(2))
+ if err != nil {
+ return xerrors.Errorf("decoding base64 value: %w", err)
+ }
+ case "hex":
+ params, err = hex.DecodeString(cctx.Args().Get(2))
+ if err != nil {
+ return xerrors.Errorf("decoding hex value: %w", err)
+ }
+ default:
+ return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding"))
+ }
}
ret, err := api.StateCall(ctx, &types.Message{
@@ -1510,138 +1639,43 @@ var stateCallCmd = &cli.Command{
Params: params,
}, ts.Key())
if err != nil {
- return fmt.Errorf("state call failed: %s", err)
+ return fmt.Errorf("state call failed: %w", err)
}
if ret.MsgRct.ExitCode != 0 {
return fmt.Errorf("invocation failed (exit: %d, gasUsed: %d): %s", ret.MsgRct.ExitCode, ret.MsgRct.GasUsed, ret.Error)
}
- s, err := formatOutput(cctx.String("ret"), ret.MsgRct.Return)
- if err != nil {
- return fmt.Errorf("failed to format output: %s", err)
- }
-
- fmt.Printf("gas used: %d\n", ret.MsgRct.GasUsed)
- fmt.Printf("return: %s\n", s)
-
- return nil
- },
-}
-
-func formatOutput(t string, val []byte) (string, error) {
- switch t {
- case "raw", "hex":
- return fmt.Sprintf("%x", val), nil
- case "address", "addr", "a":
- a, err := address.NewFromBytes(val)
- if err != nil {
- return "", err
- }
- return a.String(), nil
- case "big", "int", "bigint":
- bi := types.BigFromBytes(val)
- return bi.String(), nil
- case "fil":
- bi := types.FIL(types.BigFromBytes(val))
- return bi.String(), nil
- case "pid", "peerid", "peer":
- pid, err := peer.IDFromBytes(val)
- if err != nil {
- return "", err
- }
-
- return pid.Pretty(), nil
- case "auto":
- if len(val) == 0 {
- return "", nil
- }
-
- a, err := address.NewFromBytes(val)
- if err == nil {
- return "address: " + a.String(), nil
- }
-
- pid, err := peer.IDFromBytes(val)
- if err == nil {
- return "peerID: " + pid.Pretty(), nil
- }
-
- bi := types.BigFromBytes(val)
- return "bigint: " + bi.String(), nil
- default:
- return "", fmt.Errorf("unrecognized output type: %q", t)
- }
-}
+ fmt.Println("Call receipt:")
+ fmt.Printf("Exit code: %d\n", ret.MsgRct.ExitCode)
+ fmt.Printf("Gas Used: %d\n", ret.MsgRct.GasUsed)
-func parseParamsForMethod(act cid.Cid, method uint64, args []string) ([]byte, error) {
- if len(args) == 0 {
- return nil, nil
- }
-
- // TODO: consider moving this to a dedicated helper
- actMeta, ok := stmgr.MethodsMap[act]
- if !ok {
- return nil, fmt.Errorf("unknown actor %s", act)
- }
-
- methodMeta, ok := actMeta[abi.MethodNum(method)]
- if !ok {
- return nil, fmt.Errorf("unknown method %d for actor %s", method, act)
- }
-
- paramObj := methodMeta.Params
- if paramObj.NumField() != len(args) {
- return nil, fmt.Errorf("not enough arguments given to call that method (expecting %d)", paramObj.NumField())
- }
-
- p := reflect.New(paramObj)
- for i := 0; i < len(args); i++ {
- switch paramObj.Field(i).Type {
- case reflect.TypeOf(address.Address{}):
- a, err := address.NewFromString(args[i])
- if err != nil {
- return nil, fmt.Errorf("failed to parse address: %s", err)
- }
- p.Elem().Field(i).Set(reflect.ValueOf(a))
- case reflect.TypeOf(uint64(0)):
- val, err := strconv.ParseUint(args[i], 10, 64)
+ switch cctx.String("ret") {
+ case "decoded":
+ act, err := api.StateGetActor(ctx, toa, ts.Key())
if err != nil {
- return nil, err
+ return xerrors.Errorf("getting actor: %w", err)
}
- p.Elem().Field(i).Set(reflect.ValueOf(val))
- case reflect.TypeOf(abi.ChainEpoch(0)):
- val, err := strconv.ParseInt(args[i], 10, 64)
- if err != nil {
- return nil, err
- }
- p.Elem().Field(i).Set(reflect.ValueOf(abi.ChainEpoch(val)))
- case reflect.TypeOf(big.Int{}):
- val, err := big.FromString(args[i])
- if err != nil {
- return nil, err
- }
- p.Elem().Field(i).Set(reflect.ValueOf(val))
- case reflect.TypeOf(peer.ID("")):
- pid, err := peer.Decode(args[i])
+
+ retStr, err := jsonReturn(act.Code, abi.MethodNum(method), ret.MsgRct.Return)
if err != nil {
- return nil, fmt.Errorf("failed to parse peer ID: %s", err)
+ return xerrors.Errorf("decoding return: %w", err)
}
- p.Elem().Field(i).Set(reflect.ValueOf(pid))
- default:
- return nil, fmt.Errorf("unsupported type for call (TODO): %s", paramObj.Field(i).Type)
+
+ fmt.Printf("Return:\n%s\n", retStr)
+ case "raw":
+ fmt.Printf("Return: \n%s\n", ret.MsgRct.Return)
+ case "hex":
+ fmt.Printf("Return: \n%x\n", ret.MsgRct.Return)
+ case "base64":
+ fmt.Printf("Return: \n%s\n", base64.StdEncoding.EncodeToString(ret.MsgRct.Return))
}
- }
- m := p.Interface().(cbg.CBORMarshaler)
- buf := new(bytes.Buffer)
- if err := m.MarshalCBOR(buf); err != nil {
- return nil, fmt.Errorf("failed to marshal param object: %s", err)
- }
- return buf.Bytes(), nil
+ return nil
+ },
}
-var stateCircSupplyCmd = &cli.Command{
+var StateCircSupplyCmd = &cli.Command{
Name: "circulating-supply",
Usage: "Get the exact current circulating supply of Filecoin",
Flags: []cli.Flag{
@@ -1690,10 +1724,10 @@ var stateCircSupplyCmd = &cli.Command{
},
}
-var stateSectorCmd = &cli.Command{
+var StateSectorCmd = &cli.Command{
Name: "sector",
Usage: "Get miner sector info",
- ArgsUsage: "[miner address] [sector number]",
+ ArgsUsage: "[minerAddress] [sectorNumber]",
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
@@ -1704,7 +1738,7 @@ var stateSectorCmd = &cli.Command{
ctx := ReqContext(cctx)
if cctx.Args().Len() != 2 {
- return xerrors.Errorf("expected 2 params")
+ return xerrors.Errorf("expected 2 params: minerAddress and sectorNumber")
}
ts, err := LoadTipSet(ctx, cctx, api)
@@ -1712,13 +1746,6 @@ var stateSectorCmd = &cli.Command{
return err
}
- if ts == nil {
- ts, err = api.ChainHead(ctx)
- if err != nil {
- return err
- }
- }
-
maddr, err := address.NewFromString(cctx.Args().Get(0))
if err != nil {
return err
@@ -1733,6 +1760,9 @@ var stateSectorCmd = &cli.Command{
if err != nil {
return err
}
+ if si == nil {
+ return xerrors.Errorf("sector %d for miner %s not found", sid, maddr)
+ }
fmt.Println("SectorNumber: ", si.SectorNumber)
fmt.Println("SealProof: ", si.SealProof)
@@ -1761,7 +1791,7 @@ var stateSectorCmd = &cli.Command{
},
}
-var stateMarketCmd = &cli.Command{
+var StateMarketCmd = &cli.Command{
Name: "market",
Usage: "Inspect the storage market actor",
Subcommands: []*cli.Command{
@@ -1806,3 +1836,35 @@ var stateMarketBalanceCmd = &cli.Command{
return nil
},
}
+
+var StateNtwkVersionCmd = &cli.Command{
+ Name: "network-version",
+ Usage: "Returns the network version",
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Present() {
+ return ShowHelp(cctx, fmt.Errorf("doesn't expect any arguments"))
+ }
+
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := ReqContext(cctx)
+
+ ts, err := LoadTipSet(ctx, cctx, api)
+ if err != nil {
+ return err
+ }
+
+ nv, err := api.StateNetworkVersion(ctx, ts.Key())
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("Network Version: %d\n", nv)
+
+ return nil
+ },
+}
diff --git a/cli/status.go b/cli/status.go
new file mode 100644
index 00000000000..75f91196a1c
--- /dev/null
+++ b/cli/status.go
@@ -0,0 +1,60 @@
+package cli
+
+import (
+ "fmt"
+
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/lotus/build"
+)
+
+var StatusCmd = &cli.Command{
+ Name: "status",
+ Usage: "Check node status",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "chain",
+ Usage: "include chain health status",
+ },
+ },
+
+ Action: func(cctx *cli.Context) error {
+ apic, closer, err := GetFullNodeAPIV1(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ inclChainStatus := cctx.Bool("chain")
+
+ status, err := apic.NodeStatus(ctx, inclChainStatus)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("Sync Epoch: %d\n", status.SyncStatus.Epoch)
+ fmt.Printf("Epochs Behind: %d\n", status.SyncStatus.Behind)
+ fmt.Printf("Peers to Publish Messages: %d\n", status.PeerStatus.PeersToPublishMsgs)
+ fmt.Printf("Peers to Publish Blocks: %d\n", status.PeerStatus.PeersToPublishBlocks)
+
+ if inclChainStatus && status.SyncStatus.Epoch > uint64(build.Finality) {
+ var ok100, okFin string
+ if status.ChainStatus.BlocksPerTipsetLast100 >= 4.75 {
+ ok100 = "[OK]"
+ } else {
+ ok100 = "[UNHEALTHY]"
+ }
+ if status.ChainStatus.BlocksPerTipsetLastFinality >= 4.75 {
+ okFin = "[OK]"
+ } else {
+ okFin = "[UNHEALTHY]"
+ }
+
+ fmt.Printf("Blocks per TipSet in last 100 epochs: %f %s\n", status.ChainStatus.BlocksPerTipsetLast100, ok100)
+ fmt.Printf("Blocks per TipSet in last finality: %f %s\n", status.ChainStatus.BlocksPerTipsetLastFinality, okFin)
+ }
+
+ return nil
+ },
+}
diff --git a/cli/sync.go b/cli/sync.go
index c3f25eb1d56..c7b010111c3 100644
--- a/cli/sync.go
+++ b/cli/sync.go
@@ -12,23 +12,24 @@ import (
"github.com/urfave/cli/v2"
"github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/build"
)
-var syncCmd = &cli.Command{
+var SyncCmd = &cli.Command{
Name: "sync",
Usage: "Inspect or interact with the chain syncer",
Subcommands: []*cli.Command{
- syncStatusCmd,
- syncWaitCmd,
- syncMarkBadCmd,
- syncUnmarkBadCmd,
- syncCheckBadCmd,
- syncCheckpointCmd,
+ SyncStatusCmd,
+ SyncWaitCmd,
+ SyncMarkBadCmd,
+ SyncUnmarkBadCmd,
+ SyncCheckBadCmd,
+ SyncCheckpointCmd,
},
}
-var syncStatusCmd = &cli.Command{
+var SyncStatusCmd = &cli.Command{
Name: "status",
Usage: "check sync status",
Action: func(cctx *cli.Context) error {
@@ -45,8 +46,8 @@ var syncStatusCmd = &cli.Command{
}
fmt.Println("sync status:")
- for i, ss := range state.ActiveSyncs {
- fmt.Printf("worker %d:\n", i)
+ for _, ss := range state.ActiveSyncs {
+ fmt.Printf("worker %d:\n", ss.WorkerID)
var base, target []cid.Cid
var heightDiff int64
var theight abi.ChainEpoch
@@ -81,7 +82,7 @@ var syncStatusCmd = &cli.Command{
},
}
-var syncWaitCmd = &cli.Command{
+var SyncWaitCmd = &cli.Command{
Name: "wait",
Usage: "Wait for sync to be complete",
Flags: []cli.Flag{
@@ -102,7 +103,7 @@ var syncWaitCmd = &cli.Command{
},
}
-var syncMarkBadCmd = &cli.Command{
+var SyncMarkBadCmd = &cli.Command{
Name: "mark-bad",
Usage: "Mark the given block as bad, will prevent syncing to a chain that contains it",
ArgsUsage: "[blockCid]",
@@ -127,7 +128,7 @@ var syncMarkBadCmd = &cli.Command{
},
}
-var syncUnmarkBadCmd = &cli.Command{
+var SyncUnmarkBadCmd = &cli.Command{
Name: "unmark-bad",
Usage: "Unmark the given block as bad, makes it possible to sync to a chain containing it",
Flags: []cli.Flag{
@@ -162,7 +163,7 @@ var syncUnmarkBadCmd = &cli.Command{
},
}
-var syncCheckBadCmd = &cli.Command{
+var SyncCheckBadCmd = &cli.Command{
Name: "check-bad",
Usage: "check if the given block was marked bad, and for what reason",
ArgsUsage: "[blockCid]",
@@ -198,7 +199,7 @@ var syncCheckBadCmd = &cli.Command{
},
}
-var syncCheckpointCmd = &cli.Command{
+var SyncCheckpointCmd = &cli.Command{
Name: "checkpoint",
Usage: "mark a certain tipset as checkpointed; the node will never fork away from this tipset",
ArgsUsage: "[tipsetKey]",
@@ -240,7 +241,7 @@ var syncCheckpointCmd = &cli.Command{
},
}
-func SyncWait(ctx context.Context, napi api.FullNode, watch bool) error {
+func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error {
tick := time.Second / 4
lastLines := 0
@@ -263,12 +264,17 @@ func SyncWait(ctx context.Context, napi api.FullNode, watch bool) error {
return err
}
+ if len(state.ActiveSyncs) == 0 {
+ time.Sleep(time.Second)
+ continue
+ }
+
head, err := napi.ChainHead(ctx)
if err != nil {
return err
}
- working := 0
+ working := -1
for i, ss := range state.ActiveSyncs {
switch ss.Stage {
case api.StageSyncComplete:
@@ -279,7 +285,12 @@ func SyncWait(ctx context.Context, napi api.FullNode, watch bool) error {
}
}
+ if working == -1 {
+ working = len(state.ActiveSyncs) - 1
+ }
+
ss := state.ActiveSyncs[working]
+ workerID := ss.WorkerID
var baseHeight abi.ChainEpoch
var target []cid.Cid
@@ -302,7 +313,7 @@ func SyncWait(ctx context.Context, napi api.FullNode, watch bool) error {
fmt.Print("\r\x1b[2K\x1b[A")
}
- fmt.Printf("Worker: %d; Base: %d; Target: %d (diff: %d)\n", working, baseHeight, theight, heightDiff)
+ fmt.Printf("Worker: %d; Base: %d; Target: %d (diff: %d)\n", workerID, baseHeight, theight, heightDiff)
fmt.Printf("State: %s; Current Epoch: %d; Todo: %d\n", ss.Stage, ss.Height, theight-ss.Height)
lastLines = 2
diff --git a/cli/test/net.go b/cli/test/net.go
deleted file mode 100644
index 836b81a8f78..00000000000
--- a/cli/test/net.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package test
-
-import (
- "context"
- "testing"
- "time"
-
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/lotus/chain/types"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/lotus/api/test"
- test2 "github.com/filecoin-project/lotus/node/test"
-)
-
-func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) (test.TestNode, address.Address) {
- n, sn := test2.RPCMockSbBuilder(t, test.OneFull, test.OneMiner)
-
- full := n[0]
- miner := sn[0]
-
- // Get everyone connected
- addrs, err := full.NetAddrsListen(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := miner.NetConnect(ctx, addrs); err != nil {
- t.Fatal(err)
- }
-
- // Start mining blocks
- bm := test.NewBlockMiner(ctx, t, miner, blocktime)
- bm.MineBlocks()
-
- // Get the full node's wallet address
- fullAddr, err := full.WalletDefaultAddress(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- // Create mock CLI
- return full, fullAddr
-}
-
-func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]test.TestNode, []address.Address) {
- n, sn := test2.RPCMockSbBuilder(t, test.TwoFull, test.OneMiner)
-
- fullNode1 := n[0]
- fullNode2 := n[1]
- miner := sn[0]
-
- // Get everyone connected
- addrs, err := fullNode1.NetAddrsListen(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := fullNode2.NetConnect(ctx, addrs); err != nil {
- t.Fatal(err)
- }
-
- if err := miner.NetConnect(ctx, addrs); err != nil {
- t.Fatal(err)
- }
-
- // Start mining blocks
- bm := test.NewBlockMiner(ctx, t, miner, blocktime)
- bm.MineBlocks()
-
- // Send some funds to register the second node
- fullNodeAddr2, err := fullNode2.WalletNew(ctx, types.KTSecp256k1)
- if err != nil {
- t.Fatal(err)
- }
-
- test.SendFunds(ctx, t, fullNode1, fullNodeAddr2, abi.NewTokenAmount(1e18))
-
- // Get the first node's address
- fullNodeAddr1, err := fullNode1.WalletDefaultAddress(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- // Create mock CLI
- return n, []address.Address{fullNodeAddr1, fullNodeAddr2}
-}
diff --git a/cli/test/util.go b/cli/test/util.go
deleted file mode 100644
index e3930dc832a..00000000000
--- a/cli/test/util.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package test
-
-import "github.com/ipfs/go-log/v2"
-
-func QuietMiningLogs() {
- _ = log.SetLogLevel("miner", "ERROR")
- _ = log.SetLogLevel("chainstore", "ERROR")
- _ = log.SetLogLevel("chain", "ERROR")
- _ = log.SetLogLevel("sub", "ERROR")
- _ = log.SetLogLevel("storageminer", "ERROR")
- _ = log.SetLogLevel("pubsub", "ERROR")
-}
diff --git a/cli/util.go b/cli/util.go
index fb555e320f0..73668742def 100644
--- a/cli/util.go
+++ b/cli/util.go
@@ -3,19 +3,29 @@ package cli
import (
"context"
"fmt"
+ "os"
"time"
+ "github.com/fatih/color"
"github.com/hako/durafmt"
"github.com/ipfs/go-cid"
+ "github.com/mattn/go-isatty"
"github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
)
-func parseTipSet(ctx context.Context, api api.FullNode, vals []string) (*types.TipSet, error) {
+// Set the global default, to be overridden by individual cli flags in order
+func init() {
+ color.NoColor = os.Getenv("GOLOG_LOG_FMT") != "color" &&
+ !isatty.IsTerminal(os.Stdout.Fd()) &&
+ !isatty.IsCygwinTerminal(os.Stdout.Fd())
+}
+
+func parseTipSet(ctx context.Context, api v0api.FullNode, vals []string) (*types.TipSet, error) {
var headers []*types.BlockHeader
for _, c := range vals {
blkc, err := cid.Decode(c)
diff --git a/cli/util/api.go b/cli/util/api.go
new file mode 100644
index 00000000000..ecd2e927f82
--- /dev/null
+++ b/cli/util/api.go
@@ -0,0 +1,298 @@
+package cliutil
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "os/signal"
+ "strings"
+ "syscall"
+
+ "github.com/mitchellh/go-homedir"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-jsonrpc"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/client"
+ "github.com/filecoin-project/lotus/api/v0api"
+ "github.com/filecoin-project/lotus/api/v1api"
+ "github.com/filecoin-project/lotus/node/repo"
+)
+
+const (
+ metadataTraceContext = "traceContext"
+)
+
+// The flag passed on the command line with the listen address of the API
+// server (only used by the tests)
+func flagForAPI(t repo.RepoType) string {
+ switch t {
+ case repo.FullNode:
+ return "api-url"
+ case repo.StorageMiner:
+ return "miner-api-url"
+ case repo.Worker:
+ return "worker-api-url"
+ default:
+ panic(fmt.Sprintf("Unknown repo type: %v", t))
+ }
+}
+
+func flagForRepo(t repo.RepoType) string {
+ switch t {
+ case repo.FullNode:
+ return "repo"
+ case repo.StorageMiner:
+ return "miner-repo"
+ case repo.Worker:
+ return "worker-repo"
+ default:
+ panic(fmt.Sprintf("Unknown repo type: %v", t))
+ }
+}
+
+func EnvForRepo(t repo.RepoType) string {
+ switch t {
+ case repo.FullNode:
+ return "FULLNODE_API_INFO"
+ case repo.StorageMiner:
+ return "MINER_API_INFO"
+ case repo.Worker:
+ return "WORKER_API_INFO"
+ default:
+ panic(fmt.Sprintf("Unknown repo type: %v", t))
+ }
+}
+
+// TODO remove after deprecation period
+func envForRepoDeprecation(t repo.RepoType) string {
+ switch t {
+ case repo.FullNode:
+ return "FULLNODE_API_INFO"
+ case repo.StorageMiner:
+ return "STORAGE_API_INFO"
+ case repo.Worker:
+ return "WORKER_API_INFO"
+ default:
+ panic(fmt.Sprintf("Unknown repo type: %v", t))
+ }
+}
+
+func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
+ // Check if there was a flag passed with the listen address of the API
+ // server (only used by the tests)
+ apiFlag := flagForAPI(t)
+ if ctx.IsSet(apiFlag) {
+ strma := ctx.String(apiFlag)
+ strma = strings.TrimSpace(strma)
+
+ return APIInfo{Addr: strma}, nil
+ }
+
+ envKey := EnvForRepo(t)
+ env, ok := os.LookupEnv(envKey)
+ if !ok {
+ // TODO remove after deprecation period
+ envKey = envForRepoDeprecation(t)
+ env, ok = os.LookupEnv(envKey)
+ if ok {
+ log.Warnf("Use deprecation env(%s) value, please use env(%s) instead.", envKey, EnvForRepo(t))
+ }
+ }
+ if ok {
+ return ParseApiInfo(env), nil
+ }
+
+ repoFlag := flagForRepo(t)
+
+ p, err := homedir.Expand(ctx.String(repoFlag))
+ if err != nil {
+ return APIInfo{}, xerrors.Errorf("could not expand home dir (%s): %w", repoFlag, err)
+ }
+
+ r, err := repo.NewFS(p)
+ if err != nil {
+ return APIInfo{}, xerrors.Errorf("could not open repo at path: %s; %w", p, err)
+ }
+
+ ma, err := r.APIEndpoint()
+ if err != nil {
+ return APIInfo{}, xerrors.Errorf("could not get api endpoint: %w", err)
+ }
+
+ token, err := r.APIToken()
+ if err != nil {
+ log.Warnf("Couldn't load CLI token, capabilities may be limited: %v", err)
+ }
+
+ return APIInfo{
+ Addr: ma.String(),
+ Token: token,
+ }, nil
+}
+
+func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http.Header, error) {
+ ainfo, err := GetAPIInfo(ctx, t)
+ if err != nil {
+ return "", nil, xerrors.Errorf("could not get API info: %w", err)
+ }
+
+ addr, err := ainfo.DialArgs(version)
+ if err != nil {
+ return "", nil, xerrors.Errorf("could not get DialArgs: %w", err)
+ }
+
+ return addr, ainfo.AuthHeader(), nil
+}
+
+func GetAPI(ctx *cli.Context) (api.CommonNet, jsonrpc.ClientCloser, error) {
+ ti, ok := ctx.App.Metadata["repoType"]
+ if !ok {
+ log.Errorf("unknown repo type, are you sure you want to use GetAPI?")
+ ti = repo.FullNode
+ }
+ t, ok := ti.(repo.RepoType)
+ if !ok {
+ log.Errorf("repoType type does not match the type of repo.RepoType")
+ }
+
+ if tn, ok := ctx.App.Metadata["testnode-storage"]; ok {
+ return tn.(api.StorageMiner), func() {}, nil
+ }
+ if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
+ return tn.(api.FullNode), func() {}, nil
+ }
+
+ addr, headers, err := GetRawAPI(ctx, t, "v0")
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return client.NewCommonRPCV0(ctx.Context, addr, headers)
+}
+
+func GetFullNodeAPI(ctx *cli.Context) (v0api.FullNode, jsonrpc.ClientCloser, error) {
+ if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
+ return &v0api.WrapperV1Full{FullNode: tn.(v1api.FullNode)}, func() {}, nil
+ }
+
+ addr, headers, err := GetRawAPI(ctx, repo.FullNode, "v0")
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return client.NewFullNodeRPCV0(ctx.Context, addr, headers)
+}
+
+func GetFullNodeAPIV1(ctx *cli.Context) (v1api.FullNode, jsonrpc.ClientCloser, error) {
+ if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
+ return tn.(v1api.FullNode), func() {}, nil
+ }
+
+ addr, headers, err := GetRawAPI(ctx, repo.FullNode, "v1")
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return client.NewFullNodeRPCV1(ctx.Context, addr, headers)
+}
+
+type GetStorageMinerOptions struct {
+ PreferHttp bool
+}
+
+type GetStorageMinerOption func(*GetStorageMinerOptions)
+
+func StorageMinerUseHttp(opts *GetStorageMinerOptions) {
+ opts.PreferHttp = true
+}
+
+func GetStorageMinerAPI(ctx *cli.Context, opts ...GetStorageMinerOption) (api.StorageMiner, jsonrpc.ClientCloser, error) {
+ var options GetStorageMinerOptions
+ for _, opt := range opts {
+ opt(&options)
+ }
+
+ if tn, ok := ctx.App.Metadata["testnode-storage"]; ok {
+ return tn.(api.StorageMiner), func() {}, nil
+ }
+
+ addr, headers, err := GetRawAPI(ctx, repo.StorageMiner, "v0")
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if options.PreferHttp {
+ u, err := url.Parse(addr)
+ if err != nil {
+ return nil, nil, xerrors.Errorf("parsing miner api URL: %w", err)
+ }
+
+ switch u.Scheme {
+ case "ws":
+ u.Scheme = "http"
+ case "wss":
+ u.Scheme = "https"
+ }
+
+ addr = u.String()
+ }
+
+ return client.NewStorageMinerRPCV0(ctx.Context, addr, headers)
+}
+
+func GetWorkerAPI(ctx *cli.Context) (api.Worker, jsonrpc.ClientCloser, error) {
+ addr, headers, err := GetRawAPI(ctx, repo.Worker, "v0")
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return client.NewWorkerRPCV0(ctx.Context, addr, headers)
+}
+
+func GetGatewayAPI(ctx *cli.Context) (api.Gateway, jsonrpc.ClientCloser, error) {
+ addr, headers, err := GetRawAPI(ctx, repo.FullNode, "v1")
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return client.NewGatewayRPCV1(ctx.Context, addr, headers)
+}
+
+func GetGatewayAPIV0(ctx *cli.Context) (v0api.Gateway, jsonrpc.ClientCloser, error) {
+ addr, headers, err := GetRawAPI(ctx, repo.FullNode, "v0")
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return client.NewGatewayRPCV0(ctx.Context, addr, headers)
+}
+
+func DaemonContext(cctx *cli.Context) context.Context {
+ if mtCtx, ok := cctx.App.Metadata[metadataTraceContext]; ok {
+ return mtCtx.(context.Context)
+ }
+
+ return context.Background()
+}
+
+// ReqContext returns context for cli execution. Calling it for the first time
+// installs SIGTERM handler that will close returned context.
+// Not safe for concurrent execution.
+func ReqContext(cctx *cli.Context) context.Context {
+ tCtx := DaemonContext(cctx)
+
+ ctx, done := context.WithCancel(tCtx)
+ sigChan := make(chan os.Signal, 2)
+ go func() {
+ <-sigChan
+ done()
+ }()
+ signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP)
+
+ return ctx
+}
diff --git a/cli/util/apiinfo.go b/cli/util/apiinfo.go
index 1f9a837697a..41ca18c6104 100644
--- a/cli/util/apiinfo.go
+++ b/cli/util/apiinfo.go
@@ -36,7 +36,7 @@ func ParseApiInfo(s string) APIInfo {
}
}
-func (a APIInfo) DialArgs() (string, error) {
+func (a APIInfo) DialArgs(version string) (string, error) {
ma, err := multiaddr.NewMultiaddr(a.Addr)
if err == nil {
_, addr, err := manet.DialArgs(ma)
@@ -44,14 +44,14 @@ func (a APIInfo) DialArgs() (string, error) {
return "", err
}
- return "ws://" + addr + "/rpc/v0", nil
+ return "ws://" + addr + "/rpc/" + version, nil
}
_, err = url.Parse(a.Addr)
if err != nil {
return "", err
}
- return a.Addr + "/rpc/v0", nil
+ return a.Addr + "/rpc/" + version, nil
}
func (a APIInfo) Host() (string, error) {
diff --git a/cli/wait.go b/cli/wait.go
index ca8cdce3f56..ea897d5adb3 100644
--- a/cli/wait.go
+++ b/cli/wait.go
@@ -7,12 +7,12 @@ import (
"github.com/urfave/cli/v2"
)
-var waitApiCmd = &cli.Command{
+var WaitApiCmd = &cli.Command{
Name: "wait-api",
Usage: "Wait for lotus api to come online",
Action: func(cctx *cli.Context) error {
for i := 0; i < 30; i++ {
- api, closer, err := GetFullNodeAPI(cctx)
+ api, closer, err := GetAPI(cctx)
if err != nil {
fmt.Printf("Not online yet... (%s)\n", err)
time.Sleep(time.Second)
diff --git a/cli/wallet.go b/cli/wallet.go
index f6368cbfa44..802d85702e8 100644
--- a/cli/wallet.go
+++ b/cli/wallet.go
@@ -16,11 +16,8 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
- "github.com/filecoin-project/specs-actors/actors/builtin/market"
- "github.com/filecoin-project/specs-actors/v2/actors/builtin"
- "github.com/filecoin-project/lotus/chain/actors"
- types "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/tablewriter"
)
@@ -512,6 +509,7 @@ var walletMarket = &cli.Command{
Usage: "Interact with market balances",
Subcommands: []*cli.Command{
walletMarketWithdraw,
+ walletMarketAdd,
},
}
@@ -521,13 +519,13 @@ var walletMarketWithdraw = &cli.Command{
ArgsUsage: "[amount (FIL) optional, otherwise will withdraw max available]",
Flags: []cli.Flag{
&cli.StringFlag{
- Name: "from",
- Usage: "Specify address to withdraw funds from, otherwise it will use the default wallet address",
- Aliases: []string{"f"},
+ Name: "wallet",
+ Usage: "Specify address to withdraw funds to, otherwise it will use the default wallet address",
+ Aliases: []string{"w"},
},
&cli.StringFlag{
Name: "address",
- Usage: "Market address to withdraw from (account or miner actor address, defaults to --from address)",
+ Usage: "Market address to withdraw from (account or miner actor address, defaults to --wallet address)",
Aliases: []string{"a"},
},
},
@@ -539,20 +537,20 @@ var walletMarketWithdraw = &cli.Command{
defer closer()
ctx := ReqContext(cctx)
- var from address.Address
- if cctx.String("from") != "" {
- from, err = address.NewFromString(cctx.String("from"))
+ var wallet address.Address
+ if cctx.String("wallet") != "" {
+ wallet, err = address.NewFromString(cctx.String("wallet"))
if err != nil {
return xerrors.Errorf("parsing from address: %w", err)
}
} else {
- from, err = api.WalletDefaultAddress(ctx)
+ wallet, err = api.WalletDefaultAddress(ctx)
if err != nil {
return xerrors.Errorf("getting default wallet address: %w", err)
}
}
- addr := from
+ addr := wallet
if cctx.String("address") != "" {
addr, err = address.NewFromString(cctx.String("address"))
if err != nil {
@@ -560,14 +558,34 @@ var walletMarketWithdraw = &cli.Command{
}
}
+ // Work out if there are enough unreserved, unlocked funds to withdraw
bal, err := api.StateMarketBalance(ctx, addr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting market balance for address %s: %w", addr.String(), err)
}
- avail := big.Subtract(bal.Escrow, bal.Locked)
+ reserved, err := api.MarketGetReserved(ctx, addr)
+ if err != nil {
+ return xerrors.Errorf("getting market reserved amount for address %s: %w", addr.String(), err)
+ }
+
+ avail := big.Subtract(big.Subtract(bal.Escrow, bal.Locked), reserved)
+
+ notEnoughErr := func(msg string) error {
+ return xerrors.Errorf("%s; "+
+ "available (%s) = escrow (%s) - locked (%s) - reserved (%s)",
+ msg, types.FIL(avail), types.FIL(bal.Escrow), types.FIL(bal.Locked), types.FIL(reserved))
+ }
+
+ if avail.IsZero() || avail.LessThan(big.Zero()) {
+ avail = big.Zero()
+ return notEnoughErr("no funds available to withdraw")
+ }
+
+ // Default to withdrawing all available funds
amt := avail
+ // If there was an amount argument, only withdraw that amount
if cctx.Args().Present() {
f, err := types.ParseFIL(cctx.Args().First())
if err != nil {
@@ -577,35 +595,95 @@ var walletMarketWithdraw = &cli.Command{
amt = abi.TokenAmount(f)
}
+ // Check the amount is positive
+ if amt.IsZero() || amt.LessThan(big.Zero()) {
+ return xerrors.Errorf("amount must be > 0")
+ }
+
+ // Check there are enough available funds
if amt.GreaterThan(avail) {
- return xerrors.Errorf("can't withdraw more funds than available; requested: %s; available: %s", types.FIL(amt), types.FIL(avail))
+ msg := fmt.Sprintf("can't withdraw more funds than available; requested: %s", types.FIL(amt))
+ return notEnoughErr(msg)
}
- if avail.IsZero() {
- return xerrors.Errorf("zero unlocked funds available to withdraw")
+ fmt.Printf("Submitting WithdrawBalance message for amount %s for address %s\n", types.FIL(amt), wallet.String())
+ smsg, err := api.MarketWithdraw(ctx, wallet, addr, amt)
+ if err != nil {
+ return xerrors.Errorf("fund manager withdraw error: %w", err)
}
- params, err := actors.SerializeParams(&market.WithdrawBalanceParams{
- ProviderOrClientAddress: addr,
- Amount: amt,
- })
+ fmt.Printf("WithdrawBalance message cid: %s\n", smsg)
+
+ return nil
+ },
+}
+
+var walletMarketAdd = &cli.Command{
+ Name: "add",
+ Usage: "Add funds to the Storage Market Actor",
+ ArgsUsage: "",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "from",
+ Usage: "Specify address to move funds from, otherwise it will use the default wallet address",
+ Aliases: []string{"f"},
+ },
+ &cli.StringFlag{
+ Name: "address",
+ Usage: "Market address to move funds to (account or miner actor address, defaults to --from address)",
+ Aliases: []string{"a"},
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := GetFullNodeAPI(cctx)
+ if err != nil {
+ return xerrors.Errorf("getting node API: %w", err)
+ }
+ defer closer()
+ ctx := ReqContext(cctx)
+
+ // Get amount param
+ if !cctx.Args().Present() {
+ return fmt.Errorf("must pass amount to add")
+ }
+ f, err := types.ParseFIL(cctx.Args().First())
if err != nil {
- return xerrors.Errorf("serializing params: %w", err)
+ return xerrors.Errorf("parsing 'amount' argument: %w", err)
+ }
+
+ amt := abi.TokenAmount(f)
+
+ // Get from param
+ var from address.Address
+ if cctx.String("from") != "" {
+ from, err = address.NewFromString(cctx.String("from"))
+ if err != nil {
+ return xerrors.Errorf("parsing from address: %w", err)
+ }
+ } else {
+ from, err = api.WalletDefaultAddress(ctx)
+ if err != nil {
+ return xerrors.Errorf("getting default wallet address: %w", err)
+ }
+ }
+
+ // Get address param
+ addr := from
+ if cctx.String("address") != "" {
+ addr, err = address.NewFromString(cctx.String("address"))
+ if err != nil {
+ return xerrors.Errorf("parsing market address: %w", err)
+ }
}
- fmt.Printf("Submitting WithdrawBalance message for amount %s for address %s\n", types.FIL(amt), from.String())
- smsg, err := api.MpoolPushMessage(ctx, &types.Message{
- To: builtin.StorageMarketActorAddr,
- From: from,
- Value: types.NewInt(0),
- Method: builtin.MethodsMarket.WithdrawBalance,
- Params: params,
- }, nil)
+ // Add balance to market actor
+ fmt.Printf("Submitting Add Balance message for amount %s for address %s\n", types.FIL(amt), addr)
+ smsg, err := api.MarketAddBalance(ctx, from, addr, amt)
if err != nil {
- return xerrors.Errorf("submitting WithdrawBalance message: %w", err)
+ return xerrors.Errorf("add balance error: %w", err)
}
- fmt.Printf("WithdrawBalance message cid: %s\n", smsg.Cid())
+ fmt.Printf("AddBalance message cid: %s\n", smsg)
return nil
},
diff --git a/cmd/chain-noise/main.go b/cmd/chain-noise/main.go
index 81586e1b206..8106ce592b2 100644
--- a/cmd/chain-noise/main.go
+++ b/cmd/chain-noise/main.go
@@ -8,7 +8,7 @@ import (
"time"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
@@ -27,6 +27,16 @@ func main() {
Hidden: true,
Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME
},
+ &cli.IntFlag{
+ Name: "limit",
+ Usage: "spam transaction count limit, <= 0 is no limit",
+ Value: 0,
+ },
+ &cli.IntFlag{
+ Name: "rate",
+ Usage: "spam transaction rate, count per second",
+ Value: 5,
+ },
},
Commands: []*cli.Command{runCmd},
}
@@ -52,11 +62,17 @@ var runCmd = &cli.Command{
defer closer()
ctx := lcli.ReqContext(cctx)
- return sendSmallFundsTxs(ctx, api, addr, 5)
+ rate := cctx.Int("rate")
+ if rate <= 0 {
+ rate = 5
+ }
+ limit := cctx.Int("limit")
+
+ return sendSmallFundsTxs(ctx, api, addr, rate, limit)
},
}
-func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Address, rate int) error {
+func sendSmallFundsTxs(ctx context.Context, api v0api.FullNode, from address.Address, rate, limit int) error {
var sendSet []address.Address
for i := 0; i < 20; i++ {
naddr, err := api.WalletNew(ctx, types.KTSecp256k1)
@@ -66,9 +82,14 @@ func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Addre
sendSet = append(sendSet, naddr)
}
+ count := limit
tick := build.Clock.Ticker(time.Second / time.Duration(rate))
for {
+ if count <= 0 && limit > 0 {
+ fmt.Printf("%d messages sent.\n", limit)
+ return nil
+ }
select {
case <-tick.C:
msg := &types.Message{
@@ -81,6 +102,7 @@ func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Addre
if err != nil {
return err
}
+ count--
fmt.Println("Message sent: ", smsg.Cid())
case <-ctx.Done():
return nil
diff --git a/cmd/lotus-bench/caching_verifier.go b/cmd/lotus-bench/caching_verifier.go
index 5b434c762a3..f4cc0f83741 100644
--- a/cmd/lotus-bench/caching_verifier.go
+++ b/cmd/lotus-bench/caching_verifier.go
@@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
+ proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/ipfs/go-datastore"
"github.com/minio/blake2b-simd"
cbg "github.com/whyrusleeping/cbor-gen"
@@ -96,4 +97,8 @@ func (cv *cachingVerifier) GenerateWinningPoStSectorChallenge(ctx context.Contex
return cv.backend.GenerateWinningPoStSectorChallenge(ctx, proofType, a, rnd, u)
}
+func (cv cachingVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
+ return cv.backend.VerifyAggregateSeals(aggregate)
+}
+
var _ ffiwrapper.Verifier = (*cachingVerifier)(nil)
diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go
index acbf9ebdcbf..4b464bebeb1 100644
--- a/cmd/lotus-bench/import.go
+++ b/cmd/lotus-bench/import.go
@@ -16,21 +16,29 @@ import (
"sort"
"time"
+ ocprom "contrib.go.opencensus.io/exporter/prometheus"
"github.com/cockroachdb/pebble"
"github.com/cockroachdb/pebble/bloom"
+ "github.com/ipfs/go-cid"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+
"github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/blockstore"
+ badgerbs "github.com/filecoin-project/lotus/blockstore/badger"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
- "github.com/filecoin-project/lotus/lib/blockstore"
+ lcli "github.com/filecoin-project/lotus/cli"
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
+ "github.com/filecoin-project/lotus/node/repo"
+
+ "github.com/filecoin-project/go-state-types/abi"
metricsprometheus "github.com/ipfs/go-metrics-prometheus"
"github.com/ipld/go-car"
- "github.com/prometheus/client_golang/prometheus/promhttp"
- "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
bdg "github.com/dgraph-io/badger/v2"
@@ -51,14 +59,30 @@ type TipSetExec struct {
var importBenchCmd = &cli.Command{
Name: "import",
- Usage: "benchmark chain import and validation",
+ Usage: "Benchmark chain import and validation",
Subcommands: []*cli.Command{
importAnalyzeCmd,
},
Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "start-tipset",
+ Usage: "start validation at the given tipset key; in format cid1,cid2,cid3...",
+ },
+ &cli.StringFlag{
+ Name: "end-tipset",
+ Usage: "halt validation at the given tipset key; in format cid1,cid2,cid3...",
+ },
+ &cli.StringFlag{
+ Name: "genesis-tipset",
+ Usage: "genesis tipset key; in format cid1,cid2,cid3...",
+ },
+ &cli.Int64Flag{
+ Name: "start-height",
+ Usage: "start validation at given height; beware that chain traversal by height is very slow",
+ },
&cli.Int64Flag{
- Name: "height",
- Usage: "halt validation after given height",
+ Name: "end-height",
+ Usage: "halt validation after given height; beware that chain traversal by height is very slow",
},
&cli.IntFlag{
Name: "batch-seal-verify-threads",
@@ -86,32 +110,52 @@ var importBenchCmd = &cli.Command{
Name: "global-profile",
Value: true,
},
- &cli.Int64Flag{
- Name: "start-at",
- },
&cli.BoolFlag{
Name: "only-import",
},
&cli.BoolFlag{
Name: "use-pebble",
},
+ &cli.BoolFlag{
+ Name: "use-native-badger",
+ },
+ &cli.StringFlag{
+ Name: "car",
+ Usage: "path to CAR file; required for import; on validation, either " +
+ "a CAR path or the --head flag are required",
+ },
+ &cli.StringFlag{
+ Name: "head",
+ Usage: "tipset key of the head, useful when benchmarking validation " +
+ "on an existing chain store, where a CAR is not available; " +
+ "if both --car and --head are provided, --head takes precedence " +
+ "over the CAR root; the format is cid1,cid2,cid3...",
+ },
},
Action: func(cctx *cli.Context) error {
metricsprometheus.Inject() //nolint:errcheck
vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads")
- if !cctx.Args().Present() {
- fmt.Println("must pass car file of chain to benchmark importing")
- return nil
- }
-
- cfi, err := os.Open(cctx.Args().First())
- if err != nil {
- return err
- }
- defer cfi.Close() //nolint:errcheck // read only file
go func() {
- http.Handle("/debug/metrics/prometheus", promhttp.Handler())
+ // Prometheus globals are exposed as interfaces, but the prometheus
+ // OpenCensus exporter expects a concrete *Registry. The concrete type of
+ // the globals are actually *Registry, so we downcast them, staying
+ // defensive in case things change under the hood.
+ registry, ok := prometheus.DefaultRegisterer.(*prometheus.Registry)
+ if !ok {
+ log.Warnf("failed to export default prometheus registry; some metrics will be unavailable; unexpected type: %T", prometheus.DefaultRegisterer)
+ return
+ }
+ exporter, err := ocprom.NewExporter(ocprom.Options{
+ Registry: registry,
+ Namespace: "lotus",
+ })
+ if err != nil {
+ log.Fatalf("could not create the prometheus stats exporter: %v", err)
+ }
+
+ http.Handle("/debug/metrics", exporter)
+
http.ListenAndServe("localhost:6060", nil) //nolint:errcheck
}()
@@ -126,17 +170,17 @@ var importBenchCmd = &cli.Command{
tdir = tmp
}
- bdgOpt := badger.DefaultOptions
- bdgOpt.GcInterval = 0
- bdgOpt.Options = bdg.DefaultOptions("")
- bdgOpt.Options.SyncWrites = false
- bdgOpt.Options.Truncate = true
- bdgOpt.Options.DetectConflicts = false
+ var (
+ ds datastore.Batching
+ bs blockstore.Blockstore
+ err error
+ )
- var bds datastore.Batching
- if cctx.Bool("use-pebble") {
+ switch {
+ case cctx.Bool("use-pebble"):
+ log.Info("using pebble")
cache := 512
- bds, err = pebbleds.NewDatastore(tdir, &pebble.Options{
+ ds, err = pebbleds.NewDatastore(tdir, &pebble.Options{
// Pebble has a single combined cache area and the write
// buffers are taken from this too. Assign all available
// memory allowance for cache.
@@ -155,30 +199,45 @@ var importBenchCmd = &cli.Command{
},
Logger: log,
})
- } else {
- bds, err = badger.NewDatastore(tdir, &bdgOpt)
+
+ case cctx.Bool("use-native-badger"):
+ log.Info("using native badger")
+ var opts badgerbs.Options
+ if opts, err = repo.BadgerBlockstoreOptions(repo.UniversalBlockstore, tdir, false); err != nil {
+ return err
+ }
+ opts.SyncWrites = false
+ bs, err = badgerbs.Open(opts)
+
+ default: // legacy badger via datastore.
+ log.Info("using legacy badger")
+ bdgOpt := badger.DefaultOptions
+ bdgOpt.GcInterval = 0
+ bdgOpt.Options = bdg.DefaultOptions("")
+ bdgOpt.Options.SyncWrites = false
+ bdgOpt.Options.Truncate = true
+ bdgOpt.Options.DetectConflicts = false
+
+ ds, err = badger.NewDatastore(tdir, &bdgOpt)
}
+
if err != nil {
return err
}
- defer bds.Close() //nolint:errcheck
- bds = measure.New("dsbench", bds)
-
- bs := blockstore.NewBlockstore(bds)
- cacheOpts := blockstore.DefaultCacheOpts()
- cacheOpts.HasBloomFilterSize = 0
+ if ds != nil {
+ ds = measure.New("dsbench", ds)
+ defer ds.Close() //nolint:errcheck
+ bs = blockstore.FromDatastore(ds)
+ }
- cbs, err := blockstore.CachedBlockstore(context.TODO(), bs, cacheOpts)
- if err != nil {
- return err
+ if c, ok := bs.(io.Closer); ok {
+ defer c.Close() //nolint:errcheck
}
- bs = cbs
- ds := datastore.NewMapDatastore()
var verifier ffiwrapper.Verifier = ffiwrapper.ProofVerifier
if cctx.IsSet("syscall-cache") {
- scds, err := badger.NewDatastore(cctx.String("syscall-cache"), &bdgOpt)
+ scds, err := badger.NewDatastore(cctx.String("syscall-cache"), &badger.DefaultOptions)
if err != nil {
return xerrors.Errorf("opening syscall-cache datastore: %w", err)
}
@@ -193,29 +252,111 @@ var importBenchCmd = &cli.Command{
return nil
}
- cs := store.NewChainStore(bs, ds, vm.Syscalls(verifier), nil)
+ metadataDs := datastore.NewMapDatastore()
+ cs := store.NewChainStore(bs, bs, metadataDs, vm.Syscalls(verifier), nil)
+ defer cs.Close() //nolint:errcheck
+
stm := stmgr.NewStateManager(cs)
- if cctx.Bool("global-profile") {
- prof, err := os.Create("import-bench.prof")
+ var carFile *os.File
+ // open the CAR file if one is provided.
+ if path := cctx.String("car"); path != "" {
+ var err error
+ if carFile, err = os.Open(path); err != nil {
+ return xerrors.Errorf("failed to open provided CAR file: %w", err)
+ }
+ }
+
+ startTime := time.Now()
+
+ // register a gauge that reports how long since the measurable
+ // operation began.
+ promauto.NewGaugeFunc(prometheus.GaugeOpts{
+ Name: "lotus_bench_time_taken_secs",
+ }, func() float64 {
+ return time.Since(startTime).Seconds()
+ })
+
+ defer func() {
+ end := time.Now().Format(time.RFC3339)
+
+ resp, err := http.Get("http://localhost:6060/debug/metrics")
if err != nil {
- return err
+ log.Warnf("failed to scape prometheus: %s", err)
}
- defer prof.Close() //nolint:errcheck
- if err := pprof.StartCPUProfile(prof); err != nil {
- return err
+ metricsfi, err := os.Create("bench.metrics")
+ if err != nil {
+ log.Warnf("failed to write prometheus data: %s", err)
}
- }
+
+ _, _ = io.Copy(metricsfi, resp.Body) //nolint:errcheck
+ _ = metricsfi.Close() //nolint:errcheck
+
+ writeProfile := func(name string) {
+ if file, err := os.Create(fmt.Sprintf("%s.%s.%s.pprof", name, startTime.Format(time.RFC3339), end)); err == nil {
+ if err := pprof.Lookup(name).WriteTo(file, 0); err != nil {
+ log.Warnf("failed to write %s pprof: %s", name, err)
+ }
+ _ = file.Close()
+ } else {
+ log.Warnf("failed to create %s pprof file: %s", name, err)
+ }
+ }
+
+ writeProfile("heap")
+ writeProfile("allocs")
+ }()
var head *types.TipSet
+ // --- IMPORT ---
if !cctx.Bool("no-import") {
- head, err = cs.Import(cfi)
+ if cctx.Bool("global-profile") {
+ prof, err := os.Create("bench.import.pprof")
+ if err != nil {
+ return err
+ }
+ defer prof.Close() //nolint:errcheck
+
+ if err := pprof.StartCPUProfile(prof); err != nil {
+ return err
+ }
+ }
+
+ // import is NOT suppressed; do it.
+ if carFile == nil { // a CAR is compulsory for the import.
+ return fmt.Errorf("no CAR file provided for import")
+ }
+
+ head, err = cs.Import(carFile)
if err != nil {
return err
}
- } else {
- cr, err := car.NewCarReader(cfi)
+
+ pprof.StopCPUProfile()
+ }
+
+ if cctx.Bool("only-import") {
+ return nil
+ }
+
+ // --- VALIDATION ---
+ //
+ // we are now preparing for the validation benchmark.
+ // a HEAD needs to be set; --head takes precedence over the root
+ // of the CAR, if both are provided.
+ if h := cctx.String("head"); h != "" {
+ cids, err := lcli.ParseTipSetString(h)
+ if err != nil {
+ return xerrors.Errorf("failed to parse head tipset key: %w", err)
+ }
+
+ head, err = cs.LoadTipSet(types.NewTipSetKey(cids...))
+ if err != nil {
+ return err
+ }
+ } else if carFile != nil && head == nil {
+ cr, err := car.NewCarReader(carFile)
if err != nil {
return err
}
@@ -223,59 +364,99 @@ var importBenchCmd = &cli.Command{
if err != nil {
return err
}
+ } else if h == "" && carFile == nil {
+ return xerrors.Errorf("neither --car nor --head flags supplied")
}
- if cctx.Bool("only-import") {
- return nil
+ log.Infof("chain head is tipset: %s", head.Key())
+
+ var genesis *types.TipSet
+ log.Infof("getting genesis block")
+ if tsk := cctx.String("genesis-tipset"); tsk != "" {
+ var cids []cid.Cid
+ if cids, err = lcli.ParseTipSetString(tsk); err != nil {
+ return xerrors.Errorf("failed to parse genesis tipset key: %w", err)
+ }
+ genesis, err = cs.LoadTipSet(types.NewTipSetKey(cids...))
+ } else {
+ log.Warnf("getting genesis by height; this will be slow; pass in the genesis tipset through --genesis-tipset")
+ // fallback to the slow path of walking the chain.
+ genesis, err = cs.GetTipsetByHeight(context.TODO(), 0, head, true)
}
- gb, err := cs.GetTipsetByHeight(context.TODO(), 0, head, true)
if err != nil {
return err
}
- err = cs.SetGenesis(gb.Blocks()[0])
- if err != nil {
+ if err = cs.SetGenesis(genesis.Blocks()[0]); err != nil {
return err
}
- startEpoch := abi.ChainEpoch(1)
- if cctx.IsSet("start-at") {
- startEpoch = abi.ChainEpoch(cctx.Int64("start-at"))
- start, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(cctx.Int64("start-at")), head, true)
- if err != nil {
- return err
+ // Resolve the end tipset, falling back to head if not provided.
+ end := head
+ if tsk := cctx.String("end-tipset"); tsk != "" {
+ var cids []cid.Cid
+ if cids, err = lcli.ParseTipSetString(tsk); err != nil {
+ return xerrors.Errorf("failed to end genesis tipset key: %w", err)
}
+ end, err = cs.LoadTipSet(types.NewTipSetKey(cids...))
+ } else if h := cctx.Int64("end-height"); h != 0 {
+ log.Infof("getting end tipset at height %d...", h)
+ end, err = cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), head, true)
+ }
- err = cs.SetHead(start)
- if err != nil {
- return err
+ if err != nil {
+ return err
+ }
+
+ // Resolve the start tipset, if provided; otherwise, fallback to
+ // height 1 for a start point.
+ var (
+ startEpoch = abi.ChainEpoch(1)
+ start *types.TipSet
+ )
+
+ if tsk := cctx.String("start-tipset"); tsk != "" {
+ var cids []cid.Cid
+ if cids, err = lcli.ParseTipSetString(tsk); err != nil {
+ return xerrors.Errorf("failed to start genesis tipset key: %w", err)
}
+ start, err = cs.LoadTipSet(types.NewTipSetKey(cids...))
+ } else if h := cctx.Int64("start-height"); h != 0 {
+ log.Infof("getting start tipset at height %d...", h)
+ // lookback from the end tipset (which falls back to head if not supplied).
+ start, err = cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), end, true)
}
- if h := cctx.Int64("height"); h != 0 {
- tsh, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), head, true)
- if err != nil {
+ if err != nil {
+ return err
+ }
+
+ if start != nil {
+ startEpoch = start.Height()
+ if err := cs.ForceHeadSilent(context.Background(), start); err != nil {
+ // if err := cs.SetHead(start); err != nil {
return err
}
- head = tsh
}
- ts := head
- tschain := []*types.TipSet{ts}
- for ts.Height() > startEpoch {
+ inverseChain := append(make([]*types.TipSet, 0, end.Height()), end)
+ for ts := end; ts.Height() > startEpoch; {
+ if h := ts.Height(); h%100 == 0 {
+ log.Infof("walking back the chain; loaded tipset at height %d...", h)
+ }
next, err := cs.LoadTipSet(ts.Parents())
if err != nil {
return err
}
- tschain = append(tschain, next)
+ inverseChain = append(inverseChain, next)
ts = next
}
var enc *json.Encoder
if cctx.Bool("export-traces") {
- ibj, err := os.Create("import-bench.json")
+ ibj, err := os.Create("bench.json")
if err != nil {
return err
}
@@ -284,8 +465,20 @@ var importBenchCmd = &cli.Command{
enc = json.NewEncoder(ibj)
}
- for i := len(tschain) - 1; i >= 1; i-- {
- cur := tschain[i]
+ if cctx.Bool("global-profile") {
+ prof, err := os.Create("bench.validation.pprof")
+ if err != nil {
+ return err
+ }
+ defer prof.Close() //nolint:errcheck
+
+ if err := pprof.StartCPUProfile(prof); err != nil {
+ return err
+ }
+ }
+
+ for i := len(inverseChain) - 1; i >= 1; i-- {
+ cur := inverseChain[i]
start := time.Now()
log.Infof("computing state (height: %d, ts=%s)", cur.Height(), cur.Cids())
st, trace, err := stm.ExecutionTrace(context.TODO(), cur)
@@ -304,7 +497,7 @@ var importBenchCmd = &cli.Command{
return xerrors.Errorf("failed to write out tipsetexec: %w", err)
}
}
- if tschain[i-1].ParentState() != st {
+ if inverseChain[i-1].ParentState() != st {
stripCallers(tse.Trace)
lastTrace := tse.Trace
d, err := json.MarshalIndent(lastTrace, "", " ")
@@ -320,23 +513,7 @@ var importBenchCmd = &cli.Command{
pprof.StopCPUProfile()
- if true {
- resp, err := http.Get("http://localhost:6060/debug/metrics/prometheus")
- if err != nil {
- return err
- }
-
- metricsfi, err := os.Create("import-bench.metrics")
- if err != nil {
- return err
- }
-
- io.Copy(metricsfi, resp.Body) //nolint:errcheck
- metricsfi.Close() //nolint:errcheck
- }
-
return nil
-
},
}
diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go
index 1af5c1c62f5..0b8ec6fe3fc 100644
--- a/cmd/lotus-bench/main.go
+++ b/cmd/lotus-bench/main.go
@@ -31,7 +31,7 @@ import (
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/chain/actors/policy"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/genesis"
)
@@ -39,8 +39,12 @@ import (
var log = logging.Logger("lotus-bench")
type BenchResults struct {
- SectorSize abi.SectorSize
+ EnvVar map[string]string
+ SectorSize abi.SectorSize
+ SectorNumber int
+
+ SealingSum SealingResult
SealingResults []SealingResult
PostGenerateCandidates time.Duration
@@ -55,6 +59,26 @@ type BenchResults struct {
VerifyWindowPostHot time.Duration
}
+func (bo *BenchResults) SumSealingTime() error {
+ if len(bo.SealingResults) <= 0 {
+ return xerrors.Errorf("BenchResults SealingResults len <= 0")
+ }
+ if len(bo.SealingResults) != bo.SectorNumber {
+ return xerrors.Errorf("BenchResults SealingResults len(%d) != bo.SectorNumber(%d)", len(bo.SealingResults), bo.SectorNumber)
+ }
+
+ for _, sealing := range bo.SealingResults {
+ bo.SealingSum.AddPiece += sealing.AddPiece
+ bo.SealingSum.PreCommit1 += sealing.PreCommit1
+ bo.SealingSum.PreCommit2 += sealing.PreCommit2
+ bo.SealingSum.Commit1 += sealing.Commit1
+ bo.SealingSum.Commit2 += sealing.Commit2
+ bo.SealingSum.Verify += sealing.Verify
+ bo.SealingSum.Unseal += sealing.Unseal
+ }
+ return nil
+}
+
type SealingResult struct {
AddPiece time.Duration
PreCommit1 time.Duration
@@ -94,12 +118,13 @@ func main() {
}
var sealBenchCmd = &cli.Command{
- Name: "sealing",
+ Name: "sealing",
+ Usage: "Benchmark seal and winning post and window post",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "storage-dir",
Value: "~/.lotus-bench",
- Usage: "Path to the storage directory that will store sectors long term",
+ Usage: "path to the storage directory that will store sectors long term",
},
&cli.StringFlag{
Name: "sector-size",
@@ -131,22 +156,26 @@ var sealBenchCmd = &cli.Command{
Name: "skip-unseal",
Usage: "skip the unseal portion of the benchmark",
},
+ &cli.StringFlag{
+ Name: "ticket-preimage",
+ Usage: "ticket random",
+ },
&cli.StringFlag{
Name: "save-commit2-input",
- Usage: "Save commit2 input to a file",
+ Usage: "save commit2 input to a file",
},
&cli.IntFlag{
Name: "num-sectors",
+ Usage: "select number of sectors to seal",
Value: 1,
},
&cli.IntFlag{
Name: "parallel",
+ Usage: "num run in parallel",
Value: 1,
},
},
Action: func(c *cli.Context) error {
- policy.AddSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
-
if c.Bool("no-gpu") {
err := os.Setenv("BELLMAN_NO_GPU", "1")
if err != nil {
@@ -211,18 +240,10 @@ var sealBenchCmd = &cli.Command{
}
sectorSize := abi.SectorSize(sectorSizeInt)
- spt, err := ffiwrapper.SealProofTypeFromSectorSize(sectorSize)
- if err != nil {
- return err
- }
-
- cfg := &ffiwrapper.Config{
- SealProofType: spt,
- }
-
// Only fetch parameters if actually needed
- if !c.Bool("skip-commit2") {
- if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), uint64(sectorSize)); err != nil {
+ skipc2 := c.Bool("skip-commit2")
+ if !skipc2 {
+ if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), build.SrsJSON(), uint64(sectorSize)); err != nil {
return xerrors.Errorf("getting params: %w", err)
}
}
@@ -231,11 +252,13 @@ var sealBenchCmd = &cli.Command{
Root: sbdir,
}
- sb, err := ffiwrapper.New(sbfs, cfg)
+ sb, err := ffiwrapper.New(sbfs)
if err != nil {
return err
}
+ sectorNumber := c.Int("num-sectors")
+
var sealTimings []SealingResult
var sealedSectors []saproof2.SectorInfo
@@ -246,18 +269,11 @@ var sealBenchCmd = &cli.Command{
PreCommit2: 1,
Commit: 1,
}
- sealTimings, sealedSectors, err = runSeals(sb, sbfs, c.Int("num-sectors"), parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), c.Bool("skip-commit2"), c.Bool("skip-unseal"))
+ sealTimings, sealedSectors, err = runSeals(sb, sbfs, sectorNumber, parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), skipc2, c.Bool("skip-unseal"))
if err != nil {
return xerrors.Errorf("failed to run seals: %w", err)
}
- }
-
- beforePost := time.Now()
-
- var challenge [32]byte
- rand.Read(challenge[:])
-
- if robench != "" {
+ } else {
// TODO: implement sbfs.List() and use that for all cases (preexisting sectorbuilder or not)
// TODO: this assumes we only ever benchmark a preseal
@@ -290,12 +306,21 @@ var sealBenchCmd = &cli.Command{
bo := BenchResults{
SectorSize: sectorSize,
+ SectorNumber: sectorNumber,
SealingResults: sealTimings,
}
+ if err := bo.SumSealingTime(); err != nil {
+ return err
+ }
- if !c.Bool("skip-commit2") {
+ var challenge [32]byte
+ rand.Read(challenge[:])
+
+ beforePost := time.Now()
+
+ if !skipc2 {
log.Info("generating winning post candidates")
- wipt, err := spt.RegisteredWinningPoStProof()
+ wipt, err := spt(sectorSize).RegisteredWinningPoStProof()
if err != nil {
return err
}
@@ -420,6 +445,15 @@ var sealBenchCmd = &cli.Command{
bo.VerifyWindowPostHot = verifyWindowpost2.Sub(verifyWindowpost1)
}
+ bo.EnvVar = make(map[string]string)
+ for _, envKey := range []string{"BELLMAN_NO_GPU", "FIL_PROOFS_MAXIMIZE_CACHING", "FIL_PROOFS_USE_GPU_COLUMN_BUILDER",
+ "FIL_PROOFS_USE_GPU_TREE_BUILDER", "FIL_PROOFS_USE_MULTICORE_SDR", "BELLMAN_CUSTOM_GPU"} {
+ envValue, found := os.LookupEnv(envKey)
+ if found {
+ bo.EnvVar[envKey] = envValue
+ }
+ }
+
if c.Bool("json-out") {
data, err := json.MarshalIndent(bo, "", " ")
if err != nil {
@@ -428,21 +462,25 @@ var sealBenchCmd = &cli.Command{
fmt.Println(string(data))
} else {
- fmt.Printf("----\nresults (v27) (%d)\n", sectorSize)
+ fmt.Println("environment variable list:")
+ for envKey, envValue := range bo.EnvVar {
+ fmt.Printf("%s=%s\n", envKey, envValue)
+ }
+ fmt.Printf("----\nresults (v28) SectorSize:(%d), SectorNumber:(%d)\n", sectorSize, sectorNumber)
if robench == "" {
- fmt.Printf("seal: addPiece: %s (%s)\n", bo.SealingResults[0].AddPiece, bps(bo.SectorSize, bo.SealingResults[0].AddPiece)) // TODO: average across multiple sealings
- fmt.Printf("seal: preCommit phase 1: %s (%s)\n", bo.SealingResults[0].PreCommit1, bps(bo.SectorSize, bo.SealingResults[0].PreCommit1))
- fmt.Printf("seal: preCommit phase 2: %s (%s)\n", bo.SealingResults[0].PreCommit2, bps(bo.SectorSize, bo.SealingResults[0].PreCommit2))
- fmt.Printf("seal: commit phase 1: %s (%s)\n", bo.SealingResults[0].Commit1, bps(bo.SectorSize, bo.SealingResults[0].Commit1))
- fmt.Printf("seal: commit phase 2: %s (%s)\n", bo.SealingResults[0].Commit2, bps(bo.SectorSize, bo.SealingResults[0].Commit2))
- fmt.Printf("seal: verify: %s\n", bo.SealingResults[0].Verify)
+ fmt.Printf("seal: addPiece: %s (%s)\n", bo.SealingSum.AddPiece, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.AddPiece))
+ fmt.Printf("seal: preCommit phase 1: %s (%s)\n", bo.SealingSum.PreCommit1, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.PreCommit1))
+ fmt.Printf("seal: preCommit phase 2: %s (%s)\n", bo.SealingSum.PreCommit2, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.PreCommit2))
+ fmt.Printf("seal: commit phase 1: %s (%s)\n", bo.SealingSum.Commit1, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.Commit1))
+ fmt.Printf("seal: commit phase 2: %s (%s)\n", bo.SealingSum.Commit2, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.Commit2))
+ fmt.Printf("seal: verify: %s\n", bo.SealingSum.Verify)
if !c.Bool("skip-unseal") {
- fmt.Printf("unseal: %s (%s)\n", bo.SealingResults[0].Unseal, bps(bo.SectorSize, bo.SealingResults[0].Unseal))
+ fmt.Printf("unseal: %s (%s)\n", bo.SealingSum.Unseal, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.Unseal))
}
fmt.Println("")
}
- if !c.Bool("skip-commit2") {
- fmt.Printf("generate candidates: %s (%s)\n", bo.PostGenerateCandidates, bps(bo.SectorSize*abi.SectorSize(len(bo.SealingResults)), bo.PostGenerateCandidates))
+ if !skipc2 {
+ fmt.Printf("generate candidates: %s (%s)\n", bo.PostGenerateCandidates, bps(bo.SectorSize, len(bo.SealingResults), bo.PostGenerateCandidates))
fmt.Printf("compute winning post proof (cold): %s\n", bo.PostWinningProofCold)
fmt.Printf("compute winning post proof (hot): %s\n", bo.PostWinningProofHot)
fmt.Printf("verify winning post proof (cold): %s\n", bo.VerifyWinningPostCold)
@@ -475,11 +513,13 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
if numSectors%par.PreCommit1 != 0 {
return nil, nil, fmt.Errorf("parallelism factor must cleanly divide numSectors")
}
-
- for i := abi.SectorNumber(1); i <= abi.SectorNumber(numSectors); i++ {
- sid := abi.SectorID{
- Miner: mid,
- Number: i,
+ for i := abi.SectorNumber(0); i < abi.SectorNumber(numSectors); i++ {
+ sid := storage.SectorRef{
+ ID: abi.SectorID{
+ Miner: mid,
+ Number: i,
+ },
+ ProofType: spt(sectorSize),
}
start := time.Now()
@@ -494,7 +534,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
pieces = append(pieces, pi)
- sealTimings[i-1].AddPiece = time.Since(start)
+ sealTimings[i].AddPiece = time.Since(start)
}
sectorsPerWorker := numSectors / par.PreCommit1
@@ -503,13 +543,15 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
for wid := 0; wid < par.PreCommit1; wid++ {
go func(worker int) {
sealerr := func() error {
- start := 1 + (worker * sectorsPerWorker)
+ start := worker * sectorsPerWorker
end := start + sectorsPerWorker
for i := abi.SectorNumber(start); i < abi.SectorNumber(end); i++ {
- ix := int(i - 1)
- sid := abi.SectorID{
- Miner: mid,
- Number: i,
+ sid := storage.SectorRef{
+ ID: abi.SectorID{
+ Miner: mid,
+ Number: i,
+ },
+ ProofType: spt(sectorSize),
}
start := time.Now()
@@ -518,8 +560,8 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
ticket := abi.SealRandomness(trand[:])
log.Infof("[%d] Running replication(1)...", i)
- pieces := []abi.PieceInfo{pieces[ix]}
- pc1o, err := sb.SealPreCommit1(context.TODO(), sid, ticket, pieces)
+ piece := []abi.PieceInfo{pieces[i]}
+ pc1o, err := sb.SealPreCommit1(context.TODO(), sid, ticket, piece)
if err != nil {
return xerrors.Errorf("commit: %w", err)
}
@@ -537,8 +579,8 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
precommit2 := time.Now()
<-preCommit2Sema
- sealedSectors[ix] = saproof2.SectorInfo{
- SealProof: sb.SealProofType(),
+ sealedSectors[i] = saproof2.SectorInfo{
+ SealProof: sid.ProofType,
SectorNumber: i,
SealedCID: cids.Sealed,
}
@@ -551,7 +593,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
commitSema <- struct{}{}
commitStart := time.Now()
log.Infof("[%d] Generating PoRep for sector (1)", i)
- c1o, err := sb.SealCommit1(context.TODO(), sid, ticket, seed.Value, pieces, cids)
+ c1o, err := sb.SealCommit1(context.TODO(), sid, ticket, seed.Value, piece, cids)
if err != nil {
return err
}
@@ -592,7 +634,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
svi := saproof2.SealVerifyInfo{
SectorID: abi.SectorID{Miner: mid, Number: i},
SealedCID: cids.Sealed,
- SealProof: sb.SealProofType(),
+ SealProof: sid.ProofType,
Proof: proof,
DealIDs: nil,
Randomness: ticket,
@@ -614,7 +656,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
if !skipunseal {
log.Infof("[%d] Unsealing sector", i)
{
- p, done, err := sbfs.AcquireSector(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
+ p, done, err := sbfs.AcquireSector(context.TODO(), sid, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
if err != nil {
return xerrors.Errorf("acquire unsealed sector for removing: %w", err)
}
@@ -625,19 +667,19 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
}
}
- err := sb.UnsealPiece(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), ticket, cids.Unsealed)
+ err := sb.UnsealPiece(context.TODO(), sid, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), ticket, cids.Unsealed)
if err != nil {
return err
}
}
unseal := time.Now()
- sealTimings[ix].PreCommit1 = precommit1.Sub(start)
- sealTimings[ix].PreCommit2 = precommit2.Sub(pc2Start)
- sealTimings[ix].Commit1 = sealcommit1.Sub(commitStart)
- sealTimings[ix].Commit2 = sealcommit2.Sub(sealcommit1)
- sealTimings[ix].Verify = verifySeal.Sub(sealcommit2)
- sealTimings[ix].Unseal = unseal.Sub(verifySeal)
+ sealTimings[i].PreCommit1 = precommit1.Sub(start)
+ sealTimings[i].PreCommit2 = precommit2.Sub(pc2Start)
+ sealTimings[i].Commit1 = sealcommit1.Sub(commitStart)
+ sealTimings[i].Commit2 = sealcommit2.Sub(sealcommit1)
+ sealTimings[i].Verify = verifySeal.Sub(sealcommit2)
+ sealTimings[i].Unseal = unseal.Sub(verifySeal)
}
return nil
}()
@@ -660,8 +702,9 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
}
var proveCmd = &cli.Command{
- Name: "prove",
- Usage: "Benchmark a proof computation",
+ Name: "prove",
+ Usage: "Benchmark a proof computation",
+ ArgsUsage: "[input.json]",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "no-gpu",
@@ -695,7 +738,7 @@ var proveCmd = &cli.Command{
return xerrors.Errorf("unmarshalling input file: %w", err)
}
- if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), c2in.SectorSize); err != nil {
+ if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), build.SrsJSON(), c2in.SectorSize); err != nil {
return xerrors.Errorf("getting params: %w", err)
}
@@ -708,23 +751,23 @@ var proveCmd = &cli.Command{
return err
}
- spt, err := ffiwrapper.SealProofTypeFromSectorSize(abi.SectorSize(c2in.SectorSize))
+ sb, err := ffiwrapper.New(nil)
if err != nil {
return err
}
- cfg := &ffiwrapper.Config{
- SealProofType: spt,
- }
-
- sb, err := ffiwrapper.New(nil, cfg)
- if err != nil {
- return err
+ ref := storage.SectorRef{
+ ID: abi.SectorID{
+ Miner: abi.ActorID(mid),
+ Number: abi.SectorNumber(c2in.SectorNum),
+ },
+ ProofType: spt(abi.SectorSize(c2in.SectorSize)),
}
+ fmt.Printf("----\nstart proof computation\n")
start := time.Now()
- proof, err := sb.SealCommit2(context.TODO(), abi.SectorID{Miner: abi.ActorID(mid), Number: abi.SectorNumber(c2in.SectorNum)}, c2in.Phase1Out)
+ proof, err := sb.SealCommit2(context.TODO(), ref, c2in.Phase1Out)
if err != nil {
return err
}
@@ -733,17 +776,27 @@ var proveCmd = &cli.Command{
fmt.Printf("proof: %x\n", proof)
- fmt.Printf("----\nresults (v27) (%d)\n", c2in.SectorSize)
+ fmt.Printf("----\nresults (v28) (%d)\n", c2in.SectorSize)
dur := sealCommit2.Sub(start)
- fmt.Printf("seal: commit phase 2: %s (%s)\n", dur, bps(abi.SectorSize(c2in.SectorSize), dur))
+ fmt.Printf("seal: commit phase 2: %s (%s)\n", dur, bps(abi.SectorSize(c2in.SectorSize), 1, dur))
return nil
},
}
-func bps(data abi.SectorSize, d time.Duration) string {
- bdata := new(big.Int).SetUint64(uint64(data))
+func bps(sectorSize abi.SectorSize, sectorNum int, d time.Duration) string {
+ bdata := new(big.Int).SetUint64(uint64(sectorSize))
+ bdata = bdata.Mul(bdata, big.NewInt(int64(sectorNum)))
bdata = bdata.Mul(bdata, big.NewInt(time.Second.Nanoseconds()))
bps := bdata.Div(bdata, big.NewInt(d.Nanoseconds()))
return types.SizeStr(types.BigInt{Int: bps}) + "/s"
}
+
+func spt(ssize abi.SectorSize) abi.RegisteredSealProof {
+ spt, err := miner.SealProofTypeFromSectorSize(ssize, build.NewestNetworkVersion)
+ if err != nil {
+ panic(err)
+ }
+
+ return spt
+}
diff --git a/cmd/lotus-chainwatch/processor/miner.go b/cmd/lotus-chainwatch/processor/miner.go
index 3a37a82f800..f3514df88ce 100644
--- a/cmd/lotus-chainwatch/processor/miner.go
+++ b/cmd/lotus-chainwatch/processor/miner.go
@@ -14,8 +14,8 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/api/apibstore"
+ "github.com/filecoin-project/lotus/api/v0api"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/events/state"
@@ -202,7 +202,7 @@ func (p *Processor) processMiners(ctx context.Context, minerTips map[types.TipSe
log.Debugw("Processed Miners", "duration", time.Since(start).String())
}()
- stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(p.node))
+ stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(p.node))
var out []minerActorInfo
// TODO add parallel calls if this becomes slow
@@ -649,7 +649,7 @@ func (p *Processor) getMinerStateAt(ctx context.Context, maddr address.Address,
if err != nil {
return nil, err
}
- return miner.Load(store.ActorStore(ctx, apibstore.NewAPIBlockstore(p.node)), prevActor)
+ return miner.Load(store.ActorStore(ctx, blockstore.NewAPIBlockstore(p.node)), prevActor)
}
func (p *Processor) getMinerPreCommitChanges(ctx context.Context, m minerActorInfo) (*miner.PreCommitChanges, error) {
@@ -1026,7 +1026,7 @@ func (p *Processor) storeMinersPower(miners []minerActorInfo) error {
}
// load the power actor state clam as an adt.Map at the tipset `ts`.
-func getPowerActorState(ctx context.Context, api api.FullNode, ts types.TipSetKey) (power.State, error) {
+func getPowerActorState(ctx context.Context, api v0api.FullNode, ts types.TipSetKey) (power.State, error) {
powerActor, err := api.StateGetActor(ctx, power.Address, ts)
if err != nil {
return nil, err
diff --git a/cmd/lotus-chainwatch/processor/processor.go b/cmd/lotus-chainwatch/processor/processor.go
index 1f8b246ed29..af5935d4795 100644
--- a/cmd/lotus-chainwatch/processor/processor.go
+++ b/cmd/lotus-chainwatch/processor/processor.go
@@ -17,7 +17,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
- "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/chain/types"
cw_util "github.com/filecoin-project/lotus/cmd/lotus-chainwatch/util"
"github.com/filecoin-project/lotus/lib/parmap"
@@ -28,7 +28,7 @@ var log = logging.Logger("processor")
type Processor struct {
db *sql.DB
- node api.FullNode
+ node v0api.FullNode
ctxStore *cw_util.APIIpldStore
genesisTs *types.TipSet
@@ -52,7 +52,7 @@ type actorInfo struct {
state string
}
-func NewProcessor(ctx context.Context, db *sql.DB, node api.FullNode, batch int) *Processor {
+func NewProcessor(ctx context.Context, db *sql.DB, node v0api.FullNode, batch int) *Processor {
ctxStore := cw_util.NewAPIIpldStore(ctx, node)
return &Processor{
db: db,
@@ -146,7 +146,7 @@ func (p *Processor) Start(ctx context.Context) {
go func() {
defer grp.Done()
if err := p.HandleMarketChanges(ctx, actorChanges[builtin2.StorageMarketActorCodeID]); err != nil {
- log.Errorf("Failed to handle market changes: %w", err)
+ log.Errorf("Failed to handle market changes: %v", err)
return
}
}()
@@ -155,7 +155,7 @@ func (p *Processor) Start(ctx context.Context) {
go func() {
defer grp.Done()
if err := p.HandleMinerChanges(ctx, actorChanges[builtin2.StorageMinerActorCodeID]); err != nil {
- log.Errorf("Failed to handle miner changes: %w", err)
+ log.Errorf("Failed to handle miner changes: %v", err)
return
}
}()
@@ -164,7 +164,7 @@ func (p *Processor) Start(ctx context.Context) {
go func() {
defer grp.Done()
if err := p.HandleRewardChanges(ctx, actorChanges[builtin2.RewardActorCodeID], nullRounds); err != nil {
- log.Errorf("Failed to handle reward changes: %w", err)
+ log.Errorf("Failed to handle reward changes: %v", err)
return
}
}()
@@ -173,7 +173,7 @@ func (p *Processor) Start(ctx context.Context) {
go func() {
defer grp.Done()
if err := p.HandlePowerChanges(ctx, actorChanges[builtin2.StoragePowerActorCodeID]); err != nil {
- log.Errorf("Failed to handle power actor changes: %w", err)
+ log.Errorf("Failed to handle power actor changes: %v", err)
return
}
}()
@@ -182,7 +182,7 @@ func (p *Processor) Start(ctx context.Context) {
go func() {
defer grp.Done()
if err := p.HandleMessageChanges(ctx, toProcess); err != nil {
- log.Errorf("Failed to handle message changes: %w", err)
+ log.Errorf("Failed to handle message changes: %v", err)
return
}
}()
@@ -191,7 +191,7 @@ func (p *Processor) Start(ctx context.Context) {
go func() {
defer grp.Done()
if err := p.HandleCommonActorsChanges(ctx, actorChanges); err != nil {
- log.Errorf("Failed to handle common actor changes: %w", err)
+ log.Errorf("Failed to handle common actor changes: %v", err)
return
}
}()
diff --git a/cmd/lotus-chainwatch/run.go b/cmd/lotus-chainwatch/run.go
index 64f242755ab..6e47a100d79 100644
--- a/cmd/lotus-chainwatch/run.go
+++ b/cmd/lotus-chainwatch/run.go
@@ -8,6 +8,8 @@ import (
"os"
"strings"
+ "github.com/filecoin-project/lotus/api/v0api"
+
_ "github.com/lib/pq"
"github.com/filecoin-project/go-jsonrpc"
@@ -15,7 +17,6 @@ import (
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
- "github.com/filecoin-project/lotus/api"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/processor"
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/scheduler"
@@ -44,7 +45,7 @@ var runCmd = &cli.Command{
return err
}
- var api api.FullNode
+ var api v0api.FullNode
var closer jsonrpc.ClientCloser
var err error
if tokenMaddr := cctx.String("api"); tokenMaddr != "" {
diff --git a/cmd/lotus-chainwatch/syncer/sync.go b/cmd/lotus-chainwatch/syncer/sync.go
index 37af9cce08c..b5e9c73d6f4 100644
--- a/cmd/lotus-chainwatch/syncer/sync.go
+++ b/cmd/lotus-chainwatch/syncer/sync.go
@@ -13,7 +13,7 @@ import (
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
- "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -26,10 +26,10 @@ type Syncer struct {
lookbackLimit uint64
headerLk sync.Mutex
- node api.FullNode
+ node v0api.FullNode
}
-func NewSyncer(db *sql.DB, node api.FullNode, lookbackLimit uint64) *Syncer {
+func NewSyncer(db *sql.DB, node v0api.FullNode, lookbackLimit uint64) *Syncer {
return &Syncer{
db: db,
node: node,
diff --git a/cmd/lotus-chainwatch/util/api.go b/cmd/lotus-chainwatch/util/api.go
index cfda833e023..f8f22cbbf67 100644
--- a/cmd/lotus-chainwatch/util/api.go
+++ b/cmd/lotus-chainwatch/util/api.go
@@ -5,13 +5,13 @@ import (
"net/http"
"github.com/filecoin-project/go-jsonrpc"
- "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
+ "github.com/filecoin-project/lotus/api/v0api"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
-func GetFullNodeAPIUsingCredentials(ctx context.Context, listenAddr, token string) (api.FullNode, jsonrpc.ClientCloser, error) {
+func GetFullNodeAPIUsingCredentials(ctx context.Context, listenAddr, token string) (v0api.FullNode, jsonrpc.ClientCloser, error) {
parsedAddr, err := ma.NewMultiaddr(listenAddr)
if err != nil {
return nil, nil, err
@@ -22,7 +22,7 @@ func GetFullNodeAPIUsingCredentials(ctx context.Context, listenAddr, token strin
return nil, nil, err
}
- return client.NewFullNodeRPC(ctx, apiURI(addr), apiHeaders(token))
+ return client.NewFullNodeRPCV0(ctx, apiURI(addr), apiHeaders(token))
}
func apiURI(addr string) string {
return "ws://" + addr + "/rpc/v0"
diff --git a/cmd/lotus-chainwatch/util/contextStore.go b/cmd/lotus-chainwatch/util/contextStore.go
index bd812581b13..c93f87f9b66 100644
--- a/cmd/lotus-chainwatch/util/contextStore.go
+++ b/cmd/lotus-chainwatch/util/contextStore.go
@@ -8,7 +8,7 @@ import (
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
- "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v0api"
)
// TODO extract this to a common location in lotus and reuse the code
@@ -16,10 +16,10 @@ import (
// APIIpldStore is required for AMT and HAMT access.
type APIIpldStore struct {
ctx context.Context
- api api.FullNode
+ api v0api.FullNode
}
-func NewAPIIpldStore(ctx context.Context, api api.FullNode) *APIIpldStore {
+func NewAPIIpldStore(ctx context.Context, api v0api.FullNode) *APIIpldStore {
return &APIIpldStore{
ctx: ctx,
api: api,
diff --git a/cmd/lotus-fountain/main.go b/cmd/lotus-fountain/main.go
index ea7190e8364..7ac598d8e9a 100644
--- a/cmd/lotus-fountain/main.go
+++ b/cmd/lotus-fountain/main.go
@@ -3,6 +3,7 @@ package main
import (
"context"
"fmt"
+ "html/template"
"net"
"net/http"
"os"
@@ -14,7 +15,7 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
- "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
@@ -68,6 +69,10 @@ var runCmd = &cli.Command{
EnvVars: []string{"LOTUS_FOUNTAIN_AMOUNT"},
Value: "50",
},
+ &cli.Float64Flag{
+ Name: "captcha-threshold",
+ Value: 0.5,
+ },
},
Action: func(cctx *cli.Context) error {
sendPerRequest, err := types.ParseFIL(cctx.String("amount"))
@@ -87,7 +92,7 @@ var runCmd = &cli.Command{
return err
}
- log.Info("Remote version: %s", v.Version)
+ log.Infof("Remote version: %s", v.Version)
from, err := address.NewFromString(cctx.String("from"))
if err != nil {
@@ -107,11 +112,13 @@ var runCmd = &cli.Command{
WalletRate: 15 * time.Minute,
WalletBurst: 2,
}),
+ recapThreshold: cctx.Float64("captcha-threshold"),
}
- http.Handle("/", http.FileServer(rice.MustFindBox("site").HTTPBox()))
- http.HandleFunc("/send", h.send)
-
+ box := rice.MustFindBox("site")
+ http.Handle("/", http.FileServer(box.HTTPBox()))
+ http.HandleFunc("/funds.html", prepFundsHtml(box))
+ http.Handle("/send", h)
fmt.Printf("Open http://%s\n", cctx.String("front"))
go func() {
@@ -123,22 +130,63 @@ var runCmd = &cli.Command{
},
}
+func prepFundsHtml(box *rice.Box) http.HandlerFunc {
+ tmpl := template.Must(template.New("funds").Parse(box.MustString("funds.html")))
+ return func(w http.ResponseWriter, r *http.Request) {
+ err := tmpl.Execute(w, os.Getenv("RECAPTCHA_SITE_KEY"))
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadGateway)
+ return
+ }
+ }
+}
+
type handler struct {
ctx context.Context
- api api.FullNode
+ api v0api.FullNode
from address.Address
sendPerRequest types.FIL
- limiter *Limiter
+ limiter *Limiter
+ recapThreshold float64
}
-func (h *handler) send(w http.ResponseWriter, r *http.Request) {
+func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodPost {
+ http.Error(w, "only POST is allowed", http.StatusBadRequest)
+ return
+ }
+
+ reqIP := r.Header.Get("X-Real-IP")
+ if reqIP == "" {
+ h, _, err := net.SplitHostPort(r.RemoteAddr)
+ if err != nil {
+ log.Errorf("could not get ip from: %s, err: %s", r.RemoteAddr, err)
+ }
+ reqIP = h
+ }
+
+ capResp, err := VerifyToken(r.FormValue("g-recaptcha-response"), reqIP)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadGateway)
+ return
+ }
+ if !capResp.Success || capResp.Score < h.recapThreshold {
+ log.Infow("spam", "capResp", capResp)
+ http.Error(w, "spam protection", http.StatusUnprocessableEntity)
+ return
+ }
+
to, err := address.NewFromString(r.FormValue("address"))
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
+ if to == address.Undef {
+ http.Error(w, "empty address", http.StatusBadRequest)
+ return
+ }
// Limit based on wallet address
limiter := h.limiter.GetWalletLimiter(to.String())
@@ -148,15 +196,6 @@ func (h *handler) send(w http.ResponseWriter, r *http.Request) {
}
// Limit based on IP
-
- reqIP := r.Header.Get("X-Real-IP")
- if reqIP == "" {
- h, _, err := net.SplitHostPort(r.RemoteAddr)
- if err != nil {
- log.Errorf("could not get ip from: %s, err: %s", r.RemoteAddr, err)
- }
- reqIP = h
- }
if i := net.ParseIP(reqIP); i != nil && i.IsLoopback() {
log.Errorf("rate limiting localhost: %s", reqIP)
}
diff --git a/cmd/lotus-fountain/recaptcha.go b/cmd/lotus-fountain/recaptcha.go
new file mode 100644
index 00000000000..69359faa3bc
--- /dev/null
+++ b/cmd/lotus-fountain/recaptcha.go
@@ -0,0 +1,73 @@
+// From https://github.com/lukasaron/recaptcha
+// BLS-3 Licensed
+// Copyright (c) 2020, Lukas Aron
+// Modified by Kubuxu
+package main
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "time"
+)
+
+// content type for communication with the verification server.
+const (
+ contentType = "application/json"
+)
+
+// VerifyURL defines the endpoint which is called when a token needs to be verified.
+var (
+ VerifyURL, _ = url.Parse("https://www.google.com/recaptcha/api/siteverify")
+)
+
+// Response defines the response format from the verification endpoint.
+type Response struct {
+ Success bool `json:"success"` // status of the verification
+ TimeStamp time.Time `json:"challenge_ts"` // timestamp of the challenge load (ISO format)
+ HostName string `json:"hostname"` // the hostname of the site where the reCAPTCHA was solved
+ Score float64 `json:"score"` // the score for this request (0.0 - 1.0)
+ Action string `json:"action"` // the action name for this request
+ ErrorCodes []string `json:"error-codes"` // error codes
+ AndroidPackageName string `json:"apk_package_name"` // android related only
+}
+
+// VerifyToken function implements the basic logic of verification of ReCaptcha token that is usually created
+// on the user site (front-end) and then sent to verify on the server side (back-end).
+// To provide a successful verification process the secret key is required. Based on the security recommendations
+// the key has to be passed as an environmental variable SECRET_KEY.
+//
+// Token parameter is required, however remoteIP is optional.
+func VerifyToken(token, remoteIP string) (Response, error) {
+ resp := Response{}
+ if len(token) == 0 {
+ resp.ErrorCodes = []string{"no-token"}
+ return resp, nil
+ }
+
+ q := url.Values{}
+ q.Add("secret", os.Getenv("RECAPTCHA_SECRET_KEY"))
+ q.Add("response", token)
+ q.Add("remoteip", remoteIP)
+
+ var u *url.URL
+ {
+ verifyCopy := *VerifyURL
+ u = &verifyCopy
+ }
+ u.RawQuery = q.Encode()
+ r, err := http.Post(u.String(), contentType, nil)
+ if err != nil {
+ return resp, err
+ }
+
+ b, err := ioutil.ReadAll(r.Body)
+ _ = r.Body.Close() // close immediately after reading finished
+ if err != nil {
+ return resp, err
+ }
+
+ return resp, json.Unmarshal(b, &resp)
+}
diff --git a/cmd/lotus-fountain/site/funds.html b/cmd/lotus-fountain/site/funds.html
index cd26032f3a4..c6916239fcd 100644
--- a/cmd/lotus-fountain/site/funds.html
+++ b/cmd/lotus-fountain/site/funds.html
@@ -3,6 +3,13 @@
Sending Funds - Lotus Fountain
+
+
+
@@ -11,10 +18,13 @@
[SENDING FUNDS]
-
diff --git a/cmd/lotus-gateway/api.go b/cmd/lotus-gateway/api.go
deleted file mode 100644
index 875eaac7d6c..00000000000
--- a/cmd/lotus-gateway/api.go
+++ /dev/null
@@ -1,390 +0,0 @@
-package main
-
-import (
- "context"
- "fmt"
- "time"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-bitfield"
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/crypto"
- "github.com/filecoin-project/go-state-types/dline"
- "github.com/filecoin-project/go-state-types/network"
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/lib/sigs"
- _ "github.com/filecoin-project/lotus/lib/sigs/bls"
- _ "github.com/filecoin-project/lotus/lib/sigs/secp"
- "github.com/filecoin-project/lotus/node/impl/full"
- "github.com/ipfs/go-cid"
-)
-
-const (
- LookbackCap = time.Hour * 24
- StateWaitLookbackLimit = abi.ChainEpoch(20)
-)
-
-var (
- ErrLookbackTooLong = fmt.Errorf("lookbacks of more than %s are disallowed", LookbackCap)
-)
-
-// gatewayDepsAPI defines the API methods that the GatewayAPI depends on
-// (to make it easy to mock for tests)
-type gatewayDepsAPI interface {
- Version(context.Context) (api.Version, error)
- ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error)
- ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
- ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error)
- ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error)
- ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
- ChainHasObj(context.Context, cid.Cid) (bool, error)
- ChainHead(ctx context.Context) (*types.TipSet, error)
- ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
- ChainReadObj(context.Context, cid.Cid) ([]byte, error)
- GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
- MpoolPushUntrusted(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
- MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
- MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
- StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
- StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
- StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
- StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
- StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
- StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
- StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
- StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error)
- StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
- StateWaitMsgLimited(ctx context.Context, msg cid.Cid, confidence uint64, h abi.ChainEpoch) (*api.MsgLookup, error)
- StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error)
- StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
- StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
- StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
- StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error)
- StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error)
- StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
- StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error)
- StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error)
- StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
- StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (api.CirculatingSupply, error)
-}
-
-type GatewayAPI struct {
- api gatewayDepsAPI
- lookbackCap time.Duration
- stateWaitLookbackLimit abi.ChainEpoch
-}
-
-// NewGatewayAPI creates a new GatewayAPI with the default lookback cap
-func NewGatewayAPI(api gatewayDepsAPI) *GatewayAPI {
- return newGatewayAPI(api, LookbackCap, StateWaitLookbackLimit)
-}
-
-// used by the tests
-func newGatewayAPI(api gatewayDepsAPI, lookbackCap time.Duration, stateWaitLookbackLimit abi.ChainEpoch) *GatewayAPI {
- return &GatewayAPI{api: api, lookbackCap: lookbackCap, stateWaitLookbackLimit: stateWaitLookbackLimit}
-}
-
-func (a *GatewayAPI) checkTipsetKey(ctx context.Context, tsk types.TipSetKey) error {
- if tsk.IsEmpty() {
- return nil
- }
-
- ts, err := a.api.ChainGetTipSet(ctx, tsk)
- if err != nil {
- return err
- }
-
- return a.checkTipset(ts)
-}
-
-func (a *GatewayAPI) checkTipset(ts *types.TipSet) error {
- at := time.Unix(int64(ts.Blocks()[0].Timestamp), 0)
- if err := a.checkTimestamp(at); err != nil {
- return fmt.Errorf("bad tipset: %w", err)
- }
- return nil
-}
-
-func (a *GatewayAPI) checkTipsetHeight(ts *types.TipSet, h abi.ChainEpoch) error {
- tsBlock := ts.Blocks()[0]
- heightDelta := time.Duration(uint64(tsBlock.Height-h)*build.BlockDelaySecs) * time.Second
- timeAtHeight := time.Unix(int64(tsBlock.Timestamp), 0).Add(-heightDelta)
-
- if err := a.checkTimestamp(timeAtHeight); err != nil {
- return fmt.Errorf("bad tipset height: %w", err)
- }
- return nil
-}
-
-func (a *GatewayAPI) checkTimestamp(at time.Time) error {
- if time.Since(at) > a.lookbackCap {
- return ErrLookbackTooLong
- }
-
- return nil
-}
-
-func (a *GatewayAPI) Version(ctx context.Context) (api.Version, error) {
- return a.api.Version(ctx)
-}
-
-func (a *GatewayAPI) ChainGetBlockMessages(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) {
- return a.api.ChainGetBlockMessages(ctx, c)
-}
-
-func (a *GatewayAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) {
- return a.api.ChainHasObj(ctx, c)
-}
-
-func (a *GatewayAPI) ChainHead(ctx context.Context) (*types.TipSet, error) {
- // TODO: cache and invalidate cache when timestamp is up (or have internal ChainNotify)
-
- return a.api.ChainHead(ctx)
-}
-
-func (a *GatewayAPI) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) {
- return a.api.ChainGetMessage(ctx, mc)
-}
-
-func (a *GatewayAPI) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
- return a.api.ChainGetTipSet(ctx, tsk)
-}
-
-func (a *GatewayAPI) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) {
- var ts *types.TipSet
- if tsk.IsEmpty() {
- head, err := a.api.ChainHead(ctx)
- if err != nil {
- return nil, err
- }
- ts = head
- } else {
- gts, err := a.api.ChainGetTipSet(ctx, tsk)
- if err != nil {
- return nil, err
- }
- ts = gts
- }
-
- // Check if the tipset key refers to a tipset that's too far in the past
- if err := a.checkTipset(ts); err != nil {
- return nil, err
- }
-
- // Check if the height is too far in the past
- if err := a.checkTipsetHeight(ts, h); err != nil {
- return nil, err
- }
-
- return a.api.ChainGetTipSetByHeight(ctx, h, tsk)
-}
-
-func (a *GatewayAPI) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) {
- return a.api.ChainGetNode(ctx, p)
-}
-
-func (a *GatewayAPI) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) {
- return a.api.ChainNotify(ctx)
-}
-
-func (a *GatewayAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) {
- return a.api.ChainReadObj(ctx, c)
-}
-
-func (a *GatewayAPI) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return nil, err
- }
-
- return a.api.GasEstimateMessageGas(ctx, msg, spec, tsk)
-}
-
-func (a *GatewayAPI) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) {
- // TODO: additional anti-spam checks
- return a.api.MpoolPushUntrusted(ctx, sm)
-}
-
-func (a *GatewayAPI) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return types.NewInt(0), err
- }
-
- return a.api.MsigGetAvailableBalance(ctx, addr, tsk)
-}
-
-func (a *GatewayAPI) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) {
- if err := a.checkTipsetKey(ctx, start); err != nil {
- return types.NewInt(0), err
- }
- if err := a.checkTipsetKey(ctx, end); err != nil {
- return types.NewInt(0), err
- }
-
- return a.api.MsigGetVested(ctx, addr, start, end)
-}
-
-func (a *GatewayAPI) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return address.Undef, err
- }
-
- return a.api.StateAccountKey(ctx, addr, tsk)
-}
-
-func (a *GatewayAPI) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return api.DealCollateralBounds{}, err
- }
-
- return a.api.StateDealProviderCollateralBounds(ctx, size, verified, tsk)
-}
-
-func (a *GatewayAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return nil, err
- }
-
- return a.api.StateGetActor(ctx, actor, tsk)
-}
-
-func (a *GatewayAPI) StateGetReceipt(ctx context.Context, c cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return nil, err
- }
-
- return a.api.StateGetReceipt(ctx, c, tsk)
-}
-
-func (a *GatewayAPI) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return nil, err
- }
-
- return a.api.StateListMiners(ctx, tsk)
-}
-
-func (a *GatewayAPI) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return address.Undef, err
- }
-
- return a.api.StateLookupID(ctx, addr, tsk)
-}
-
-func (a *GatewayAPI) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return api.MarketBalance{}, err
- }
-
- return a.api.StateMarketBalance(ctx, addr, tsk)
-}
-
-func (a *GatewayAPI) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return nil, err
- }
-
- return a.api.StateMarketStorageDeal(ctx, dealId, tsk)
-}
-
-func (a *GatewayAPI) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return network.VersionMax, err
- }
-
- return a.api.StateNetworkVersion(ctx, tsk)
-}
-
-func (a *GatewayAPI) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) {
- return a.api.StateWaitMsgLimited(ctx, msg, confidence, a.stateWaitLookbackLimit)
-}
-
-func (a *GatewayAPI) StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return nil, err
- }
- return a.api.StateReadState(ctx, actor, tsk)
-}
-
-func (a *GatewayAPI) StateMinerPower(ctx context.Context, m address.Address, tsk types.TipSetKey) (*api.MinerPower, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return nil, err
- }
- return a.api.StateMinerPower(ctx, m, tsk)
-}
-
-func (a *GatewayAPI) StateMinerFaults(ctx context.Context, m address.Address, tsk types.TipSetKey) (bitfield.BitField, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return bitfield.BitField{}, err
- }
- return a.api.StateMinerFaults(ctx, m, tsk)
-}
-func (a *GatewayAPI) StateMinerRecoveries(ctx context.Context, m address.Address, tsk types.TipSetKey) (bitfield.BitField, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return bitfield.BitField{}, err
- }
- return a.api.StateMinerRecoveries(ctx, m, tsk)
-}
-
-func (a *GatewayAPI) StateMinerInfo(ctx context.Context, m address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return miner.MinerInfo{}, err
- }
- return a.api.StateMinerInfo(ctx, m, tsk)
-}
-
-func (a *GatewayAPI) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]api.Deadline, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return nil, err
- }
- return a.api.StateMinerDeadlines(ctx, m, tsk)
-}
-
-func (a *GatewayAPI) StateMinerAvailableBalance(ctx context.Context, m address.Address, tsk types.TipSetKey) (types.BigInt, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return types.BigInt{}, err
- }
- return a.api.StateMinerAvailableBalance(ctx, m, tsk)
-}
-
-func (a *GatewayAPI) StateMinerProvingDeadline(ctx context.Context, m address.Address, tsk types.TipSetKey) (*dline.Info, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return nil, err
- }
- return a.api.StateMinerProvingDeadline(ctx, m, tsk)
-}
-
-func (a *GatewayAPI) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return types.BigInt{}, err
- }
- return a.api.StateCirculatingSupply(ctx, tsk)
-
-}
-
-func (a *GatewayAPI) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return nil, err
- }
- return a.api.StateVerifiedClientStatus(ctx, addr, tsk)
-}
-
-func (a *GatewayAPI) StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) {
- if err := a.checkTipsetKey(ctx, tsk); err != nil {
- return api.CirculatingSupply{}, err
- }
- return a.api.StateVMCirculatingSupplyInternal(ctx, tsk)
-}
-
-func (a *GatewayAPI) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) (bool, error) {
- return sigs.Verify(sig, k, msg) == nil, nil
-}
-
-var _ api.GatewayAPI = (*GatewayAPI)(nil)
-var _ full.ChainModuleAPI = (*GatewayAPI)(nil)
-var _ full.GasModuleAPI = (*GatewayAPI)(nil)
-var _ full.MpoolModuleAPI = (*GatewayAPI)(nil)
-var _ full.StateModuleAPI = (*GatewayAPI)(nil)
diff --git a/cmd/lotus-gateway/main.go b/cmd/lotus-gateway/main.go
index 3fed88468ba..cfda02d86d8 100644
--- a/cmd/lotus-gateway/main.go
+++ b/cmd/lotus-gateway/main.go
@@ -2,23 +2,31 @@ package main
import (
"context"
+ "fmt"
"net"
- "net/http"
"os"
+ "github.com/urfave/cli/v2"
+ "go.opencensus.io/stats/view"
+ "golang.org/x/xerrors"
+
+ logging "github.com/ipfs/go-log/v2"
+
"github.com/filecoin-project/go-jsonrpc"
- "go.opencensus.io/tag"
+ "github.com/filecoin-project/go-state-types/abi"
+ manet "github.com/multiformats/go-multiaddr/net"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
+ cliutil "github.com/filecoin-project/lotus/cli/util"
+ "github.com/filecoin-project/lotus/gateway"
"github.com/filecoin-project/lotus/lib/lotuslog"
"github.com/filecoin-project/lotus/metrics"
-
- logging "github.com/ipfs/go-log"
- "go.opencensus.io/stats/view"
-
- "github.com/gorilla/mux"
- "github.com/urfave/cli/v2"
+ "github.com/filecoin-project/lotus/node"
)
var log = logging.Logger("gateway")
@@ -28,6 +36,7 @@ func main() {
local := []*cli.Command{
runCmd,
+ checkCmd,
}
app := &cli.App{
@@ -47,11 +56,60 @@ func main() {
app.Setup()
if err := app.Run(os.Args); err != nil {
- log.Warnf("%+v", err)
+ log.Errorf("%+v", err)
+ os.Exit(1)
return
}
}
+var checkCmd = &cli.Command{
+ Name: "check",
+ Usage: "performs a simple check to verify that a connection can be made to a gateway",
+ ArgsUsage: "[apiInfo]",
+ Description: `Any valid value for FULLNODE_API_INFO is a valid argument to the check command.
+
+ Examples
+ - ws://127.0.0.1:2346
+ - http://127.0.0.1:2346
+ - /ip4/127.0.0.1/tcp/2346`,
+ Flags: []cli.Flag{},
+ Action: func(cctx *cli.Context) error {
+ ctx := lcli.ReqContext(cctx)
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ ainfo := cliutil.ParseApiInfo(cctx.Args().First())
+
+ darg, err := ainfo.DialArgs("v1")
+ if err != nil {
+ return err
+ }
+
+ api, closer, err := client.NewFullNodeRPCV1(ctx, darg, nil)
+ if err != nil {
+ return err
+ }
+
+ defer closer()
+
+ addr, err := address.NewIDAddress(100)
+ if err != nil {
+ return err
+ }
+
+ laddr, err := api.StateLookupID(ctx, addr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ if laddr != addr {
+ return fmt.Errorf("looked up addresses does not match returned address, %s != %s", addr, laddr)
+ }
+
+ return nil
+ },
+}
+
var runCmd = &cli.Command{
Name: "run",
Usage: "Start api server",
@@ -61,65 +119,75 @@ var runCmd = &cli.Command{
Usage: "host address and port the api server will listen on",
Value: "0.0.0.0:2346",
},
+ &cli.IntFlag{
+ Name: "api-max-req-size",
+ Usage: "maximum API request size accepted by the JSON RPC server",
+ },
+ &cli.DurationFlag{
+ Name: "api-max-lookback",
+ Usage: "maximum duration allowable for tipset lookbacks",
+ Value: gateway.DefaultLookbackCap,
+ },
+ &cli.Int64Flag{
+ Name: "api-wait-lookback-limit",
+ Usage: "maximum number of blocks to search back through for message inclusion",
+ Value: int64(gateway.DefaultStateWaitLookbackLimit),
+ },
},
Action: func(cctx *cli.Context) error {
log.Info("Starting lotus gateway")
- ctx := lcli.ReqContext(cctx)
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
// Register all metric views
if err := view.Register(
- metrics.DefaultViews...,
+ metrics.ChainNodeViews...,
); err != nil {
log.Fatalf("Cannot register the view: %v", err)
}
- api, closer, err := lcli.GetFullNodeAPI(cctx)
+ api, closer, err := lcli.GetFullNodeAPIV1(cctx)
if err != nil {
return err
}
defer closer()
- address := cctx.String("listen")
- mux := mux.NewRouter()
+ var (
+ lookbackCap = cctx.Duration("api-max-lookback")
+ address = cctx.String("listen")
+ waitLookback = abi.ChainEpoch(cctx.Int64("api-wait-lookback-limit"))
+ )
- log.Info("Setting up API endpoint at " + address)
-
- rpcServer := jsonrpc.NewServer()
- rpcServer.Register("Filecoin", metrics.MetricedGatewayAPI(NewGatewayAPI(api)))
+ serverOptions := make([]jsonrpc.ServerOption, 0)
+ if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
+ serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
+ }
- mux.Handle("/rpc/v0", rpcServer)
- mux.PathPrefix("/").Handler(http.DefaultServeMux)
+ log.Info("setting up API endpoint at " + address)
- /*ah := &auth.Handler{
- Verify: nodeApi.AuthVerify,
- Next: mux.ServeHTTP,
- }*/
+ addr, err := net.ResolveTCPAddr("tcp", address)
+ if err != nil {
+ return xerrors.Errorf("failed to resolve endpoint address: %w", err)
+ }
- srv := &http.Server{
- Handler: mux,
- BaseContext: func(listener net.Listener) context.Context {
- ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-gateway"))
- return ctx
- },
+ maddr, err := manet.FromNetAddr(addr)
+ if err != nil {
+ return xerrors.Errorf("failed to convert endpoint address to multiaddr: %w", err)
}
- go func() {
- <-ctx.Done()
- log.Warn("Shutting down...")
- if err := srv.Shutdown(context.TODO()); err != nil {
- log.Errorf("shutting down RPC server failed: %s", err)
- }
- log.Warn("Graceful shutdown successful")
- }()
+ gwapi := gateway.NewNode(api, lookbackCap, waitLookback)
+ h, err := gateway.Handler(gwapi, serverOptions...)
+ if err != nil {
+ return xerrors.Errorf("failed to set up gateway HTTP handler")
+ }
- nl, err := net.Listen("tcp", address)
+ stopFunc, err := node.ServeRPC(h, "lotus-gateway", maddr)
if err != nil {
- return err
+ return xerrors.Errorf("failed to serve rpc endpoint: %w", err)
}
- return srv.Serve(nl)
+ <-node.MonitorShutdown(nil, node.ShutdownHandler{
+ Component: "rpc",
+ StopFunc: stopFunc,
+ })
+ return nil
},
}
diff --git a/cmd/lotus-health/main.go b/cmd/lotus-health/main.go
index e8a32a71946..da90242c888 100644
--- a/cmd/lotus-health/main.go
+++ b/cmd/lotus-health/main.go
@@ -8,13 +8,14 @@ import (
"syscall"
"time"
+ "github.com/filecoin-project/lotus/api/v0api"
+
cid "github.com/ipfs/go-cid"
- logging "github.com/ipfs/go-log"
+ logging "github.com/ipfs/go-log/v2"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-jsonrpc"
- "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
@@ -180,7 +181,7 @@ func checkWindow(window CidWindow, t int) bool {
* returns a slice of slices of Cids
* len of slice <= `t` - threshold
*/
-func updateWindow(ctx context.Context, a api.FullNode, w CidWindow, t int, r int, to time.Duration) (CidWindow, error) {
+func updateWindow(ctx context.Context, a v0api.FullNode, w CidWindow, t int, r int, to time.Duration) (CidWindow, error) {
head, err := getHead(ctx, a, r, to)
if err != nil {
return nil, err
@@ -194,7 +195,7 @@ func updateWindow(ctx context.Context, a api.FullNode, w CidWindow, t int, r int
* retries if API no available
* returns tipset
*/
-func getHead(ctx context.Context, a api.FullNode, r int, t time.Duration) (*types.TipSet, error) {
+func getHead(ctx context.Context, a v0api.FullNode, r int, t time.Duration) (*types.TipSet, error) {
for i := 0; i < r; i++ {
head, err := a.ChainHead(ctx)
if err != nil && i == (r-1) {
@@ -226,7 +227,7 @@ func appendCIDsToWindow(w CidWindow, c []cid.Cid, t int) CidWindow {
/*
* wait for node to sync
*/
-func waitForSyncComplete(ctx context.Context, a api.FullNode, r int, t time.Duration) error {
+func waitForSyncComplete(ctx context.Context, a v0api.FullNode, r int, t time.Duration) error {
for {
select {
case <-ctx.Done():
@@ -248,7 +249,7 @@ func waitForSyncComplete(ctx context.Context, a api.FullNode, r int, t time.Dura
* A thin wrapper around lotus cli GetFullNodeAPI
* Adds retry logic
*/
-func getFullNodeAPI(ctx *cli.Context, r int, t time.Duration) (api.FullNode, jsonrpc.ClientCloser, error) {
+func getFullNodeAPI(ctx *cli.Context, r int, t time.Duration) (v0api.FullNode, jsonrpc.ClientCloser, error) {
for i := 0; i < r; i++ {
api, closer, err := lcli.GetFullNodeAPI(ctx)
if err != nil && i == (r-1) {
diff --git a/cmd/lotus-keygen/main.go b/cmd/lotus-keygen/main.go
index d296cb5da70..ebf981e8b7a 100644
--- a/cmd/lotus-keygen/main.go
+++ b/cmd/lotus-keygen/main.go
@@ -22,6 +22,11 @@ func main() {
Value: "bls",
Usage: "specify key type to generate (bls or secp256k1)",
},
+ &cli.StringFlag{
+ Name: "out",
+ Aliases: []string{"o"},
+ Usage: "specify key file name to generate",
+ },
}
app.Action = func(cctx *cli.Context) error {
memks := wallet.NewMemKeyStore()
@@ -50,7 +55,11 @@ func main() {
return err
}
- fi, err := os.Create(fmt.Sprintf("%s.key", kaddr))
+ outFile := fmt.Sprintf("%s.key", kaddr)
+ if cctx.IsSet("out") {
+ outFile = fmt.Sprintf("%s.key", cctx.String("out"))
+ }
+ fi, err := os.Create(outFile)
if err != nil {
return err
}
diff --git a/cmd/lotus-seal-worker/cli.go b/cmd/lotus-seal-worker/cli.go
new file mode 100644
index 00000000000..b1501fca745
--- /dev/null
+++ b/cmd/lotus-seal-worker/cli.go
@@ -0,0 +1,51 @@
+package main
+
+import (
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+
+ lcli "github.com/filecoin-project/lotus/cli"
+)
+
+var setCmd = &cli.Command{
+ Name: "set",
+ Usage: "Manage worker settings",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "enabled",
+ Usage: "enable/disable new task processing",
+ Value: true,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetWorkerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := lcli.ReqContext(cctx)
+
+ if err := api.SetEnabled(ctx, cctx.Bool("enabled")); err != nil {
+ return xerrors.Errorf("SetEnabled: %w", err)
+ }
+
+ return nil
+ },
+}
+
+var waitQuietCmd = &cli.Command{
+ Name: "wait-quiet",
+ Usage: "Block until all running tasks exit",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetWorkerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := lcli.ReqContext(cctx)
+
+ return api.WaitQuiet(ctx)
+ },
+}
diff --git a/cmd/lotus-seal-worker/info.go b/cmd/lotus-seal-worker/info.go
index 9b08a0c800e..6d5c2d64ebc 100644
--- a/cmd/lotus-seal-worker/info.go
+++ b/cmd/lotus-seal-worker/info.go
@@ -2,12 +2,14 @@ package main
import (
"fmt"
+ "sort"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
)
var infoCmd = &cli.Command{
@@ -32,15 +34,39 @@ var infoCmd = &cli.Command{
cli.VersionPrinter(cctx)
fmt.Println()
+ sess, err := api.ProcessSession(ctx)
+ if err != nil {
+ return xerrors.Errorf("getting session: %w", err)
+ }
+ fmt.Printf("Session: %s\n", sess)
+
+ enabled, err := api.Enabled(ctx)
+ if err != nil {
+ return xerrors.Errorf("checking worker status: %w", err)
+ }
+ fmt.Printf("Enabled: %t\n", enabled)
+
info, err := api.Info(ctx)
if err != nil {
return xerrors.Errorf("getting info: %w", err)
}
+ tt, err := api.TaskTypes(ctx)
+ if err != nil {
+ return xerrors.Errorf("getting task types: %w", err)
+ }
+
fmt.Printf("Hostname: %s\n", info.Hostname)
fmt.Printf("CPUs: %d; GPUs: %v\n", info.Resources.CPUs, info.Resources.GPUs)
fmt.Printf("RAM: %s; Swap: %s\n", types.SizeStr(types.NewInt(info.Resources.MemPhysical)), types.SizeStr(types.NewInt(info.Resources.MemSwap)))
fmt.Printf("Reserved memory: %s\n", types.SizeStr(types.NewInt(info.Resources.MemReserved)))
+
+ fmt.Printf("Task types: ")
+ for _, t := range ttList(tt) {
+ fmt.Printf("%s ", t.Short())
+ }
+ fmt.Println()
+
fmt.Println()
paths, err := api.Paths(ctx)
@@ -52,7 +78,6 @@ var infoCmd = &cli.Command{
fmt.Printf("%s:\n", path.ID)
fmt.Printf("\tWeight: %d; Use: ", path.Weight)
if path.CanSeal || path.CanStore {
- fmt.Printf("Weight: %d; Use: ", path.Weight)
if path.CanSeal {
fmt.Print("Seal ")
}
@@ -69,3 +94,14 @@ var infoCmd = &cli.Command{
return nil
},
}
+
+func ttList(tt map[sealtasks.TaskType]struct{}) []sealtasks.TaskType {
+ tasks := make([]sealtasks.TaskType, 0, len(tt))
+ for taskType := range tt {
+ tasks = append(tasks, taskType)
+ }
+ sort.Slice(tasks, func(i, j int) bool {
+ return tasks[i].Less(tasks[j])
+ })
+ return tasks
+}
diff --git a/cmd/lotus-seal-worker/main.go b/cmd/lotus-seal-worker/main.go
index 36c9d5effc1..adcf0f86934 100644
--- a/cmd/lotus-seal-worker/main.go
+++ b/cmd/lotus-seal-worker/main.go
@@ -28,11 +28,10 @@ import (
"github.com/filecoin-project/go-statestore"
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/api/apistruct"
"github.com/filecoin-project/lotus/build"
lcli "github.com/filecoin-project/lotus/cli"
+ cliutil "github.com/filecoin-project/lotus/cli/util"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
- "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/lib/lotuslog"
@@ -50,7 +49,7 @@ const FlagWorkerRepo = "worker-repo"
const FlagWorkerRepoDeprecation = "workerrepo"
func main() {
- build.RunningNodeType = build.NodeWorker
+ api.RunningNodeType = api.NodeWorker
lotuslog.SetupLogLevels()
@@ -58,12 +57,16 @@ func main() {
runCmd,
infoCmd,
storageCmd,
+ setCmd,
+ waitQuietCmd,
+ tasksCmd,
}
app := &cli.App{
- Name: "lotus-worker",
- Usage: "Remote miner worker",
- Version: build.UserVersion(),
+ Name: "lotus-worker",
+ Usage: "Remote miner worker",
+ Version: build.UserVersion(),
+ EnableBashCompletion: true,
Flags: []cli.Flag{
&cli.StringFlag{
Name: FlagWorkerRepo,
@@ -181,7 +184,7 @@ var runCmd = &cli.Command{
var closer func()
var err error
for {
- nodeApi, closer, err = lcli.GetStorageMinerAPI(cctx, lcli.StorageMinerUseHttp)
+ nodeApi, closer, err = lcli.GetStorageMinerAPI(cctx, cliutil.StorageMinerUseHttp)
if err == nil {
_, err = nodeApi.Version(ctx)
if err == nil {
@@ -208,8 +211,8 @@ var runCmd = &cli.Command{
if err != nil {
return err
}
- if v.APIVersion != build.MinerAPIVersion {
- return xerrors.Errorf("lotus-miner API version doesn't match: expected: %s", api.Version{APIVersion: build.MinerAPIVersion})
+ if v.APIVersion != api.MinerAPIVersion0 {
+ return xerrors.Errorf("lotus-miner API version doesn't match: expected: %s", api.APIVersion{APIVersion: api.MinerAPIVersion0})
}
log.Infof("Remote version %s", v)
@@ -225,7 +228,7 @@ var runCmd = &cli.Command{
}
if cctx.Bool("commit") {
- if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(ssize)); err != nil {
+ if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil {
return xerrors.Errorf("get params: %w", err)
}
}
@@ -306,7 +309,7 @@ var runCmd = &cli.Command{
{
// init datastore for r.Exists
- _, err := lr.Datastore("/metadata")
+ _, err := lr.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
@@ -325,7 +328,7 @@ var runCmd = &cli.Command{
log.Error("closing repo", err)
}
}()
- ds, err := lr.Datastore("/metadata")
+ ds, err := lr.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
@@ -354,17 +357,24 @@ var runCmd = &cli.Command{
}
// Setup remote sector store
- spt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
- if err != nil {
- return xerrors.Errorf("getting proof type: %w", err)
- }
-
sminfo, err := lcli.GetAPIInfo(cctx, repo.StorageMiner)
if err != nil {
return xerrors.Errorf("could not get api info: %w", err)
}
- remote := stores.NewRemote(localStore, nodeApi, sminfo.AuthHeader(), cctx.Int("parallel-fetch-limit"))
+ remote := stores.NewRemote(localStore, nodeApi, sminfo.AuthHeader(), cctx.Int("parallel-fetch-limit"),
+ &stores.DefaultPartialFileHandler{})
+
+ fh := &stores.FetchHandler{Local: localStore, PfHandler: &stores.DefaultPartialFileHandler{}}
+ remoteHandler := func(w http.ResponseWriter, r *http.Request) {
+ if !auth.HasPerm(r.Context(), nil, api.PermAdmin) {
+ w.WriteHeader(401)
+ _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing admin permission"})
+ return
+ }
+
+ fh.ServeHTTP(w, r)
+ }
// Create / expose the worker
@@ -372,7 +382,6 @@ var runCmd = &cli.Command{
workerApi := &worker{
LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
- SealProof: spt,
TaskTypes: taskTypes,
NoSwap: cctx.Bool("no-swap"),
}, remote, localStore, nodeApi, nodeApi, wsts),
@@ -386,11 +395,11 @@ var runCmd = &cli.Command{
readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder()
rpcServer := jsonrpc.NewServer(readerServerOpt)
- rpcServer.Register("Filecoin", apistruct.PermissionedWorkerAPI(metrics.MetricedWorkerAPI(workerApi)))
+ rpcServer.Register("Filecoin", api.PermissionedWorkerAPI(metrics.MetricedWorkerAPI(workerApi)))
mux.Handle("/rpc/v0", rpcServer)
mux.Handle("/rpc/streams/v0/push/{uuid}", readerHandler)
- mux.PathPrefix("/remote").HandlerFunc((&stores.FetchHandler{Local: localStore}).ServeHTTP)
+ mux.PathPrefix("/remote").HandlerFunc(remoteHandler)
mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
ah := &auth.Handler{
@@ -451,14 +460,24 @@ var runCmd = &cli.Command{
return xerrors.Errorf("getting miner session: %w", err)
}
+ waitQuietCh := func() chan struct{} {
+ out := make(chan struct{})
+ go func() {
+ workerApi.LocalWorker.WaitQuiet()
+ close(out)
+ }()
+ return out
+ }
+
go func() {
heartbeats := time.NewTicker(stores.HeartbeatInterval)
defer heartbeats.Stop()
- var connected, reconnect bool
+ var redeclareStorage bool
+ var readyCh chan struct{}
for {
// If we're reconnecting, redeclare storage first
- if reconnect {
+ if redeclareStorage {
log.Info("Redeclaring local storage")
if err := localStore.Redeclare(ctx); err != nil {
@@ -471,14 +490,13 @@ var runCmd = &cli.Command{
}
continue
}
-
- connected = false
}
- log.Info("Making sure no local tasks are running")
-
// TODO: we could get rid of this, but that requires tracking resources for restarted tasks correctly
- workerApi.LocalWorker.WaitQuiet()
+ if readyCh == nil {
+ log.Info("Making sure no local tasks are running")
+ readyCh = waitQuietCh()
+ }
for {
curSession, err := nodeApi.Session(ctx)
@@ -489,29 +507,28 @@ var runCmd = &cli.Command{
minerSession = curSession
break
}
-
- if !connected {
- if err := nodeApi.WorkerConnect(ctx, "http://"+address+"/rpc/v0"); err != nil {
- log.Errorf("Registering worker failed: %+v", err)
- cancel()
- return
- }
-
- log.Info("Worker registered successfully, waiting for tasks")
- connected = true
- }
}
select {
+ case <-readyCh:
+ if err := nodeApi.WorkerConnect(ctx, "http://"+address+"/rpc/v0"); err != nil {
+ log.Errorf("Registering worker failed: %+v", err)
+ cancel()
+ return
+ }
+
+ log.Info("Worker registered successfully, waiting for tasks")
+
+ readyCh = nil
+ case <-heartbeats.C:
case <-ctx.Done():
return // graceful shutdown
- case <-heartbeats.C:
}
}
log.Errorf("LOTUS-MINER CONNECTION LOST")
- reconnect = true
+ redeclareStorage = true
}
}()
diff --git a/cmd/lotus-seal-worker/rpc.go b/cmd/lotus-seal-worker/rpc.go
index b543babbf7d..6a6263671bd 100644
--- a/cmd/lotus-seal-worker/rpc.go
+++ b/cmd/lotus-seal-worker/rpc.go
@@ -2,10 +2,14 @@ package main
import (
"context"
+ "sync/atomic"
+ "github.com/google/uuid"
"github.com/mitchellh/go-homedir"
"golang.org/x/xerrors"
+ "github.com/filecoin-project/lotus/api"
+ apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/build"
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
@@ -17,10 +21,12 @@ type worker struct {
localStore *stores.Local
ls stores.LocalStorage
+
+ disabled int64
}
-func (w *worker) Version(context.Context) (build.Version, error) {
- return build.WorkerAPIVersion, nil
+func (w *worker) Version(context.Context) (api.Version, error) {
+ return api.WorkerAPIVersion0, nil
}
func (w *worker) StorageAddLocal(ctx context.Context, path string) error {
@@ -42,4 +48,38 @@ func (w *worker) StorageAddLocal(ctx context.Context, path string) error {
return nil
}
+func (w *worker) SetEnabled(ctx context.Context, enabled bool) error {
+ disabled := int64(1)
+ if enabled {
+ disabled = 0
+ }
+ atomic.StoreInt64(&w.disabled, disabled)
+ return nil
+}
+
+func (w *worker) Enabled(ctx context.Context) (bool, error) {
+ return atomic.LoadInt64(&w.disabled) == 0, nil
+}
+
+func (w *worker) WaitQuiet(ctx context.Context) error {
+ w.LocalWorker.WaitQuiet() // uses WaitGroup under the hood so no ctx :/
+ return nil
+}
+
+func (w *worker) ProcessSession(ctx context.Context) (uuid.UUID, error) {
+ return w.LocalWorker.Session(ctx)
+}
+
+func (w *worker) Session(ctx context.Context) (uuid.UUID, error) {
+ if atomic.LoadInt64(&w.disabled) == 1 {
+ return uuid.UUID{}, xerrors.Errorf("worker disabled")
+ }
+
+ return w.LocalWorker.Session(ctx)
+}
+
+func (w *worker) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
+ return build.OpenRPCDiscoverJSON_Worker(), nil
+}
+
var _ storiface.WorkerCalls = &worker{}
diff --git a/cmd/lotus-seal-worker/storage.go b/cmd/lotus-seal-worker/storage.go
index 39cd3ad5afb..be662a6c36b 100644
--- a/cmd/lotus-seal-worker/storage.go
+++ b/cmd/lotus-seal-worker/storage.go
@@ -6,6 +6,7 @@ import (
"os"
"path/filepath"
+ "github.com/docker/go-units"
"github.com/google/uuid"
"github.com/mitchellh/go-homedir"
"github.com/urfave/cli/v2"
@@ -46,6 +47,10 @@ var storageAttachCmd = &cli.Command{
Name: "store",
Usage: "(for init) use path for long-term storage",
},
+ &cli.StringFlag{
+ Name: "max-storage",
+ Usage: "(for init) limit storage space for sectors (expensive for very large paths!)",
+ },
},
Action: func(cctx *cli.Context) error {
nodeApi, closer, err := lcli.GetWorkerAPI(cctx)
@@ -79,15 +84,24 @@ var storageAttachCmd = &cli.Command{
return err
}
+ var maxStor int64
+ if cctx.IsSet("max-storage") {
+ maxStor, err = units.RAMInBytes(cctx.String("max-storage"))
+ if err != nil {
+ return xerrors.Errorf("parsing max-storage: %w", err)
+ }
+ }
+
cfg := &stores.LocalStorageMeta{
- ID: stores.ID(uuid.New().String()),
- Weight: cctx.Uint64("weight"),
- CanSeal: cctx.Bool("seal"),
- CanStore: cctx.Bool("store"),
+ ID: stores.ID(uuid.New().String()),
+ Weight: cctx.Uint64("weight"),
+ CanSeal: cctx.Bool("seal"),
+ CanStore: cctx.Bool("store"),
+ MaxStorage: uint64(maxStor),
}
if !(cfg.CanStore || cfg.CanSeal) {
- return xerrors.Errorf("must specify at least one of --store of --seal")
+ return xerrors.Errorf("must specify at least one of --store or --seal")
}
b, err := json.MarshalIndent(cfg, "", " ")
diff --git a/cmd/lotus-seal-worker/tasks.go b/cmd/lotus-seal-worker/tasks.go
new file mode 100644
index 00000000000..02e5d6cfd8e
--- /dev/null
+++ b/cmd/lotus-seal-worker/tasks.go
@@ -0,0 +1,82 @@
+package main
+
+import (
+ "context"
+ "strings"
+
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/api"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
+)
+
+var tasksCmd = &cli.Command{
+ Name: "tasks",
+ Usage: "Manage task processing",
+ Subcommands: []*cli.Command{
+ tasksEnableCmd,
+ tasksDisableCmd,
+ },
+}
+
+var allowSetting = map[sealtasks.TaskType]struct{}{
+ sealtasks.TTAddPiece: {},
+ sealtasks.TTPreCommit1: {},
+ sealtasks.TTPreCommit2: {},
+ sealtasks.TTCommit2: {},
+ sealtasks.TTUnseal: {},
+}
+
+var settableStr = func() string {
+ var s []string
+ for _, tt := range ttList(allowSetting) {
+ s = append(s, tt.Short())
+ }
+ return strings.Join(s, "|")
+}()
+
+var tasksEnableCmd = &cli.Command{
+ Name: "enable",
+ Usage: "Enable a task type",
+ ArgsUsage: "[" + settableStr + "]",
+ Action: taskAction(api.Worker.TaskEnable),
+}
+
+var tasksDisableCmd = &cli.Command{
+ Name: "disable",
+ Usage: "Disable a task type",
+ ArgsUsage: "[" + settableStr + "]",
+ Action: taskAction(api.Worker.TaskDisable),
+}
+
+func taskAction(tf func(a api.Worker, ctx context.Context, tt sealtasks.TaskType) error) func(cctx *cli.Context) error {
+ return func(cctx *cli.Context) error {
+ if cctx.NArg() != 1 {
+ return xerrors.Errorf("expected 1 argument")
+ }
+
+ var tt sealtasks.TaskType
+ for taskType := range allowSetting {
+ if taskType.Short() == cctx.Args().First() {
+ tt = taskType
+ break
+ }
+ }
+
+ if tt == "" {
+ return xerrors.Errorf("unknown task type '%s'", cctx.Args().First())
+ }
+
+ api, closer, err := lcli.GetWorkerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := lcli.ReqContext(cctx)
+
+ return tf(api, ctx, tt)
+ }
+}
diff --git a/cmd/lotus-seed/genesis.go b/cmd/lotus-seed/genesis.go
index bbaea6969a9..a27cc0a2f7c 100644
--- a/cmd/lotus-seed/genesis.go
+++ b/cmd/lotus-seed/genesis.go
@@ -9,6 +9,13 @@ import (
"strconv"
"strings"
+ "github.com/filecoin-project/go-state-types/network"
+
+ "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/chain/vm"
+ "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
+ "github.com/filecoin-project/lotus/journal"
+ "github.com/filecoin-project/lotus/node/modules/testing"
"github.com/google/uuid"
"github.com/mitchellh/go-homedir"
"github.com/urfave/cli/v2"
@@ -32,6 +39,10 @@ var genesisCmd = &cli.Command{
genesisNewCmd,
genesisAddMinerCmd,
genesisAddMsigsCmd,
+ genesisSetVRKCmd,
+ genesisSetRemainderCmd,
+ genesisSetActorVersionCmd,
+ genesisCarCmd,
},
}
@@ -48,6 +59,7 @@ var genesisNewCmd = &cli.Command{
return xerrors.New("seed genesis new [genesis.json]")
}
out := genesis.Template{
+ NetworkVersion: build.NewestNetworkVersion,
Accounts: []genesis.Actor{},
Miners: []genesis.Miner{},
VerifregRootKey: gen.DefaultVerifregRootkeyActor,
@@ -302,3 +314,267 @@ func parseMultisigCsv(csvf string) ([]GenAccountEntry, error) {
return entries, nil
}
+
+var genesisSetVRKCmd = &cli.Command{
+ Name: "set-vrk",
+ Usage: "Set the verified registry's root key",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "multisig",
+ Usage: "CSV file to parse the multisig that will be set as the root key",
+ },
+ &cli.StringFlag{
+ Name: "account",
+ Usage: "pubkey address that will be set as the root key (must NOT be declared anywhere else, since it must be given ID 80)",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 1 {
+ return fmt.Errorf("must specify template file")
+ }
+
+ genf, err := homedir.Expand(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ var template genesis.Template
+ b, err := ioutil.ReadFile(genf)
+ if err != nil {
+ return xerrors.Errorf("read genesis template: %w", err)
+ }
+
+ if err := json.Unmarshal(b, &template); err != nil {
+ return xerrors.Errorf("unmarshal genesis template: %w", err)
+ }
+
+ if cctx.IsSet("account") {
+ addr, err := address.NewFromString(cctx.String("account"))
+ if err != nil {
+ return err
+ }
+
+ am := genesis.AccountMeta{Owner: addr}
+
+ template.VerifregRootKey = genesis.Actor{
+ Type: genesis.TAccount,
+ Balance: big.Zero(),
+ Meta: am.ActorMeta(),
+ }
+ } else if cctx.IsSet("multisig") {
+ csvf, err := homedir.Expand(cctx.String("multisig"))
+ if err != nil {
+ return err
+ }
+
+ entries, err := parseMultisigCsv(csvf)
+ if err != nil {
+ return xerrors.Errorf("parsing multisig csv file: %w", err)
+ }
+
+ if len(entries) == 0 {
+ return xerrors.Errorf("no msig entries in csv file: %w", err)
+ }
+
+ e := entries[0]
+ if len(e.Addresses) != e.N {
+ return fmt.Errorf("entry had mismatch between 'N' and number of addresses")
+ }
+
+ msig := &genesis.MultisigMeta{
+ Signers: e.Addresses,
+ Threshold: e.M,
+ VestingDuration: monthsToBlocks(e.VestingMonths),
+ VestingStart: 0,
+ }
+
+ act := genesis.Actor{
+ Type: genesis.TMultisig,
+ Balance: abi.TokenAmount(e.Amount),
+ Meta: msig.ActorMeta(),
+ }
+
+ template.VerifregRootKey = act
+ } else {
+ return xerrors.Errorf("must include either --account or --multisig flag")
+ }
+
+ b, err = json.MarshalIndent(&template, "", " ")
+ if err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(genf, b, 0644); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var genesisSetRemainderCmd = &cli.Command{
+ Name: "set-remainder",
+ Usage: "Set the remainder actor",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "multisig",
+ Usage: "CSV file to parse the multisig that will be set as the remainder actor",
+ },
+ &cli.StringFlag{
+ Name: "account",
+ Usage: "pubkey address that will be set as the remainder key (must NOT be declared anywhere else, since it must be given ID 90)",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 1 {
+ return fmt.Errorf("must specify template file")
+ }
+
+ genf, err := homedir.Expand(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ var template genesis.Template
+ b, err := ioutil.ReadFile(genf)
+ if err != nil {
+ return xerrors.Errorf("read genesis template: %w", err)
+ }
+
+ if err := json.Unmarshal(b, &template); err != nil {
+ return xerrors.Errorf("unmarshal genesis template: %w", err)
+ }
+
+ if cctx.IsSet("account") {
+ addr, err := address.NewFromString(cctx.String("account"))
+ if err != nil {
+ return err
+ }
+
+ am := genesis.AccountMeta{Owner: addr}
+
+ template.RemainderAccount = genesis.Actor{
+ Type: genesis.TAccount,
+ Balance: big.Zero(),
+ Meta: am.ActorMeta(),
+ }
+ } else if cctx.IsSet("multisig") {
+ csvf, err := homedir.Expand(cctx.String("multisig"))
+ if err != nil {
+ return err
+ }
+
+ entries, err := parseMultisigCsv(csvf)
+ if err != nil {
+ return xerrors.Errorf("parsing multisig csv file: %w", err)
+ }
+
+ if len(entries) == 0 {
+ return xerrors.Errorf("no msig entries in csv file: %w", err)
+ }
+
+ e := entries[0]
+ if len(e.Addresses) != e.N {
+ return fmt.Errorf("entry had mismatch between 'N' and number of addresses")
+ }
+
+ msig := &genesis.MultisigMeta{
+ Signers: e.Addresses,
+ Threshold: e.M,
+ VestingDuration: monthsToBlocks(e.VestingMonths),
+ VestingStart: 0,
+ }
+
+ act := genesis.Actor{
+ Type: genesis.TMultisig,
+ Balance: abi.TokenAmount(e.Amount),
+ Meta: msig.ActorMeta(),
+ }
+
+ template.RemainderAccount = act
+ } else {
+ return xerrors.Errorf("must include either --account or --multisig flag")
+ }
+
+ b, err = json.MarshalIndent(&template, "", " ")
+ if err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(genf, b, 0644); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var genesisSetActorVersionCmd = &cli.Command{
+ Name: "set-network-version",
+ Usage: "Set the version that this network will start from",
+ ArgsUsage: " ",
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 2 {
+ return fmt.Errorf("must specify genesis file and network version (e.g. '0'")
+ }
+
+ genf, err := homedir.Expand(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ var template genesis.Template
+ b, err := ioutil.ReadFile(genf)
+ if err != nil {
+ return xerrors.Errorf("read genesis template: %w", err)
+ }
+
+ if err := json.Unmarshal(b, &template); err != nil {
+ return xerrors.Errorf("unmarshal genesis template: %w", err)
+ }
+
+ nv, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
+ if err != nil {
+ return xerrors.Errorf("parsing network version: %w", err)
+ }
+
+ if nv > uint64(build.NewestNetworkVersion) {
+ return xerrors.Errorf("invalid network version: %d", nv)
+ }
+
+ template.NetworkVersion = network.Version(nv)
+
+ b, err = json.MarshalIndent(&template, "", " ")
+ if err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(genf, b, 0644); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var genesisCarCmd = &cli.Command{
+ Name: "car",
+ Description: "write genesis car file",
+ ArgsUsage: "genesis template `FILE`",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "out",
+ Aliases: []string{"o"},
+ Value: "genesis.car",
+ Usage: "write output to `FILE`",
+ },
+ },
+ Action: func(c *cli.Context) error {
+ if c.Args().Len() != 1 {
+ return xerrors.Errorf("Please specify a genesis template. (i.e, the one created with `genesis new`)")
+ }
+ ofile := c.String("out")
+ jrnl := journal.NilJournal()
+ bstor := blockstore.WrapIDStore(blockstore.NewMemorySync())
+ sbldr := vm.Syscalls(ffiwrapper.ProofVerifier)
+ _, err := testing.MakeGenesis(ofile, c.Args().First())(bstor, sbldr, jrnl)()
+ return err
+ },
+}
diff --git a/cmd/lotus-seed/main.go b/cmd/lotus-seed/main.go
index d365f6493e1..42f4b74e4d9 100644
--- a/cmd/lotus-seed/main.go
+++ b/cmd/lotus-seed/main.go
@@ -7,9 +7,9 @@ import (
"io/ioutil"
"os"
- "github.com/docker/go-units"
- "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/docker/go-units"
logging "github.com/ipfs/go-log/v2"
"github.com/mitchellh/go-homedir"
"github.com/urfave/cli/v2"
@@ -19,6 +19,7 @@ import (
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
"github.com/filecoin-project/lotus/genesis"
@@ -93,6 +94,10 @@ var preSealCmd = &cli.Command{
Name: "fake-sectors",
Value: false,
},
+ &cli.IntFlag{
+ Name: "network-version",
+ Usage: "specify network version",
+ },
},
Action: func(c *cli.Context) error {
sdir := c.String("sector-dir")
@@ -128,12 +133,17 @@ var preSealCmd = &cli.Command{
}
sectorSize := abi.SectorSize(sectorSizeInt)
- rp, err := ffiwrapper.SealProofTypeFromSectorSize(sectorSize)
+ nv := build.NewestNetworkVersion
+ if c.IsSet("network-version") {
+ nv = network.Version(c.Uint64("network-version"))
+ }
+
+ spt, err := miner.SealProofTypeFromSectorSize(sectorSize, nv)
if err != nil {
return err
}
- gm, key, err := seed.PreSeal(maddr, rp, abi.SectorNumber(c.Uint64("sector-offset")), c.Int("num-sectors"), sbroot, []byte(c.String("ticket-preimage")), k, c.Bool("fake-sectors"))
+ gm, key, err := seed.PreSeal(maddr, spt, abi.SectorNumber(c.Uint64("sector-offset")), c.Int("num-sectors"), sbroot, []byte(c.String("ticket-preimage")), k, c.Bool("fake-sectors"))
if err != nil {
return err
}
diff --git a/cmd/lotus-seed/seed/seed.go b/cmd/lotus-seed/seed/seed.go
index ab8e5a52a2b..48183690db7 100644
--- a/cmd/lotus-seed/seed/seed.go
+++ b/cmd/lotus-seed/seed/seed.go
@@ -19,9 +19,10 @@ import (
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-commp-utils/zerocomm"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
- "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm"
+ "github.com/filecoin-project/specs-storage/storage"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
@@ -42,10 +43,6 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
return nil, nil, err
}
- cfg := &ffiwrapper.Config{
- SealProofType: spt,
- }
-
if err := os.MkdirAll(sbroot, 0775); err != nil { //nolint:gosec
return nil, nil, err
}
@@ -56,7 +53,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
Root: sbroot,
}
- sb, err := ffiwrapper.New(sbfs, cfg)
+ sb, err := ffiwrapper.New(sbfs)
if err != nil {
return nil, nil, err
}
@@ -69,16 +66,17 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
var sealedSectors []*genesis.PreSeal
for i := 0; i < sectors; i++ {
sid := abi.SectorID{Miner: abi.ActorID(mid), Number: next}
+ ref := storage.SectorRef{ID: sid, ProofType: spt}
next++
var preseal *genesis.PreSeal
if !fakeSectors {
- preseal, err = presealSector(sb, sbfs, sid, spt, ssize, preimage)
+ preseal, err = presealSector(sb, sbfs, ref, ssize, preimage)
if err != nil {
return nil, nil, err
}
} else {
- preseal, err = presealSectorFake(sbfs, sid, spt, ssize)
+ preseal, err = presealSectorFake(sbfs, ref, ssize)
if err != nil {
return nil, nil, err
}
@@ -148,7 +146,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
return miner, &minerAddr.KeyInfo, nil
}
-func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize, preimage []byte) (*genesis.PreSeal, error) {
+func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid storage.SectorRef, ssize abi.SectorSize, preimage []byte) (*genesis.PreSeal, error) {
pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(ssize).Unpadded(), rand.Reader)
if err != nil {
return nil, err
@@ -182,12 +180,12 @@ func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.Sector
return &genesis.PreSeal{
CommR: cids.Sealed,
CommD: cids.Unsealed,
- SectorID: sid.Number,
- ProofType: spt,
+ SectorID: sid.ID.Number,
+ ProofType: sid.ProofType,
}, nil
}
-func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize) (*genesis.PreSeal, error) {
+func presealSectorFake(sbfs *basicfs.Provider, sid storage.SectorRef, ssize abi.SectorSize) (*genesis.PreSeal, error) {
paths, done, err := sbfs.AcquireSector(context.TODO(), sid, 0, storiface.FTSealed|storiface.FTCache, storiface.PathSealing)
if err != nil {
return nil, xerrors.Errorf("acquire unsealed sector: %w", err)
@@ -198,7 +196,7 @@ func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.Registe
return nil, xerrors.Errorf("mkdir cache: %w", err)
}
- commr, err := ffi.FauxRep(spt, paths.Cache, paths.Sealed)
+ commr, err := ffi.FauxRep(sid.ProofType, paths.Cache, paths.Sealed)
if err != nil {
return nil, xerrors.Errorf("fauxrep: %w", err)
}
@@ -206,13 +204,13 @@ func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.Registe
return &genesis.PreSeal{
CommR: commr,
CommD: zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()),
- SectorID: sid.Number,
- ProofType: spt,
+ SectorID: sid.ID.Number,
+ ProofType: sid.ProofType,
}, nil
}
-func cleanupUnsealed(sbfs *basicfs.Provider, sid abi.SectorID) error {
- paths, done, err := sbfs.AcquireSector(context.TODO(), sid, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
+func cleanupUnsealed(sbfs *basicfs.Provider, ref storage.SectorRef) error {
+ paths, done, err := sbfs.AcquireSector(context.TODO(), ref, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
if err != nil {
return err
}
diff --git a/cmd/lotus-shed/actor.go b/cmd/lotus-shed/actor.go
new file mode 100644
index 00000000000..b78f283497f
--- /dev/null
+++ b/cmd/lotus-shed/actor.go
@@ -0,0 +1,740 @@
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/fatih/color"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/api"
+
+ miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
+
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/lib/tablewriter"
+)
+
+var actorCmd = &cli.Command{
+ Name: "actor",
+ Usage: "manipulate the miner actor",
+ Subcommands: []*cli.Command{
+ actorWithdrawCmd,
+ actorSetOwnerCmd,
+ actorControl,
+ actorProposeChangeWorker,
+ actorConfirmChangeWorker,
+ },
+}
+
+var actorWithdrawCmd = &cli.Command{
+ Name: "withdraw",
+ Usage: "withdraw available balance",
+ ArgsUsage: "[amount (FIL)]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "actor",
+ Usage: "specify the address of miner actor",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ var maddr address.Address
+ if act := cctx.String("actor"); act != "" {
+ var err error
+ maddr, err = address.NewFromString(act)
+ if err != nil {
+ return fmt.Errorf("parsing address %s: %w", act, err)
+ }
+ }
+
+ nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer acloser()
+
+ ctx := lcli.ReqContext(cctx)
+
+ if maddr.Empty() {
+ minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ maddr, err = minerAPI.ActorAddress(ctx)
+ if err != nil {
+ return err
+ }
+ }
+
+ mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ available, err := nodeAPI.StateMinerAvailableBalance(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ amount := available
+ if cctx.Args().Present() {
+ f, err := types.ParseFIL(cctx.Args().First())
+ if err != nil {
+ return xerrors.Errorf("parsing 'amount' argument: %w", err)
+ }
+
+ amount = abi.TokenAmount(f)
+
+ if amount.GreaterThan(available) {
+ return xerrors.Errorf("can't withdraw more funds than available; requested: %s; available: %s", amount, available)
+ }
+ }
+
+ params, err := actors.SerializeParams(&miner2.WithdrawBalanceParams{
+ AmountRequested: amount, // Default to attempting to withdraw all the extra funds in the miner actor
+ })
+ if err != nil {
+ return err
+ }
+
+ smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{
+ To: maddr,
+ From: mi.Owner,
+ Value: types.NewInt(0),
+ Method: miner.Methods.WithdrawBalance,
+ Params: params,
+ }, &api.MessageSendSpec{MaxFee: abi.TokenAmount(types.MustParseFIL("0.1"))})
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("Requested rewards withdrawal in message %s\n", smsg.Cid())
+
+ return nil
+ },
+}
+
+var actorSetOwnerCmd = &cli.Command{
+ Name: "set-owner",
+ Usage: "Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner)",
+ ArgsUsage: "[newOwnerAddress senderAddress]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "actor",
+ Usage: "specify the address of miner actor",
+ },
+ &cli.BoolFlag{
+ Name: "really-do-it",
+ Usage: "Actually send transaction performing the action",
+ Value: false,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Bool("really-do-it") {
+ fmt.Println("Pass --really-do-it to actually execute this action")
+ return nil
+ }
+
+ if cctx.NArg() != 2 {
+ return fmt.Errorf("must pass new owner address and sender address")
+ }
+
+ var maddr address.Address
+ if act := cctx.String("actor"); act != "" {
+ var err error
+ maddr, err = address.NewFromString(act)
+ if err != nil {
+ return fmt.Errorf("parsing address %s: %w", act, err)
+ }
+ }
+
+ nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer acloser()
+
+ ctx := lcli.ReqContext(cctx)
+
+ na, err := address.NewFromString(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ newAddrId, err := nodeAPI.StateLookupID(ctx, na, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ fa, err := address.NewFromString(cctx.Args().Get(1))
+ if err != nil {
+ return err
+ }
+
+ fromAddrId, err := nodeAPI.StateLookupID(ctx, fa, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ if maddr.Empty() {
+ minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ maddr, err = minerAPI.ActorAddress(ctx)
+ if err != nil {
+ return err
+ }
+ }
+
+ mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ if fromAddrId != mi.Owner && fromAddrId != newAddrId {
+ return xerrors.New("from address must either be the old owner or the new owner")
+ }
+
+ sp, err := actors.SerializeParams(&newAddrId)
+ if err != nil {
+ return xerrors.Errorf("serializing params: %w", err)
+ }
+
+ smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{
+ From: fromAddrId,
+ To: maddr,
+ Method: miner.Methods.ChangeOwnerAddress,
+ Value: big.Zero(),
+ Params: sp,
+ }, nil)
+ if err != nil {
+ return xerrors.Errorf("mpool push: %w", err)
+ }
+
+ fmt.Println("Message CID:", smsg.Cid())
+
+ // wait for it to get mined into a block
+ wait, err := nodeAPI.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ // check it executed successfully
+ if wait.Receipt.ExitCode != 0 {
+ fmt.Println("owner change failed!")
+ return err
+ }
+
+ fmt.Println("message succeeded!")
+
+ return nil
+ },
+}
+
+var actorControl = &cli.Command{
+ Name: "control",
+ Usage: "Manage control addresses",
+ Subcommands: []*cli.Command{
+ actorControlList,
+ actorControlSet,
+ },
+}
+
+var actorControlList = &cli.Command{
+ Name: "list",
+ Usage: "Get currently set control addresses",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "actor",
+ Usage: "specify the address of miner actor",
+ },
+ &cli.BoolFlag{
+ Name: "verbose",
+ },
+ &cli.BoolFlag{
+ Name: "color",
+ Usage: "use color in display output",
+ DefaultText: "depends on output being a TTY",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.IsSet("color") {
+ color.NoColor = !cctx.Bool("color")
+ }
+
+ var maddr address.Address
+ if act := cctx.String("actor"); act != "" {
+ var err error
+ maddr, err = address.NewFromString(act)
+ if err != nil {
+ return fmt.Errorf("parsing address %s: %w", act, err)
+ }
+ }
+
+ nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer acloser()
+
+ ctx := lcli.ReqContext(cctx)
+
+ if maddr.Empty() {
+ minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ maddr, err = minerAPI.ActorAddress(ctx)
+ if err != nil {
+ return err
+ }
+ }
+
+ mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ tw := tablewriter.New(
+ tablewriter.Col("name"),
+ tablewriter.Col("ID"),
+ tablewriter.Col("key"),
+ tablewriter.Col("balance"),
+ )
+
+ printKey := func(name string, a address.Address) {
+ b, err := nodeAPI.WalletBalance(ctx, a)
+ if err != nil {
+ fmt.Printf("%s\t%s: error getting balance: %s\n", name, a, err)
+ return
+ }
+
+ k, err := nodeAPI.StateAccountKey(ctx, a, types.EmptyTSK)
+ if err != nil {
+ fmt.Printf("%s\t%s: error getting account key: %s\n", name, a, err)
+ return
+ }
+
+ kstr := k.String()
+ if !cctx.Bool("verbose") {
+ kstr = kstr[:9] + "..."
+ }
+
+ bstr := types.FIL(b).String()
+ switch {
+ case b.LessThan(types.FromFil(10)):
+ bstr = color.RedString(bstr)
+ case b.LessThan(types.FromFil(50)):
+ bstr = color.YellowString(bstr)
+ default:
+ bstr = color.GreenString(bstr)
+ }
+
+ tw.Write(map[string]interface{}{
+ "name": name,
+ "ID": a,
+ "key": kstr,
+ "balance": bstr,
+ })
+ }
+
+ printKey("owner", mi.Owner)
+ printKey("worker", mi.Worker)
+ for i, ca := range mi.ControlAddresses {
+ printKey(fmt.Sprintf("control-%d", i), ca)
+ }
+
+ return tw.Flush(os.Stdout)
+ },
+}
+
+var actorControlSet = &cli.Command{
+ Name: "set",
+ Usage: "Set control address(-es)",
+ ArgsUsage: "[...address]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "actor",
+ Usage: "specify the address of miner actor",
+ },
+ &cli.BoolFlag{
+ Name: "really-do-it",
+ Usage: "Actually send transaction performing the action",
+ Value: false,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Bool("really-do-it") {
+ fmt.Println("Pass --really-do-it to actually execute this action")
+ return nil
+ }
+
+ var maddr address.Address
+ if act := cctx.String("actor"); act != "" {
+ var err error
+ maddr, err = address.NewFromString(act)
+ if err != nil {
+ return fmt.Errorf("parsing address %s: %w", act, err)
+ }
+ }
+
+ nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer acloser()
+
+ ctx := lcli.ReqContext(cctx)
+
+ if maddr.Empty() {
+ minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ maddr, err = minerAPI.ActorAddress(ctx)
+ if err != nil {
+ return err
+ }
+ }
+
+ mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ del := map[address.Address]struct{}{}
+ existing := map[address.Address]struct{}{}
+ for _, controlAddress := range mi.ControlAddresses {
+ ka, err := nodeAPI.StateAccountKey(ctx, controlAddress, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ del[ka] = struct{}{}
+ existing[ka] = struct{}{}
+ }
+
+ var toSet []address.Address
+
+ for i, as := range cctx.Args().Slice() {
+ a, err := address.NewFromString(as)
+ if err != nil {
+ return xerrors.Errorf("parsing address %d: %w", i, err)
+ }
+
+ ka, err := nodeAPI.StateAccountKey(ctx, a, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ // make sure the address exists on chain
+ _, err = nodeAPI.StateLookupID(ctx, ka, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("looking up %s: %w", ka, err)
+ }
+
+ delete(del, ka)
+ toSet = append(toSet, ka)
+ }
+
+ for a := range del {
+ fmt.Println("Remove", a)
+ }
+ for _, a := range toSet {
+ if _, exists := existing[a]; !exists {
+ fmt.Println("Add", a)
+ }
+ }
+
+ cwp := &miner2.ChangeWorkerAddressParams{
+ NewWorker: mi.Worker,
+ NewControlAddrs: toSet,
+ }
+
+ sp, err := actors.SerializeParams(cwp)
+ if err != nil {
+ return xerrors.Errorf("serializing params: %w", err)
+ }
+
+ smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{
+ From: mi.Owner,
+ To: maddr,
+ Method: miner.Methods.ChangeWorkerAddress,
+
+ Value: big.Zero(),
+ Params: sp,
+ }, nil)
+ if err != nil {
+ return xerrors.Errorf("mpool push: %w", err)
+ }
+
+ fmt.Println("Message CID:", smsg.Cid())
+
+ return nil
+ },
+}
+
+var actorProposeChangeWorker = &cli.Command{
+ Name: "propose-change-worker",
+ Usage: "Propose a worker address change",
+ ArgsUsage: "[address]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "actor",
+ Usage: "specify the address of miner actor",
+ },
+ &cli.BoolFlag{
+ Name: "really-do-it",
+ Usage: "Actually send transaction performing the action",
+ Value: false,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Args().Present() {
+ return fmt.Errorf("must pass address of new worker address")
+ }
+
+ if !cctx.Bool("really-do-it") {
+ fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action")
+ return nil
+ }
+
+ var maddr address.Address
+ if act := cctx.String("actor"); act != "" {
+ var err error
+ maddr, err = address.NewFromString(act)
+ if err != nil {
+ return fmt.Errorf("parsing address %s: %w", act, err)
+ }
+ }
+
+ nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer acloser()
+
+ ctx := lcli.ReqContext(cctx)
+
+ na, err := address.NewFromString(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ newAddr, err := nodeAPI.StateLookupID(ctx, na, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ if maddr.Empty() {
+ minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ maddr, err = minerAPI.ActorAddress(ctx)
+ if err != nil {
+ return err
+ }
+ }
+
+ mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ if mi.NewWorker.Empty() {
+ if mi.Worker == newAddr {
+ return fmt.Errorf("worker address already set to %s", na)
+ }
+ } else {
+ if mi.NewWorker == newAddr {
+ return fmt.Errorf("change to worker address %s already pending", na)
+ }
+ }
+
+ cwp := &miner2.ChangeWorkerAddressParams{
+ NewWorker: newAddr,
+ NewControlAddrs: mi.ControlAddresses,
+ }
+
+ sp, err := actors.SerializeParams(cwp)
+ if err != nil {
+ return xerrors.Errorf("serializing params: %w", err)
+ }
+
+ smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{
+ From: mi.Owner,
+ To: maddr,
+ Method: miner.Methods.ChangeWorkerAddress,
+ Value: big.Zero(),
+ Params: sp,
+ }, nil)
+ if err != nil {
+ return xerrors.Errorf("mpool push: %w", err)
+ }
+
+ fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", smsg.Cid())
+
+ // wait for it to get mined into a block
+ wait, err := nodeAPI.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ // check it executed successfully
+ if wait.Receipt.ExitCode != 0 {
+ fmt.Fprintln(cctx.App.Writer, "Propose worker change failed!")
+ return fmt.Errorf("propose worker change failed (exit code %d)", wait.Receipt.ExitCode)
+ }
+
+ mi, err = nodeAPI.StateMinerInfo(ctx, maddr, wait.TipSet)
+ if err != nil {
+ return err
+ }
+ if mi.NewWorker != newAddr {
+ return fmt.Errorf("Proposed worker address change not reflected on chain: expected '%s', found '%s'", na, mi.NewWorker)
+ }
+
+ fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully proposed.\n", na)
+ fmt.Fprintf(cctx.App.Writer, "Call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch)
+
+ return nil
+ },
+}
+
+var actorConfirmChangeWorker = &cli.Command{
+ Name: "confirm-change-worker",
+ Usage: "Confirm a worker address change",
+ ArgsUsage: "[address]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "actor",
+ Usage: "specify the address of miner actor",
+ },
+ &cli.BoolFlag{
+ Name: "really-do-it",
+ Usage: "Actually send transaction performing the action",
+ Value: false,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Args().Present() {
+ return fmt.Errorf("must pass address of new worker address")
+ }
+
+ if !cctx.Bool("really-do-it") {
+ fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action")
+ return nil
+ }
+
+ var maddr address.Address
+ if act := cctx.String("actor"); act != "" {
+ var err error
+ maddr, err = address.NewFromString(act)
+ if err != nil {
+ return fmt.Errorf("parsing address %s: %w", act, err)
+ }
+ }
+
+ nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer acloser()
+
+ ctx := lcli.ReqContext(cctx)
+
+ na, err := address.NewFromString(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ newAddr, err := nodeAPI.StateLookupID(ctx, na, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ if maddr.Empty() {
+ minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ maddr, err = minerAPI.ActorAddress(ctx)
+ if err != nil {
+ return err
+ }
+ }
+
+ mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ if mi.NewWorker.Empty() {
+ return xerrors.Errorf("no worker key change proposed")
+ } else if mi.NewWorker != newAddr {
+ return xerrors.Errorf("worker key %s does not match current worker key proposal %s", newAddr, mi.NewWorker)
+ }
+
+ if head, err := nodeAPI.ChainHead(ctx); err != nil {
+ return xerrors.Errorf("failed to get the chain head: %w", err)
+ } else if head.Height() < mi.WorkerChangeEpoch {
+ return xerrors.Errorf("worker key change cannot be confirmed until %d, current height is %d", mi.WorkerChangeEpoch, head.Height())
+ }
+
+ smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{
+ From: mi.Owner,
+ To: maddr,
+ Method: miner.Methods.ConfirmUpdateWorkerKey,
+ Value: big.Zero(),
+ }, nil)
+ if err != nil {
+ return xerrors.Errorf("mpool push: %w", err)
+ }
+
+ fmt.Fprintln(cctx.App.Writer, "Confirm Message CID:", smsg.Cid())
+
+ // wait for it to get mined into a block
+ wait, err := nodeAPI.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ // check it executed successfully
+ if wait.Receipt.ExitCode != 0 {
+ fmt.Fprintln(cctx.App.Writer, "Worker change failed!")
+ return fmt.Errorf("worker change failed (exit code %d)", wait.Receipt.ExitCode)
+ }
+
+ mi, err = nodeAPI.StateMinerInfo(ctx, maddr, wait.TipSet)
+ if err != nil {
+ return err
+ }
+ if mi.Worker != newAddr {
+ return fmt.Errorf("Confirmed worker address change not reflected on chain: expected '%s', found '%s'", newAddr, mi.Worker)
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/balances.go b/cmd/lotus-shed/balances.go
index b12c069f573..87530c666ee 100644
--- a/cmd/lotus-shed/balances.go
+++ b/cmd/lotus-shed/balances.go
@@ -2,14 +2,25 @@ package main
import (
"context"
+ "encoding/csv"
+ "encoding/json"
"fmt"
+ "io"
+ "os"
+ "runtime"
"strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/gen/genesis"
_init "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/docker/go-units"
+
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
@@ -24,6 +35,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
+
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/state"
@@ -33,7 +45,6 @@ import (
"github.com/filecoin-project/lotus/chain/vm"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
- "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/node/repo"
)
@@ -58,8 +69,321 @@ var auditsCmd = &cli.Command{
Description: "a collection of utilities for auditing the filecoin chain",
Subcommands: []*cli.Command{
chainBalanceCmd,
+ chainBalanceSanityCheckCmd,
chainBalanceStateCmd,
chainPledgeCmd,
+ fillBalancesCmd,
+ duplicatedMessagesCmd,
+ },
+}
+
+var duplicatedMessagesCmd = &cli.Command{
+ Name: "duplicate-messages",
+ Usage: "Check for duplicate messages included in a tipset.",
+ UsageText: `Check for duplicate messages included in a tipset.
+
+Due to Filecoin's expected consensus, a tipset may include the same message multiple times in
+different blocks. The message will only be executed once.
+
+This command will find such duplicate messages and print them to standard out as newline-delimited
+JSON. Status messages in the form of "H: $HEIGHT ($PROGRESS%)" will be printed to standard error for
+every day of chain processed.
+`,
+ Flags: []cli.Flag{
+ &cli.IntFlag{
+ Name: "parallel",
+ Usage: "the number of parallel threads for block processing",
+ DefaultText: "half the number of cores",
+ },
+ &cli.IntFlag{
+ Name: "start",
+ Usage: "the first epoch to check",
+ DefaultText: "genesis",
+ },
+ &cli.IntFlag{
+ Name: "end",
+ Usage: "the last epoch to check",
+ DefaultText: "the current head",
+ },
+ &cli.IntSliceFlag{
+ Name: "method",
+ Usage: "filter results by method number",
+ DefaultText: "all methods",
+ },
+ &cli.StringSliceFlag{
+ Name: "include-to",
+ Usage: "include only messages to the given address (does not perform address resolution)",
+ DefaultText: "all recipients",
+ },
+ &cli.StringSliceFlag{
+ Name: "include-from",
+ Usage: "include only messages from the given address (does not perform address resolution)",
+ DefaultText: "all senders",
+ },
+ &cli.StringSliceFlag{
+ Name: "exclude-to",
+ Usage: "exclude messages to the given address (does not perform address resolution)",
+ },
+ &cli.StringSliceFlag{
+ Name: "exclude-from",
+ Usage: "exclude messages from the given address (does not perform address resolution)",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ var head *types.TipSet
+ if cctx.IsSet("end") {
+ epoch := abi.ChainEpoch(cctx.Int("end"))
+ head, err = api.ChainGetTipSetByHeight(ctx, epoch, types.EmptyTSK)
+ } else {
+ head, err = api.ChainHead(ctx)
+ }
+ if err != nil {
+ return err
+ }
+
+ var printLk sync.Mutex
+
+ threads := runtime.NumCPU() / 2
+ if cctx.IsSet("parallel") {
+ threads = cctx.Int("parallel")
+ if threads <= 0 {
+ return fmt.Errorf("parallelism needs to be at least 1")
+ }
+ } else if threads == 0 {
+ threads = 1 // if we have one core, but who are we kidding...
+ }
+
+ throttle := make(chan struct{}, threads)
+
+ methods := map[abi.MethodNum]bool{}
+ for _, m := range cctx.IntSlice("method") {
+ if m < 0 {
+ return fmt.Errorf("expected method numbers to be non-negative")
+ }
+ methods[abi.MethodNum(m)] = true
+ }
+
+ addressSet := func(flag string) (map[address.Address]bool, error) {
+ if !cctx.IsSet(flag) {
+ return nil, nil
+ }
+ addrs := cctx.StringSlice(flag)
+ set := make(map[address.Address]bool, len(addrs))
+ for _, addrStr := range addrs {
+ addr, err := address.NewFromString(addrStr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse address %s: %w", addrStr, err)
+ }
+ set[addr] = true
+ }
+ return set, nil
+ }
+
+ onlyFrom, err := addressSet("include-from")
+ if err != nil {
+ return err
+ }
+ onlyTo, err := addressSet("include-to")
+ if err != nil {
+ return err
+ }
+ excludeFrom, err := addressSet("exclude-from")
+ if err != nil {
+ return err
+ }
+ excludeTo, err := addressSet("exclude-to")
+ if err != nil {
+ return err
+ }
+
+ target := abi.ChainEpoch(cctx.Int("start"))
+ if target < 0 || target > head.Height() {
+ return fmt.Errorf("start height must be greater than or equal to 0 and less than the end height")
+ }
+ totalEpochs := head.Height() - target
+
+ for target <= head.Height() {
+ select {
+ case throttle <- struct{}{}:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ go func(ts *types.TipSet) {
+ defer func() {
+ <-throttle
+ }()
+
+ type addrNonce struct {
+ s address.Address
+ n uint64
+ }
+ anonce := func(m *types.Message) addrNonce {
+ return addrNonce{
+ s: m.From,
+ n: m.Nonce,
+ }
+ }
+
+ msgs := map[addrNonce]map[cid.Cid]*types.Message{}
+
+ processMessage := func(c cid.Cid, m *types.Message) {
+ // Filter
+ if len(methods) > 0 && !methods[m.Method] {
+ return
+ }
+ if len(onlyFrom) > 0 && !onlyFrom[m.From] {
+ return
+ }
+ if len(onlyTo) > 0 && !onlyTo[m.To] {
+ return
+ }
+ if excludeFrom[m.From] || excludeTo[m.To] {
+ return
+ }
+
+ // Record
+ msgSet, ok := msgs[anonce(m)]
+ if !ok {
+ msgSet = make(map[cid.Cid]*types.Message, 1)
+ msgs[anonce(m)] = msgSet
+ }
+ msgSet[c] = m
+ }
+
+ encoder := json.NewEncoder(os.Stdout)
+
+ for _, bh := range ts.Blocks() {
+ bms, err := api.ChainGetBlockMessages(ctx, bh.Cid())
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "ERROR: ", err)
+ return
+ }
+
+ for i, m := range bms.BlsMessages {
+ processMessage(bms.Cids[i], m)
+ }
+
+ for i, m := range bms.SecpkMessages {
+ processMessage(bms.Cids[len(bms.BlsMessages)+i], &m.Message)
+ }
+ }
+ for _, ms := range msgs {
+ if len(ms) == 1 {
+ continue
+ }
+ type Msg struct {
+ Cid string
+ Value string
+ Method uint64
+ }
+ grouped := map[string][]Msg{}
+ for c, m := range ms {
+ addr := m.To.String()
+ grouped[addr] = append(grouped[addr], Msg{
+ Cid: c.String(),
+ Value: types.FIL(m.Value).String(),
+ Method: uint64(m.Method),
+ })
+ }
+ printLk.Lock()
+ err := encoder.Encode(grouped)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "ERROR: ", err)
+ }
+ printLk.Unlock()
+ }
+ }(head)
+
+ if head.Parents().IsEmpty() {
+ break
+ }
+
+ head, err = api.ChainGetTipSet(ctx, head.Parents())
+ if err != nil {
+ return err
+ }
+
+ if head.Height()%2880 == 0 {
+ printLk.Lock()
+ fmt.Fprintf(os.Stderr, "H: %s (%d%%)\n", head.Height(), (100*(head.Height()-target))/totalEpochs)
+ printLk.Unlock()
+ }
+ }
+
+ for i := 0; i < threads; i++ {
+ select {
+ case throttle <- struct{}{}:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ }
+
+ printLk.Lock()
+ fmt.Fprintf(os.Stderr, "H: %s (100%%)\n", head.Height())
+ printLk.Unlock()
+
+ return nil
+ },
+}
+
+var chainBalanceSanityCheckCmd = &cli.Command{
+ Name: "chain-balance-sanity",
+ Description: "Confirms that the total balance of every actor in state is still 2 billion",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "tipset",
+ Usage: "specify tipset to start from",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ ts, err := lcli.LoadTipSet(ctx, cctx, api)
+ if err != nil {
+ return err
+ }
+
+ tsk := ts.Key()
+ actors, err := api.StateListActors(ctx, tsk)
+ if err != nil {
+ return err
+ }
+
+ bal := big.Zero()
+ for _, addr := range actors {
+ act, err := api.StateGetActor(ctx, addr, tsk)
+ if err != nil {
+ return err
+ }
+
+ bal = big.Add(bal, act.Balance)
+ }
+
+ attoBase := big.Mul(big.NewInt(int64(build.FilBase)), big.NewInt(int64(build.FilecoinPrecision)))
+
+ if big.Cmp(attoBase, bal) != 0 {
+ return xerrors.Errorf("sanity check failed (expected %s, actual %s)", attoBase, bal)
+ }
+
+ fmt.Println("sanity check successful")
+
+ return nil
},
}
@@ -168,19 +492,26 @@ var chainBalanceStateCmd = &cli.Command{
defer lkrepo.Close() //nolint:errcheck
- ds, err := lkrepo.Datastore("/chain")
+ bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
- return err
+ return fmt.Errorf("failed to open blockstore: %w", err)
}
- mds, err := lkrepo.Datastore("/metadata")
+ defer func() {
+ if c, ok := bs.(io.Closer); ok {
+ if err := c.Close(); err != nil {
+ log.Warnf("failed to close blockstore: %s", err)
+ }
+ }
+ }()
+
+ mds, err := lkrepo.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
- bs := blockstore.NewBlockstore(ds)
-
- cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
+ cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
+ defer cs.Close() //nolint:errcheck
cst := cbor.NewCborStore(bs)
store := adt.WrapStore(ctx, cst)
@@ -382,19 +713,26 @@ var chainPledgeCmd = &cli.Command{
defer lkrepo.Close() //nolint:errcheck
- ds, err := lkrepo.Datastore("/chain")
+ bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
- return err
+ return xerrors.Errorf("failed to open blockstore: %w", err)
}
- mds, err := lkrepo.Datastore("/metadata")
+ defer func() {
+ if c, ok := bs.(io.Closer); ok {
+ if err := c.Close(); err != nil {
+ log.Warnf("failed to close blockstore: %s", err)
+ }
+ }
+ }()
+
+ mds, err := lkrepo.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
- bs := blockstore.NewBlockstore(ds)
-
- cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
+ cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
+ defer cs.Close() //nolint:errcheck
cst := cbor.NewCborStore(bs)
store := adt.WrapStore(ctx, cst)
@@ -471,3 +809,119 @@ var chainPledgeCmd = &cli.Command{
return nil
},
}
+
+const dateFmt = "1/02/06"
+
+func parseCsv(inp string) ([]time.Time, []address.Address, error) {
+ fi, err := os.Open(inp)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ r := csv.NewReader(fi)
+ recs, err := r.ReadAll()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var addrs []address.Address
+ for _, rec := range recs[1:] {
+ a, err := address.NewFromString(rec[0])
+ if err != nil {
+ return nil, nil, err
+ }
+ addrs = append(addrs, a)
+ }
+
+ var dates []time.Time
+ for _, d := range recs[0][1:] {
+ if len(d) == 0 {
+ continue
+ }
+ p := strings.Split(d, " ")
+ t, err := time.Parse(dateFmt, p[len(p)-1])
+ if err != nil {
+ return nil, nil, err
+ }
+
+ dates = append(dates, t)
+ }
+
+ return dates, addrs, nil
+}
+
+func heightForDate(d time.Time, ts *types.TipSet) abi.ChainEpoch {
+ secs := d.Unix()
+ gents := ts.Blocks()[0].Timestamp
+ gents -= uint64(30 * ts.Height())
+ return abi.ChainEpoch((secs - int64(gents)) / 30)
+}
+
+var fillBalancesCmd = &cli.Command{
+ Name: "fill-balances",
+ Description: "fill out balances for addresses on dates in given spreadsheet",
+ Flags: []cli.Flag{},
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ dates, addrs, err := parseCsv(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ ts, err := api.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+
+ var tipsets []*types.TipSet
+ for _, d := range dates {
+ h := heightForDate(d, ts)
+ hts, err := api.ChainGetTipSetByHeight(ctx, h, ts.Key())
+ if err != nil {
+ return err
+ }
+ tipsets = append(tipsets, hts)
+ }
+
+ var balances [][]abi.TokenAmount
+ for _, a := range addrs {
+ var b []abi.TokenAmount
+ for _, hts := range tipsets {
+ act, err := api.StateGetActor(ctx, a, hts.Key())
+ if err != nil {
+ if !strings.Contains(err.Error(), "actor not found") {
+ return fmt.Errorf("error for %s at %s: %w", a, hts.Key(), err)
+ }
+ b = append(b, types.NewInt(0))
+ continue
+ }
+ b = append(b, act.Balance)
+ }
+ balances = append(balances, b)
+ }
+
+ var datestrs []string
+ for _, d := range dates {
+ datestrs = append(datestrs, "Balance at "+d.Format(dateFmt))
+ }
+
+ w := csv.NewWriter(os.Stdout)
+ w.Write(append([]string{"Wallet Address"}, datestrs...)) // nolint:errcheck
+ for i := 0; i < len(addrs); i++ {
+ row := []string{addrs[i].String()}
+ for _, b := range balances[i] {
+ row = append(row, types.FIL(b).String())
+ }
+ w.Write(row) // nolint:errcheck
+ }
+ w.Flush()
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/base64.go b/cmd/lotus-shed/base64.go
new file mode 100644
index 00000000000..3f0469ef982
--- /dev/null
+++ b/cmd/lotus-shed/base64.go
@@ -0,0 +1,75 @@
+package main
+
+import (
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+
+ "github.com/filecoin-project/go-state-types/abi"
+
+ "github.com/filecoin-project/go-address"
+
+ "github.com/urfave/cli/v2"
+)
+
+var base64Cmd = &cli.Command{
+ Name: "base64",
+ Description: "multiformats base64",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "decodeAddr",
+ Value: false,
+ Usage: "Decode a base64 addr",
+ },
+ &cli.BoolFlag{
+ Name: "decodeBig",
+ Value: false,
+ Usage: "Decode a base64 big",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ var input io.Reader
+
+ if cctx.Args().Len() == 0 {
+ input = os.Stdin
+ } else {
+ input = strings.NewReader(cctx.Args().First())
+ }
+
+ bytes, err := ioutil.ReadAll(input)
+ if err != nil {
+ return err
+ }
+
+ decoded, err := base64.RawStdEncoding.DecodeString(strings.TrimSpace(string(bytes)))
+ if err != nil {
+ return err
+ }
+
+ if cctx.Bool("decodeAddr") {
+ addr, err := address.NewFromBytes(decoded)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(addr)
+
+ return nil
+ }
+
+ if cctx.Bool("decodeBig") {
+ var val abi.TokenAmount
+ err = val.UnmarshalBinary(decoded)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(val)
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/bitfield.go b/cmd/lotus-shed/bitfield.go
index 442cbef4846..f0824de4f2a 100644
--- a/cmd/lotus-shed/bitfield.go
+++ b/cmd/lotus-shed/bitfield.go
@@ -17,6 +17,7 @@ import (
var bitFieldCmd = &cli.Command{
Name: "bitfield",
+ Usage: "Bitfield analyze tool",
Description: "analyze bitfields",
Flags: []cli.Flag{
&cli.StringFlag{
@@ -26,53 +27,24 @@ var bitFieldCmd = &cli.Command{
},
},
Subcommands: []*cli.Command{
+ bitFieldEncodeCmd,
+ bitFieldDecodeCmd,
bitFieldRunsCmd,
bitFieldStatCmd,
- bitFieldDecodeCmd,
+ bitFieldMergeCmd,
bitFieldIntersectCmd,
- bitFieldEncodeCmd,
bitFieldSubCmd,
},
}
var bitFieldRunsCmd = &cli.Command{
Name: "runs",
+ Usage: "Bitfield bit runs",
Description: "print bit runs in a bitfield",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: "enc",
- Value: "base64",
- Usage: "specify input encoding to parse",
- },
- },
Action: func(cctx *cli.Context) error {
- var val string
- if cctx.Args().Present() {
- val = cctx.Args().Get(0)
- } else {
- b, err := ioutil.ReadAll(os.Stdin)
- if err != nil {
- return err
- }
- val = string(b)
- }
-
- var dec []byte
- switch cctx.String("enc") {
- case "base64":
- d, err := base64.StdEncoding.DecodeString(val)
- if err != nil {
- return fmt.Errorf("decoding base64 value: %w", err)
- }
- dec = d
- case "hex":
- d, err := hex.DecodeString(val)
- if err != nil {
- return fmt.Errorf("decoding hex value: %w", err)
- }
- dec = d
- default:
- return fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
+ dec, err := decodeToByte(cctx, 0)
+ if err != nil {
+ return err
}
rle, err := rlepluslazy.FromBuf(dec)
@@ -98,7 +70,7 @@ var bitFieldRunsCmd = &cli.Command{
s = "FALSE"
}
- fmt.Printf("@%d %s * %d\n", idx, s, r.Len)
+ fmt.Printf("@%08d %s * %d\n", idx, s, r.Len)
idx += r.Len
}
@@ -109,43 +81,14 @@ var bitFieldRunsCmd = &cli.Command{
var bitFieldStatCmd = &cli.Command{
Name: "stat",
+ Usage: "Bitfield stats",
Description: "print bitfield stats",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: "enc",
- Value: "base64",
- Usage: "specify input encoding to parse",
- },
- },
Action: func(cctx *cli.Context) error {
- var val string
- if cctx.Args().Present() {
- val = cctx.Args().Get(0)
- } else {
- b, err := ioutil.ReadAll(os.Stdin)
- if err != nil {
- return err
- }
- val = string(b)
- }
-
- var dec []byte
- switch cctx.String("enc") {
- case "base64":
- d, err := base64.StdEncoding.DecodeString(val)
- if err != nil {
- return fmt.Errorf("decoding base64 value: %w", err)
- }
- dec = d
- case "hex":
- d, err := hex.DecodeString(val)
- if err != nil {
- return fmt.Errorf("decoding hex value: %w", err)
- }
- dec = d
- default:
- return fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
+ dec, err := decodeToByte(cctx, 0)
+ if err != nil {
+ return err
}
+ fmt.Printf("Raw length: %d bits (%d bytes)\n", len(dec)*8, len(dec))
rle, err := rlepluslazy.FromBuf(dec)
if err != nil {
@@ -157,10 +100,7 @@ var bitFieldStatCmd = &cli.Command{
return xerrors.Errorf("getting run iterator: %w", err)
}
- fmt.Printf("Raw length: %d bits (%d bytes)\n", len(dec)*8, len(dec))
-
var ones, zeros, oneRuns, zeroRuns, invalid uint64
-
for rit.HasNext() {
r, err := rit.NextRun()
if err != nil {
@@ -195,14 +135,8 @@ var bitFieldStatCmd = &cli.Command{
var bitFieldDecodeCmd = &cli.Command{
Name: "decode",
+ Usage: "Bitfield to decimal number",
Description: "decode bitfield and print all numbers in it",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: "enc",
- Value: "base64",
- Usage: "specify input encoding to parse",
- },
- },
Action: func(cctx *cli.Context) error {
rle, err := decode(cctx, 0)
if err != nil {
@@ -219,43 +153,61 @@ var bitFieldDecodeCmd = &cli.Command{
},
}
-var bitFieldIntersectCmd = &cli.Command{
- Name: "intersect",
- Description: "intersect 2 bitfields and print the resulting bitfield as base64",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: "enc",
- Value: "base64",
- Usage: "specify input encoding to parse",
- },
- },
+var bitFieldMergeCmd = &cli.Command{
+ Name: "merge",
+ Usage: "Merge 2 bitfields",
+ Description: "Merge 2 bitfields and print the resulting bitfield",
Action: func(cctx *cli.Context) error {
- b, err := decode(cctx, 1)
+ a, err := decode(cctx, 0)
if err != nil {
return err
}
- a, err := decode(cctx, 0)
+ b, err := decode(cctx, 1)
if err != nil {
return err
}
- o, err := bitfield.IntersectBitField(a, b)
+ o, err := bitfield.MergeBitFields(a, b)
if err != nil {
- return xerrors.Errorf("intersect: %w", err)
+ return xerrors.Errorf("merge: %w", err)
}
- s, err := o.RunIterator()
+ str, err := encode(cctx, o)
if err != nil {
return err
}
+ fmt.Println(str)
- bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
+ return nil
+ },
+}
+
+var bitFieldIntersectCmd = &cli.Command{
+ Name: "intersect",
+ Usage: "Intersect 2 bitfields",
+ Description: "intersect 2 bitfields and print the resulting bitfield",
+ Action: func(cctx *cli.Context) error {
+ a, err := decode(cctx, 0)
+ if err != nil {
+ return err
+ }
+
+ b, err := decode(cctx, 1)
if err != nil {
return err
}
- fmt.Println(base64.StdEncoding.EncodeToString(bytes))
+ o, err := bitfield.IntersectBitField(a, b)
+ if err != nil {
+ return xerrors.Errorf("intersect: %w", err)
+ }
+
+ str, err := encode(cctx, o)
+ if err != nil {
+ return err
+ }
+ fmt.Println(str)
return nil
},
@@ -263,41 +215,29 @@ var bitFieldIntersectCmd = &cli.Command{
var bitFieldSubCmd = &cli.Command{
Name: "sub",
- Description: "subtract 2 bitfields and print the resulting bitfield as base64",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: "enc",
- Value: "base64",
- Usage: "specify input encoding to parse",
- },
- },
+ Usage: "Subtract 2 bitfields",
+ Description: "subtract 2 bitfields and print the resulting bitfield",
Action: func(cctx *cli.Context) error {
- b, err := decode(cctx, 1)
+ a, err := decode(cctx, 0)
if err != nil {
return err
}
- a, err := decode(cctx, 0)
+ b, err := decode(cctx, 1)
if err != nil {
return err
}
o, err := bitfield.SubtractBitField(a, b)
if err != nil {
- return xerrors.Errorf("intersect: %w", err)
- }
-
- s, err := o.RunIterator()
- if err != nil {
- return err
+ return xerrors.Errorf("subtract: %w", err)
}
- bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
+ str, err := encode(cctx, o)
if err != nil {
return err
}
-
- fmt.Println(base64.StdEncoding.EncodeToString(bytes))
+ fmt.Println(str)
return nil
},
@@ -305,15 +245,9 @@ var bitFieldSubCmd = &cli.Command{
var bitFieldEncodeCmd = &cli.Command{
Name: "encode",
+ Usage: "Decimal number to bitfield",
Description: "encode a series of decimal numbers into a bitfield",
ArgsUsage: "[infile]",
- Flags: []cli.Flag{
- &cli.StringFlag{
- Name: "enc",
- Value: "base64",
- Usage: "specify input encoding to parse",
- },
- },
Action: func(cctx *cli.Context) error {
f, err := os.Open(cctx.Args().First())
if err != nil {
@@ -331,38 +265,64 @@ var bitFieldEncodeCmd = &cli.Command{
out.Set(i)
}
- s, err := out.RunIterator()
- if err != nil {
- return err
- }
-
- bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
+ str, err := encode(cctx, out)
if err != nil {
return err
}
-
- fmt.Println(base64.StdEncoding.EncodeToString(bytes))
+ fmt.Println(str)
return nil
},
}
-func decode(cctx *cli.Context, a int) (bitfield.BitField, error) {
+func encode(cctx *cli.Context, field bitfield.BitField) (string, error) {
+ s, err := field.RunIterator()
+ if err != nil {
+ return "", err
+ }
+
+ bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
+ if err != nil {
+ return "", err
+ }
+
+ var str string
+ switch cctx.String("enc") {
+ case "base64":
+ str = base64.StdEncoding.EncodeToString(bytes)
+ case "hex":
+ str = hex.EncodeToString(bytes)
+ default:
+ return "", fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
+ }
+
+ return str, nil
+
+}
+func decode(cctx *cli.Context, i int) (bitfield.BitField, error) {
+ b, err := decodeToByte(cctx, i)
+ if err != nil {
+ return bitfield.BitField{}, err
+ }
+ return bitfield.NewFromBytes(b)
+}
+
+func decodeToByte(cctx *cli.Context, i int) ([]byte, error) {
var val string
if cctx.Args().Present() {
- if a >= cctx.NArg() {
- return bitfield.BitField{}, xerrors.Errorf("need more than %d args", a)
+ if i >= cctx.NArg() {
+ return nil, xerrors.Errorf("need more than %d args", i)
}
- val = cctx.Args().Get(a)
+ val = cctx.Args().Get(i)
} else {
- if a > 0 {
- return bitfield.BitField{}, xerrors.Errorf("need more than %d args", a)
+ if i > 0 {
+ return nil, xerrors.Errorf("need more than %d args", i)
}
- b, err := ioutil.ReadAll(os.Stdin)
+ r, err := ioutil.ReadAll(os.Stdin)
if err != nil {
- return bitfield.BitField{}, err
+ return nil, err
}
- val = string(b)
+ val = string(r)
}
var dec []byte
@@ -370,18 +330,18 @@ func decode(cctx *cli.Context, a int) (bitfield.BitField, error) {
case "base64":
d, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return bitfield.BitField{}, fmt.Errorf("decoding base64 value: %w", err)
+ return nil, fmt.Errorf("decoding base64 value: %w", err)
}
dec = d
case "hex":
d, err := hex.DecodeString(val)
if err != nil {
- return bitfield.BitField{}, fmt.Errorf("decoding hex value: %w", err)
+ return nil, fmt.Errorf("decoding hex value: %w", err)
}
dec = d
default:
- return bitfield.BitField{}, fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
+ return nil, fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
}
- return bitfield.NewFromBytes(dec)
+ return dec, nil
}
diff --git a/cmd/lotus-shed/blockmsgid.go b/cmd/lotus-shed/blockmsgid.go
new file mode 100644
index 00000000000..85b786ec0e2
--- /dev/null
+++ b/cmd/lotus-shed/blockmsgid.go
@@ -0,0 +1,70 @@
+package main
+
+import (
+ "encoding/base64"
+ "fmt"
+
+ blake2b "github.com/minio/blake2b-simd"
+ "github.com/urfave/cli/v2"
+
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+)
+
+var blockmsgidCmd = &cli.Command{
+ Name: "blockmsgid",
+ Usage: "Print a block's pubsub message ID",
+ ArgsUsage: " ...",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ for _, arg := range cctx.Args().Slice() {
+ blkcid, err := cid.Decode(arg)
+ if err != nil {
+ return fmt.Errorf("error decoding block cid: %w", err)
+ }
+
+ blkhdr, err := api.ChainGetBlock(ctx, blkcid)
+ if err != nil {
+ return fmt.Errorf("error retrieving block header: %w", err)
+ }
+
+ blkmsgs, err := api.ChainGetBlockMessages(ctx, blkcid)
+ if err != nil {
+ return fmt.Errorf("error retrieving block messages: %w", err)
+ }
+
+ blkmsg := &types.BlockMsg{
+ Header: blkhdr,
+ }
+
+ for _, m := range blkmsgs.BlsMessages {
+ blkmsg.BlsMessages = append(blkmsg.BlsMessages, m.Cid())
+ }
+
+ for _, m := range blkmsgs.SecpkMessages {
+ blkmsg.SecpkMessages = append(blkmsg.SecpkMessages, m.Cid())
+ }
+
+ bytes, err := blkmsg.Serialize()
+ if err != nil {
+ return fmt.Errorf("error serializing BlockMsg: %w", err)
+ }
+
+ msgId := blake2b.Sum256(bytes)
+ msgId64 := base64.StdEncoding.EncodeToString(msgId[:])
+
+ fmt.Println(msgId64)
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/cid.go b/cmd/lotus-shed/cid.go
new file mode 100644
index 00000000000..d3bd2c3c9fa
--- /dev/null
+++ b/cmd/lotus-shed/cid.go
@@ -0,0 +1,82 @@
+package main
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ mh "github.com/multiformats/go-multihash"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+)
+
+var cidCmd = &cli.Command{
+ Name: "cid",
+ Usage: "Cid command",
+ Subcommands: cli.Commands{
+ cidIdCmd,
+ },
+}
+
+var cidIdCmd = &cli.Command{
+ Name: "id",
+ Usage: "Create identity CID from hex or base64 data",
+ ArgsUsage: "[data]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "encoding",
+ Value: "base64",
+ Usage: "specify input encoding to parse",
+ },
+ &cli.StringFlag{
+ Name: "codec",
+ Value: "id",
+ Usage: "multicodec-packed content types: abi or id",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Args().Present() {
+ return fmt.Errorf("must specify data")
+ }
+
+ var dec []byte
+ switch cctx.String("encoding") {
+ case "base64":
+ data, err := base64.StdEncoding.DecodeString(cctx.Args().First())
+ if err != nil {
+ return xerrors.Errorf("decoding base64 value: %w", err)
+ }
+ dec = data
+ case "hex":
+ data, err := hex.DecodeString(cctx.Args().First())
+ if err != nil {
+ return xerrors.Errorf("decoding hex value: %w", err)
+ }
+ dec = data
+ default:
+ return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding"))
+ }
+
+ switch cctx.String("codec") {
+ case "abi":
+ aCid, err := abi.CidBuilder.Sum(dec)
+ if err != nil {
+ return xerrors.Errorf("cidBuilder abi: %w", err)
+ }
+ fmt.Println(aCid)
+ case "id":
+ builder := cid.V1Builder{Codec: cid.Raw, MhType: mh.IDENTITY}
+ rCid, err := builder.Sum(dec)
+ if err != nil {
+ return xerrors.Errorf("cidBuilder raw: %w", err)
+ }
+ fmt.Println(rCid)
+ default:
+ return xerrors.Errorf("unrecognized codec: %s", cctx.String("codec"))
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/commp.go b/cmd/lotus-shed/commp.go
index 9b0cab75df2..6f7923c241a 100644
--- a/cmd/lotus-shed/commp.go
+++ b/cmd/lotus-shed/commp.go
@@ -1,27 +1,55 @@
package main
import (
+ "encoding/base64"
"encoding/hex"
"fmt"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
)
var commpToCidCmd = &cli.Command{
Name: "commp-to-cid",
+ Usage: "Convert commP to Cid",
Description: "Convert a raw commP to a piece-Cid",
+ ArgsUsage: "[data]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "encoding",
+ Value: "base64",
+ Usage: "specify input encoding to parse",
+ },
+ },
Action: func(cctx *cli.Context) error {
if !cctx.Args().Present() {
return fmt.Errorf("must specify commP to convert")
}
- dec, err := hex.DecodeString(cctx.Args().First())
- if err != nil {
- return fmt.Errorf("failed to decode input as hex string: %w", err)
+ var dec []byte
+ switch cctx.String("encoding") {
+ case "base64":
+ data, err := base64.StdEncoding.DecodeString(cctx.Args().First())
+ if err != nil {
+ return xerrors.Errorf("decoding base64 value: %w", err)
+ }
+ dec = data
+ case "hex":
+ data, err := hex.DecodeString(cctx.Args().First())
+ if err != nil {
+ return xerrors.Errorf("decoding hex value: %w", err)
+ }
+ dec = data
+ default:
+ return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding"))
}
- fmt.Println(commcid.PieceCommitmentV1ToCID(dec))
+ cid, err := commcid.PieceCommitmentV1ToCID(dec)
+ if err != nil {
+ return err
+ }
+ fmt.Println(cid)
return nil
},
}
diff --git a/cmd/lotus-shed/consensus.go b/cmd/lotus-shed/consensus.go
index 1fe7756c1fa..2c5df4ea59f 100644
--- a/cmd/lotus-shed/consensus.go
+++ b/cmd/lotus-shed/consensus.go
@@ -36,7 +36,7 @@ type consensusItem struct {
targetTipset *types.TipSet
headTipset *types.TipSet
peerID peer.ID
- version api.Version
+ version api.APIVersion
api api.FullNode
}
@@ -113,12 +113,12 @@ var consensusCheckCmd = &cli.Command{
return err
}
ainfo := cliutil.APIInfo{Addr: apima.String()}
- addr, err := ainfo.DialArgs()
+ addr, err := ainfo.DialArgs("v1")
if err != nil {
return err
}
- api, closer, err := client.NewFullNodeRPC(cctx.Context, addr, nil)
+ api, closer, err := client.NewFullNodeRPCV1(cctx.Context, addr, nil)
if err != nil {
return err
}
diff --git a/cmd/lotus-shed/cron-count.go b/cmd/lotus-shed/cron-count.go
new file mode 100644
index 00000000000..622f38791ff
--- /dev/null
+++ b/cmd/lotus-shed/cron-count.go
@@ -0,0 +1,99 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/build"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+)
+
+var cronWcCmd = &cli.Command{
+ Name: "cron-wc",
+ Description: "cron stats",
+ Subcommands: []*cli.Command{
+ minerDeadlineCronCountCmd,
+ },
+}
+
+var minerDeadlineCronCountCmd = &cli.Command{
+ Name: "deadline",
+ Description: "list all addresses of miners with active deadline crons",
+ Action: func(c *cli.Context) error {
+ return countDeadlineCrons(c)
+ },
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "tipset",
+ Usage: "specify tipset state to search on (pass comma separated array of cids)",
+ },
+ },
+}
+
+func findDeadlineCrons(c *cli.Context) (map[address.Address]struct{}, error) {
+ api, acloser, err := lcli.GetFullNodeAPI(c)
+ if err != nil {
+ return nil, err
+ }
+ defer acloser()
+ ctx := lcli.ReqContext(c)
+
+ ts, err := lcli.LoadTipSet(ctx, c, api)
+ if err != nil {
+ return nil, err
+ }
+ if ts == nil {
+ ts, err = api.ChainHead(ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ mAddrs, err := api.StateListMiners(ctx, ts.Key())
+ if err != nil {
+ return nil, err
+ }
+ activeMiners := make(map[address.Address]struct{})
+ for _, mAddr := range mAddrs {
+ // All miners have active cron before v4.
+ // v4 upgrade epoch is last epoch running v3 epoch and api.StateReadState reads
+ // parent state, so v4 state isn't read until upgrade epoch + 2
+ if ts.Height() <= build.UpgradeTurboHeight+1 {
+ activeMiners[mAddr] = struct{}{}
+ continue
+ }
+ st, err := api.StateReadState(ctx, mAddr, ts.Key())
+ if err != nil {
+ return nil, err
+ }
+ minerState, ok := st.State.(map[string]interface{})
+ if !ok {
+ return nil, xerrors.Errorf("internal error: failed to cast miner state to expected map type")
+ }
+
+ activeDlineIface, ok := minerState["DeadlineCronActive"]
+ if !ok {
+ return nil, xerrors.Errorf("miner %s had no deadline state, is this a v3 state root?", mAddr)
+ }
+ active := activeDlineIface.(bool)
+ if active {
+ activeMiners[mAddr] = struct{}{}
+ }
+ }
+
+ return activeMiners, nil
+}
+
+func countDeadlineCrons(c *cli.Context) error {
+ activeMiners, err := findDeadlineCrons(c)
+ if err != nil {
+ return err
+ }
+ for addr := range activeMiners {
+ fmt.Printf("%s\n", addr)
+ }
+
+ return nil
+}
diff --git a/cmd/lotus-shed/datastore.go b/cmd/lotus-shed/datastore.go
index c6bac6815bf..c3a9e572ce4 100644
--- a/cmd/lotus-shed/datastore.go
+++ b/cmd/lotus-shed/datastore.go
@@ -1,17 +1,23 @@
package main
import (
+ "bufio"
+ "context"
"encoding/json"
"fmt"
+ "io"
"os"
"strings"
+ "github.com/dgraph-io/badger/v2"
"github.com/docker/go-units"
"github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
- logging "github.com/ipfs/go-log"
+ logging "github.com/ipfs/go-log/v2"
+ "github.com/mitchellh/go-homedir"
"github.com/polydawn/refmt/cbor"
"github.com/urfave/cli/v2"
+ "go.uber.org/multierr"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/lib/backupds"
@@ -25,6 +31,7 @@ var datastoreCmd = &cli.Command{
datastoreBackupCmd,
datastoreListCmd,
datastoreGetCmd,
+ datastoreRewriteCmd,
},
}
@@ -69,7 +76,7 @@ var datastoreListCmd = &cli.Command{
}
defer lr.Close() //nolint:errcheck
- ds, err := lr.Datastore(datastore.NewKey(cctx.Args().First()).String())
+ ds, err := lr.Datastore(context.Background(), datastore.NewKey(cctx.Args().First()).String())
if err != nil {
return err
}
@@ -114,7 +121,7 @@ var datastoreGetCmd = &cli.Command{
},
ArgsUsage: "[namespace key]",
Action: func(cctx *cli.Context) error {
- logging.SetLogLevel("badger", "ERROR") // nolint:errchec
+ logging.SetLogLevel("badger", "ERROR") // nolint:errcheck
r, err := repo.NewFS(cctx.String("repo"))
if err != nil {
@@ -135,7 +142,7 @@ var datastoreGetCmd = &cli.Command{
}
defer lr.Close() //nolint:errcheck
- ds, err := lr.Datastore(datastore.NewKey(cctx.Args().First()).String())
+ ds, err := lr.Datastore(context.Background(), datastore.NewKey(cctx.Args().First()).String())
if err != nil {
return err
}
@@ -173,8 +180,11 @@ var datastoreBackupStatCmd = &cli.Command{
}
defer f.Close() // nolint:errcheck
- var keys, kbytes, vbytes uint64
- err = backupds.ReadBackup(f, func(key datastore.Key, value []byte) error {
+ var keys, logs, kbytes, vbytes uint64
+ clean, err := backupds.ReadBackup(f, func(key datastore.Key, value []byte, log bool) error {
+ if log {
+ logs++
+ }
keys++
kbytes += uint64(len(key.String()))
vbytes += uint64(len(value))
@@ -184,7 +194,9 @@ var datastoreBackupStatCmd = &cli.Command{
return err
}
+ fmt.Println("Truncated: ", !clean)
fmt.Println("Keys: ", keys)
+	fmt.Println("Log values: ", logs)
fmt.Println("Key bytes: ", units.BytesSize(float64(kbytes)))
fmt.Println("Value bytes: ", units.BytesSize(float64(vbytes)))
@@ -218,7 +230,7 @@ var datastoreBackupListCmd = &cli.Command{
defer f.Close() // nolint:errcheck
printKv := kvPrinter(cctx.Bool("top-level"), cctx.String("get-enc"))
- err = backupds.ReadBackup(f, func(key datastore.Key, value []byte) error {
+ _, err = backupds.ReadBackup(f, func(key datastore.Key, value []byte, _ bool) error {
return printKv(key.String(), value)
})
if err != nil {
@@ -288,3 +300,76 @@ func printVal(enc string, val []byte) error {
return nil
}
+
+var datastoreRewriteCmd = &cli.Command{
+ Name: "rewrite",
+ Description: "rewrites badger datastore to compact it and possibly change params",
+ ArgsUsage: "source destination",
+ Action: func(cctx *cli.Context) error {
+ if cctx.NArg() != 2 {
+ return xerrors.Errorf("expected 2 arguments, got %d", cctx.NArg())
+ }
+ fromPath, err := homedir.Expand(cctx.Args().Get(0))
+ if err != nil {
+ return xerrors.Errorf("cannot get fromPath: %w", err)
+ }
+ toPath, err := homedir.Expand(cctx.Args().Get(1))
+ if err != nil {
+ return xerrors.Errorf("cannot get toPath: %w", err)
+ }
+
+ var (
+ from *badger.DB
+ to *badger.DB
+ )
+
+ // open the destination (to) store.
+ opts, err := repo.BadgerBlockstoreOptions(repo.UniversalBlockstore, toPath, false)
+ if err != nil {
+ return xerrors.Errorf("failed to get badger options: %w", err)
+ }
+ opts.SyncWrites = false
+ if to, err = badger.Open(opts.Options); err != nil {
+ return xerrors.Errorf("opening 'to' badger store: %w", err)
+ }
+
+ // open the source (from) store.
+ opts, err = repo.BadgerBlockstoreOptions(repo.UniversalBlockstore, fromPath, true)
+ if err != nil {
+ return xerrors.Errorf("failed to get badger options: %w", err)
+ }
+ if from, err = badger.Open(opts.Options); err != nil {
+ return xerrors.Errorf("opening 'from' datastore: %w", err)
+ }
+
+ pr, pw := io.Pipe()
+ errCh := make(chan error)
+ go func() {
+ bw := bufio.NewWriterSize(pw, 64<<20)
+ _, err := from.Backup(bw, 0)
+ _ = bw.Flush()
+ _ = pw.CloseWithError(err)
+ errCh <- err
+ }()
+ go func() {
+ err := to.Load(pr, 256)
+ errCh <- err
+ }()
+
+ err = <-errCh
+ if err != nil {
+ select {
+ case nerr := <-errCh:
+ err = multierr.Append(err, nerr)
+ default:
+ }
+ return err
+ }
+
+ err = <-errCh
+ if err != nil {
+ return err
+ }
+ return multierr.Append(from.Close(), to.Close())
+ },
+}
diff --git a/cmd/lotus-shed/dealtracker.go b/cmd/lotus-shed/dealtracker.go
deleted file mode 100644
index 8ded6bf4acd..00000000000
--- a/cmd/lotus-shed/dealtracker.go
+++ /dev/null
@@ -1,325 +0,0 @@
-package main
-
-import (
- "context"
- "encoding/json"
- "net"
- "net/http"
- "sync"
-
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/lotus/api"
- lcli "github.com/filecoin-project/lotus/cli"
- "github.com/ipfs/go-cid"
- "github.com/urfave/cli/v2"
-)
-
-type dealStatsServer struct {
- api api.FullNode
-}
-
-// Requested by @jbenet
-// How many epochs back to look at for dealstats
-var epochLookback = abi.ChainEpoch(10)
-
-// these lists grow continuously with the network
-// TODO: need to switch this to an LRU of sorts, to ensure refreshes
-var knownFiltered = new(sync.Map)
-var resolvedWallets = new(sync.Map)
-
-func init() {
- for _, a := range []string{
- "t0100", // client for genesis miner
- "t0101", // client for genesis miner
- "t0102", // client for genesis miner
- "t0112", // client for genesis miner
- "t0113", // client for genesis miner
- "t0114", // client for genesis miner
- "t1nslxql4pck5pq7hddlzym3orxlx35wkepzjkm3i", // SR1 dealbot wallet
- "t1stghxhdp2w53dym2nz2jtbpk6ccd4l2lxgmezlq", // SR1 dealbot wallet
- "t1mcr5xkgv4jdl3rnz77outn6xbmygb55vdejgbfi", // SR1 dealbot wallet
- "t1qiqdbbmrdalbntnuapriirduvxu5ltsc5mhy7si", // SR1 dealbot wallet
- } {
- a, err := address.NewFromString(a)
- if err != nil {
- panic(err)
- }
- knownFiltered.Store(a, true)
- }
-}
-
-type dealCountResp struct {
- Epoch int64 `json:"epoch"`
- Endpoint string `json:"endpoint"`
- Payload int64 `json:"payload"`
-}
-
-func (dss *dealStatsServer) handleStorageDealCount(w http.ResponseWriter, r *http.Request) {
-
- epoch, deals := dss.filteredDealList()
- if epoch == 0 {
- w.WriteHeader(500)
- return
- }
-
- if err := json.NewEncoder(w).Encode(&dealCountResp{
- Endpoint: "COUNT_DEALS",
- Payload: int64(len(deals)),
- Epoch: epoch,
- }); err != nil {
- log.Warnf("failed to write back deal count response: %s", err)
- return
- }
-}
-
-type dealAverageResp struct {
- Epoch int64 `json:"epoch"`
- Endpoint string `json:"endpoint"`
- Payload int64 `json:"payload"`
-}
-
-func (dss *dealStatsServer) handleStorageDealAverageSize(w http.ResponseWriter, r *http.Request) {
-
- epoch, deals := dss.filteredDealList()
- if epoch == 0 {
- w.WriteHeader(500)
- return
- }
-
- var totalBytes int64
- for _, d := range deals {
- totalBytes += int64(d.deal.Proposal.PieceSize.Unpadded())
- }
-
- if err := json.NewEncoder(w).Encode(&dealAverageResp{
- Endpoint: "AVERAGE_DEAL_SIZE",
- Payload: totalBytes / int64(len(deals)),
- Epoch: epoch,
- }); err != nil {
- log.Warnf("failed to write back deal average response: %s", err)
- return
- }
-}
-
-type dealTotalResp struct {
- Epoch int64 `json:"epoch"`
- Endpoint string `json:"endpoint"`
- Payload int64 `json:"payload"`
-}
-
-func (dss *dealStatsServer) handleStorageDealTotalReal(w http.ResponseWriter, r *http.Request) {
- epoch, deals := dss.filteredDealList()
- if epoch == 0 {
- w.WriteHeader(500)
- return
- }
-
- var totalBytes int64
- for _, d := range deals {
- totalBytes += int64(d.deal.Proposal.PieceSize.Unpadded())
- }
-
- if err := json.NewEncoder(w).Encode(&dealTotalResp{
- Endpoint: "DEAL_BYTES",
- Payload: totalBytes,
- Epoch: epoch,
- }); err != nil {
- log.Warnf("failed to write back deal average response: %s", err)
- return
- }
-
-}
-
-type clientStatsOutput struct {
- Epoch int64 `json:"epoch"`
- Endpoint string `json:"endpoint"`
- Payload []*clientStats `json:"payload"`
-}
-
-type clientStats struct {
- Client address.Address `json:"client"`
- DataSize int64 `json:"data_size"`
- NumCids int `json:"num_cids"`
- NumDeals int `json:"num_deals"`
- NumMiners int `json:"num_miners"`
-
- cids map[cid.Cid]bool
- providers map[address.Address]bool
-}
-
-func (dss *dealStatsServer) handleStorageClientStats(w http.ResponseWriter, r *http.Request) {
- epoch, deals := dss.filteredDealList()
- if epoch == 0 {
- w.WriteHeader(500)
- return
- }
-
- stats := make(map[address.Address]*clientStats)
-
- for _, d := range deals {
-
- st, ok := stats[d.deal.Proposal.Client]
- if !ok {
- st = &clientStats{
- Client: d.resolvedWallet,
- cids: make(map[cid.Cid]bool),
- providers: make(map[address.Address]bool),
- }
- stats[d.deal.Proposal.Client] = st
- }
-
- st.DataSize += int64(d.deal.Proposal.PieceSize.Unpadded())
- st.cids[d.deal.Proposal.PieceCID] = true
- st.providers[d.deal.Proposal.Provider] = true
- st.NumDeals++
- }
-
- out := clientStatsOutput{
- Epoch: epoch,
- Endpoint: "CLIENT_DEAL_STATS",
- Payload: make([]*clientStats, 0, len(stats)),
- }
- for _, cs := range stats {
- cs.NumCids = len(cs.cids)
- cs.NumMiners = len(cs.providers)
- out.Payload = append(out.Payload, cs)
- }
-
- if err := json.NewEncoder(w).Encode(out); err != nil {
- log.Warnf("failed to write back client stats response: %s", err)
- return
- }
-}
-
-type dealInfo struct {
- deal api.MarketDeal
- resolvedWallet address.Address
-}
-
-// filteredDealList returns the current epoch and a list of filtered deals
-// on error returns an epoch of 0
-func (dss *dealStatsServer) filteredDealList() (int64, map[string]dealInfo) {
- ctx := context.Background()
-
- head, err := dss.api.ChainHead(ctx)
- if err != nil {
- log.Warnf("failed to get chain head: %s", err)
- return 0, nil
- }
-
- head, err = dss.api.ChainGetTipSetByHeight(ctx, head.Height()-epochLookback, head.Key())
- if err != nil {
- log.Warnf("failed to walk back %s epochs: %s", epochLookback, err)
- return 0, nil
- }
-
- // Disabled as per @pooja's request
- //
- // // Exclude any address associated with a miner
- // miners, err := dss.api.StateListMiners(ctx, head.Key())
- // if err != nil {
- // log.Warnf("failed to get miner list: %s", err)
- // return 0, nil
- // }
- // for _, m := range miners {
- // info, err := dss.api.StateMinerInfo(ctx, m, head.Key())
- // if err != nil {
- // log.Warnf("failed to get info for known miner '%s': %s", m, err)
- // continue
- // }
-
- // knownFiltered.Store(info.Owner, true)
- // knownFiltered.Store(info.Worker, true)
- // for _, a := range info.ControlAddresses {
- // knownFiltered.Store(a, true)
- // }
- // }
-
- deals, err := dss.api.StateMarketDeals(ctx, head.Key())
- if err != nil {
- log.Warnf("failed to get market deals: %s", err)
- return 0, nil
- }
-
- ret := make(map[string]dealInfo, len(deals))
- for dealKey, d := range deals {
-
- // Counting no-longer-active deals as per Pooja's request
- // // https://github.com/filecoin-project/specs-actors/blob/v0.9.9/actors/builtin/market/deal.go#L81-L85
- // if d.State.SectorStartEpoch < 0 {
- // continue
- // }
-
- if _, isFiltered := knownFiltered.Load(d.Proposal.Client); isFiltered {
- continue
- }
-
- if _, wasSeen := resolvedWallets.Load(d.Proposal.Client); !wasSeen {
- w, err := dss.api.StateAccountKey(ctx, d.Proposal.Client, head.Key())
- if err != nil {
- log.Warnf("failed to resolve id '%s' to wallet address: %s", d.Proposal.Client, err)
- continue
- } else {
- resolvedWallets.Store(d.Proposal.Client, w)
- }
- }
-
- w, _ := resolvedWallets.Load(d.Proposal.Client)
- if _, isFiltered := knownFiltered.Load(w); isFiltered {
- continue
- }
-
- ret[dealKey] = dealInfo{
- deal: d,
- resolvedWallet: w.(address.Address),
- }
- }
-
- return int64(head.Height()), ret
-}
-
-var serveDealStatsCmd = &cli.Command{
- Name: "serve-deal-stats",
- Flags: []cli.Flag{},
- Action: func(cctx *cli.Context) error {
- api, closer, err := lcli.GetFullNodeAPI(cctx)
- if err != nil {
- return err
- }
-
- defer closer()
- ctx := lcli.ReqContext(cctx)
-
- _ = ctx
-
- dss := &dealStatsServer{api}
-
- mux := &http.ServeMux{}
- mux.HandleFunc("/api/storagedeal/count", dss.handleStorageDealCount)
- mux.HandleFunc("/api/storagedeal/averagesize", dss.handleStorageDealAverageSize)
- mux.HandleFunc("/api/storagedeal/totalreal", dss.handleStorageDealTotalReal)
- mux.HandleFunc("/api/storagedeal/clientstats", dss.handleStorageClientStats)
-
- s := &http.Server{
- Addr: ":7272",
- Handler: mux,
- }
-
- go func() {
- <-ctx.Done()
- if err := s.Shutdown(context.TODO()); err != nil {
- log.Error(err)
- }
- }()
-
- list, err := net.Listen("tcp", ":7272") // nolint
- if err != nil {
- panic(err)
- }
-
- log.Warnf("deal-stat server listening on %s\n== NOTE: QUERIES ARE EXPENSIVE - YOU MUST FRONT-CACHE THIS SERVICE\n", list.Addr().String())
-
- return s.Serve(list)
- },
-}
diff --git a/cmd/lotus-shed/election.go b/cmd/lotus-shed/election.go
new file mode 100644
index 00000000000..d49d5c04f4f
--- /dev/null
+++ b/cmd/lotus-shed/election.go
@@ -0,0 +1,227 @@
+package main
+
+import (
+ "context"
+ "encoding/binary"
+ "fmt"
+ "math/rand"
+
+ "github.com/filecoin-project/lotus/api/v0api"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/gen"
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+ builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+)
+
+var electionCmd = &cli.Command{
+ Name: "election",
+ Usage: "Commands related to leader election",
+ Subcommands: []*cli.Command{
+ electionRunDummy,
+ electionEstimate,
+ electionBacktest,
+ },
+}
+
+var electionRunDummy = &cli.Command{
+ Name: "run-dummy",
+ Usage: "Runs dummy elections with given power",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "network-power",
+ Usage: "network storage power",
+ },
+ &cli.StringFlag{
+ Name: "miner-power",
+ Usage: "miner storage power",
+ },
+ &cli.Uint64Flag{
+ Name: "seed",
+ Usage: "rand number",
+ Value: 0,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ ctx := lcli.ReqContext(cctx)
+ minerPow, err := types.BigFromString(cctx.String("miner-power"))
+ if err != nil {
+ return xerrors.Errorf("decoding miner-power: %w", err)
+ }
+ networkPow, err := types.BigFromString(cctx.String("network-power"))
+ if err != nil {
+ return xerrors.Errorf("decoding network-power: %w", err)
+ }
+
+ ep := &types.ElectionProof{}
+ ep.VRFProof = make([]byte, 32)
+ seed := cctx.Uint64("seed")
+ if seed == 0 {
+ seed = rand.Uint64()
+ }
+ binary.BigEndian.PutUint64(ep.VRFProof, seed)
+
+ i := uint64(0)
+ for {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ binary.BigEndian.PutUint64(ep.VRFProof[8:], i)
+ j := ep.ComputeWinCount(minerPow, networkPow)
+ _, err := fmt.Printf("%t, %d\n", j != 0, j)
+ if err != nil {
+ return err
+ }
+ i++
+ }
+ },
+}
+
+var electionEstimate = &cli.Command{
+ Name: "estimate",
+ Usage: "Estimate elections with given power",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "network-power",
+ Usage: "network storage power",
+ },
+ &cli.StringFlag{
+ Name: "miner-power",
+ Usage: "miner storage power",
+ },
+ &cli.Uint64Flag{
+ Name: "seed",
+ Usage: "rand number",
+ Value: 0,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ minerPow, err := types.BigFromString(cctx.String("miner-power"))
+ if err != nil {
+ return xerrors.Errorf("decoding miner-power: %w", err)
+ }
+ networkPow, err := types.BigFromString(cctx.String("network-power"))
+ if err != nil {
+ return xerrors.Errorf("decoding network-power: %w", err)
+ }
+
+ ep := &types.ElectionProof{}
+ ep.VRFProof = make([]byte, 32)
+ seed := cctx.Uint64("seed")
+ if seed == 0 {
+ seed = rand.Uint64()
+ }
+ binary.BigEndian.PutUint64(ep.VRFProof, seed)
+
+ winYear := int64(0)
+ for i := 0; i < builtin2.EpochsInYear; i++ {
+ binary.BigEndian.PutUint64(ep.VRFProof[8:], uint64(i))
+ j := ep.ComputeWinCount(minerPow, networkPow)
+ winYear += j
+ }
+ winHour := winYear * builtin2.EpochsInHour / builtin2.EpochsInYear
+ winDay := winYear * builtin2.EpochsInDay / builtin2.EpochsInYear
+ winMonth := winYear * builtin2.EpochsInDay * 30 / builtin2.EpochsInYear
+ fmt.Println("winInHour, winInDay, winInMonth, winInYear")
+ fmt.Printf("%d, %d, %d, %d\n", winHour, winDay, winMonth, winYear)
+ return nil
+ },
+}
+
+var electionBacktest = &cli.Command{
+ Name: "backtest",
+ Usage: "Backtest elections with given miner",
+ ArgsUsage: "[minerAddress]",
+ Flags: []cli.Flag{
+ &cli.Uint64Flag{
+ Name: "height",
+ Usage: "blockchain head height",
+ },
+ &cli.IntFlag{
+ Name: "count",
+ Usage: "number of won elections to look for",
+ Value: 120,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return xerrors.Errorf("GetFullNodeAPI: %w", err)
+ }
+
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ var head *types.TipSet
+ if cctx.IsSet("height") {
+ head, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Uint64("height")), types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("ChainGetTipSetByHeight: %w", err)
+ }
+ } else {
+ head, err = api.ChainHead(ctx)
+ if err != nil {
+ return xerrors.Errorf("ChainHead: %w", err)
+ }
+ }
+
+ miner, err := address.NewFromString(cctx.Args().First())
+ if err != nil {
+ return xerrors.Errorf("miner address: %w", err)
+ }
+
+ count := cctx.Int("count")
+ if count < 1 {
+ return xerrors.Errorf("count: %d", count)
+ }
+
+ fmt.Println("height, winCount")
+ roundEnd := head.Height() + abi.ChainEpoch(1)
+ for i := 0; i < count; {
+ for round := head.Height() + abi.ChainEpoch(1); round <= roundEnd; round++ {
+ i++
+ win, err := backTestWinner(ctx, miner, round, head, api)
+ if err == nil && win != nil {
+ fmt.Printf("%d, %d\n", round, win.WinCount)
+ }
+ }
+
+ roundEnd = head.Height()
+ head, err = api.ChainGetTipSet(ctx, head.Parents())
+ if err != nil {
+ break
+ }
+ }
+ return nil
+ },
+}
+
+func backTestWinner(ctx context.Context, miner address.Address, round abi.ChainEpoch, ts *types.TipSet, api v0api.FullNode) (*types.ElectionProof, error) {
+ mbi, err := api.MinerGetBaseInfo(ctx, miner, round, ts.Key())
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get mining base info: %w", err)
+ }
+ if mbi == nil {
+ return nil, nil
+ }
+ if !mbi.EligibleForMining {
+ return nil, nil
+ }
+
+ brand := mbi.PrevBeaconEntry
+ bvals := mbi.BeaconEntries
+ if len(bvals) > 0 {
+ brand = bvals[len(bvals)-1]
+ }
+
+ winner, err := gen.IsRoundWinner(ctx, ts, round, miner, brand, mbi, api)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to check if we win next round: %w", err)
+ }
+
+ return winner, nil
+}
diff --git a/cmd/lotus-shed/export-car.go b/cmd/lotus-shed/export-car.go
new file mode 100644
index 00000000000..97e4fb6c608
--- /dev/null
+++ b/cmd/lotus-shed/export-car.go
@@ -0,0 +1,103 @@
+package main
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/ipfs/go-blockservice"
+ "github.com/ipfs/go-cid"
+ offline "github.com/ipfs/go-ipfs-exchange-offline"
+ format "github.com/ipfs/go-ipld-format"
+ "github.com/ipfs/go-merkledag"
+ "github.com/ipld/go-car"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/node/repo"
+)
+
+func carWalkFunc(nd format.Node) (out []*format.Link, err error) {
+ for _, link := range nd.Links() {
+ if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
+ continue
+ }
+ out = append(out, link)
+ }
+ return out, nil
+}
+
+var exportCarCmd = &cli.Command{
+ Name: "export-car",
+ Description: "Export a car from repo (requires node to be offline)",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "repo",
+ Value: "~/.lotus",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 2 {
+ return lcli.ShowHelp(cctx, fmt.Errorf("must specify file name and object"))
+ }
+
+ outfile := cctx.Args().First()
+ var roots []cid.Cid
+ for _, arg := range cctx.Args().Tail() {
+ c, err := cid.Decode(arg)
+ if err != nil {
+ return err
+ }
+ roots = append(roots, c)
+ }
+
+ ctx := lcli.ReqContext(cctx)
+
+ r, err := repo.NewFS(cctx.String("repo"))
+ if err != nil {
+ return xerrors.Errorf("opening fs repo: %w", err)
+ }
+
+ exists, err := r.Exists()
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return xerrors.Errorf("lotus repo doesn't exist")
+ }
+
+ lr, err := r.Lock(repo.FullNode)
+ if err != nil {
+ return err
+ }
+ defer lr.Close() //nolint:errcheck
+
+ fi, err := os.Create(outfile)
+ if err != nil {
+ return xerrors.Errorf("opening the output file: %w", err)
+ }
+
+ defer fi.Close() //nolint:errcheck
+
+ bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
+ if err != nil {
+ return fmt.Errorf("failed to open blockstore: %w", err)
+ }
+
+ defer func() {
+ if c, ok := bs.(io.Closer); ok {
+ if err := c.Close(); err != nil {
+ log.Warnf("failed to close blockstore: %s", err)
+ }
+ }
+ }()
+
+ dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
+ err = car.WriteCarWithWalker(ctx, dag, roots, fi, carWalkFunc)
+ if err != nil {
+ return err
+ }
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/export.go b/cmd/lotus-shed/export.go
index 3be49f0e030..e711ba2bb05 100644
--- a/cmd/lotus-shed/export.go
+++ b/cmd/lotus-shed/export.go
@@ -3,16 +3,17 @@ package main
import (
"context"
"fmt"
+ "io"
"os"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
+
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
- "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/node/repo"
)
@@ -71,19 +72,27 @@ var exportChainCmd = &cli.Command{
defer fi.Close() //nolint:errcheck
- ds, err := lr.Datastore("/chain")
+ bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
- return err
+ return fmt.Errorf("failed to open blockstore: %w", err)
}
- mds, err := lr.Datastore("/metadata")
+ defer func() {
+ if c, ok := bs.(io.Closer); ok {
+ if err := c.Close(); err != nil {
+ log.Warnf("failed to close blockstore: %s", err)
+ }
+ }
+ }()
+
+ mds, err := lr.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
- bs := blockstore.NewBlockstore(ds)
+ cs := store.NewChainStore(bs, bs, mds, nil, nil)
+ defer cs.Close() //nolint:errcheck
- cs := store.NewChainStore(bs, mds, nil, nil)
if err := cs.Load(); err != nil {
return err
}
diff --git a/cmd/lotus-shed/frozen-miners.go b/cmd/lotus-shed/frozen-miners.go
index 6b843f0d6ba..ed09c00c5a9 100644
--- a/cmd/lotus-shed/frozen-miners.go
+++ b/cmd/lotus-shed/frozen-miners.go
@@ -35,12 +35,6 @@ var frozenMinersCmd = &cli.Command{
if err != nil {
return err
}
- if ts == nil {
- ts, err = api.ChainHead(ctx)
- if err != nil {
- return err
- }
- }
queryEpoch := ts.Height()
diff --git a/cmd/lotus-shed/genesis-verify.go b/cmd/lotus-shed/genesis-verify.go
index 4b197c58f1d..32e4e14ad0b 100644
--- a/cmd/lotus-shed/genesis-verify.go
+++ b/cmd/lotus-shed/genesis-verify.go
@@ -17,6 +17,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin/account"
@@ -26,7 +27,6 @@ import (
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/lib/blockstore"
)
type addrInfo struct {
@@ -50,9 +50,10 @@ var genesisVerifyCmd = &cli.Command{
if !cctx.Args().Present() {
return fmt.Errorf("must pass genesis car file")
}
- bs := blockstore.NewBlockstore(datastore.NewMapDatastore())
+ bs := blockstore.FromDatastore(datastore.NewMapDatastore())
- cs := store.NewChainStore(bs, datastore.NewMapDatastore(), nil, nil)
+ cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), nil, nil)
+ defer cs.Close() //nolint:errcheck
cf := cctx.Args().Get(0)
f, err := os.Open(cf)
diff --git a/cmd/lotus-shed/import-car.go b/cmd/lotus-shed/import-car.go
index 9cbff953b16..4e465029f2d 100644
--- a/cmd/lotus-shed/import-car.go
+++ b/cmd/lotus-shed/import-car.go
@@ -1,6 +1,7 @@
package main
import (
+ "context"
"encoding/hex"
"fmt"
"io"
@@ -12,7 +13,6 @@ import (
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
- "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/node/repo"
)
@@ -25,6 +25,8 @@ var importCarCmd = &cli.Command{
return xerrors.Errorf("opening fs repo: %w", err)
}
+ ctx := context.TODO()
+
exists, err := r.Exists()
if err != nil {
return err
@@ -45,12 +47,18 @@ var importCarCmd = &cli.Command{
return xerrors.Errorf("opening the car file: %w", err)
}
- ds, err := lr.Datastore("/chain")
+ bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return err
}
- bs := blockstore.NewBlockstore(ds)
+ defer func() {
+ if c, ok := bs.(io.Closer); ok {
+ if err := c.Close(); err != nil {
+ log.Warnf("failed to close blockstore: %s", err)
+ }
+ }
+ }()
cr, err := car.NewCarReader(f)
if err != nil {
@@ -65,7 +73,7 @@ var importCarCmd = &cli.Command{
return err
}
fmt.Println()
- return ds.Close()
+ return nil
default:
if err := f.Close(); err != nil {
return err
@@ -94,6 +102,8 @@ var importObjectCmd = &cli.Command{
return xerrors.Errorf("opening fs repo: %w", err)
}
+ ctx := context.TODO()
+
exists, err := r.Exists()
if err != nil {
return err
@@ -108,12 +118,18 @@ var importObjectCmd = &cli.Command{
}
defer lr.Close() //nolint:errcheck
- ds, err := lr.Datastore("/chain")
+ bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
- return err
+ return fmt.Errorf("failed to open blockstore: %w", err)
}
- bs := blockstore.NewBlockstore(ds)
+ defer func() {
+ if c, ok := bs.(io.Closer); ok {
+ if err := c.Close(); err != nil {
+ log.Warnf("failed to close blockstore: %s", err)
+ }
+ }
+ }()
c, err := cid.Decode(cctx.Args().Get(0))
if err != nil {
diff --git a/cmd/lotus-shed/jwt.go b/cmd/lotus-shed/jwt.go
index 7fa1a18dd6f..e8853b419b6 100644
--- a/cmd/lotus-shed/jwt.go
+++ b/cmd/lotus-shed/jwt.go
@@ -15,7 +15,8 @@ import (
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-jsonrpc/auth"
- "github.com/filecoin-project/lotus/api/apistruct"
+
+ "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/modules"
)
@@ -98,19 +99,19 @@ var jwtTokenCmd = &cli.Command{
perms := []auth.Permission{}
if cctx.Bool("read") {
- perms = append(perms, apistruct.PermRead)
+ perms = append(perms, api.PermRead)
}
if cctx.Bool("write") {
- perms = append(perms, apistruct.PermWrite)
+ perms = append(perms, api.PermWrite)
}
if cctx.Bool("sign") {
- perms = append(perms, apistruct.PermSign)
+ perms = append(perms, api.PermSign)
}
if cctx.Bool("admin") {
- perms = append(perms, apistruct.PermAdmin)
+ perms = append(perms, api.PermAdmin)
}
p := modules.JwtPayload{
@@ -152,7 +153,7 @@ var jwtNewCmd = &cli.Command{
}
p := modules.JwtPayload{
- Allow: apistruct.AllPermissions,
+ Allow: api.AllPermissions,
}
token, err := jwt.Sign(&p, jwt.NewHS256(keyInfo.PrivateKey))
@@ -168,7 +169,7 @@ var jwtNewCmd = &cli.Command{
defer func() {
if err := file.Close(); err != nil {
- log.Warnf("failed to close output file: %w", err)
+ log.Warnf("failed to close output file: %v", err)
}
}()
diff --git a/cmd/lotus-shed/keyinfo.go b/cmd/lotus-shed/keyinfo.go
index 4dcd10cbfe8..3c99b5050a0 100644
--- a/cmd/lotus-shed/keyinfo.go
+++ b/cmd/lotus-shed/keyinfo.go
@@ -427,7 +427,7 @@ var keyinfoNewCmd = &cli.Command{
defer func() {
if err := file.Close(); err != nil {
- log.Warnf("failed to close output file: %w", err)
+ log.Warnf("failed to close output file: %v", err)
}
}()
diff --git a/cmd/lotus-shed/ledger.go b/cmd/lotus-shed/ledger.go
index ecb13ec645a..0e9c11742cc 100644
--- a/cmd/lotus-shed/ledger.go
+++ b/cmd/lotus-shed/ledger.go
@@ -6,12 +6,14 @@ import (
"strconv"
"strings"
+ "github.com/filecoin-project/lotus/api/v0api"
+
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/urfave/cli/v2"
ledgerfil "github.com/whyrusleeping/ledger-filecoin-go"
- "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger"
lcli "github.com/filecoin-project/lotus/cli"
@@ -25,6 +27,7 @@ var ledgerCmd = &cli.Command{
ledgerListAddressesCmd,
ledgerKeyInfoCmd,
ledgerSignTestCmd,
+ ledgerShowCmd,
},
}
@@ -40,7 +43,7 @@ var ledgerListAddressesCmd = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
- var api api.FullNode
+ var api v0api.FullNode
if cctx.Bool("print-balances") {
a, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
@@ -57,6 +60,7 @@ var ledgerListAddressesCmd = &cli.Command{
if err != nil {
return err
}
+ defer fl.Close() // nolint
end := 20
for i := 0; i < end; i++ {
@@ -166,6 +170,7 @@ var ledgerKeyInfoCmd = &cli.Command{
if err != nil {
return err
}
+ defer fl.Close() // nolint
p, err := parseHDPath(cctx.Args().First())
if err != nil {
@@ -242,13 +247,46 @@ var ledgerSignTestCmd = &cli.Command{
if err != nil {
return err
}
+ fmt.Printf("Message: %x\n", b.RawData())
sig, err := fl.SignSECP256K1(p, b.RawData())
if err != nil {
return err
}
- fmt.Println(sig.SignatureBytes())
+ sigBytes := append([]byte{byte(crypto.SigTypeSecp256k1)}, sig.SignatureBytes()...)
+
+ fmt.Printf("Signature: %x\n", sigBytes)
+
+ return nil
+ },
+}
+
+var ledgerShowCmd = &cli.Command{
+ Name: "show",
+ ArgsUsage: "[hd path]",
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Args().Present() {
+ return cli.ShowCommandHelp(cctx, cctx.Command.Name)
+ }
+
+ fl, err := ledgerfil.FindLedgerFilecoinApp()
+ if err != nil {
+ return err
+ }
+ defer fl.Close() // nolint
+
+ p, err := parseHDPath(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ _, _, a, err := fl.ShowAddressPubKeySECP256K1(p)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(a)
return nil
},
diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go
index 8201ec1117d..e06b630800c 100644
--- a/cmd/lotus-shed/main.go
+++ b/cmd/lotus-shed/main.go
@@ -16,9 +16,11 @@ func main() {
logging.SetLogLevel("*", "INFO")
local := []*cli.Command{
+ base64Cmd,
base32Cmd,
base16Cmd,
bitFieldCmd,
+ cronWcCmd,
frozenMinersCmd,
keyinfoCmd,
jwtCmd,
@@ -30,22 +32,34 @@ func main() {
importObjectCmd,
commpToCidCmd,
fetchParamCmd,
+ postFindCmd,
proofsCmd,
verifRegCmd,
+ marketCmd,
miscCmd,
mpoolCmd,
genesisVerifyCmd,
mathCmd,
+ minerCmd,
mpoolStatsCmd,
exportChainCmd,
+ exportCarCmd,
consensusCmd,
- serveDealStatsCmd,
+ storageStatsCmd,
syncCmd,
stateTreePruneCmd,
datastoreCmd,
ledgerCmd,
sectorsCmd,
msgCmd,
+ electionCmd,
+ rpcCmd,
+ cidCmd,
+ blockmsgidCmd,
+ signaturesCmd,
+ actorCmd,
+ minerTypesCmd,
+ minerMultisigsCmd,
}
app := &cli.App{
diff --git a/cmd/lotus-shed/market.go b/cmd/lotus-shed/market.go
new file mode 100644
index 00000000000..e2e322784cb
--- /dev/null
+++ b/cmd/lotus-shed/market.go
@@ -0,0 +1,102 @@
+package main
+
+import (
+ "fmt"
+
+ lcli "github.com/filecoin-project/lotus/cli"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+)
+
+var marketCmd = &cli.Command{
+ Name: "market",
+ Usage: "Interact with the market actor",
+ Flags: []cli.Flag{},
+ Subcommands: []*cli.Command{
+ marketDealFeesCmd,
+ },
+}
+
+var marketDealFeesCmd = &cli.Command{
+ Name: "get-deal-fees",
+ Usage: "View the storage fees associated with a particular deal or storage provider",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "provider",
+ Usage: "provider whose outstanding fees you'd like to calculate",
+ },
+ &cli.IntFlag{
+ Name: "dealId",
+ Usage: "deal whose outstanding fees you'd like to calculate",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := lcli.ReqContext(cctx)
+
+ ts, err := lcli.LoadTipSet(ctx, cctx, api)
+ if err != nil {
+ return err
+ }
+
+ ht := ts.Height()
+
+ if cctx.IsSet("provider") {
+ p, err := address.NewFromString(cctx.String("provider"))
+ if err != nil {
+ return fmt.Errorf("failed to parse provider: %w", err)
+ }
+
+ deals, err := api.StateMarketDeals(ctx, ts.Key())
+ if err != nil {
+ return err
+ }
+
+ ef := big.Zero()
+ pf := big.Zero()
+ count := 0
+
+ for _, deal := range deals {
+ if deal.Proposal.Provider == p {
+ e, p := deal.Proposal.GetDealFees(ht)
+ ef = big.Add(ef, e)
+ pf = big.Add(pf, p)
+ count++
+ }
+ }
+
+ fmt.Println("Total deals: ", count)
+ fmt.Println("Total earned fees: ", ef)
+ fmt.Println("Total pending fees: ", pf)
+ fmt.Println("Total fees: ", big.Add(ef, pf))
+
+ return nil
+ }
+
+ if dealid := cctx.Int("dealId"); dealid != 0 {
+ deal, err := api.StateMarketStorageDeal(ctx, abi.DealID(dealid), ts.Key())
+ if err != nil {
+ return err
+ }
+
+ ef, pf := deal.Proposal.GetDealFees(ht)
+
+ fmt.Println("Earned fees: ", ef)
+ fmt.Println("Pending fees: ", pf)
+ fmt.Println("Total fees: ", big.Add(ef, pf))
+
+ return nil
+ }
+
+ return xerrors.New("must provide either --provider or --dealId flag")
+ },
+}
diff --git a/cmd/lotus-shed/math.go b/cmd/lotus-shed/math.go
index 434559f09a0..c6d4ed0c952 100644
--- a/cmd/lotus-shed/math.go
+++ b/cmd/lotus-shed/math.go
@@ -8,8 +8,10 @@ import (
"strings"
"github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/types"
+ miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
)
var mathCmd = &cli.Command{
@@ -17,6 +19,7 @@ var mathCmd = &cli.Command{
Usage: "utility commands around doing math on a list of numbers",
Subcommands: []*cli.Command{
mathSumCmd,
+ mathAggFeesCmd,
},
}
@@ -101,3 +104,30 @@ var mathSumCmd = &cli.Command{
return nil
},
}
+
+var mathAggFeesCmd = &cli.Command{
+ Name: "agg-fees",
+ Flags: []cli.Flag{
+ &cli.IntFlag{
+ Name: "size",
+ Required: true,
+ },
+ &cli.StringFlag{
+ Name: "base-fee",
+ Usage: "baseFee aFIL",
+ Required: true,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ as := cctx.Int("size")
+
+ bf, err := types.BigFromString(cctx.String("base-fee"))
+ if err != nil {
+ return xerrors.Errorf("parsing basefee: %w", err)
+ }
+
+ fmt.Println(types.FIL(miner5.AggregateNetworkFee(as, bf)))
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/mempool-stats.go b/cmd/lotus-shed/mempool-stats.go
index bc4a801f0eb..597ba03936a 100644
--- a/cmd/lotus-shed/mempool-stats.go
+++ b/cmd/lotus-shed/mempool-stats.go
@@ -8,7 +8,7 @@ import (
"contrib.go.opencensus.io/exporter/prometheus"
"github.com/ipfs/go-cid"
- logging "github.com/ipfs/go-log"
+ logging "github.com/ipfs/go-log/v2"
"github.com/urfave/cli/v2"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
diff --git a/cmd/lotus-shed/miner-multisig.go b/cmd/lotus-shed/miner-multisig.go
new file mode 100644
index 00000000000..d9f15809021
--- /dev/null
+++ b/cmd/lotus-shed/miner-multisig.go
@@ -0,0 +1,388 @@
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
+
+ msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+)
+
+var minerMultisigsCmd = &cli.Command{
+ Name: "miner-multisig",
+ Description: "a collection of utilities for using multisigs as owner addresses of miners",
+ Subcommands: []*cli.Command{
+ mmProposeWithdrawBalance,
+ mmApproveWithdrawBalance,
+ mmProposeChangeOwner,
+ mmApproveChangeOwner,
+ },
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "from",
+ Usage: "specify address to send message from",
+ Required: true,
+ },
+ &cli.StringFlag{
+ Name: "multisig",
+ Usage: "specify multisig that will receive the message",
+ Required: true,
+ },
+ &cli.StringFlag{
+ Name: "miner",
+ Usage: "specify miner being acted upon",
+ Required: true,
+ },
+ },
+}
+
+var mmProposeWithdrawBalance = &cli.Command{
+ Name: "propose-withdraw",
+ Usage: "Propose to withdraw FIL from the miner",
+ ArgsUsage: "[amount]",
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Args().Present() {
+ return fmt.Errorf("must pass amount to withdraw")
+ }
+
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := lcli.ReqContext(cctx)
+
+ multisigAddr, sender, minerAddr, err := getInputs(cctx)
+ if err != nil {
+ return err
+ }
+
+ val, err := types.ParseFIL(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ sp, err := actors.SerializeParams(&miner5.WithdrawBalanceParams{
+ AmountRequested: abi.TokenAmount(val),
+ })
+ if err != nil {
+ return err
+ }
+
+ pcid, err := api.MsigPropose(ctx, multisigAddr, minerAddr, big.Zero(), sender, uint64(miner.Methods.WithdrawBalance), sp)
+ if err != nil {
+ return xerrors.Errorf("proposing message: %w", err)
+ }
+
+ fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid)
+
+ // wait for it to get mined into a block
+ wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ // check it executed successfully
+ if wait.Receipt.ExitCode != 0 {
+ fmt.Fprintln(cctx.App.Writer, "Propose owner change tx failed!")
+ return err
+ }
+
+ var retval msig5.ProposeReturn
+ if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil {
+ return fmt.Errorf("failed to unmarshal propose return value: %w", err)
+ }
+
+ fmt.Printf("Transaction ID: %d\n", retval.TxnID)
+ if retval.Applied {
+ fmt.Printf("Transaction was executed during propose\n")
+ fmt.Printf("Exit Code: %d\n", retval.Code)
+ fmt.Printf("Return Value: %x\n", retval.Ret)
+ }
+
+ return nil
+ },
+}
+
+var mmApproveWithdrawBalance = &cli.Command{
+ Name: "approve-withdraw",
+ Usage: "Approve to withdraw FIL from the miner",
+ ArgsUsage: "[amount txnId proposer]",
+ Action: func(cctx *cli.Context) error {
+ if cctx.NArg() != 3 {
+ return fmt.Errorf("must pass amount, txn Id, and proposer address")
+ }
+
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := lcli.ReqContext(cctx)
+
+ multisigAddr, sender, minerAddr, err := getInputs(cctx)
+ if err != nil {
+ return err
+ }
+
+ val, err := types.ParseFIL(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ sp, err := actors.SerializeParams(&miner5.WithdrawBalanceParams{
+ AmountRequested: abi.TokenAmount(val),
+ })
+ if err != nil {
+ return err
+ }
+
+ txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ proposer, err := address.NewFromString(cctx.Args().Get(2))
+ if err != nil {
+ return err
+ }
+
+ acid, err := api.MsigApproveTxnHash(ctx, multisigAddr, txid, proposer, minerAddr, big.Zero(), sender, uint64(miner.Methods.WithdrawBalance), sp)
+ if err != nil {
+ return xerrors.Errorf("approving message: %w", err)
+ }
+
+ fmt.Fprintln(cctx.App.Writer, "Approve Message CID:", acid)
+
+ // wait for it to get mined into a block
+ wait, err := api.StateWaitMsg(ctx, acid, build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ // check it executed successfully
+ if wait.Receipt.ExitCode != 0 {
+ fmt.Fprintln(cctx.App.Writer, "Approve owner change tx failed!")
+ return err
+ }
+
+ var retval msig5.ApproveReturn
+ if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil {
+ return fmt.Errorf("failed to unmarshal approve return value: %w", err)
+ }
+
+ if retval.Applied {
+ fmt.Printf("Transaction was executed with the approve\n")
+ fmt.Printf("Exit Code: %d\n", retval.Code)
+ fmt.Printf("Return Value: %x\n", retval.Ret)
+ } else {
+ fmt.Println("Transaction was approved, but not executed")
+ }
+ return nil
+ },
+}
+
+var mmProposeChangeOwner = &cli.Command{
+ Name: "propose-change-owner",
+ Usage: "Propose an owner address change",
+ ArgsUsage: "[newOwner]",
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Args().Present() {
+ return fmt.Errorf("must pass new owner address")
+ }
+
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := lcli.ReqContext(cctx)
+
+ multisigAddr, sender, minerAddr, err := getInputs(cctx)
+ if err != nil {
+ return err
+ }
+
+ na, err := address.NewFromString(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ mi, err := api.StateMinerInfo(ctx, minerAddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ if mi.Owner == newAddr {
+ return fmt.Errorf("owner address already set to %s", na)
+ }
+
+ sp, err := actors.SerializeParams(&newAddr)
+ if err != nil {
+ return xerrors.Errorf("serializing params: %w", err)
+ }
+
+ pcid, err := api.MsigPropose(ctx, multisigAddr, minerAddr, big.Zero(), sender, uint64(miner.Methods.ChangeOwnerAddress), sp)
+ if err != nil {
+ return xerrors.Errorf("proposing message: %w", err)
+ }
+
+ fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid)
+
+ // wait for it to get mined into a block
+ wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ // check it executed successfully
+ if wait.Receipt.ExitCode != 0 {
+ fmt.Fprintln(cctx.App.Writer, "Propose owner change tx failed!")
+ return err
+ }
+
+ var retval msig5.ProposeReturn
+ if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil {
+ return fmt.Errorf("failed to unmarshal propose return value: %w", err)
+ }
+
+ fmt.Printf("Transaction ID: %d\n", retval.TxnID)
+ if retval.Applied {
+ fmt.Printf("Transaction was executed during propose\n")
+ fmt.Printf("Exit Code: %d\n", retval.Code)
+ fmt.Printf("Return Value: %x\n", retval.Ret)
+ }
+ return nil
+ },
+}
+
+var mmApproveChangeOwner = &cli.Command{
+ Name: "approve-change-owner",
+ Usage: "Approve an owner address change",
+ ArgsUsage: "[newOwner txnId proposer]",
+ Action: func(cctx *cli.Context) error {
+ if cctx.NArg() != 3 {
+ return fmt.Errorf("must pass new owner address, txn Id, and proposer address")
+ }
+
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := lcli.ReqContext(cctx)
+
+ multisigAddr, sender, minerAddr, err := getInputs(cctx)
+ if err != nil {
+ return err
+ }
+
+ na, err := address.NewFromString(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ proposer, err := address.NewFromString(cctx.Args().Get(2))
+ if err != nil {
+ return err
+ }
+
+ mi, err := api.StateMinerInfo(ctx, minerAddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ if mi.Owner == newAddr {
+ return fmt.Errorf("owner address already set to %s", na)
+ }
+
+ sp, err := actors.SerializeParams(&newAddr)
+ if err != nil {
+ return xerrors.Errorf("serializing params: %w", err)
+ }
+
+ acid, err := api.MsigApproveTxnHash(ctx, multisigAddr, txid, proposer, minerAddr, big.Zero(), sender, uint64(miner.Methods.ChangeOwnerAddress), sp)
+ if err != nil {
+ return xerrors.Errorf("approving message: %w", err)
+ }
+
+ fmt.Fprintln(cctx.App.Writer, "Approve Message CID:", acid)
+
+ // wait for it to get mined into a block
+ wait, err := api.StateWaitMsg(ctx, acid, build.MessageConfidence)
+ if err != nil {
+ return err
+ }
+
+ // check it executed successfully
+ if wait.Receipt.ExitCode != 0 {
+ fmt.Fprintln(cctx.App.Writer, "Approve owner change tx failed!")
+ return err
+ }
+
+ var retval msig5.ApproveReturn
+ if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil {
+ return fmt.Errorf("failed to unmarshal approve return value: %w", err)
+ }
+
+ if retval.Applied {
+ fmt.Printf("Transaction was executed with the approve\n")
+ fmt.Printf("Exit Code: %d\n", retval.Code)
+ fmt.Printf("Return Value: %x\n", retval.Ret)
+ } else {
+ fmt.Println("Transaction was approved, but not executed")
+ }
+ return nil
+ },
+}
+
+func getInputs(cctx *cli.Context) (address.Address, address.Address, address.Address, error) {
+ multisigAddr, err := address.NewFromString(cctx.String("multisig"))
+ if err != nil {
+ return address.Undef, address.Undef, address.Undef, err
+ }
+
+ sender, err := address.NewFromString(cctx.String("from"))
+ if err != nil {
+ return address.Undef, address.Undef, address.Undef, err
+ }
+
+ minerAddr, err := address.NewFromString(cctx.String("miner"))
+ if err != nil {
+ return address.Undef, address.Undef, address.Undef, err
+ }
+
+ return multisigAddr, sender, minerAddr, nil
+}
diff --git a/cmd/lotus-shed/miner-types.go b/cmd/lotus-shed/miner-types.go
new file mode 100644
index 00000000000..19a30c4b99a
--- /dev/null
+++ b/cmd/lotus-shed/miner-types.go
@@ -0,0 +1,154 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math/big"
+
+ big2 "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
+ "github.com/filecoin-project/lotus/chain/state"
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
+ "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
+ "github.com/filecoin-project/lotus/node/repo"
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
+ "github.com/filecoin-project/specs-actors/v4/actors/util/adt"
+ "github.com/ipfs/go-cid"
+ cbor "github.com/ipfs/go-ipld-cbor"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+)
+
+var minerTypesCmd = &cli.Command{
+ Name: "miner-types",
+ Usage: "Scrape state to report on how many miners of each WindowPoStProofType exist", Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "repo",
+ Value: "~/.lotus",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ ctx := context.TODO()
+
+ if !cctx.Args().Present() {
+ return fmt.Errorf("must pass state root")
+ }
+
+ sroot, err := cid.Decode(cctx.Args().First())
+ if err != nil {
+ return fmt.Errorf("failed to parse input: %w", err)
+ }
+
+ fsrepo, err := repo.NewFS(cctx.String("repo"))
+ if err != nil {
+ return err
+ }
+
+ lkrepo, err := fsrepo.Lock(repo.FullNode)
+ if err != nil {
+ return err
+ }
+
+ defer lkrepo.Close() //nolint:errcheck
+
+ bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore)
+ if err != nil {
+ return fmt.Errorf("failed to open blockstore: %w", err)
+ }
+
+ defer func() {
+ if c, ok := bs.(io.Closer); ok {
+ if err := c.Close(); err != nil {
+ log.Warnf("failed to close blockstore: %s", err)
+ }
+ }
+ }()
+
+ mds, err := lkrepo.Datastore(context.Background(), "/metadata")
+ if err != nil {
+ return err
+ }
+
+ cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
+ defer cs.Close() //nolint:errcheck
+
+ cst := cbor.NewCborStore(bs)
+ store := adt.WrapStore(ctx, cst)
+
+ tree, err := state.LoadStateTree(cst, sroot)
+ if err != nil {
+ return err
+ }
+
+ typeMap := make(map[abi.RegisteredPoStProof]int64)
+ pa, err := tree.GetActor(power.Address)
+ if err != nil {
+ return err
+ }
+
+ ps, err := power.Load(store, pa)
+ if err != nil {
+ return err
+ }
+
+ dc := 0
+ dz := power.Claim{
+ RawBytePower: abi.NewStoragePower(0),
+ QualityAdjPower: abi.NewStoragePower(0),
+ }
+
+ err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
+ if act.Code == builtin4.StorageMinerActorCodeID {
+ ms, err := miner.Load(store, act)
+ if err != nil {
+ return err
+ }
+
+ mi, err := ms.Info()
+ if err != nil {
+ return err
+ }
+
+ if mi.WindowPoStProofType == abi.RegisteredPoStProof_StackedDrgWindow64GiBV1 {
+ mp, f, err := ps.MinerPower(addr)
+ if err != nil {
+ return err
+ }
+
+ if f && mp.RawBytePower.Cmp(big.NewInt(10<<40)) >= 0 && mp.RawBytePower.Cmp(big.NewInt(20<<40)) < 0 {
+ dc = dc + 1
+ dz.RawBytePower = big2.Add(dz.RawBytePower, mp.RawBytePower)
+ dz.QualityAdjPower = big2.Add(dz.QualityAdjPower, mp.QualityAdjPower)
+ }
+ }
+
+ c, f := typeMap[mi.WindowPoStProofType]
+ if !f {
+ typeMap[mi.WindowPoStProofType] = 1
+ } else {
+ typeMap[mi.WindowPoStProofType] = c + 1
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return xerrors.Errorf("failed to loop over actors: %w", err)
+ }
+
+ for k, v := range typeMap {
+ fmt.Println("Type:", k, " Count: ", v)
+ }
+
+ fmt.Println("Mismatched power (raw, QA): ", dz.RawBytePower, " ", dz.QualityAdjPower)
+ fmt.Println("Mismatched 64 GiB miner count: ", dc)
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/miner.go b/cmd/lotus-shed/miner.go
new file mode 100644
index 00000000000..ec5a445f94d
--- /dev/null
+++ b/cmd/lotus-shed/miner.go
@@ -0,0 +1,113 @@
+package main
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/mitchellh/go-homedir"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+)
+
+var minerCmd = &cli.Command{
+ Name: "miner",
+ Usage: "miner-related utilities",
+ Subcommands: []*cli.Command{
+ minerUnpackInfoCmd,
+ },
+}
+
+var minerUnpackInfoCmd = &cli.Command{
+ Name: "unpack-info",
+ Usage: "unpack miner info all dump",
+ ArgsUsage: "[allinfo.txt] [dir]",
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 2 {
+ return xerrors.Errorf("expected 2 args")
+ }
+
+ src, err := homedir.Expand(cctx.Args().Get(0))
+ if err != nil {
+ return xerrors.Errorf("expand src: %w", err)
+ }
+
+ f, err := os.Open(src)
+ if err != nil {
+ return xerrors.Errorf("open file: %w", err)
+ }
+ defer f.Close() // nolint
+
+ dest, err := homedir.Expand(cctx.Args().Get(1))
+ if err != nil {
+ return xerrors.Errorf("expand dest: %w", err)
+ }
+
+ var outf *os.File
+
+ r := bufio.NewReader(f)
+ for {
+ l, _, err := r.ReadLine()
+ if err == io.EOF {
+ if outf != nil {
+ return outf.Close()
+ }
+ }
+ if err != nil {
+ return xerrors.Errorf("read line: %w", err)
+ }
+ sl := string(l)
+
+ if strings.HasPrefix(sl, "#") {
+ if strings.Contains(sl, "..") {
+ return xerrors.Errorf("bad name %s", sl)
+ }
+
+ if strings.HasPrefix(sl, "#: ") {
+ if outf != nil {
+ if err := outf.Close(); err != nil {
+ return xerrors.Errorf("close out file: %w", err)
+ }
+ }
+ p := filepath.Join(dest, sl[len("#: "):])
+ if err := os.MkdirAll(filepath.Dir(p), 0775); err != nil {
+ return xerrors.Errorf("mkdir: %w", err)
+ }
+ outf, err = os.Create(p)
+ if err != nil {
+ return xerrors.Errorf("create out file: %w", err)
+ }
+ continue
+ }
+
+ if strings.HasPrefix(sl, "##: ") {
+ if outf != nil {
+ if err := outf.Close(); err != nil {
+ return xerrors.Errorf("close out file: %w", err)
+ }
+ }
+ p := filepath.Join(dest, "Per Sector Infos", sl[len("##: "):])
+ if err := os.MkdirAll(filepath.Dir(p), 0775); err != nil {
+ return xerrors.Errorf("mkdir: %w", err)
+ }
+ outf, err = os.Create(p)
+ if err != nil {
+ return xerrors.Errorf("create out file: %w", err)
+ }
+ continue
+ }
+ }
+
+ if outf != nil {
+ if _, err := outf.Write(l); err != nil {
+ return xerrors.Errorf("write line: %w", err)
+ }
+ if _, err := outf.Write([]byte("\n")); err != nil {
+ return xerrors.Errorf("write line end: %w", err)
+ }
+ }
+ }
+ },
+}
diff --git a/cmd/lotus-shed/mpool.go b/cmd/lotus-shed/mpool.go
index d3660db6958..004bd99a6bd 100644
--- a/cmd/lotus-shed/mpool.go
+++ b/cmd/lotus-shed/mpool.go
@@ -15,6 +15,7 @@ var mpoolCmd = &cli.Command{
Flags: []cli.Flag{},
Subcommands: []*cli.Command{
minerSelectMsgsCmd,
+ mpoolClear,
},
}
@@ -66,3 +67,36 @@ var minerSelectMsgsCmd = &cli.Command{
return nil
},
}
+
+var mpoolClear = &cli.Command{
+ Name: "clear",
+ Usage: "Clear all pending messages from the mpool (USE WITH CARE)",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "local",
+ Usage: "also clear local messages",
+ },
+ &cli.BoolFlag{
+ Name: "really-do-it",
+ Usage: "must be specified for the action to take effect",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ really := cctx.Bool("really-do-it")
+ if !really {
+ //nolint:golint
+ return fmt.Errorf("--really-do-it must be specified for this action to have an effect; you have been warned")
+ }
+
+ local := cctx.Bool("local")
+
+ ctx := lcli.ReqContext(cctx)
+ return api.MpoolClear(ctx, local)
+ },
+}
diff --git a/cmd/lotus-shed/params.go b/cmd/lotus-shed/params.go
index 3f7e7b6fb7e..e45d9489c35 100644
--- a/cmd/lotus-shed/params.go
+++ b/cmd/lotus-shed/params.go
@@ -25,7 +25,7 @@ var fetchParamCmd = &cli.Command{
return err
}
sectorSize := uint64(sectorSizeInt)
- err = paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), sectorSize)
+ err = paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), sectorSize)
if err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
diff --git a/cmd/lotus-shed/postfind.go b/cmd/lotus-shed/postfind.go
new file mode 100644
index 00000000000..c8a4c990769
--- /dev/null
+++ b/cmd/lotus-shed/postfind.go
@@ -0,0 +1,123 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/specs-actors/v2/actors/builtin"
+ "github.com/urfave/cli/v2"
+)
+
+var postFindCmd = &cli.Command{
+ Name: "post-find",
+ Description: "return addresses of all miners who have over zero power and have posted in the last day",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "tipset",
+ Usage: "specify tipset state to search on",
+ },
+ &cli.BoolFlag{
+ Name: "verbose",
+ Usage: "get more frequent print updates",
+ },
+ &cli.BoolFlag{
+ Name: "withpower",
+ Usage: "only print addrs of miners with more than zero power",
+ },
+ &cli.IntFlag{
+ Name: "lookback",
+ Usage: "number of past epochs to search for post",
+ Value: 2880, //default 1 day
+ },
+ },
+ Action: func(c *cli.Context) error {
+ api, acloser, err := lcli.GetFullNodeAPI(c)
+ if err != nil {
+ return err
+ }
+ defer acloser()
+ ctx := lcli.ReqContext(c)
+ verbose := c.Bool("verbose")
+ withpower := c.Bool("withpower")
+
+ startTs, err := lcli.LoadTipSet(ctx, c, api)
+ if err != nil {
+ return err
+ }
+ stopEpoch := startTs.Height() - abi.ChainEpoch(c.Int("lookback"))
+ if verbose {
+ fmt.Printf("Collecting messages between %d and %d\n", startTs.Height(), stopEpoch)
+ }
+ // Get all messages over the last day
+ ts := startTs
+ msgs := make([]*types.Message, 0)
+ for ts.Height() > stopEpoch {
+ // Get messages on ts parent
+ next, err := api.ChainGetParentMessages(ctx, ts.Cids()[0])
+ if err != nil {
+ return err
+ }
+ msgs = append(msgs, messagesFromAPIMessages(next)...)
+
+ // Next ts
+ ts, err = api.ChainGetTipSet(ctx, ts.Parents())
+ if err != nil {
+ return err
+ }
+ if verbose && int64(ts.Height())%100 == 0 {
+ fmt.Printf("Collected messages back to height %d\n", ts.Height())
+ }
+ }
+ fmt.Printf("Loaded messages to height %d\n", ts.Height())
+
+ mAddrs, err := api.StateListMiners(ctx, startTs.Key())
+ if err != nil {
+ return err
+ }
+
+ minersToCheck := make(map[address.Address]struct{})
+ for _, mAddr := range mAddrs {
+ // if they have no power ignore. This filters out 14k inactive miners
+ // so we can do 100x fewer expensive message queries
+ if withpower {
+ power, err := api.StateMinerPower(ctx, mAddr, startTs.Key())
+ if err != nil {
+ return err
+ }
+ if power.MinerPower.RawBytePower.GreaterThan(big.Zero()) {
+ minersToCheck[mAddr] = struct{}{}
+ }
+ } else {
+ minersToCheck[mAddr] = struct{}{}
+ }
+ }
+ fmt.Printf("Loaded %d miners to check\n", len(minersToCheck))
+
+ postedMiners := make(map[address.Address]struct{})
+ for _, msg := range msgs {
+ _, shouldCheck := minersToCheck[msg.To]
+ _, seenBefore := postedMiners[msg.To]
+
+ if shouldCheck && !seenBefore {
+ if msg.Method == builtin.MethodsMiner.SubmitWindowedPoSt {
+ fmt.Printf("%s\n", msg.To)
+ postedMiners[msg.To] = struct{}{}
+ }
+ }
+ }
+ return nil
+ },
+}
+
+func messagesFromAPIMessages(apiMessages []lapi.Message) []*types.Message {
+ messages := make([]*types.Message, len(apiMessages))
+ for i, apiMessage := range apiMessages {
+ messages[i] = apiMessage.Message
+ }
+ return messages
+}
diff --git a/cmd/lotus-shed/pruning.go b/cmd/lotus-shed/pruning.go
index 6cf4f8c6f91..1afe76c4d38 100644
--- a/cmd/lotus-shed/pruning.go
+++ b/cmd/lotus-shed/pruning.go
@@ -3,20 +3,19 @@ package main
import (
"context"
"fmt"
+ "io"
"github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/lotus/chain/store"
- "github.com/filecoin-project/lotus/chain/vm"
- "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
- "github.com/filecoin-project/lotus/lib/blockstore"
- "github.com/filecoin-project/lotus/node/repo"
"github.com/ipfs/bbloom"
"github.com/ipfs/go-cid"
- "github.com/ipfs/go-datastore"
- "github.com/ipfs/go-datastore/query"
- dshelp "github.com/ipfs/go-ipfs-ds-help"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
+
+ badgerbs "github.com/filecoin-project/lotus/blockstore/badger"
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/vm"
+ "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
+ "github.com/filecoin-project/lotus/node/repo"
)
type cidSet interface {
@@ -132,37 +131,47 @@ var stateTreePruneCmd = &cli.Command{
defer lkrepo.Close() //nolint:errcheck
- ds, err := lkrepo.Datastore("/chain")
+ bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
- return err
+ return fmt.Errorf("failed to open blockstore: %w", err)
}
- defer ds.Close() //nolint:errcheck
+ defer func() {
+ if c, ok := bs.(io.Closer); ok {
+ if err := c.Close(); err != nil {
+ log.Warnf("failed to close blockstore: %s", err)
+ }
+ }
+ }()
+
+ // After migrating to native blockstores, this has been made
+ // database-specific.
+ badgbs, ok := bs.(*badgerbs.Blockstore)
+ if !ok {
+ return fmt.Errorf("only badger blockstores are supported")
+ }
- mds, err := lkrepo.Datastore("/metadata")
+ mds, err := lkrepo.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
defer mds.Close() //nolint:errcheck
+ const DiscardRatio = 0.2
if cctx.Bool("only-ds-gc") {
- gcds, ok := ds.(datastore.GCDatastore)
- if ok {
- fmt.Println("running datastore gc....")
- for i := 0; i < cctx.Int("gc-count"); i++ {
- if err := gcds.CollectGarbage(); err != nil {
- return xerrors.Errorf("datastore GC failed: %w", err)
- }
+ fmt.Println("running datastore gc....")
+ for i := 0; i < cctx.Int("gc-count"); i++ {
+ if err := badgbs.DB.RunValueLogGC(DiscardRatio); err != nil {
+ return xerrors.Errorf("datastore GC failed: %w", err)
}
- fmt.Println("gc complete!")
- return nil
}
- return fmt.Errorf("datastore doesnt support gc")
+ fmt.Println("gc complete!")
+ return nil
}
- bs := blockstore.NewBlockstore(ds)
+ cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
+ defer cs.Close() //nolint:errcheck
- cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil)
if err := cs.Load(); err != nil {
return fmt.Errorf("loading chainstore: %w", err)
}
@@ -182,7 +191,7 @@ var stateTreePruneCmd = &cli.Command{
rrLb := abi.ChainEpoch(cctx.Int64("keep-from-lookback"))
- if err := cs.WalkSnapshot(ctx, ts, rrLb, true, func(c cid.Cid) error {
+ if err := cs.WalkSnapshot(ctx, ts, rrLb, true, true, func(c cid.Cid) error {
if goodSet.Len()%20 == 0 {
fmt.Printf("\renumerating keep set: %d ", goodSet.Len())
}
@@ -199,63 +208,30 @@ var stateTreePruneCmd = &cli.Command{
return nil
}
- var b datastore.Batch
- var batchCount int
- markForRemoval := func(c cid.Cid) error {
- if b == nil {
- nb, err := ds.Batch()
- if err != nil {
- return fmt.Errorf("opening batch: %w", err)
- }
+ b := badgbs.DB.NewWriteBatch()
+ defer b.Cancel()
- b = nb
- }
- batchCount++
-
- if err := b.Delete(dshelp.MultihashToDsKey(c.Hash())); err != nil {
- return err
- }
-
- if batchCount > 100 {
- if err := b.Commit(); err != nil {
- return xerrors.Errorf("failed to commit batch deletes: %w", err)
- }
- b = nil
- batchCount = 0
- }
- return nil
+ markForRemoval := func(c cid.Cid) error {
+ return b.Delete(badgbs.StorageKey(nil, c))
}
- res, err := ds.Query(query.Query{KeysOnly: true})
+ keys, err := bs.AllKeysChan(context.Background())
if err != nil {
- return xerrors.Errorf("failed to query datastore: %w", err)
+ return xerrors.Errorf("failed to query blockstore: %w", err)
}
dupTo := cctx.Int("delete-up-to")
var deleteCount int
var goodHits int
- for {
- v, ok := res.NextSync()
- if !ok {
- break
- }
-
- bk, err := dshelp.BinaryFromDsKey(datastore.RawKey(v.Key[len("/blocks"):]))
- if err != nil {
- return xerrors.Errorf("failed to parse key: %w", err)
- }
-
- if goodSet.HasRaw(bk) {
+ for k := range keys {
+ if goodSet.HasRaw(k.Bytes()) {
goodHits++
continue
}
- nc := cid.NewCidV1(cid.Raw, bk)
-
- deleteCount++
- if err := markForRemoval(nc); err != nil {
- return fmt.Errorf("failed to remove cid %s: %w", nc, err)
+ if err := markForRemoval(k); err != nil {
+ return fmt.Errorf("failed to remove cid %s: %w", k, err)
}
if deleteCount%20 == 0 {
@@ -267,22 +243,17 @@ var stateTreePruneCmd = &cli.Command{
}
}
- if b != nil {
- if err := b.Commit(); err != nil {
- return xerrors.Errorf("failed to commit final batch delete: %w", err)
- }
+ if err := b.Flush(); err != nil {
+ return xerrors.Errorf("failed to flush final batch delete: %w", err)
}
- gcds, ok := ds.(datastore.GCDatastore)
- if ok {
- fmt.Println("running datastore gc....")
- for i := 0; i < cctx.Int("gc-count"); i++ {
- if err := gcds.CollectGarbage(); err != nil {
- return xerrors.Errorf("datastore GC failed: %w", err)
- }
+ fmt.Println("running datastore gc....")
+ for i := 0; i < cctx.Int("gc-count"); i++ {
+ if err := badgbs.DB.RunValueLogGC(DiscardRatio); err != nil {
+ return xerrors.Errorf("datastore GC failed: %w", err)
}
- fmt.Println("gc complete!")
}
+ fmt.Println("gc complete!")
return nil
},
diff --git a/cmd/lotus-shed/rpc.go b/cmd/lotus-shed/rpc.go
new file mode 100644
index 00000000000..81171916e14
--- /dev/null
+++ b/cmd/lotus-shed/rpc.go
@@ -0,0 +1,172 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "text/scanner"
+
+ "github.com/chzyer/readline"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/node/repo"
+)
+
+var rpcCmd = &cli.Command{
+ Name: "rpc",
+ Usage: "Interactive JsonPRC shell",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "miner",
+ },
+ &cli.StringFlag{
+ Name: "version",
+ Value: "v0",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ rt := repo.FullNode
+ if cctx.Bool("miner") {
+ rt = repo.StorageMiner
+ }
+
+ addr, headers, err := lcli.GetRawAPI(cctx, rt, cctx.String("version"))
+ if err != nil {
+ return err
+ }
+
+ u, err := url.Parse(addr)
+ if err != nil {
+ return xerrors.Errorf("parsing api URL: %w", err)
+ }
+
+ switch u.Scheme {
+ case "ws":
+ u.Scheme = "http"
+ case "wss":
+ u.Scheme = "https"
+ }
+
+ addr = u.String()
+
+ ctx := lcli.ReqContext(cctx)
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ afmt := lcli.NewAppFmt(cctx.App)
+
+ cs := readline.NewCancelableStdin(afmt.Stdin)
+ go func() {
+ <-ctx.Done()
+ cs.Close() // nolint:errcheck
+ }()
+
+ send := func(method, params string) error {
+ jreq, err := json.Marshal(struct {
+ Jsonrpc string `json:"jsonrpc"`
+ ID int `json:"id"`
+ Method string `json:"method"`
+ Params json.RawMessage `json:"params"`
+ }{
+ Jsonrpc: "2.0",
+ Method: "Filecoin." + method,
+ Params: json.RawMessage(params),
+ ID: 0,
+ })
+ if err != nil {
+ return err
+ }
+
+ req, err := http.NewRequest("POST", addr, bytes.NewReader(jreq))
+ if err != nil {
+ return err
+ }
+ req.Header = headers
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+
+ rb, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(string(rb))
+
+ if err := resp.Body.Close(); err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ if cctx.Args().Present() {
+ if cctx.Args().Len() > 2 {
+ return xerrors.Errorf("expected 1 or 2 arguments: method [params]")
+ }
+
+ params := cctx.Args().Get(1)
+ if params == "" {
+ // TODO: try to be smart and use zero-values for method
+ params = "[]"
+ }
+
+ return send(cctx.Args().Get(0), params)
+ }
+
+ cctx.App.Metadata["repoType"] = repo.FullNode
+ if err := lcli.VersionCmd.Action(cctx); err != nil {
+ return err
+ }
+ fmt.Println("Usage: > Method [Param1, Param2, ...]")
+
+ rl, err := readline.NewEx(&readline.Config{
+ Stdin: cs,
+ HistoryFile: "/tmp/lotusrpc.tmp",
+ Prompt: "> ",
+ EOFPrompt: "exit",
+ HistorySearchFold: true,
+
+ // TODO: Some basic auto completion
+ })
+ if err != nil {
+ return err
+ }
+
+ for {
+ line, err := rl.Readline()
+ if err == readline.ErrInterrupt {
+ if len(line) == 0 {
+ break
+ } else {
+ continue
+ }
+ } else if err == io.EOF {
+ break
+ }
+
+ var s scanner.Scanner
+ s.Init(strings.NewReader(line))
+ s.Scan()
+ method := s.TokenText()
+
+ s.Scan()
+ params := line[s.Position.Offset:]
+
+ if err := send(method, params); err != nil {
+ _, _ = fmt.Fprintf(os.Stderr, "%v", err)
+ }
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/sectors.go b/cmd/lotus-shed/sectors.go
index 2e78469fa3e..cf40e1152d0 100644
--- a/cmd/lotus-shed/sectors.go
+++ b/cmd/lotus-shed/sectors.go
@@ -6,6 +6,7 @@ import (
"golang.org/x/xerrors"
+ "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
@@ -25,6 +26,7 @@ var sectorsCmd = &cli.Command{
Flags: []cli.Flag{},
Subcommands: []*cli.Command{
terminateSectorCmd,
+ terminateSectorPenaltyEstimationCmd,
},
}
@@ -33,6 +35,10 @@ var terminateSectorCmd = &cli.Command{
Usage: "Forcefully terminate a sector (WARNING: This means losing power and pay a one-time termination penalty(including collateral) for the terminated sector)",
ArgsUsage: "[sectorNum1 sectorNum2 ...]",
Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "actor",
+ Usage: "specify the address of miner actor",
+ },
&cli.BoolFlag{
Name: "really-do-it",
Usage: "pass this flag if you know what you are doing",
@@ -43,6 +49,15 @@ var terminateSectorCmd = &cli.Command{
return fmt.Errorf("at least one sector must be specified")
}
+ var maddr address.Address
+ if act := cctx.String("actor"); act != "" {
+ var err error
+ maddr, err = address.NewFromString(act)
+ if err != nil {
+ return fmt.Errorf("parsing address %s: %w", act, err)
+ }
+ }
+
if !cctx.Bool("really-do-it") {
return fmt.Errorf("this is a command for advanced users, only use it if you are sure of what you are doing")
}
@@ -53,17 +68,19 @@ var terminateSectorCmd = &cli.Command{
}
defer closer()
- api, acloser, err := lcli.GetStorageMinerAPI(cctx)
- if err != nil {
- return err
- }
- defer acloser()
-
ctx := lcli.ReqContext(cctx)
- maddr, err := api.ActorAddress(ctx)
- if err != nil {
- return err
+ if maddr.Empty() {
+ api, acloser, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer acloser()
+
+ maddr, err = api.ActorAddress(ctx)
+ if err != nil {
+ return err
+ }
}
mi, err := nodeApi.StateMinerInfo(ctx, maddr, types.EmptyTSK)
@@ -131,3 +148,118 @@ var terminateSectorCmd = &cli.Command{
return nil
},
}
+
+func findPenaltyInInternalExecutions(prefix string, trace []types.ExecutionTrace) {
+ for _, im := range trace {
+ if im.Msg.To.String() == "f099" /*Burn actor*/ {
+ fmt.Printf("Estimated termination penalty: %s attoFIL\n", im.Msg.Value)
+ return
+ }
+ findPenaltyInInternalExecutions(prefix+"\t", im.Subcalls)
+ }
+}
+
+var terminateSectorPenaltyEstimationCmd = &cli.Command{
+ Name: "termination-estimate",
+ Usage: "Estimate the termination penalty",
+ ArgsUsage: "[sectorNum1 sectorNum2 ...]",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "actor",
+ Usage: "specify the address of miner actor",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() < 1 {
+ return fmt.Errorf("at least one sector must be specified")
+ }
+
+ var maddr address.Address
+ if act := cctx.String("actor"); act != "" {
+ var err error
+ maddr, err = address.NewFromString(act)
+ if err != nil {
+ return fmt.Errorf("parsing address %s: %w", act, err)
+ }
+ }
+
+ nodeApi, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := lcli.ReqContext(cctx)
+
+ if maddr.Empty() {
+ api, acloser, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer acloser()
+
+ maddr, err = api.ActorAddress(ctx)
+ if err != nil {
+ return err
+ }
+ }
+
+ mi, err := nodeApi.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ terminationDeclarationParams := []miner2.TerminationDeclaration{}
+
+ for _, sn := range cctx.Args().Slice() {
+ sectorNum, err := strconv.ParseUint(sn, 10, 64)
+ if err != nil {
+ return fmt.Errorf("could not parse sector number: %w", err)
+ }
+
+ sectorbit := bitfield.New()
+ sectorbit.Set(sectorNum)
+
+ loca, err := nodeApi.StateSectorPartition(ctx, maddr, abi.SectorNumber(sectorNum), types.EmptyTSK)
+ if err != nil {
+ return fmt.Errorf("get state sector partition %s", err)
+ }
+
+ para := miner2.TerminationDeclaration{
+ Deadline: loca.Deadline,
+ Partition: loca.Partition,
+ Sectors: sectorbit,
+ }
+
+ terminationDeclarationParams = append(terminationDeclarationParams, para)
+ }
+
+ terminateSectorParams := &miner2.TerminateSectorsParams{
+ Terminations: terminationDeclarationParams,
+ }
+
+ sp, err := actors.SerializeParams(terminateSectorParams)
+ if err != nil {
+ return xerrors.Errorf("serializing params: %w", err)
+ }
+
+ msg := &types.Message{
+ From: mi.Owner,
+ To: maddr,
+ Method: miner.Methods.TerminateSectors,
+
+ Value: big.Zero(),
+ Params: sp,
+ }
+
+ //TODO: 4667 add an option to give a more precise estimation with pending termination penalty excluded
+
+ invocResult, err := nodeApi.StateCall(ctx, msg, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("fail to state call: %w", err)
+ }
+
+ findPenaltyInInternalExecutions("\t", invocResult.ExecutionTrace.Subcalls)
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/signatures.go b/cmd/lotus-shed/signatures.go
new file mode 100644
index 00000000000..d287e0c3f18
--- /dev/null
+++ b/cmd/lotus-shed/signatures.go
@@ -0,0 +1,148 @@
+package main
+
+import (
+ "encoding/hex"
+ "fmt"
+ "strconv"
+
+ ffi "github.com/filecoin-project/filecoin-ffi"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/lotus/lib/sigs"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+)
+
+var signaturesCmd = &cli.Command{
+ Name: "signatures",
+ Usage: "tools involving signatures",
+ Subcommands: []*cli.Command{
+ sigsVerifyVoteCmd,
+ sigsVerifyBlsMsgsCmd,
+ },
+}
+
+var sigsVerifyBlsMsgsCmd = &cli.Command{
+ Name: "verify-bls",
+ Description: "given a block, verifies the bls signature of the messages in the block",
+ Usage: "",
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 1 {
+ return xerrors.Errorf("usage: ")
+ }
+
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ bc, err := cid.Decode(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ b, err := api.ChainGetBlock(ctx, bc)
+ if err != nil {
+ return err
+ }
+
+ ms, err := api.ChainGetBlockMessages(ctx, bc)
+ if err != nil {
+ return err
+ }
+
+ var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type
+ var pubks [][]byte
+
+ for _, m := range ms.BlsMessages {
+ sigCids = append(sigCids, m.Cid())
+
+ if m.From.Protocol() != address.BLS {
+ return xerrors.Errorf("address must be BLS address")
+ }
+
+ pubks = append(pubks, m.From.Payload())
+ }
+
+ msgsS := make([]ffi.Message, len(sigCids))
+ pubksS := make([]ffi.PublicKey, len(sigCids))
+ for i := 0; i < len(sigCids); i++ {
+ msgsS[i] = sigCids[i].Bytes()
+ copy(pubksS[i][:], pubks[i][:ffi.PublicKeyBytes])
+ }
+
+ sigS := new(ffi.Signature)
+ copy(sigS[:], b.BLSAggregate.Data[:ffi.SignatureBytes])
+
+ if len(sigCids) == 0 {
+ return nil
+ }
+
+ valid := ffi.HashVerify(sigS, msgsS, pubksS)
+ if !valid {
+ return xerrors.New("bls aggregate signature failed to verify")
+ }
+
+ fmt.Println("BLS siggys valid!")
+ return nil
+ },
+}
+
+var sigsVerifyVoteCmd = &cli.Command{
+ Name: "verify-vote",
+ Description: "can be used to verify signed votes being submitted for FILPolls",
+ Usage: " ",
+ Action: func(cctx *cli.Context) error {
+
+ if cctx.Args().Len() != 3 {
+ return xerrors.Errorf("usage: verify-vote ")
+ }
+
+ fip, err := strconv.ParseInt(cctx.Args().First(), 10, 64)
+ if err != nil {
+ return xerrors.Errorf("couldn't parse FIP number: %w", err)
+ }
+
+ addr, err := address.NewFromString(cctx.Args().Get(1))
+ if err != nil {
+ return xerrors.Errorf("couldn't parse signing address: %w", err)
+ }
+
+ sigBytes, err := hex.DecodeString(cctx.Args().Get(2))
+ if err != nil {
+ return xerrors.Errorf("couldn't parse sig: %w", err)
+ }
+
+ var sig crypto.Signature
+ if err := sig.UnmarshalBinary(sigBytes); err != nil {
+ return xerrors.Errorf("couldn't unmarshal sig: %w", err)
+ }
+
+ switch fip {
+ case 14:
+ approve := []byte("7 - Approve")
+
+ if sigs.Verify(&sig, addr, approve) == nil {
+ fmt.Println("valid vote for approving FIP-0014")
+ return nil
+ }
+
+ reject := []byte("7 - Reject")
+ if sigs.Verify(&sig, addr, reject) == nil {
+ fmt.Println("valid vote for rejecting FIP-0014")
+ return nil
+ }
+
+ return xerrors.Errorf("invalid vote for FIP-0014!")
+ default:
+ return xerrors.Errorf("unrecognized FIP number")
+ }
+ },
+}
diff --git a/cmd/lotus-shed/stateroot-stats.go b/cmd/lotus-shed/stateroot-stats.go
index 023f782bdd1..6d5d577089f 100644
--- a/cmd/lotus-shed/stateroot-stats.go
+++ b/cmd/lotus-shed/stateroot-stats.go
@@ -56,13 +56,6 @@ var staterootDiffsCmd = &cli.Command{
return err
}
- if ts == nil {
- ts, err = api.ChainHead(ctx)
- if err != nil {
- return err
- }
- }
-
fn := func(ts *types.TipSet) (cid.Cid, []cid.Cid) {
blk := ts.Blocks()[0]
strt := blk.ParentStateRoot
@@ -134,13 +127,6 @@ var staterootStatCmd = &cli.Command{
return err
}
- if ts == nil {
- ts, err = api.ChainHead(ctx)
- if err != nil {
- return err
- }
- }
-
var addrs []address.Address
for _, inp := range cctx.Args().Slice() {
diff --git a/cmd/lotus-shed/storage-stats.go b/cmd/lotus-shed/storage-stats.go
new file mode 100644
index 00000000000..a9a5744a6bd
--- /dev/null
+++ b/cmd/lotus-shed/storage-stats.go
@@ -0,0 +1,131 @@
+package main
+
+import (
+ "encoding/json"
+ corebig "math/big"
+ "os"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ filbig "github.com/filecoin-project/go-state-types/big"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/ipfs/go-cid"
+ "github.com/urfave/cli/v2"
+)
+
+// How many epochs back to look at for dealstats
+var defaultEpochLookback = abi.ChainEpoch(10)
+
+type networkTotalsOutput struct {
+ Epoch int64 `json:"epoch"`
+ Endpoint string `json:"endpoint"`
+ Payload networkTotals `json:"payload"`
+}
+
+type networkTotals struct {
+ QaNetworkPower filbig.Int `json:"total_qa_power"`
+ RawNetworkPower filbig.Int `json:"total_raw_capacity"`
+ CapacityCarryingData float64 `json:"capacity_fraction_carrying_data"`
+ UniqueCids int `json:"total_unique_cids"`
+ UniqueProviders int `json:"total_unique_providers"`
+ UniqueClients int `json:"total_unique_clients"`
+ TotalDeals int `json:"total_num_deals"`
+ TotalBytes int64 `json:"total_stored_data_size"`
+ FilplusTotalDeals int `json:"filplus_total_num_deals"`
+ FilplusTotalBytes int64 `json:"filplus_total_stored_data_size"`
+
+ seenClient map[address.Address]bool
+ seenProvider map[address.Address]bool
+ seenPieceCid map[cid.Cid]bool
+}
+
+var storageStatsCmd = &cli.Command{
+ Name: "storage-stats",
+ Usage: "Translates current lotus state into a json summary suitable for driving https://storage.filecoin.io/",
+ Flags: []cli.Flag{
+ &cli.Int64Flag{
+ Name: "height",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ ctx := lcli.ReqContext(cctx)
+
+ api, apiCloser, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer apiCloser()
+
+ head, err := api.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+
+ requestedHeight := cctx.Int64("height")
+ if requestedHeight > 0 {
+ head, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(requestedHeight), head.Key())
+ } else {
+ head, err = api.ChainGetTipSetByHeight(ctx, head.Height()-defaultEpochLookback, head.Key())
+ }
+ if err != nil {
+ return err
+ }
+
+ power, err := api.StateMinerPower(ctx, address.Address{}, head.Key())
+ if err != nil {
+ return err
+ }
+
+ netTotals := networkTotals{
+ QaNetworkPower: power.TotalPower.QualityAdjPower,
+ RawNetworkPower: power.TotalPower.RawBytePower,
+ seenClient: make(map[address.Address]bool),
+ seenProvider: make(map[address.Address]bool),
+ seenPieceCid: make(map[cid.Cid]bool),
+ }
+
+ deals, err := api.StateMarketDeals(ctx, head.Key())
+ if err != nil {
+ return err
+ }
+
+ for _, dealInfo := range deals {
+
+ // Only count deals that have properly started, not past/future ones
+ // https://github.com/filecoin-project/specs-actors/blob/v0.9.9/actors/builtin/market/deal.go#L81-L85
+ // Bail on 0 as well in case SectorStartEpoch is uninitialized due to some bug
+ if dealInfo.State.SectorStartEpoch <= 0 ||
+ dealInfo.State.SectorStartEpoch > head.Height() {
+ continue
+ }
+
+ netTotals.seenClient[dealInfo.Proposal.Client] = true
+ netTotals.TotalBytes += int64(dealInfo.Proposal.PieceSize)
+ netTotals.seenProvider[dealInfo.Proposal.Provider] = true
+ netTotals.seenPieceCid[dealInfo.Proposal.PieceCID] = true
+ netTotals.TotalDeals++
+
+ if dealInfo.Proposal.VerifiedDeal {
+ netTotals.FilplusTotalDeals++
+ netTotals.FilplusTotalBytes += int64(dealInfo.Proposal.PieceSize)
+ }
+ }
+
+ netTotals.UniqueCids = len(netTotals.seenPieceCid)
+ netTotals.UniqueClients = len(netTotals.seenClient)
+ netTotals.UniqueProviders = len(netTotals.seenProvider)
+
+ netTotals.CapacityCarryingData, _ = new(corebig.Rat).SetFrac(
+ corebig.NewInt(netTotals.TotalBytes),
+ netTotals.RawNetworkPower.Int,
+ ).Float64()
+
+ return json.NewEncoder(os.Stdout).Encode(
+ networkTotalsOutput{
+ Epoch: int64(head.Height()),
+ Endpoint: "NETWORK_WIDE_TOTALS",
+ Payload: netTotals,
+ },
+ )
+ },
+}
diff --git a/cmd/lotus-shed/sync.go b/cmd/lotus-shed/sync.go
index 65d2b6d6f50..cab3bd29ead 100644
--- a/cmd/lotus-shed/sync.go
+++ b/cmd/lotus-shed/sync.go
@@ -172,12 +172,13 @@ var syncScrapePowerCmd = &cli.Command{
return err
}
- qpercI := types.BigDiv(types.BigMul(totalWonPower.QualityAdjPower, types.NewInt(1000000)), totalPower.TotalPower.QualityAdjPower)
-
fmt.Println("Number of winning miners: ", len(miners))
fmt.Println("QAdjPower of winning miners: ", totalWonPower.QualityAdjPower)
fmt.Println("QAdjPower of all miners: ", totalPower.TotalPower.QualityAdjPower)
- fmt.Println("Percentage of winning QAdjPower: ", float64(qpercI.Int64())/10000)
+ fmt.Println("Percentage of winning QAdjPower: ", types.BigDivFloat(
+ types.BigMul(totalWonPower.QualityAdjPower, big.NewInt(100)),
+ totalPower.TotalPower.QualityAdjPower,
+ ))
return nil
},
diff --git a/cmd/lotus-shed/verifreg.go b/cmd/lotus-shed/verifreg.go
index df1f0d99012..7640e636a77 100644
--- a/cmd/lotus-shed/verifreg.go
+++ b/cmd/lotus-shed/verifreg.go
@@ -13,7 +13,7 @@ import (
verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg"
- "github.com/filecoin-project/lotus/api/apibstore"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
@@ -67,11 +67,13 @@ var verifRegAddVerifierCmd = &cli.Command{
return err
}
- api, closer, err := lcli.GetFullNodeAPI(cctx)
+ srv, err := lcli.GetFullNodeServices(cctx)
if err != nil {
return err
}
- defer closer()
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
ctx := lcli.ReqContext(cctx)
vrk, err := api.StateVerifiedRegistryRootKey(ctx, types.EmptyTSK)
@@ -79,14 +81,21 @@ var verifRegAddVerifierCmd = &cli.Command{
return err
}
- smsg, err := api.MsigPropose(ctx, vrk, verifreg.Address, big.Zero(), sender, uint64(verifreg.Methods.AddVerifier), params)
+ proto, err := api.MsigPropose(ctx, vrk, verifreg.Address, big.Zero(), sender, uint64(verifreg.Methods.AddVerifier), params)
+ if err != nil {
+ return err
+ }
+
+ sm, _, err := srv.PublishMessage(ctx, proto, false)
if err != nil {
return err
}
- fmt.Printf("message sent, now waiting on cid: %s\n", smsg)
+ msgCid := sm.Cid()
+
+ fmt.Printf("message sent, now waiting on cid: %s\n", msgCid)
- mwait, err := api.StateWaitMsg(ctx, smsg, build.MessageConfidence)
+ mwait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
if err != nil {
return err
}
@@ -102,8 +111,9 @@ var verifRegAddVerifierCmd = &cli.Command{
}
var verifRegVerifyClientCmd = &cli.Command{
- Name: "verify-client",
- Usage: "make a given account a verified client",
+ Name: "verify-client",
+ Usage: "make a given account a verified client",
+ Hidden: true,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "from",
@@ -111,6 +121,7 @@ var verifRegVerifyClientCmd = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
+ fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`")
froms := cctx.String("from")
if froms == "" {
return fmt.Errorf("must specify from address with --from")
@@ -175,9 +186,11 @@ var verifRegVerifyClientCmd = &cli.Command{
}
var verifRegListVerifiersCmd = &cli.Command{
- Name: "list-verifiers",
- Usage: "list all verifiers",
+ Name: "list-verifiers",
+ Usage: "list all verifiers",
+ Hidden: true,
Action: func(cctx *cli.Context) error {
+ fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`")
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
@@ -190,7 +203,7 @@ var verifRegListVerifiersCmd = &cli.Command{
return err
}
- apibs := apibstore.NewAPIBlockstore(api)
+ apibs := blockstore.NewAPIBlockstore(api)
store := adt.WrapStore(ctx, cbor.NewCborStore(apibs))
st, err := verifreg.Load(store, act)
@@ -205,9 +218,11 @@ var verifRegListVerifiersCmd = &cli.Command{
}
var verifRegListClientsCmd = &cli.Command{
- Name: "list-clients",
- Usage: "list all verified clients",
+ Name: "list-clients",
+ Usage: "list all verified clients",
+ Hidden: true,
Action: func(cctx *cli.Context) error {
+ fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`")
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
@@ -220,7 +235,7 @@ var verifRegListClientsCmd = &cli.Command{
return err
}
- apibs := apibstore.NewAPIBlockstore(api)
+ apibs := blockstore.NewAPIBlockstore(api)
store := adt.WrapStore(ctx, cbor.NewCborStore(apibs))
st, err := verifreg.Load(store, act)
@@ -235,9 +250,11 @@ var verifRegListClientsCmd = &cli.Command{
}
var verifRegCheckClientCmd = &cli.Command{
- Name: "check-client",
- Usage: "check verified client remaining bytes",
+ Name: "check-client",
+ Usage: "check verified client remaining bytes",
+ Hidden: true,
Action: func(cctx *cli.Context) error {
+ fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`")
if !cctx.Args().Present() {
return fmt.Errorf("must specify client address to check")
}
@@ -269,9 +286,11 @@ var verifRegCheckClientCmd = &cli.Command{
}
var verifRegCheckVerifierCmd = &cli.Command{
- Name: "check-verifier",
- Usage: "check verifiers remaining bytes",
+ Name: "check-verifier",
+ Usage: "check verifiers remaining bytes",
+ Hidden: true,
Action: func(cctx *cli.Context) error {
+ fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`")
if !cctx.Args().Present() {
return fmt.Errorf("must specify verifier address to check")
}
@@ -303,7 +322,7 @@ var verifRegCheckVerifierCmd = &cli.Command{
return err
}
- apibs := apibstore.NewAPIBlockstore(api)
+ apibs := blockstore.NewAPIBlockstore(api)
store := adt.WrapStore(ctx, cbor.NewCborStore(apibs))
st, err := verifreg.Load(store, act)
diff --git a/cmd/lotus-sim/copy.go b/cmd/lotus-sim/copy.go
new file mode 100644
index 00000000000..5faba69f21d
--- /dev/null
+++ b/cmd/lotus-sim/copy.go
@@ -0,0 +1,28 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/urfave/cli/v2"
+)
+
+var copySimCommand = &cli.Command{
+ Name: "copy",
+ ArgsUsage: "",
+ Action: func(cctx *cli.Context) (err error) {
+ node, err := open(cctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := node.Close(); err == nil {
+ err = cerr
+ }
+ }()
+ if cctx.NArg() != 1 {
+ return fmt.Errorf("expected 1 argument")
+ }
+ name := cctx.Args().First()
+ return node.CopySim(cctx.Context, cctx.String("simulation"), name)
+ },
+}
diff --git a/cmd/lotus-sim/create.go b/cmd/lotus-sim/create.go
new file mode 100644
index 00000000000..4867a5da5ec
--- /dev/null
+++ b/cmd/lotus-sim/create.go
@@ -0,0 +1,49 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+)
+
+var createSimCommand = &cli.Command{
+	Name:      "create",
+	ArgsUsage: "[tipset]",
+	Action: func(cctx *cli.Context) (err error) {
+		node, err := open(cctx)
+		if err != nil {
+			return err
+		}
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
+
+		var ts *types.TipSet
+		switch cctx.NArg() {
+		case 0:
+			if err := node.Chainstore.Load(); err != nil {
+				return err
+			}
+			ts = node.Chainstore.GetHeaviestTipSet()
+		case 1:
+			cids, err := lcli.ParseTipSetString(cctx.Args().First()) // args are 0-indexed; Get(1) is empty when NArg()==1
+			if err != nil {
+				return err
+			}
+			tsk := types.NewTipSetKey(cids...)
+			ts, err = node.Chainstore.LoadTipSet(tsk)
+			if err != nil {
+				return err
+			}
+		default:
+			return fmt.Errorf("expected 0 or 1 arguments")
+		}
+		_, err = node.CreateSim(cctx.Context, cctx.String("simulation"), ts)
+		return err
+	},
+}
diff --git a/cmd/lotus-sim/delete.go b/cmd/lotus-sim/delete.go
new file mode 100644
index 00000000000..c19b3d27d04
--- /dev/null
+++ b/cmd/lotus-sim/delete.go
@@ -0,0 +1,22 @@
+package main
+
+import (
+ "github.com/urfave/cli/v2"
+)
+
+var deleteSimCommand = &cli.Command{
+ Name: "delete",
+ Action: func(cctx *cli.Context) (err error) {
+ node, err := open(cctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := node.Close(); err == nil {
+ err = cerr
+ }
+ }()
+
+ return node.DeleteSim(cctx.Context, cctx.String("simulation"))
+ },
+}
diff --git a/cmd/lotus-sim/info.go b/cmd/lotus-sim/info.go
new file mode 100644
index 00000000000..864adb3bc9b
--- /dev/null
+++ b/cmd/lotus-sim/info.go
@@ -0,0 +1,110 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "text/tabwriter"
+ "time"
+
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
+ "github.com/filecoin-project/lotus/chain/stmgr"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation"
+)
+
+func getTotalPower(ctx context.Context, sm *stmgr.StateManager, ts *types.TipSet) (power.Claim, error) {
+ actor, err := sm.LoadActor(ctx, power.Address, ts)
+ if err != nil {
+ return power.Claim{}, err
+ }
+ state, err := power.Load(sm.ChainStore().ActorStore(ctx), actor)
+ if err != nil {
+ return power.Claim{}, err
+ }
+ return state.TotalPower()
+}
+
+func printInfo(ctx context.Context, sim *simulation.Simulation, out io.Writer) error {
+ head := sim.GetHead()
+ start := sim.GetStart()
+
+ powerNow, err := getTotalPower(ctx, sim.StateManager, head)
+ if err != nil {
+ return err
+ }
+ powerLookbackEpoch := head.Height() - builtin.EpochsInDay*2
+ if powerLookbackEpoch < start.Height() {
+ powerLookbackEpoch = start.Height()
+ }
+ lookbackTs, err := sim.Node.Chainstore.GetTipsetByHeight(ctx, powerLookbackEpoch, head, false)
+ if err != nil {
+ return err
+ }
+ powerLookback, err := getTotalPower(ctx, sim.StateManager, lookbackTs)
+ if err != nil {
+ return err
+ }
+ // growth rate in size/day
+ growthRate := big.Div(
+ big.Mul(big.Sub(powerNow.RawBytePower, powerLookback.RawBytePower),
+ big.NewInt(builtin.EpochsInDay)),
+ big.NewInt(int64(head.Height()-lookbackTs.Height())),
+ )
+
+ tw := tabwriter.NewWriter(out, 8, 8, 1, ' ', 0)
+
+ headEpoch := head.Height()
+ firstEpoch := start.Height() + 1
+
+ headTime := time.Unix(int64(head.MinTimestamp()), 0)
+ startTime := time.Unix(int64(start.MinTimestamp()), 0)
+ duration := headTime.Sub(startTime)
+
+ fmt.Fprintf(tw, "Name:\t%s\n", sim.Name())
+ fmt.Fprintf(tw, "Head:\t%s\n", head)
+ fmt.Fprintf(tw, "Start Epoch:\t%d\n", firstEpoch)
+ fmt.Fprintf(tw, "End Epoch:\t%d\n", headEpoch)
+ fmt.Fprintf(tw, "Length:\t%d\n", headEpoch-firstEpoch)
+ fmt.Fprintf(tw, "Start Date:\t%s\n", startTime)
+ fmt.Fprintf(tw, "End Date:\t%s\n", headTime)
+ fmt.Fprintf(tw, "Duration:\t%.2f day(s)\n", duration.Hours()/24)
+ fmt.Fprintf(tw, "Capacity:\t%s\n", types.SizeStr(powerNow.RawBytePower))
+ fmt.Fprintf(tw, "Daily Capacity Growth:\t%s/day\n", types.SizeStr(growthRate))
+ fmt.Fprintf(tw, "Network Version:\t%d\n", sim.GetNetworkVersion())
+ return tw.Flush()
+}
+
+var infoSimCommand = &cli.Command{
+ Name: "info",
+ Description: "Output information about the simulation.",
+ Subcommands: []*cli.Command{
+ infoCommitGasSimCommand,
+ infoMessageSizeSimCommand,
+ infoWindowPostBandwidthSimCommand,
+ infoCapacityGrowthSimCommand,
+ infoStateGrowthSimCommand,
+ },
+ Action: func(cctx *cli.Context) (err error) {
+ node, err := open(cctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := node.Close(); err == nil {
+ err = cerr
+ }
+ }()
+
+ sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
+ if err != nil {
+ return err
+ }
+ return printInfo(cctx.Context, sim, cctx.App.Writer)
+ },
+}
diff --git a/cmd/lotus-sim/info_capacity.go b/cmd/lotus-sim/info_capacity.go
new file mode 100644
index 00000000000..4372ee34afb
--- /dev/null
+++ b/cmd/lotus-sim/info_capacity.go
@@ -0,0 +1,67 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/go-state-types/big"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+var infoCapacityGrowthSimCommand = &cli.Command{
+ Name: "capacity-growth",
+ Description: "List daily capacity growth over the course of the simulation starting at the end.",
+ Action: func(cctx *cli.Context) (err error) {
+ node, err := open(cctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := node.Close(); err == nil {
+ err = cerr
+ }
+ }()
+
+ sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
+ if err != nil {
+ return err
+ }
+
+ firstEpoch := sim.GetStart().Height()
+ ts := sim.GetHead()
+ lastPower, err := getTotalPower(cctx.Context, sim.StateManager, ts)
+ if err != nil {
+ return err
+ }
+ lastHeight := ts.Height()
+
+ for ts.Height() > firstEpoch && cctx.Err() == nil {
+ ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents())
+ if err != nil {
+ return err
+ }
+ newEpoch := ts.Height()
+ if newEpoch != firstEpoch && newEpoch+builtin.EpochsInDay > lastHeight {
+ continue
+ }
+
+ newPower, err := getTotalPower(cctx.Context, sim.StateManager, ts)
+ if err != nil {
+ return err
+ }
+
+ growthRate := big.Div(
+ big.Mul(big.Sub(lastPower.RawBytePower, newPower.RawBytePower),
+ big.NewInt(builtin.EpochsInDay)),
+ big.NewInt(int64(lastHeight-newEpoch)),
+ )
+ lastPower = newPower
+ lastHeight = newEpoch
+ fmt.Fprintf(cctx.App.Writer, "%s/day\n", types.SizeStr(growthRate))
+ }
+ return cctx.Err()
+ },
+}
diff --git a/cmd/lotus-sim/info_commit.go b/cmd/lotus-sim/info_commit.go
new file mode 100644
index 00000000000..738fcde95e5
--- /dev/null
+++ b/cmd/lotus-sim/info_commit.go
@@ -0,0 +1,148 @@
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "syscall"
+
+ "github.com/streadway/quantile"
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/go-state-types/exitcode"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/stmgr"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation"
+ "github.com/filecoin-project/lotus/lib/stati"
+)
+
+var infoCommitGasSimCommand = &cli.Command{
+ Name: "commit-gas",
+ Description: "Output information about the gas for commits",
+ Flags: []cli.Flag{
+ &cli.Int64Flag{
+ Name: "lookback",
+ Value: 0,
+ },
+ },
+ Action: func(cctx *cli.Context) (err error) {
+ log := func(f string, i ...interface{}) {
+ fmt.Fprintf(os.Stderr, f, i...)
+ }
+ node, err := open(cctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := node.Close(); err == nil {
+ err = cerr
+ }
+ }()
+
+ go profileOnSignal(cctx, syscall.SIGUSR2)
+
+ sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
+ if err != nil {
+ return err
+ }
+
+ var gasAgg, proofsAgg uint64
+ var gasAggMax, proofsAggMax uint64
+ var gasSingle, proofsSingle uint64
+
+ qpoints := []struct{ q, tol float64 }{
+ {0.01, 0.0005},
+ {0.05, 0.001},
+ {0.20, 0.01},
+ {0.25, 0.01},
+ {0.30, 0.01},
+ {0.40, 0.01},
+ {0.45, 0.01},
+ {0.50, 0.01},
+ {0.60, 0.01},
+ {0.80, 0.01},
+ {0.95, 0.001},
+ {0.99, 0.0005},
+ }
+ estims := make([]quantile.Estimate, len(qpoints))
+ for i, p := range qpoints {
+ estims[i] = quantile.Known(p.q, p.tol)
+ }
+ qua := quantile.New(estims...)
+ hist, err := stati.NewHistogram([]float64{
+ 1, 3, 5, 7, 15, 30, 50, 100, 200, 400, 600, 700, 819})
+ if err != nil {
+ return err
+ }
+
+ err = sim.Walk(cctx.Context, cctx.Int64("lookback"), func(
+ sm *stmgr.StateManager, ts *types.TipSet, stCid cid.Cid,
+ messages []*simulation.AppliedMessage,
+ ) error {
+ for _, m := range messages {
+ if m.ExitCode != exitcode.Ok {
+ continue
+ }
+ if m.Method == miner.Methods.ProveCommitAggregate {
+ param := miner.ProveCommitAggregateParams{}
+ err := param.UnmarshalCBOR(bytes.NewReader(m.Params))
+ if err != nil {
+ log("failed to decode params: %+v", err)
+ return nil
+ }
+ c, err := param.SectorNumbers.Count()
+ if err != nil {
+ log("failed to count sectors")
+ return nil
+ }
+ gasAgg += uint64(m.GasUsed)
+ proofsAgg += c
+ if c == 819 {
+ gasAggMax += uint64(m.GasUsed)
+ proofsAggMax += c
+ }
+ for i := uint64(0); i < c; i++ {
+ qua.Add(float64(c))
+ }
+ hist.Observe(float64(c))
+ }
+
+ if m.Method == miner.Methods.ProveCommitSector {
+ gasSingle += uint64(m.GasUsed)
+ proofsSingle++
+ qua.Add(1)
+ hist.Observe(1)
+ }
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ idealGassUsed := float64(gasAggMax) / float64(proofsAggMax) * float64(proofsAgg+proofsSingle)
+
+ fmt.Printf("Gas usage efficiency in comparison to all 819: %f%%\n", 100*idealGassUsed/float64(gasAgg+gasSingle))
+
+ fmt.Printf("Proofs in singles: %d\n", proofsSingle)
+ fmt.Printf("Proofs in Aggs: %d\n", proofsAgg)
+ fmt.Printf("Proofs in Aggs(819): %d\n", proofsAggMax)
+
+ fmt.Println()
+ fmt.Println("Quantiles of proofs in given aggregate size:")
+ for _, p := range qpoints {
+ fmt.Printf("%.0f%%\t%.0f\n", p.q*100, qua.Get(p.q))
+ }
+ fmt.Println()
+ fmt.Println("Histogram of messages:")
+ fmt.Printf("Total\t%d\n", hist.Total())
+ for i, b := range hist.Buckets[1:] {
+ fmt.Printf("%.0f\t%d\n", b, hist.Get(i))
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-sim/info_message.go b/cmd/lotus-sim/info_message.go
new file mode 100644
index 00000000000..33c45e7280f
--- /dev/null
+++ b/cmd/lotus-sim/info_message.go
@@ -0,0 +1,95 @@
+package main
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/filecoin-project/lotus/chain/stmgr"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation"
+ "github.com/filecoin-project/lotus/lib/stati"
+ "github.com/ipfs/go-cid"
+ "github.com/streadway/quantile"
+ "github.com/urfave/cli/v2"
+)
+
+var infoMessageSizeSimCommand = &cli.Command{
+ Name: "message-size",
+ Description: "Output information about message size distribution",
+ Flags: []cli.Flag{
+ &cli.Int64Flag{
+ Name: "lookback",
+ Value: 0,
+ },
+ },
+ Action: func(cctx *cli.Context) (err error) {
+ node, err := open(cctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := node.Close(); err == nil {
+ err = cerr
+ }
+ }()
+
+ go profileOnSignal(cctx, syscall.SIGUSR2)
+
+ sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
+ if err != nil {
+ return err
+ }
+
+ qpoints := []struct{ q, tol float64 }{
+ {0.30, 0.01},
+ {0.40, 0.01},
+ {0.60, 0.01},
+ {0.70, 0.01},
+ {0.80, 0.01},
+ {0.85, 0.01},
+ {0.90, 0.01},
+ {0.95, 0.001},
+ {0.99, 0.0005},
+ {0.999, 0.0001},
+ }
+ estims := make([]quantile.Estimate, len(qpoints))
+ for i, p := range qpoints {
+ estims[i] = quantile.Known(p.q, p.tol)
+ }
+ qua := quantile.New(estims...)
+ hist, err := stati.NewHistogram([]float64{
+ 1 << 8, 1 << 10, 1 << 11, 1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ })
+ if err != nil {
+ return err
+ }
+
+ err = sim.Walk(cctx.Context, cctx.Int64("lookback"), func(
+ sm *stmgr.StateManager, ts *types.TipSet, stCid cid.Cid,
+ messages []*simulation.AppliedMessage,
+ ) error {
+ for _, m := range messages {
+ msgSize := float64(m.ChainLength())
+ qua.Add(msgSize)
+ hist.Observe(msgSize)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ fmt.Println("Quantiles of message sizes:")
+ for _, p := range qpoints {
+ fmt.Printf("%.1f%%\t%.0f\n", p.q*100, qua.Get(p.q))
+ }
+ fmt.Println()
+ fmt.Println("Histogram of message sizes:")
+ fmt.Printf("Total\t%d\n", hist.Total())
+ for i, b := range hist.Buckets[1:] {
+ fmt.Printf("%.0f\t%d\t%.1f%%\n", b, hist.Get(i), 100*hist.GetRatio(i))
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-sim/info_state.go b/cmd/lotus-sim/info_state.go
new file mode 100644
index 00000000000..5c9541513c6
--- /dev/null
+++ b/cmd/lotus-sim/info_state.go
@@ -0,0 +1,141 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "math"
+ "sync"
+ "sync/atomic"
+
+ "github.com/ipfs/go-cid"
+ "github.com/urfave/cli/v2"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/go-state-types/abi"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+var infoStateGrowthSimCommand = &cli.Command{
+ Name: "state-size",
+ Description: "List daily state size over the course of the simulation starting at the end.",
+ Action: func(cctx *cli.Context) (err error) {
+ node, err := open(cctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := node.Close(); err == nil {
+ err = cerr
+ }
+ }()
+
+ sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
+ if err != nil {
+ return err
+ }
+
+ // NOTE: This code is entirely read-bound.
+ store := node.Chainstore.StateBlockstore()
+ stateSize := func(ctx context.Context, c cid.Cid) (uint64, error) {
+ seen := cid.NewSet()
+ sema := make(chan struct{}, 40)
+ var lock sync.Mutex
+ var recSize func(cid.Cid) (uint64, error)
+ recSize = func(c cid.Cid) (uint64, error) {
+ // Not a part of the chain state.
+ if err := ctx.Err(); err != nil {
+ return 0, err
+ }
+
+ lock.Lock()
+ visit := seen.Visit(c)
+ lock.Unlock()
+ // Already seen?
+ if !visit {
+ return 0, nil
+ }
+
+ var links []cid.Cid
+ var totalSize uint64
+ if err := store.View(c, func(data []byte) error {
+ totalSize += uint64(len(data))
+ return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
+ if c.Prefix().Codec != cid.DagCBOR {
+ return
+ }
+
+ links = append(links, c)
+ })
+ }); err != nil {
+ return 0, err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+ cb := func(c cid.Cid) {
+ size, err := recSize(c)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ return
+ }
+ atomic.AddUint64(&totalSize, size)
+ }
+ asyncCb := func(c cid.Cid) {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ defer func() { <-sema }()
+ cb(c)
+ }()
+ }
+ for _, link := range links {
+ select {
+ case sema <- struct{}{}:
+ asyncCb(link)
+ default:
+ cb(link)
+ }
+
+ }
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return 0, err
+ default:
+ }
+
+ return totalSize, nil
+ }
+ return recSize(c)
+ }
+
+ firstEpoch := sim.GetStart().Height()
+ ts := sim.GetHead()
+ lastHeight := abi.ChainEpoch(math.MaxInt64)
+ for ts.Height() > firstEpoch && cctx.Err() == nil {
+ if ts.Height()+builtin.EpochsInDay <= lastHeight {
+ lastHeight = ts.Height()
+
+ parentStateSize, err := stateSize(cctx.Context, ts.ParentState())
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(cctx.App.Writer, "%d: %s\n", ts.Height(), types.SizeStr(types.NewInt(parentStateSize)))
+ }
+
+ ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents())
+ if err != nil {
+ return err
+ }
+ }
+ return cctx.Err()
+ },
+}
diff --git a/cmd/lotus-sim/info_wdpost.go b/cmd/lotus-sim/info_wdpost.go
new file mode 100644
index 00000000000..719a133b17e
--- /dev/null
+++ b/cmd/lotus-sim/info_wdpost.go
@@ -0,0 +1,69 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/go-state-types/exitcode"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/stmgr"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation"
+)
+
+var infoWindowPostBandwidthSimCommand = &cli.Command{
+ Name: "post-bandwidth",
+ Description: "List average chain bandwidth used by window posts for each day of the simulation.",
+ Action: func(cctx *cli.Context) (err error) {
+ node, err := open(cctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := node.Close(); err == nil {
+ err = cerr
+ }
+ }()
+
+ sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
+ if err != nil {
+ return err
+ }
+
+ var postGas, totalGas int64
+ printStats := func() {
+ fmt.Fprintf(cctx.App.Writer, "%.4f%%\n", float64(100*postGas)/float64(totalGas))
+ }
+ idx := 0
+ err = sim.Walk(cctx.Context, 0, func(
+ sm *stmgr.StateManager, ts *types.TipSet, stCid cid.Cid,
+ messages []*simulation.AppliedMessage,
+ ) error {
+ for _, m := range messages {
+ totalGas += m.GasUsed
+ if m.ExitCode != exitcode.Ok {
+ continue
+ }
+ if m.Method == miner.Methods.SubmitWindowedPoSt {
+ postGas += m.GasUsed
+ }
+ }
+ idx++
+ idx %= builtin.EpochsInDay
+ if idx == 0 {
+ printStats()
+ postGas = 0
+ totalGas = 0
+ }
+ return nil
+ })
+ if idx > 0 {
+ printStats()
+ }
+ return err
+ },
+}
diff --git a/cmd/lotus-sim/list.go b/cmd/lotus-sim/list.go
new file mode 100644
index 00000000000..37e767b9ab0
--- /dev/null
+++ b/cmd/lotus-sim/list.go
@@ -0,0 +1,38 @@
+package main
+
+import (
+ "fmt"
+ "text/tabwriter"
+
+ "github.com/urfave/cli/v2"
+)
+
+var listSimCommand = &cli.Command{
+ Name: "list",
+ Action: func(cctx *cli.Context) (err error) {
+ node, err := open(cctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := node.Close(); err == nil {
+ err = cerr
+ }
+ }()
+
+ list, err := node.ListSims(cctx.Context)
+ if err != nil {
+ return err
+ }
+ tw := tabwriter.NewWriter(cctx.App.Writer, 8, 8, 0, ' ', 0)
+ for _, name := range list {
+ sim, err := node.LoadSim(cctx.Context, name)
+ if err != nil {
+ return err
+ }
+ head := sim.GetHead()
+ fmt.Fprintf(tw, "%s\t%s\t%s\n", name, head.Height(), head.Key())
+ }
+ return tw.Flush()
+ },
+}
diff --git a/cmd/lotus-sim/main.go b/cmd/lotus-sim/main.go
new file mode 100644
index 00000000000..e6cd5d9932b
--- /dev/null
+++ b/cmd/lotus-sim/main.go
@@ -0,0 +1,63 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/urfave/cli/v2"
+
+ logging "github.com/ipfs/go-log/v2"
+)
+
+var root []*cli.Command = []*cli.Command{
+ createSimCommand,
+ deleteSimCommand,
+ copySimCommand,
+ renameSimCommand,
+ listSimCommand,
+
+ runSimCommand,
+ infoSimCommand,
+ upgradeCommand,
+}
+
+func main() {
+ if _, set := os.LookupEnv("GOLOG_LOG_LEVEL"); !set {
+ _ = logging.SetLogLevel("simulation", "DEBUG")
+ _ = logging.SetLogLevel("simulation-mock", "DEBUG")
+ }
+ app := &cli.App{
+ Name: "lotus-sim",
+ Usage: "A tool to simulate a network.",
+ Commands: root,
+ Writer: os.Stdout,
+ ErrWriter: os.Stderr,
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "repo",
+ EnvVars: []string{"LOTUS_PATH"},
+ Hidden: true,
+ Value: "~/.lotus",
+ },
+ &cli.StringFlag{
+ Name: "simulation",
+ Aliases: []string{"sim"},
+ EnvVars: []string{"LOTUS_SIMULATION"},
+ Value: "default",
+ },
+ },
+ }
+
+ ctx, cancel := signal.NotifyContext(context.Background(),
+ syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP)
+ defer cancel()
+
+ if err := app.RunContext(ctx, os.Args); err != nil {
+ fmt.Fprintf(os.Stderr, "Error: %s\n", err)
+ os.Exit(1)
+ return
+ }
+}
diff --git a/cmd/lotus-sim/profile.go b/cmd/lotus-sim/profile.go
new file mode 100644
index 00000000000..63e0ef3bd86
--- /dev/null
+++ b/cmd/lotus-sim/profile.go
@@ -0,0 +1,94 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "runtime/pprof"
+ "time"
+
+ "github.com/urfave/cli/v2"
+)
+
+func takeProfiles(ctx context.Context) (fname string, _err error) {
+ dir, err := os.MkdirTemp(".", ".profiles-temp*")
+ if err != nil {
+ return "", err
+ }
+
+ if err := writeProfiles(ctx, dir); err != nil {
+ _ = os.RemoveAll(dir)
+ return "", err
+ }
+
+ fname = fmt.Sprintf("pprof-simulation-%s", time.Now().Format(time.RFC3339))
+ if err := os.Rename(dir, fname); err != nil {
+ _ = os.RemoveAll(dir)
+ return "", err
+ }
+ return fname, nil
+}
+
+func writeProfiles(ctx context.Context, dir string) error {
+ for _, profile := range pprof.Profiles() {
+ file, err := os.Create(filepath.Join(dir, profile.Name()+".pprof.gz"))
+ if err != nil {
+ return err
+ }
+ if err := profile.WriteTo(file, 0); err != nil {
+ _ = file.Close()
+ return err
+ }
+ if err := file.Close(); err != nil {
+ return err
+ }
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ }
+
+ file, err := os.Create(filepath.Join(dir, "cpu.pprof.gz"))
+ if err != nil {
+ return err
+ }
+
+ if err := pprof.StartCPUProfile(file); err != nil {
+ _ = file.Close()
+ return err
+ }
+ select {
+ case <-time.After(30 * time.Second):
+ case <-ctx.Done():
+ }
+ pprof.StopCPUProfile()
+ err = file.Close()
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ return err
+}
+
+func profileOnSignal(cctx *cli.Context, signals ...os.Signal) {
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, signals...)
+ defer signal.Stop(ch)
+
+ for {
+ select {
+ case <-ch:
+ fname, err := takeProfiles(cctx.Context)
+ switch err {
+ case context.Canceled:
+ return
+ case nil:
+ fmt.Fprintf(cctx.App.ErrWriter, "Wrote profile to %q\n", fname)
+ default:
+ fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to write profile: %s\n", err)
+ }
+ case <-cctx.Done():
+ return
+ }
+ }
+}
diff --git a/cmd/lotus-sim/rename.go b/cmd/lotus-sim/rename.go
new file mode 100644
index 00000000000..c336717c792
--- /dev/null
+++ b/cmd/lotus-sim/rename.go
@@ -0,0 +1,29 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/urfave/cli/v2"
+)
+
+var renameSimCommand = &cli.Command{
+ Name: "rename",
+ ArgsUsage: "",
+ Action: func(cctx *cli.Context) (err error) {
+ node, err := open(cctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := node.Close(); err == nil {
+ err = cerr
+ }
+ }()
+
+ if cctx.NArg() != 1 {
+ return fmt.Errorf("expected 1 argument")
+ }
+ name := cctx.Args().First()
+ return node.RenameSim(cctx.Context, cctx.String("simulation"), name)
+ },
+}
diff --git a/cmd/lotus-sim/run.go b/cmd/lotus-sim/run.go
new file mode 100644
index 00000000000..a985fdf9ec9
--- /dev/null
+++ b/cmd/lotus-sim/run.go
@@ -0,0 +1,72 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/urfave/cli/v2"
+)
+
+var runSimCommand = &cli.Command{
+ Name: "run",
+ Description: `Run the simulation.
+
+Signals:
+- SIGUSR1: Print information about the current simulation (equivalent to 'lotus-sim info').
+- SIGUSR2: Write pprof profiles to ./pprof-simulation-$DATE/`,
+ Flags: []cli.Flag{
+ &cli.IntFlag{
+ Name: "epochs",
+ Usage: "Advance the given number of epochs then stop.",
+ },
+ },
+ Action: func(cctx *cli.Context) (err error) {
+ node, err := open(cctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := node.Close(); err == nil {
+ err = cerr
+ }
+ }()
+
+ go profileOnSignal(cctx, syscall.SIGUSR2)
+
+ sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
+ if err != nil {
+ return err
+ }
+ targetEpochs := cctx.Int("epochs")
+
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, syscall.SIGUSR1)
+ defer signal.Stop(ch)
+
+ for i := 0; targetEpochs == 0 || i < targetEpochs; i++ {
+ ts, err := sim.Step(cctx.Context)
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(cctx.App.Writer, "advanced to %d %s\n", ts.Height(), ts.Key())
+
+ // Print
+ select {
+ case <-ch:
+ fmt.Fprintln(cctx.App.Writer, "---------------------")
+ if err := printInfo(cctx.Context, sim, cctx.App.Writer); err != nil {
+ fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to print info: %s\n", err)
+ }
+ fmt.Fprintln(cctx.App.Writer, "---------------------")
+ case <-cctx.Context.Done():
+ return cctx.Err()
+ default:
+ }
+ }
+ fmt.Fprintln(cctx.App.Writer, "simulation done")
+ return err
+ },
+}
diff --git a/cmd/lotus-sim/simulation/block.go b/cmd/lotus-sim/simulation/block.go
new file mode 100644
index 00000000000..93e6a319177
--- /dev/null
+++ b/cmd/lotus-sim/simulation/block.go
@@ -0,0 +1,93 @@
+package simulation
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/binary"
+ "time"
+
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+const beaconPrefix = "mockbeacon:"
+
+// nextBeaconEntries returns a fake beacon entries for the next block.
+func (sim *Simulation) nextBeaconEntries() []types.BeaconEntry {
+ parentBeacons := sim.head.Blocks()[0].BeaconEntries
+ lastBeacon := parentBeacons[len(parentBeacons)-1]
+ beaconRound := lastBeacon.Round + 1
+
+ buf := make([]byte, len(beaconPrefix)+8)
+ copy(buf, beaconPrefix)
+ binary.BigEndian.PutUint64(buf[len(beaconPrefix):], beaconRound)
+ beaconRand := sha256.Sum256(buf)
+ return []types.BeaconEntry{{
+ Round: beaconRound,
+ Data: beaconRand[:],
+ }}
+}
+
+// nextTicket returns a fake ticket for the next block.
+func (sim *Simulation) nextTicket() *types.Ticket {
+ newProof := sha256.Sum256(sim.head.MinTicket().VRFProof)
+ return &types.Ticket{
+ VRFProof: newProof[:],
+ }
+}
+
+// makeTipSet generates and executes the next tipset from the given messages. This method:
+//
+// 1. Stores the given messages in the Chainstore.
+// 2. Creates and persists a single block mined by the same miner as the parent.
+// 3. Creates a tipset from this block and executes it.
+// 4. Returns the resulting tipset.
+//
+// This method does _not_ mutate local state (although it does add blocks to the datastore).
+func (sim *Simulation) makeTipSet(ctx context.Context, messages []*types.Message) (*types.TipSet, error) {
+ parentTs := sim.head
+ parentState, parentRec, err := sim.StateManager.TipSetState(ctx, parentTs)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to compute parent tipset: %w", err)
+ }
+ msgsCid, err := sim.storeMessages(ctx, messages)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to store block messages: %w", err)
+ }
+
+ uts := parentTs.MinTimestamp() + build.BlockDelaySecs
+
+ blks := []*types.BlockHeader{{
+ Miner: parentTs.MinTicketBlock().Miner, // keep reusing the same miner.
+ Ticket: sim.nextTicket(),
+ BeaconEntries: sim.nextBeaconEntries(),
+ Parents: parentTs.Cids(),
+ Height: parentTs.Height() + 1,
+ ParentStateRoot: parentState,
+ ParentMessageReceipts: parentRec,
+ Messages: msgsCid,
+ ParentBaseFee: abi.NewTokenAmount(0),
+ Timestamp: uts,
+ ElectionProof: &types.ElectionProof{WinCount: 1},
+ }}
+ err = sim.Node.Chainstore.PersistBlockHeaders(blks...)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to persist block headers: %w", err)
+ }
+ newTipSet, err := types.NewTipSet(blks)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create new tipset: %w", err)
+ }
+ now := time.Now()
+ _, _, err = sim.StateManager.TipSetState(ctx, newTipSet)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to compute new tipset: %w", err)
+ }
+ duration := time.Since(now)
+ log.Infow("computed tipset", "duration", duration, "height", newTipSet.Height())
+
+ return newTipSet, nil
+}
diff --git a/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go
new file mode 100644
index 00000000000..2ffc0bf140b
--- /dev/null
+++ b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go
@@ -0,0 +1,280 @@
+package blockbuilder
+
+import (
+ "context"
+ "math"
+
+ "go.uber.org/zap"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/network"
+
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/account"
+ "github.com/filecoin-project/lotus/chain/state"
+ "github.com/filecoin-project/lotus/chain/stmgr"
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
+)
+
+const (
+ // 0.25 is the default, but the number below is from the network.
+ gasOverestimation = 1.0 / 0.808
+ // The number of expected blocks in a tipset. We use this to determine how much gas a tipset
+ // has.
+ // 5 per tipset, but we effectively get 4 blocks worth of messages.
+ expectedBlocks = 4
+ // TODO: This will produce invalid blocks but it will accurately model the amount of gas
+ // we're willing to use per-tipset.
+ // A more correct approach would be to produce 5 blocks. We can do that later.
+ targetGas = build.BlockGasTarget * expectedBlocks
+)
+
+type BlockBuilder struct {
+ ctx context.Context
+ logger *zap.SugaredLogger
+
+ parentTs *types.TipSet
+ parentSt *state.StateTree
+ vm *vm.VM
+ sm *stmgr.StateManager
+
+ gasTotal int64
+ messages []*types.Message
+}
+
+// NewBlockBuilder constructs a new block builder from the parent state. Use this to pack a block
+// with messages.
+//
+// NOTE: The context applies to the life of the block builder itself (but does not need to be canceled).
+func NewBlockBuilder(ctx context.Context, logger *zap.SugaredLogger, sm *stmgr.StateManager, parentTs *types.TipSet) (*BlockBuilder, error) {
+ parentState, _, err := sm.TipSetState(ctx, parentTs)
+ if err != nil {
+ return nil, err
+ }
+ parentSt, err := sm.StateTree(parentState)
+ if err != nil {
+ return nil, err
+ }
+
+ bb := &BlockBuilder{
+ ctx: ctx,
+ logger: logger.With("epoch", parentTs.Height()+1),
+ sm: sm,
+ parentTs: parentTs,
+ parentSt: parentSt,
+ }
+
+ // Then we construct a VM to execute messages for gas estimation.
+ //
+ // Most parts of this VM are "real" except:
+ // 1. We don't charge a fee.
+ // 2. The runtime has "fake" proof logic.
+ // 3. We don't actually save any of the results.
+ r := store.NewChainRand(sm.ChainStore(), parentTs.Cids())
+ vmopt := &vm.VMOpts{
+ StateBase: parentState,
+ Epoch: parentTs.Height() + 1,
+ Rand: r,
+ Bstore: sm.ChainStore().StateBlockstore(),
+ Syscalls: sm.ChainStore().VMSys(),
+ CircSupplyCalc: sm.GetVMCirculatingSupply,
+ NtwkVersion: sm.GetNtwkVersion,
+ BaseFee: abi.NewTokenAmount(0),
+ LookbackState: stmgr.LookbackStateGetterForTipset(sm, parentTs),
+ }
+ bb.vm, err = vm.NewVM(bb.ctx, vmopt)
+ if err != nil {
+ return nil, err
+ }
+ return bb, nil
+}
+
+// PushMessages tries to push the specified message into the block.
+//
+// 1. All messages will be executed in-order.
+// 2. Gas computation & nonce selection will be handled internally.
+// 3. The base-fee is 0 so the sender does not need funds.
+// 4. As usual, the sender must be an account (any account).
+// 5. If the message fails to execute, this method will fail.
+//
+// Returns ErrOutOfGas when out of gas. Check BlockBuilder.GasRemaining and try pushing a cheaper
+// message.
+func (bb *BlockBuilder) PushMessage(msg *types.Message) (*types.MessageReceipt, error) {
+ if bb.gasTotal >= targetGas {
+ return nil, new(ErrOutOfGas)
+ }
+
+ st := bb.StateTree()
+ store := bb.ActorStore()
+
+ // Copy the message before we start mutating it.
+ msgCpy := *msg
+ msg = &msgCpy
+
+ actor, err := st.GetActor(msg.From)
+ if err != nil {
+ return nil, err
+ }
+ if !builtin.IsAccountActor(actor.Code) {
+ return nil, xerrors.Errorf(
+ "messages may only be sent from account actors, got message from %s (%s)",
+ msg.From, builtin.ActorNameByCode(actor.Code),
+ )
+ }
+ msg.Nonce = actor.Nonce
+ if msg.From.Protocol() == address.ID {
+ state, err := account.Load(store, actor)
+ if err != nil {
+ return nil, err
+ }
+ msg.From, err = state.PubkeyAddress()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // TODO: Our gas estimation is broken for payment channels due to horrible hacks in
+ // gasEstimateGasLimit.
+ if msg.Value == types.EmptyInt {
+ msg.Value = abi.NewTokenAmount(0)
+ }
+ msg.GasPremium = abi.NewTokenAmount(0)
+ msg.GasFeeCap = abi.NewTokenAmount(0)
+ msg.GasLimit = build.BlockGasTarget
+
+ // We manually snapshot so we can revert nonce changes, etc. on failure.
+ err = st.Snapshot(bb.ctx)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to take a snapshot while estimating message gas: %w", err)
+ }
+ defer st.ClearSnapshot()
+
+ ret, err := bb.vm.ApplyMessage(bb.ctx, msg)
+ if err != nil {
+ _ = st.Revert()
+ return nil, err
+ }
+ if ret.ActorErr != nil {
+ _ = st.Revert()
+ return nil, ret.ActorErr
+ }
+
+ // Sometimes there are bugs. Let's catch them.
+ if ret.GasUsed == 0 {
+ _ = st.Revert()
+ return nil, xerrors.Errorf("used no gas %v -> %v", msg, ret)
+ }
+
+ // Update the gas limit taking overestimation into account.
+ msg.GasLimit = int64(math.Ceil(float64(ret.GasUsed) * gasOverestimation))
+
+ // Did we go over? Yes, revert.
+ newTotal := bb.gasTotal + msg.GasLimit
+ if newTotal > targetGas {
+ _ = st.Revert()
+ return nil, &ErrOutOfGas{Available: targetGas - bb.gasTotal, Required: msg.GasLimit}
+ }
+ bb.gasTotal = newTotal
+
+ bb.messages = append(bb.messages, msg)
+ return &ret.MessageReceipt, nil
+}
+
+// ActorStore returns the VM's current (pending) blockstore.
+func (bb *BlockBuilder) ActorStore() adt.Store {
+ return bb.vm.ActorStore(bb.ctx)
+}
+
+// StateTree returns the VM's current (pending) state-tree. This includes any changes made by
+// successfully pushed messages.
+//
+// You probably want ParentStateTree
+func (bb *BlockBuilder) StateTree() *state.StateTree {
+ return bb.vm.StateTree().(*state.StateTree)
+}
+
+// ParentStateTree returns the parent state-tree (not the parent tipset's parent state-tree).
+func (bb *BlockBuilder) ParentStateTree() *state.StateTree {
+ return bb.parentSt
+}
+
+// StateTreeByHeight will return a state-tree up through and including the current in-progress
+// epoch.
+//
+// NOTE: This will return the state after the given epoch, not the parent state for the epoch.
+func (bb *BlockBuilder) StateTreeByHeight(epoch abi.ChainEpoch) (*state.StateTree, error) {
+ now := bb.Height()
+ if epoch > now {
+ return nil, xerrors.Errorf(
+ "cannot load state-tree from future: %d > %d", epoch, bb.Height(),
+ )
+ } else if epoch <= 0 {
+ return nil, xerrors.Errorf(
+ "cannot load state-tree: epoch %d <= 0", epoch,
+ )
+ }
+
+ // Manually handle "now" and "previous".
+ switch epoch {
+ case now:
+ return bb.StateTree(), nil
+ case now - 1:
+ return bb.ParentStateTree(), nil
+ }
+
+ // Get the tipset of the block _after_ the target epoch so we can use its parent state.
+ targetTs, err := bb.sm.ChainStore().GetTipsetByHeight(bb.ctx, epoch+1, bb.parentTs, false)
+ if err != nil {
+ return nil, err
+ }
+
+ return bb.sm.StateTree(targetTs.ParentState())
+}
+
+// Messages returns all messages currently packed into the next block.
+// 1. DO NOT modify the slice, copy it.
+// 2. DO NOT retain the slice, copy it.
+func (bb *BlockBuilder) Messages() []*types.Message {
+ return bb.messages
+}
+
+// GasRemaining returns the amount of remaining gas in the next block.
+func (bb *BlockBuilder) GasRemaining() int64 {
+ return targetGas - bb.gasTotal
+}
+
+// ParentTipSet returns the parent tipset.
+func (bb *BlockBuilder) ParentTipSet() *types.TipSet {
+ return bb.parentTs
+}
+
+// Height returns the epoch for the target block.
+func (bb *BlockBuilder) Height() abi.ChainEpoch {
+ return bb.parentTs.Height() + 1
+}
+
+// NetworkVersion returns the network version for the target block.
+func (bb *BlockBuilder) NetworkVersion() network.Version {
+ return bb.sm.GetNtwkVersion(bb.ctx, bb.Height())
+}
+
+// StateManager returns the stmgr.StateManager.
+func (bb *BlockBuilder) StateManager() *stmgr.StateManager {
+ return bb.sm
+}
+
+// ActorsVersion returns the actors version for the target block.
+func (bb *BlockBuilder) ActorsVersion() actors.Version {
+ return actors.VersionForNetwork(bb.NetworkVersion())
+}
+
+func (bb *BlockBuilder) L() *zap.SugaredLogger {
+ return bb.logger
+}
diff --git a/cmd/lotus-sim/simulation/blockbuilder/errors.go b/cmd/lotus-sim/simulation/blockbuilder/errors.go
new file mode 100644
index 00000000000..ddf08ea1899
--- /dev/null
+++ b/cmd/lotus-sim/simulation/blockbuilder/errors.go
@@ -0,0 +1,25 @@
+package blockbuilder
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrOutOfGas is returned from BlockBuilder.PushMessage when the block does not have enough gas to
+// fit the given message.
+type ErrOutOfGas struct {
+ Available, Required int64
+}
+
+func (e *ErrOutOfGas) Error() string {
+ if e.Available == 0 {
+ return "out of gas: block full"
+ }
+ return fmt.Sprintf("out of gas: %d > %d", e.Required, e.Available)
+}
+
+// IsOutOfGas returns true if the error is an "out of gas" error.
+func IsOutOfGas(err error) bool {
+ var oog *ErrOutOfGas
+ return errors.As(err, &oog)
+}
diff --git a/cmd/lotus-sim/simulation/messages.go b/cmd/lotus-sim/simulation/messages.go
new file mode 100644
index 00000000000..5bed2743670
--- /dev/null
+++ b/cmd/lotus-sim/simulation/messages.go
@@ -0,0 +1,58 @@
+package simulation
+
+import (
+ "context"
+
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
+
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+// toArray converts the given set of CIDs to an AMT. This is usually used to pack messages into blocks.
+func toArray(store blockadt.Store, cids []cid.Cid) (cid.Cid, error) {
+ arr := blockadt.MakeEmptyArray(store)
+ for i, c := range cids {
+ oc := cbg.CborCid(c)
+ if err := arr.Set(uint64(i), &oc); err != nil {
+ return cid.Undef, err
+ }
+ }
+ return arr.Root()
+}
+
+// storeMessages packs a set of messages into a types.MsgMeta and returns the resulting CID. The
+// resulting CID is valid for the BlockHeader's Messages field.
+func (sim *Simulation) storeMessages(ctx context.Context, messages []*types.Message) (cid.Cid, error) {
+ // We store all messages as "bls" messages so they're executed in-order. This ensures
+ // accurate gas accounting. It also ensures we don't, e.g., try to fund a miner after we
+ // fail a pre-commit...
+ var msgCids []cid.Cid
+ for _, msg := range messages {
+ c, err := sim.Node.Chainstore.PutMessage(msg)
+ if err != nil {
+ return cid.Undef, err
+ }
+ msgCids = append(msgCids, c)
+ }
+ adtStore := sim.Node.Chainstore.ActorStore(ctx)
+ blsMsgArr, err := toArray(adtStore, msgCids)
+ if err != nil {
+ return cid.Undef, err
+ }
+ sekpMsgArr, err := toArray(adtStore, nil)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ msgsCid, err := adtStore.Put(adtStore.Context(), &types.MsgMeta{
+ BlsMessages: blsMsgArr,
+ SecpkMessages: sekpMsgArr,
+ })
+ if err != nil {
+ return cid.Undef, err
+ }
+ return msgsCid, nil
+}
diff --git a/cmd/lotus-sim/simulation/mock/mock.go b/cmd/lotus-sim/simulation/mock/mock.go
new file mode 100644
index 00000000000..38648f758dc
--- /dev/null
+++ b/cmd/lotus-sim/simulation/mock/mock.go
@@ -0,0 +1,179 @@
+package mock
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ logging "github.com/ipfs/go-log/v2"
+
+ miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
+ proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
+ tutils "github.com/filecoin-project/specs-actors/v5/support/testing"
+
+ "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
+)
+
+// Ideally, we'd use extern/sector-storage/mock. Unfortunately, those mocks are a bit _too_ accurate
+// and would force us to load sector info for window post proofs.
+
+const (
+ mockSealProofPrefix = "valid seal proof:"
+ mockAggregateSealProofPrefix = "valid aggregate seal proof:"
+ mockPoStProofPrefix = "valid post proof:"
+)
+
+var log = logging.Logger("simulation-mock")
+
+// mockVerifier is a simple mock for verifying "fake" proofs.
+type mockVerifier struct{}
+
+var Verifier ffiwrapper.Verifier = mockVerifier{}
+
+func (mockVerifier) VerifySeal(proof proof5.SealVerifyInfo) (bool, error) {
+ addr, err := address.NewIDAddress(uint64(proof.Miner))
+ if err != nil {
+ return false, err
+ }
+ mockProof, err := MockSealProof(proof.SealProof, addr)
+ if err != nil {
+ return false, err
+ }
+ if bytes.Equal(proof.Proof, mockProof) {
+ return true, nil
+ }
+ log.Debugw("invalid seal proof", "expected", mockProof, "actual", proof.Proof, "miner", addr)
+ return false, nil
+}
+
+func (mockVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
+ addr, err := address.NewIDAddress(uint64(aggregate.Miner))
+ if err != nil {
+ return false, err
+ }
+ mockProof, err := MockAggregateSealProof(aggregate.SealProof, addr, len(aggregate.Infos))
+ if err != nil {
+ return false, err
+ }
+ if bytes.Equal(aggregate.Proof, mockProof) {
+ return true, nil
+ }
+ log.Debugw("invalid aggregate seal proof",
+ "expected", mockProof,
+ "actual", aggregate.Proof,
+ "count", len(aggregate.Infos),
+ "miner", addr,
+ )
+ return false, nil
+}
+func (mockVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
+ panic("should not be called")
+}
+func (mockVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
+ if len(info.Proofs) != 1 {
+ return false, fmt.Errorf("expected exactly one proof")
+ }
+ proof := info.Proofs[0]
+ addr, err := address.NewIDAddress(uint64(info.Prover))
+ if err != nil {
+ return false, err
+ }
+ mockProof, err := MockWindowPoStProof(proof.PoStProof, addr)
+ if err != nil {
+ return false, err
+ }
+ if bytes.Equal(proof.ProofBytes, mockProof) {
+ return true, nil
+ }
+
+ log.Debugw("invalid window post proof",
+ "expected", mockProof,
+ "actual", info.Proofs[0],
+ "miner", addr,
+ )
+ return false, nil
+}
+
+func (mockVerifier) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) {
+ panic("should not be called")
+}
+
+// MockSealProof generates a mock "seal" proof tied to the specified proof type and the given miner.
+func MockSealProof(proofType abi.RegisteredSealProof, minerAddr address.Address) ([]byte, error) {
+ plen, err := proofType.ProofSize()
+ if err != nil {
+ return nil, err
+ }
+ proof := make([]byte, plen)
+ i := copy(proof, mockSealProofPrefix)
+ binary.BigEndian.PutUint64(proof[i:], uint64(proofType))
+ i += 8
+ i += copy(proof[i:], minerAddr.Bytes())
+ return proof, nil
+}
+
+// MockAggregateSealProof generates a mock "seal" aggregate proof tied to the specified proof type,
+// the given miner, and the number of proven sectors.
+func MockAggregateSealProof(proofType abi.RegisteredSealProof, minerAddr address.Address, count int) ([]byte, error) {
+ proof := make([]byte, aggProofLen(count))
+ i := copy(proof, mockAggregateSealProofPrefix)
+ binary.BigEndian.PutUint64(proof[i:], uint64(proofType))
+ i += 8
+ binary.BigEndian.PutUint64(proof[i:], uint64(count))
+ i += 8
+ i += copy(proof[i:], minerAddr.Bytes())
+
+ return proof, nil
+}
+
+// MockWindowPoStProof generates a mock "window post" proof tied to the specified proof type, and the
+// given miner.
+func MockWindowPoStProof(proofType abi.RegisteredPoStProof, minerAddr address.Address) ([]byte, error) {
+ plen, err := proofType.ProofSize()
+ if err != nil {
+ return nil, err
+ }
+ proof := make([]byte, plen)
+ i := copy(proof, mockPoStProofPrefix)
+ i += copy(proof[i:], minerAddr.Bytes())
+ return proof, nil
+}
+
+// MockCommR generates a "fake" but valid CommR for a sector. It is unique for the given sector/miner.
+func MockCommR(minerAddr address.Address, sno abi.SectorNumber) cid.Cid {
+ return tutils.MakeCID(fmt.Sprintf("%s:%d", minerAddr, sno), &miner5.SealedCIDPrefix)
+}
+
+// TODO: dedup
+func aggProofLen(nproofs int) int {
+ switch {
+ case nproofs <= 8:
+ return 11220
+ case nproofs <= 16:
+ return 14196
+ case nproofs <= 32:
+ return 17172
+ case nproofs <= 64:
+ return 20148
+ case nproofs <= 128:
+ return 23124
+ case nproofs <= 256:
+ return 26100
+ case nproofs <= 512:
+ return 29076
+ case nproofs <= 1024:
+ return 32052
+ case nproofs <= 2048:
+ return 35028
+ case nproofs <= 4096:
+ return 38004
+ case nproofs <= 8192:
+ return 40980
+ default:
+ panic("too many proofs")
+ }
+}
diff --git a/cmd/lotus-sim/simulation/node.go b/cmd/lotus-sim/simulation/node.go
new file mode 100644
index 00000000000..5b8bf2bf91f
--- /dev/null
+++ b/cmd/lotus-sim/simulation/node.go
@@ -0,0 +1,241 @@
+package simulation
+
+import (
+ "context"
+ "strings"
+
+ "go.uber.org/multierr"
+ "golang.org/x/xerrors"
+
+ "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/query"
+
+ "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/chain/stmgr"
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/stages"
+ "github.com/filecoin-project/lotus/node/repo"
+)
+
+// Node represents the local lotus node, or at least the part of it we care about.
+type Node struct {
+ repo repo.LockedRepo
+ Blockstore blockstore.Blockstore
+ MetadataDS datastore.Batching
+ Chainstore *store.ChainStore
+}
+
+// OpenNode opens the local lotus node for writing. This will fail if the node is online.
+func OpenNode(ctx context.Context, path string) (*Node, error) {
+ r, err := repo.NewFS(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewNode(ctx, r)
+}
+
+// NewNode constructs a new node from the given repo.
+func NewNode(ctx context.Context, r repo.Repo) (nd *Node, _err error) {
+ lr, err := r.Lock(repo.FullNode)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if _err != nil {
+ _ = lr.Close()
+ }
+ }()
+
+ bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
+ if err != nil {
+ return nil, err
+ }
+
+ ds, err := lr.Datastore(ctx, "/metadata")
+ if err != nil {
+ return nil, err
+ }
+ return &Node{
+ repo: lr,
+ Chainstore: store.NewChainStore(bs, bs, ds, vm.Syscalls(mock.Verifier), nil),
+ MetadataDS: ds,
+ Blockstore: bs,
+ }, err
+}
+
+// Close cleanly closes the repo. Please call this on shutdown to make sure everything is flushed.
+func (nd *Node) Close() error {
+ if nd.repo != nil {
+ return nd.repo.Close()
+ }
+ return nil
+}
+
+// LoadSim loads the simulation with the given name, restoring its head, start tipset, and config.
+func (nd *Node) LoadSim(ctx context.Context, name string) (*Simulation, error) {
+ stages, err := stages.DefaultPipeline()
+ if err != nil {
+ return nil, err
+ }
+ sim := &Simulation{
+ Node: nd,
+ name: name,
+ stages: stages,
+ }
+
+ sim.head, err = sim.loadNamedTipSet("head")
+ if err != nil {
+ return nil, err
+ }
+ sim.start, err = sim.loadNamedTipSet("start")
+ if err != nil {
+ return nil, err
+ }
+
+ err = sim.loadConfig()
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load config for simulation %s: %w", name, err)
+ }
+
+ us, err := sim.config.upgradeSchedule()
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create upgrade schedule for simulation %s: %w", name, err)
+ }
+ sim.StateManager, err = stmgr.NewStateManagerWithUpgradeSchedule(nd.Chainstore, us)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create state manager for simulation %s: %w", name, err)
+ }
+ return sim, nil
+}
+
+// Create creates a new simulation.
+//
+// - This will fail if a simulation already exists with the given name.
+// - Name must not contain a '/'.
+func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) (*Simulation, error) {
+ if strings.Contains(name, "/") {
+ return nil, xerrors.Errorf("simulation name %q cannot contain a '/'", name)
+ }
+ stages, err := stages.DefaultPipeline()
+ if err != nil {
+ return nil, err
+ }
+ sim := &Simulation{
+ name: name,
+ Node: nd,
+ StateManager: stmgr.NewStateManager(nd.Chainstore),
+ stages: stages,
+ }
+ if has, err := nd.MetadataDS.Has(sim.key("head")); err != nil {
+ return nil, err
+ } else if has {
+ return nil, xerrors.Errorf("simulation named %s already exists", name)
+ }
+
+ if err := sim.storeNamedTipSet("start", head); err != nil {
+ return nil, xerrors.Errorf("failed to set simulation start: %w", err)
+ }
+
+ if err := sim.SetHead(head); err != nil {
+ return nil, err
+ }
+
+ return sim, nil
+}
+
+// ListSims lists all simulations.
+func (nd *Node) ListSims(ctx context.Context) ([]string, error) {
+ prefix := simulationPrefix.ChildString("head").String()
+ items, err := nd.MetadataDS.Query(query.Query{
+ Prefix: prefix,
+ KeysOnly: true,
+ Orders: []query.Order{query.OrderByKey{}},
+ })
+ if err != nil {
+ return nil, xerrors.Errorf("failed to list simulations: %w", err)
+ }
+
+ defer func() { _ = items.Close() }()
+
+ var names []string
+ for {
+ select {
+ case result, ok := <-items.Next():
+ if !ok {
+ return names, nil
+ }
+ if result.Error != nil {
+ return nil, xerrors.Errorf("failed to retrieve next simulation: %w", result.Error)
+ }
+ names = append(names, strings.TrimPrefix(result.Key, prefix+"/"))
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ }
+}
+
+var simFields = []string{"head", "start", "config"}
+
+// DeleteSim deletes a simulation and all related metadata.
+//
+// NOTE: This function does not delete associated messages, blocks, or chain state.
+func (nd *Node) DeleteSim(ctx context.Context, name string) error {
+ var err error
+ for _, field := range simFields {
+ key := simulationPrefix.ChildString(field).ChildString(name)
+ err = multierr.Append(err, nd.MetadataDS.Delete(key))
+ }
+ return err
+}
+
+// CopySim copies a simulation.
+func (nd *Node) CopySim(ctx context.Context, oldName, newName string) error {
+ if strings.Contains(newName, "/") {
+ return xerrors.Errorf("simulation name %q cannot contain a '/'", newName)
+ }
+ if strings.Contains(oldName, "/") {
+ return xerrors.Errorf("simulation name %q cannot contain a '/'", oldName)
+ }
+
+ values := make(map[string][]byte)
+ for _, field := range simFields {
+ key := simulationPrefix.ChildString(field).ChildString(oldName)
+ value, err := nd.MetadataDS.Get(key)
+ if err == datastore.ErrNotFound {
+ continue
+ } else if err != nil {
+ return err
+ }
+ values[field] = value
+ }
+
+ if _, ok := values["head"]; !ok {
+ return xerrors.Errorf("simulation named %s not found", oldName)
+ }
+
+ for _, field := range simFields {
+ key := simulationPrefix.ChildString(field).ChildString(newName)
+ var err error
+ if value, ok := values[field]; ok {
+ err = nd.MetadataDS.Put(key, value)
+ } else {
+ err = nd.MetadataDS.Delete(key)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// RenameSim renames a simulation.
+func (nd *Node) RenameSim(ctx context.Context, oldName, newName string) error {
+ if err := nd.CopySim(ctx, oldName, newName); err != nil {
+ return err
+ }
+ return nd.DeleteSim(ctx, oldName)
+}
diff --git a/cmd/lotus-sim/simulation/simulation.go b/cmd/lotus-sim/simulation/simulation.go
new file mode 100644
index 00000000000..d91d30edaf2
--- /dev/null
+++ b/cmd/lotus-sim/simulation/simulation.go
@@ -0,0 +1,408 @@
+package simulation
+
+import (
+ "context"
+ "encoding/json"
+ "runtime"
+
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/network"
+ "github.com/ipfs/go-cid"
+ "github.com/ipfs/go-datastore"
+ logging "github.com/ipfs/go-log/v2"
+
+ blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
+
+ "github.com/filecoin-project/lotus/chain/stmgr"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/stages"
+)
+
+var log = logging.Logger("simulation")
+
+// config is the simulation's config, persisted to the local metadata store and loaded on start.
+//
+// See Simulation.loadConfig and Simulation.saveConfig.
+type config struct {
+ Upgrades map[network.Version]abi.ChainEpoch
+}
+
+// upgradeSchedule constructs an stmgr.StateManager upgrade schedule, overriding any network upgrade
+// epochs as specified in the config.
+func (c *config) upgradeSchedule() (stmgr.UpgradeSchedule, error) {
+ upgradeSchedule := stmgr.DefaultUpgradeSchedule()
+ expected := make(map[network.Version]struct{}, len(c.Upgrades))
+ for nv := range c.Upgrades {
+ expected[nv] = struct{}{}
+ }
+
+ // Update network upgrade epochs.
+ newUpgradeSchedule := upgradeSchedule[:0]
+ for _, upgrade := range upgradeSchedule {
+ if height, ok := c.Upgrades[upgrade.Network]; ok {
+ delete(expected, upgrade.Network)
+ if height < 0 {
+ continue
+ }
+ upgrade.Height = height
+ }
+ newUpgradeSchedule = append(newUpgradeSchedule, upgrade)
+ }
+
+ // Make sure we didn't try to configure an unknown network version.
+ if len(expected) > 0 {
+ missing := make([]network.Version, 0, len(expected))
+ for nv := range expected {
+ missing = append(missing, nv)
+ }
+ return nil, xerrors.Errorf("unknown network versions %v in config", missing)
+ }
+
+ // Finally, validate it. This ensures we don't change the order of the upgrade or anything
+ // like that.
+ if err := newUpgradeSchedule.Validate(); err != nil {
+ return nil, err
+ }
+ return newUpgradeSchedule, nil
+}
+
+// Simulation specifies a lotus-sim simulation.
+type Simulation struct {
+ Node *Node
+ StateManager *stmgr.StateManager
+
+ name string
+ config config
+ start *types.TipSet
+
+ // head
+ head *types.TipSet
+
+ stages []stages.Stage
+}
+
+// loadConfig loads a simulation's config from the datastore. This must be called on startup and may
+// be called to restore the config from-disk.
+func (sim *Simulation) loadConfig() error {
+ configBytes, err := sim.Node.MetadataDS.Get(sim.key("config"))
+ if err == nil {
+ err = json.Unmarshal(configBytes, &sim.config)
+ }
+ switch err {
+ case nil:
+ case datastore.ErrNotFound:
+ sim.config = config{}
+ default:
+ return xerrors.Errorf("failed to load config: %w", err)
+ }
+ return nil
+}
+
+// saveConfig saves the current config to the datastore. This must be called whenever the config is
+// changed.
+func (sim *Simulation) saveConfig() error {
+ buf, err := json.Marshal(sim.config)
+ if err != nil {
+ return err
+ }
+ return sim.Node.MetadataDS.Put(sim.key("config"), buf)
+}
+
+var simulationPrefix = datastore.NewKey("/simulation")
+
+// key returns the key in the form /simulation/&lt;subkey&gt;/&lt;simulation-name&gt;. For example,
+// /simulation/head/default.
+func (sim *Simulation) key(subkey string) datastore.Key {
+ return simulationPrefix.ChildString(subkey).ChildString(sim.name)
+}
+
+// loadNamedTipSet loads the tipset with the given name (for this simulation).
+func (sim *Simulation) loadNamedTipSet(name string) (*types.TipSet, error) {
+ tskBytes, err := sim.Node.MetadataDS.Get(sim.key(name))
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load tipset %s/%s: %w", sim.name, name, err)
+ }
+ tsk, err := types.TipSetKeyFromBytes(tskBytes)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to parse tipset %v (%s/%s): %w", tskBytes, sim.name, name, err)
+ }
+ ts, err := sim.Node.Chainstore.LoadTipSet(tsk)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to load tipset %s (%s/%s): %w", tsk, sim.name, name, err)
+ }
+ return ts, nil
+}
+
+// storeNamedTipSet stores the tipset at name (relative to the simulation).
+func (sim *Simulation) storeNamedTipSet(name string, ts *types.TipSet) error {
+ if err := sim.Node.MetadataDS.Put(sim.key(name), ts.Key().Bytes()); err != nil {
+ return xerrors.Errorf("failed to store tipset (%s/%s): %w", sim.name, name, err)
+ }
+ return nil
+}
+
+// GetHead returns the current simulation head.
+func (sim *Simulation) GetHead() *types.TipSet {
+ return sim.head
+}
+
+// GetStart returns the simulation's start tipset.
+func (sim *Simulation) GetStart() *types.TipSet {
+ return sim.start
+}
+
+// GetNetworkVersion returns the current network version for the simulation.
+func (sim *Simulation) GetNetworkVersion() network.Version {
+ return sim.StateManager.GetNtwkVersion(context.TODO(), sim.head.Height())
+}
+
+// SetHead updates the current head of the simulation and stores it in the metadata store. This is
+// called for every Simulation.Step.
+func (sim *Simulation) SetHead(head *types.TipSet) error {
+ if err := sim.storeNamedTipSet("head", head); err != nil {
+ return err
+ }
+ sim.head = head
+ return nil
+}
+
+// Name returns the simulation's name.
+func (sim *Simulation) Name() string {
+ return sim.name
+}
+
+// SetUpgradeHeight sets the height of the given network version change (and saves the config).
+//
+// This fails if the specified epoch has already passed or the new upgrade schedule is invalid.
+func (sim *Simulation) SetUpgradeHeight(nv network.Version, epoch abi.ChainEpoch) (_err error) {
+ if epoch <= sim.head.Height() {
+ return xerrors.Errorf("cannot set upgrade height in the past (%d <= %d)", epoch, sim.head.Height())
+ }
+
+ if sim.config.Upgrades == nil {
+ sim.config.Upgrades = make(map[network.Version]abi.ChainEpoch, 1)
+ }
+
+ sim.config.Upgrades[nv] = epoch
+ defer func() {
+ if _err != nil {
+ // try to restore the old config on error.
+ _ = sim.loadConfig()
+ }
+ }()
+
+ newUpgradeSchedule, err := sim.config.upgradeSchedule()
+ if err != nil {
+ return err
+ }
+ sm, err := stmgr.NewStateManagerWithUpgradeSchedule(sim.Node.Chainstore, newUpgradeSchedule)
+ if err != nil {
+ return err
+ }
+ err = sim.saveConfig()
+ if err != nil {
+ return err
+ }
+
+ sim.StateManager = sm
+ return nil
+}
+
+// ListUpgrades returns any future network upgrades.
+func (sim *Simulation) ListUpgrades() (stmgr.UpgradeSchedule, error) {
+ upgrades, err := sim.config.upgradeSchedule()
+ if err != nil {
+ return nil, err
+ }
+ var pending stmgr.UpgradeSchedule
+ for _, upgrade := range upgrades {
+ if upgrade.Height < sim.head.Height() {
+ continue
+ }
+ pending = append(pending, upgrade)
+ }
+ return pending, nil
+}
+
+type AppliedMessage struct {
+ types.Message
+ types.MessageReceipt
+}
+
+// Walk walks the simulation's chain from the current head back to the first tipset.
+func (sim *Simulation) Walk(
+ ctx context.Context,
+ lookback int64,
+ cb func(sm *stmgr.StateManager,
+ ts *types.TipSet,
+ stCid cid.Cid,
+ messages []*AppliedMessage) error,
+) error {
+ store := sim.Node.Chainstore.ActorStore(ctx)
+ minEpoch := sim.start.Height()
+ if lookback != 0 {
+ minEpoch = sim.head.Height() - abi.ChainEpoch(lookback)
+ }
+
+ // Given that loading messages and receipts can be a little bit slow, we do this in parallel.
+ //
+ // 1. We spin up some number of workers.
+ // 2. We hand tipsets to workers in round-robin order.
+ // 3. We pull "resolved" tipsets in the same round-robin order.
+ // 4. We serially call the callback in reverse-chain order.
+ //
+ // We have a buffer of size 1 for both resolved tipsets and unresolved tipsets. This should
+ // ensure that we never block unnecessarily.
+
+ type work struct {
+ ts *types.TipSet
+ stCid cid.Cid
+ recCid cid.Cid
+ }
+ type result struct {
+ ts *types.TipSet
+ stCid cid.Cid
+ messages []*AppliedMessage
+ }
+
+ // This is more disk bound than CPU bound, but eh...
+ workerCount := runtime.NumCPU() * 2
+
+ workQs := make([]chan *work, workerCount)
+ resultQs := make([]chan *result, workerCount)
+
+ for i := range workQs {
+ workQs[i] = make(chan *work, 1)
+ }
+
+ for i := range resultQs {
+ resultQs[i] = make(chan *result, 1)
+ }
+
+ grp, ctx := errgroup.WithContext(ctx)
+
+ // Walk the chain and fire off work items.
+ grp.Go(func() error {
+ ts := sim.head
+ stCid, recCid, err := sim.StateManager.TipSetState(ctx, ts)
+ if err != nil {
+ return err
+ }
+ i := 0
+ for ts.Height() > minEpoch {
+ if err := ctx.Err(); err != nil {
+ return ctx.Err()
+ }
+
+ select {
+ case workQs[i] <- &work{ts, stCid, recCid}:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ stCid = ts.MinTicketBlock().ParentStateRoot
+ recCid = ts.MinTicketBlock().ParentMessageReceipts
+ ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents())
+ if err != nil {
+ return xerrors.Errorf("loading parent: %w", err)
+ }
+ i = (i + 1) % workerCount
+ }
+ for _, q := range workQs {
+ close(q)
+ }
+ return nil
+ })
+
+ // Spin up one worker per queue pair.
+ for i := 0; i < workerCount; i++ {
+ workQ := workQs[i]
+ resultQ := resultQs[i]
+ grp.Go(func() error {
+ for {
+ if err := ctx.Err(); err != nil {
+ return ctx.Err()
+ }
+
+ var job *work
+ var ok bool
+ select {
+ case job, ok = <-workQ:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ if !ok {
+ break
+ }
+
+ msgs, err := sim.Node.Chainstore.MessagesForTipset(job.ts)
+ if err != nil {
+ return err
+ }
+
+ recs, err := blockadt.AsArray(store, job.recCid)
+ if err != nil {
+ return xerrors.Errorf("amt load: %w", err)
+ }
+ applied := make([]*AppliedMessage, len(msgs))
+ var rec types.MessageReceipt
+ err = recs.ForEach(&rec, func(i int64) error {
+ applied[i] = &AppliedMessage{
+ Message: *msgs[i].VMMessage(),
+ MessageReceipt: rec,
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ select {
+ case resultQ <- &result{
+ ts: job.ts,
+ stCid: job.stCid,
+ messages: applied,
+ }:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ close(resultQ)
+ return nil
+ })
+ }
+
+ // Process results in the same order we enqueued them.
+ grp.Go(func() error {
+ qs := resultQs
+ for len(qs) > 0 {
+ newQs := qs[:0]
+ for _, q := range qs {
+ if err := ctx.Err(); err != nil {
+ return ctx.Err()
+ }
+ select {
+ case r, ok := <-q:
+ if !ok {
+ continue
+ }
+ err := cb(sim.StateManager, r.ts, r.stCid, r.messages)
+ if err != nil {
+ return err
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ newQs = append(newQs, q)
+ }
+ qs = newQs
+ }
+ return nil
+ })
+
+ // Wait for everything to finish.
+ return grp.Wait()
+}
diff --git a/cmd/lotus-sim/simulation/stages/actor_iter.go b/cmd/lotus-sim/simulation/stages/actor_iter.go
new file mode 100644
index 00000000000..b2c14ebdb0d
--- /dev/null
+++ b/cmd/lotus-sim/simulation/stages/actor_iter.go
@@ -0,0 +1,38 @@
+package stages
+
+import (
+ "math/rand"
+
+ "github.com/filecoin-project/go-address"
+)
+
+// actorIter is a simple persistent iterator that loops over a set of actors.
+type actorIter struct {
+ actors []address.Address // rotation of actor addresses; mutated by shuffle/add
+ offset int // index into actors of the next address to hand out
+}
+
+// shuffle randomly permutes the set of actors.
+func (p *actorIter) shuffle() {
+ rand.Shuffle(len(p.actors), func(i, j int) { // global math/rand source; deterministic unless seeded elsewhere
+ p.actors[i], p.actors[j] = p.actors[j], p.actors[i]
+ })
+}
+
+// next returns the next actor's address and advances the iterator, wrapping around forever.
+func (p *actorIter) next() address.Address {
+ next := p.actors[p.offset] // panics on an empty iterator; callers guard with len() first
+ p.offset++
+ p.offset %= len(p.actors) // wrap: the iterator cycles over the set indefinitely
+ return next
+}
+
+// add adds a new actor to the iterator.
+func (p *actorIter) add(addr address.Address) {
+ p.actors = append(p.actors, addr) // appended at the end; the set is not re-shuffled
+}
+
+// len reports how many actors are currently in the rotation.
+func (p *actorIter) len() int {
+ return len(p.actors)
+}
diff --git a/cmd/lotus-sim/simulation/stages/commit_queue.go b/cmd/lotus-sim/simulation/stages/commit_queue.go
new file mode 100644
index 00000000000..d625dedb65f
--- /dev/null
+++ b/cmd/lotus-sim/simulation/stages/commit_queue.go
@@ -0,0 +1,200 @@
+package stages
+
+import (
+ "sort"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+)
+
+// pendingCommitTracker tracks pending commits per-miner for a single epoch.
+type pendingCommitTracker map[address.Address]minerPendingCommits
+
+// minerPendingCommits tracks a miner's pending commits during a single epoch (grouped by seal proof type).
+type minerPendingCommits map[abi.RegisteredSealProof][]abi.SectorNumber
+
+// finish marks count sectors of the given proof type as "prove-committed".
+func (m minerPendingCommits) finish(proof abi.RegisteredSealProof, count int) {
+ snos := m[proof]
+ if len(snos) < count {
+ panic("not enough sector numbers to finish") // callers must never over-report progress
+ } else if len(snos) == count {
+ delete(m, proof)
+ } else {
+ m[proof] = snos[count:] // sectors are consumed from the front, in FIFO order
+ }
+}
+
+// empty returns true if there are no pending commits for this miner.
+func (m minerPendingCommits) empty() bool {
+ return len(m) == 0 // a nil map is also empty, so this is safe on missing entries
+}
+
+// count returns the total number of pending commits across all seal proof types.
+func (m minerPendingCommits) count() int {
+ count := 0
+ for _, snos := range m {
+ count += len(snos)
+ }
+ return count
+}
+
+// commitQueue is used to track pending prove-commits.
+//
+// Miners are processed in round-robin where _all_ commits from a given miner are finished before
+// moving on to the next. This is designed to maximize batching.
+type commitQueue struct {
+ minerQueue []address.Address // round-robin order in which queue[0] is drained
+ queue []pendingCommitTracker // queue[i] holds commits provable at epoch offset+i
+ offset abi.ChainEpoch // epoch that queue[0] corresponds to
+}
+
+// ready returns the number of prove-commits ready to be proven at the current epoch. Useful for logging.
+func (q *commitQueue) ready() int {
+ if len(q.queue) == 0 {
+ return 0
+ }
+ count := 0
+ for _, pending := range q.queue[0] { // queue[0] is the bucket for the current offset epoch
+ count += pending.count()
+ }
+ return count
+}
+
+// nextMiner returns the next miner to be proved and the set of pending prove commits for that
+// miner. When some number of sectors have successfully been proven, call "finish" so we don't try
+// to prove them again.
+func (q *commitQueue) nextMiner() (address.Address, minerPendingCommits, bool) {
+ if len(q.queue) == 0 {
+ return address.Undef, nil, false
+ }
+ next := q.queue[0]
+
+ // Go through the queue and find the first non-empty batch.
+ for len(q.minerQueue) > 0 {
+ addr := q.minerQueue[0]
+ q.minerQueue = q.minerQueue[1:] // destructively consumes the round-robin queue; advanceEpoch refills it
+ pending := next[addr]
+ if !pending.empty() {
+ return addr, pending, true
+ }
+ delete(next, addr) // nothing pending at this epoch for this miner; drop the entry
+ }
+
+ return address.Undef, nil, false
+}
+
+// advanceEpoch will advance to the next epoch. If some sectors were left unproven in the current
+// epoch, they will be "prepended" into the next epoch's sector set.
+func (q *commitQueue) advanceEpoch(epoch abi.ChainEpoch) {
+ if epoch < q.offset {
+ panic("cannot roll epoch backwards")
+ }
+ // Now we "roll forwards", merging each epoch we advance over with the next.
+ for len(q.queue) > 1 && q.offset < epoch {
+ curr := q.queue[0]
+ q.queue[0] = nil // drop the head reference so the tracker can be collected
+ q.queue = q.queue[1:]
+ q.offset++
+
+ next := q.queue[0]
+
+ // Cleanup empty entries.
+ for addr, pending := range curr {
+ if pending.empty() {
+ delete(curr, addr)
+ }
+ }
+
+ // If the entire level is actually empty, just skip to the next one.
+ if len(curr) == 0 {
+ continue
+ }
+
+ // Otherwise, merge the next into the current.
+ for addr, nextPending := range next {
+ currPending := curr[addr]
+ if currPending.empty() { // nil maps report empty, so an absent miner takes this branch too
+ curr[addr] = nextPending
+ continue
+ }
+ for ty, nextSnos := range nextPending {
+ currSnos := currPending[ty]
+ if len(currSnos) == 0 {
+ currPending[ty] = nextSnos
+ continue
+ }
+ currPending[ty] = append(currSnos, nextSnos...) // older (unproven) sectors stay at the front
+ }
+ }
+ // Now replace next with the merged curr.
+ q.queue[0] = curr
+ }
+ q.offset = epoch
+ if len(q.queue) == 0 {
+ return
+ }
+
+ next := q.queue[0]
+ seenMiners := make(map[address.Address]struct{}, len(q.minerQueue))
+ for _, addr := range q.minerQueue {
+ seenMiners[addr] = struct{}{}
+ }
+
+ // Find the new miners not already in the queue.
+ offset := len(q.minerQueue) // remember where the new (yet-unsorted) miners start
+ for addr, pending := range next {
+ if pending.empty() {
+ delete(next, addr)
+ continue
+ }
+ if _, ok := seenMiners[addr]; ok {
+ continue
+ }
+ q.minerQueue = append(q.minerQueue, addr)
+ }
+
+ // Sort the new miners only.
+ newMiners := q.minerQueue[offset:]
+ sort.Slice(newMiners, func(i, j int) bool {
+ // eh, escape analysis should be fine here...
+ return string(newMiners[i].Bytes()) < string(newMiners[j].Bytes())
+ })
+}
+
+// enqueueProveCommit enqueues prove-commit for the given pre-commit for the given miner.
+func (q *commitQueue) enqueueProveCommit(addr address.Address, preCommitEpoch abi.ChainEpoch, info miner.SectorPreCommitInfo) error {
+ // Compute the epoch at which we can start trying to commit.
+ preCommitDelay := policy.GetPreCommitChallengeDelay()
+ minCommitEpoch := preCommitEpoch + preCommitDelay + 1
+
+ // Figure out the offset in the queue.
+ i := int(minCommitEpoch - q.offset)
+ if i < 0 {
+ i = 0 // already provable; bucket it at the current epoch
+ }
+
+ // Expand capacity and insert.
+ if cap(q.queue) <= i {
+ pc := make([]pendingCommitTracker, i+1, i+1+int(preCommitDelay)*2) // cap must cover len; a fixed cap of preCommitDelay*2 panics once i+1 exceeds it
+ copy(pc, q.queue)
+ q.queue = pc
+ } else if len(q.queue) <= i {
+ q.queue = q.queue[:i+1]
+ }
+ tracker := q.queue[i]
+ if tracker == nil {
+ tracker = make(pendingCommitTracker)
+ q.queue[i] = tracker
+ }
+ minerPending := tracker[addr]
+ if minerPending == nil {
+ minerPending = make(minerPendingCommits)
+ tracker[addr] = minerPending
+ }
+ minerPending[info.SealProof] = append(minerPending[info.SealProof], info.SectorNumber)
+ return nil
+}
diff --git a/cmd/lotus-sim/simulation/stages/commit_queue_test.go b/cmd/lotus-sim/simulation/stages/commit_queue_test.go
new file mode 100644
index 00000000000..8ab05250efb
--- /dev/null
+++ b/cmd/lotus-sim/simulation/stages/commit_queue_test.go
@@ -0,0 +1,128 @@
+package stages
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+)
+
+func TestCommitQueue(t *testing.T) {
+ var q commitQueue
+ addr1, err := address.NewIDAddress(1000)
+ require.NoError(t, err)
+ proofType := abi.RegisteredSealProof_StackedDrg64GiBV1_1
+ require.NoError(t, q.enqueueProveCommit(addr1, 0, miner.SectorPreCommitInfo{
+ SealProof: proofType,
+ SectorNumber: 0,
+ }))
+ require.NoError(t, q.enqueueProveCommit(addr1, 0, miner.SectorPreCommitInfo{
+ SealProof: proofType,
+ SectorNumber: 1,
+ }))
+ require.NoError(t, q.enqueueProveCommit(addr1, 1, miner.SectorPreCommitInfo{
+ SealProof: proofType,
+ SectorNumber: 2,
+ }))
+ require.NoError(t, q.enqueueProveCommit(addr1, 1, miner.SectorPreCommitInfo{
+ SealProof: proofType,
+ SectorNumber: 3,
+ }))
+ require.NoError(t, q.enqueueProveCommit(addr1, 3, miner.SectorPreCommitInfo{
+ SealProof: proofType,
+ SectorNumber: 4,
+ }))
+ require.NoError(t, q.enqueueProveCommit(addr1, 4, miner.SectorPreCommitInfo{
+ SealProof: proofType,
+ SectorNumber: 5,
+ }))
+ require.NoError(t, q.enqueueProveCommit(addr1, 6, miner.SectorPreCommitInfo{
+ SealProof: proofType,
+ SectorNumber: 6,
+ }))
+
+ epoch := abi.ChainEpoch(0)
+ q.advanceEpoch(epoch)
+ _, _, ok := q.nextMiner() // nothing provable yet: commits wait preCommitDelay+1 epochs
+ require.False(t, ok)
+
+ epoch += policy.GetPreCommitChallengeDelay()
+ q.advanceEpoch(epoch)
+ _, _, ok = q.nextMiner() // still one epoch too early
+ require.False(t, ok)
+
+ // delay+1: sectors 0 and 1 (pre-committed at epoch 0) become provable
+ epoch++
+ q.advanceEpoch(epoch)
+ addr, sectors, ok := q.nextMiner()
+ require.True(t, ok)
+ require.Equal(t, sectors.count(), 2)
+ require.Equal(t, addr, addr1)
+ sectors.finish(proofType, 1)
+ require.Equal(t, sectors.count(), 1)
+ require.EqualValues(t, []abi.SectorNumber{1}, sectors[proofType])
+
+ // delay+2: leftover sector 1 merges ahead of sectors 2 and 3 (pre-committed at epoch 1)
+ epoch++
+ q.advanceEpoch(epoch)
+ addr, sectors, ok = q.nextMiner()
+ require.True(t, ok)
+ require.Equal(t, addr, addr1)
+ require.Equal(t, sectors.count(), 3)
+ require.EqualValues(t, []abi.SectorNumber{1, 2, 3}, sectors[proofType])
+ sectors.finish(proofType, 3)
+ require.Equal(t, sectors.count(), 0)
+
+ // delay+3: nothing was pre-committed at epoch 2 and nothing is left over
+ epoch++
+ q.advanceEpoch(epoch)
+ _, _, ok = q.nextMiner()
+ require.False(t, ok)
+
+ // delay+4: sector 4 (pre-committed at epoch 3) becomes provable
+ epoch++
+ q.advanceEpoch(epoch)
+ _, sectors, ok = q.nextMiner()
+ require.True(t, ok)
+ require.Equal(t, sectors.count(), 1)
+ require.EqualValues(t, []abi.SectorNumber{4}, sectors[proofType])
+
+ // delay+5: unproven sector 4 merges ahead of sector 5 (pre-committed at epoch 4)
+ epoch++
+ q.advanceEpoch(epoch)
+ _, sectors, ok = q.nextMiner()
+ require.True(t, ok)
+ require.Equal(t, sectors.count(), 2)
+ require.EqualValues(t, []abi.SectorNumber{4, 5}, sectors[proofType])
+
+ // delay+6: nothing new pre-committed; sectors 4 and 5 are still pending
+ epoch++
+ q.advanceEpoch(epoch)
+ _, sectors, ok = q.nextMiner()
+ require.True(t, ok)
+ require.Equal(t, sectors.count(), 2)
+ require.EqualValues(t, []abi.SectorNumber{4, 5}, sectors[proofType])
+ sectors.finish(proofType, 1)
+ require.EqualValues(t, []abi.SectorNumber{5}, sectors[proofType])
+
+ // delay+7: leftover sector 5 merges ahead of sector 6 (pre-committed at epoch 6)
+ epoch++
+ q.advanceEpoch(epoch)
+ _, sectors, ok = q.nextMiner()
+ require.True(t, ok)
+ require.Equal(t, sectors.count(), 2)
+ require.EqualValues(t, []abi.SectorNumber{5, 6}, sectors[proofType])
+
+ // delay+9: advancing two epochs at once keeps the unproven sectors 5 and 6 pending
+ epoch += 2
+ q.advanceEpoch(epoch)
+ _, sectors, ok = q.nextMiner()
+ require.True(t, ok)
+ require.Equal(t, sectors.count(), 2)
+ require.EqualValues(t, []abi.SectorNumber{5, 6}, sectors[proofType])
+}
diff --git a/cmd/lotus-sim/simulation/stages/funding_stage.go b/cmd/lotus-sim/simulation/stages/funding_stage.go
new file mode 100644
index 00000000000..f57f852931c
--- /dev/null
+++ b/cmd/lotus-sim/simulation/stages/funding_stage.go
@@ -0,0 +1,318 @@
+package stages
+
+import (
+ "bytes"
+ "context"
+ "sort"
+ "time"
+
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/exitcode"
+
+ "github.com/filecoin-project/lotus/chain/actors/aerrors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
+)
+
+var (
+ TargetFunds = abi.TokenAmount(types.MustParseFIL("1000FIL"))
+ MinimumFunds = abi.TokenAmount(types.MustParseFIL("100FIL"))
+)
+
+type FundingStage struct {
+ fundAccount address.Address // account that receives taxes and pays out top-ups
+ taxMin abi.TokenAmount // minimum balance an actor must hold to be worth taxing
+ minFunds, maxFunds abi.TokenAmount // refill trigger and refill ceiling for fundAccount
+}
+
+func NewFundingStage() (*FundingStage, error) {
+ // TODO: make all this configurable.
+ addr, err := address.NewIDAddress(100)
+ if err != nil {
+ return nil, err
+ }
+ return &FundingStage{
+ fundAccount: addr, // ID address 100 — assumed to exist in the simulated state; TODO confirm against genesis
+ taxMin: abi.TokenAmount(types.MustParseFIL("1000FIL")),
+ minFunds: abi.TokenAmount(types.MustParseFIL("1000000FIL")),
+ maxFunds: abi.TokenAmount(types.MustParseFIL("100000000FIL")),
+ }, nil
+}
+
+func (*FundingStage) Name() string {
+ return "funding" // implements Stage
+}
+
+func (fs *FundingStage) Fund(bb *blockbuilder.BlockBuilder, target address.Address) error {
+ return fs.fund(bb, target, 0) // shift 0: send the base TargetFunds amount
+}
+
+// SendAndFund "packs" the given message, funding the actor if necessary. It:
+//
+// 1. Tries to send the given message.
+// 2. If that fails, it checks to see if the exit code was ErrInsufficientFunds.
+// 3. If so, it funds the recipient from the fund account (doubling the amount on each
+// retry — see fund) and re-tries the message.
+func (fs *FundingStage) SendAndFund(bb *blockbuilder.BlockBuilder, msg *types.Message) (res *types.MessageReceipt, err error) {
+ for i := 0; i < 10; i++ { // bounded retries; fund caps the escalation at shift 8
+ res, err = bb.PushMessage(msg)
+ if err == nil {
+ return res, nil
+ }
+ aerr, ok := err.(aerrors.ActorError)
+ if !ok || aerr.RetCode() != exitcode.ErrInsufficientFunds {
+ return nil, err
+ }
+
+ // Ok, insufficient funds. Let's fund this miner and try again.
+ if err := fs.fund(bb, msg.To, i); err != nil {
+ if !blockbuilder.IsOutOfGas(err) {
+ err = xerrors.Errorf("failed to fund %s: %w", msg.To, err)
+ }
+ return nil, err
+ }
+ }
+ return res, err
+}
+
+// fund funds the target actor with 'TargetFunds << shift' FIL. The "shift" parameter allows us to
+// keep doubling the amount until the intended operation succeeds.
+func (fs *FundingStage) fund(bb *blockbuilder.BlockBuilder, target address.Address, shift int) error {
+ amt := TargetFunds
+ if shift > 0 {
+ if shift >= 8 {
+ shift = 8 // cap the escalation at TargetFunds << 8 (256x)
+ }
+ amt = big.Lsh(amt, uint(shift))
+ }
+ _, err := bb.PushMessage(&types.Message{
+ From: fs.fundAccount,
+ To: target,
+ Value: amt,
+ Method: builtin.MethodSend,
+ })
+ return err
+}
+
+func (fs *FundingStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) { // taxes rich accounts/multisigs to refill the fund account when it runs low
+ st := bb.StateTree()
+ fundAccActor, err := st.GetActor(fs.fundAccount)
+ if err != nil {
+ return err
+ }
+ if fs.minFunds.LessThan(fundAccActor.Balance) {
+ return nil // fund account is still flush; nothing to do
+ }
+
+ // Ok, we're going to go fund this thing.
+ start := time.Now()
+
+ type actor struct {
+ types.Actor
+ Address address.Address
+ }
+
+ var targets []*actor
+ err = st.ForEach(func(addr address.Address, act *types.Actor) error {
+ // Don't steal from ourselves!
+ if addr == fs.fundAccount {
+ return nil
+ }
+ if act.Balance.LessThan(fs.taxMin) {
+ return nil
+ }
+ if !(builtin.IsAccountActor(act.Code) || builtin.IsMultisigActor(act.Code)) {
+ return nil
+ }
+ targets = append(targets, &actor{*act, addr})
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ balance := fundAccActor.Balance.Copy()
+
+ sort.Slice(targets, func(i, j int) bool {
+ return targets[i].Balance.GreaterThan(targets[j].Balance) // richest first
+ })
+
+ store := bb.ActorStore()
+ epoch := bb.Height()
+ actorsVersion := bb.ActorsVersion()
+
+ var accounts, multisigs int
+ defer func() {
+ if _err != nil {
+ return
+ }
+ bb.L().Infow("finished funding the simulation",
+ "duration", time.Since(start),
+ "targets", len(targets),
+ "epoch", epoch,
+ "new-balance", types.FIL(balance),
+ "old-balance", types.FIL(fundAccActor.Balance),
+ "multisigs", multisigs,
+ "accounts", accounts,
+ )
+ }()
+
+ for _, actor := range targets {
+ switch {
+ case builtin.IsAccountActor(actor.Code):
+ if _, err := bb.PushMessage(&types.Message{
+ From: actor.Address,
+ To: fs.fundAccount,
+ Value: actor.Balance,
+ }); blockbuilder.IsOutOfGas(err) {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ accounts++
+ case builtin.IsMultisigActor(actor.Code):
+ msigState, err := multisig.Load(store, &actor.Actor)
+ if err != nil {
+ return err
+ }
+
+ threshold, err := msigState.Threshold()
+ if err != nil {
+ return err
+ }
+
+ if threshold > 16 {
+ bb.L().Debugw("ignoring multisig with high threshold",
+ "multisig", actor.Address,
+ "threshold", threshold,
+ "max", 16,
+ )
+ continue
+ }
+
+ locked, err := msigState.LockedBalance(epoch)
+ if err != nil {
+ return err
+ }
+
+ if locked.LessThan(fs.taxMin) {
+ continue // not worth it.
+ }
+
+ allSigners, err := msigState.Signers()
+ if err != nil {
+ return err
+ }
+ signers := make([]address.Address, 0, threshold)
+ for _, signer := range allSigners {
+ actor, err := st.GetActor(signer)
+ if err != nil {
+ return err
+ }
+ if !builtin.IsAccountActor(actor.Code) {
+ // I am so not dealing with this mess.
+ continue
+ }
+ signers = append(signers, signer) // BUG FIX: signers was never populated, so multisigs were never taxed
+ if uint64(len(signers)) >= threshold {
+ break
+ }
+ }
+ // Ok, we're not dealing with this one.
+ if uint64(len(signers)) < threshold {
+ continue
+ }
+
+ available := big.Sub(actor.Balance, locked)
+
+ var txnId uint64
+ {
+ msg, err := multisig.Message(actorsVersion, signers[0]).Propose(
+ actor.Address, fs.fundAccount, available,
+ builtin.MethodSend, nil,
+ )
+ if err != nil {
+ return err
+ }
+ res, err := bb.PushMessage(msg)
+ if err != nil {
+ if blockbuilder.IsOutOfGas(err) {
+ err = nil
+ }
+ return err
+ }
+ var ret multisig.ProposeReturn
+ err = ret.UnmarshalCBOR(bytes.NewReader(res.Return))
+ if err != nil {
+ return err
+ }
+ if ret.Applied {
+ if !ret.Code.IsSuccess() {
+ bb.L().Errorw("failed to tax multisig",
+ "multisig", actor.Address,
+ "exitcode", ret.Code,
+ )
+ }
+ break // exits the switch case: the proposal auto-applied (threshold 1)
+ }
+ txnId = uint64(ret.TxnID)
+ }
+ var ret multisig.ProposeReturn
+ for _, signer := range signers[1:] {
+ msg, err := multisig.Message(actorsVersion, signer).Approve(actor.Address, txnId, nil)
+ if err != nil {
+ return err
+ }
+ res, err := bb.PushMessage(msg)
+ if err != nil {
+ if blockbuilder.IsOutOfGas(err) {
+ err = nil
+ }
+ return err
+ }
+ err = ret.UnmarshalCBOR(bytes.NewReader(res.Return)) // BUG FIX: decode into the outer ret (an inner re-declaration shadowed it, so the post-loop check always saw the zero value)
+ if err != nil {
+ return err
+ }
+ // A bit redundant, but nice.
+ if ret.Applied {
+ break
+ }
+
+ }
+ if !ret.Applied {
+ bb.L().Errorw("failed to apply multisig transaction",
+ "multisig", actor.Address,
+ "txnid", txnId,
+ "signers", len(signers),
+ "threshold", threshold,
+ )
+ continue
+ }
+ if !ret.Code.IsSuccess() {
+ bb.L().Errorw("failed to tax multisig",
+ "multisig", actor.Address,
+ "txnid", txnId,
+ "exitcode", ret.Code,
+ )
+ } else {
+ multisigs++
+ }
+ default:
+ panic("impossible case")
+ }
+ balance = big.Int{Int: balance.Add(balance.Int, actor.Balance.Int)}
+ if balance.GreaterThanEqual(fs.maxFunds) {
+ // There's no need to get greedy.
+ // Well, really, we're trying to avoid messing with state _too_ much.
+ return nil
+ }
+ }
+ return nil
+}
diff --git a/cmd/lotus-sim/simulation/stages/interface.go b/cmd/lotus-sim/simulation/stages/interface.go
new file mode 100644
index 00000000000..0c40a9b2308
--- /dev/null
+++ b/cmd/lotus-sim/simulation/stages/interface.go
@@ -0,0 +1,27 @@
+package stages
+
+import (
+ "context"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
+)
+
+// Stage is a stage of the simulation. It's asked to pack messages for every block.
+type Stage interface {
+ Name() string
+ PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) error // pack messages into the current block; stages stop on out-of-gas
+}
+
+// Funding tops up actors with FIL so their messages can land on chain.
+type Funding interface {
+ SendAndFund(*blockbuilder.BlockBuilder, *types.Message) (*types.MessageReceipt, error) // push a message, funding the recipient and retrying on insufficient funds
+ Fund(*blockbuilder.BlockBuilder, address.Address) error // send a one-shot top-up to the given actor
+}
+
+// Committer records pre-committed sectors so they can be prove-committed later.
+type Committer interface {
+ EnqueueProveCommit(addr address.Address, preCommitEpoch abi.ChainEpoch, info miner.SectorPreCommitInfo) error
+}
diff --git a/cmd/lotus-sim/simulation/stages/pipeline.go b/cmd/lotus-sim/simulation/stages/pipeline.go
new file mode 100644
index 00000000000..317e5b5a9e0
--- /dev/null
+++ b/cmd/lotus-sim/simulation/stages/pipeline.go
@@ -0,0 +1,31 @@
+package stages
+
+// DefaultPipeline returns the default stage pipeline. This pipeline:
+//
+// 1. Funds a "funding" actor, if necessary.
+// 2. Submits any ready window posts.
+// 3. Submits any ready prove commits.
+// 4. Submits pre-commits with the remaining gas.
+func DefaultPipeline() ([]Stage, error) {
+ // TODO: make this configurable. E.g., through DI?
+ // Ideally, we'd also be able to change priority, limit throughput (by limiting gas in the
+ // block builder, etc.
+ funding, err := NewFundingStage()
+ if err != nil {
+ return nil, err
+ }
+ wdpost, err := NewWindowPoStStage()
+ if err != nil {
+ return nil, err
+ }
+ provecommit, err := NewProveCommitStage(funding)
+ if err != nil {
+ return nil, err
+ }
+ precommit, err := NewPreCommitStage(funding, provecommit)
+ if err != nil {
+ return nil, err
+ }
+
+ return []Stage{funding, wdpost, provecommit, precommit}, nil
+}
diff --git a/cmd/lotus-sim/simulation/stages/precommit_stage.go b/cmd/lotus-sim/simulation/stages/precommit_stage.go
new file mode 100644
index 00000000000..5b9fed09e2a
--- /dev/null
+++ b/cmd/lotus-sim/simulation/stages/precommit_stage.go
@@ -0,0 +1,347 @@
+package stages
+
+import (
+ "context"
+ "sort"
+ "time"
+
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/network"
+
+ miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/aerrors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock"
+)
+
+const (
+ minPreCommitBatchSize = 1
+ maxPreCommitBatchSize = miner5.PreCommitSectorBatchMaxSize
+)
+
+type PreCommitStage struct {
+ funding Funding // used to top up miners/workers that can't pay for their pre-commits
+ committer Committer // receives every packed pre-commit for later prove-commit
+
+ // The tiers represent the top 1%, top 10%, and everyone else. When sealing sectors, we seal
+ // a group of sectors for the top 1%, a group (half that size) for the top 10%, and one
+ // sector for everyone else. We determine these rates by looking at two power tables.
+ // TODO Ideally we'd "learn" this distribution from the network. But this is good enough for
+ // now.
+ top1, top10, rest actorIter
+ initialized bool // set by load on the first PackMessages call
+}
+
+// NewPreCommitStage builds a pre-commit stage; the miner tiers are loaded lazily on first use.
+func NewPreCommitStage(funding Funding, committer Committer) (*PreCommitStage, error) {
+ return &PreCommitStage{
+ funding: funding,
+ committer: committer,
+ }, nil
+}
+
+func (*PreCommitStage) Name() string {
+ return "pre-commit" // implements Stage
+}
+
+// PackMessages packs pre-commit messages until the block is full.
+func (stage *PreCommitStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) {
+ if !stage.initialized {
+ if err := stage.load(ctx, bb); err != nil {
+ return err
+ }
+ }
+
+ var (
+ full bool
+ top1Count, top10Count, restCount int
+ )
+ start := time.Now()
+ defer func() {
+ if _err != nil {
+ return
+ }
+ bb.L().Debugw("packed pre commits",
+ "done", top1Count+top10Count+restCount,
+ "top1", top1Count,
+ "top10", top10Count,
+ "rest", restCount,
+ "filled-block", full,
+ "duration", time.Since(start),
+ )
+ }()
+
+ var top1Miners, top10Miners, restMiners int
+ for i := 0; ; i++ {
+ var (
+ minerAddr address.Address
+ count *int
+ )
+
+ // We pre-commit for the top 1%, the top 10%, and the rest of the network 1/3rd
+ // of the time each. This won't yield the most accurate distribution... but it'll
+ // give us a good enough distribution.
+ switch {
+ case (i%3) <= 0 && top1Miners < stage.top1.len():
+ count = &top1Count
+ minerAddr = stage.top1.next()
+ top1Miners++
+ case (i%3) <= 1 && top10Miners < stage.top10.len():
+ count = &top10Count
+ minerAddr = stage.top10.next()
+ top10Miners++
+ case (i%3) <= 2 && restMiners < stage.rest.len():
+ count = &restCount
+ minerAddr = stage.rest.next()
+ restMiners++
+ default:
+ // Well, we've run through all miners.
+ return nil
+ }
+
+ var (
+ added int
+ err error
+ )
+ added, full, err = stage.packMiner(ctx, bb, minerAddr, maxProveCommitBatchSize) // NOTE(review): this is the prove-commit cap, not maxPreCommitBatchSize; packMiner re-batches internally, but confirm the intended per-miner target
+ if err != nil {
+ return xerrors.Errorf("failed to pack precommits for miner %s: %w", minerAddr, err)
+ }
+ *count += added
+ if full {
+ return nil
+ }
+ }
+}
+
+// packPreCommitsMiner packs count pre-commits for the given miner.
+func (stage *PreCommitStage) packMiner(
+ ctx context.Context, bb *blockbuilder.BlockBuilder,
+ minerAddr address.Address, count int,
+) (int, bool, error) {
+ log := bb.L().With("miner", minerAddr)
+ epoch := bb.Height()
+ nv := bb.NetworkVersion()
+
+ minerActor, err := bb.StateTree().GetActor(minerAddr)
+ if err != nil {
+ return 0, false, err
+ }
+ minerState, err := miner.Load(bb.ActorStore(), minerActor)
+ if err != nil {
+ return 0, false, err
+ }
+
+ minerInfo, err := minerState.Info()
+ if err != nil {
+ return 0, false, err
+ }
+
+ // Make sure the miner is funded.
+ minerBalance, err := minerState.AvailableBalance(minerActor.Balance)
+ if err != nil {
+ return 0, false, err
+ }
+
+ if big.Cmp(minerBalance, MinimumFunds) < 0 {
+ err := stage.funding.Fund(bb, minerAddr)
+ if err != nil {
+ if blockbuilder.IsOutOfGas(err) {
+ return 0, true, nil
+ }
+ return 0, false, err
+ }
+ }
+
+ // Generate pre-commits.
+ sealType, err := miner.PreferredSealProofTypeFromWindowPoStType(
+ nv, minerInfo.WindowPoStProofType,
+ )
+ if err != nil {
+ return 0, false, err
+ }
+
+ sectorNos, err := minerState.UnallocatedSectorNumbers(count)
+ if err != nil {
+ return 0, false, err
+ }
+
+ expiration := epoch + policy.GetMaxSectorExpirationExtension()
+ infos := make([]miner.SectorPreCommitInfo, len(sectorNos))
+ for i, sno := range sectorNos {
+ infos[i] = miner.SectorPreCommitInfo{
+ SealProof: sealType,
+ SectorNumber: sno,
+ SealedCID: mock.MockCommR(minerAddr, sno),
+ SealRandEpoch: epoch - 1,
+ Expiration: expiration,
+ }
+ }
+
+ // Commit the pre-commits.
+ added := 0
+ if nv >= network.Version13 {
+ targetBatchSize := maxPreCommitBatchSize
+ for targetBatchSize >= minPreCommitBatchSize && len(infos) >= minPreCommitBatchSize {
+ batch := infos
+ if len(batch) > targetBatchSize {
+ batch = batch[:targetBatchSize]
+ }
+ params := miner5.PreCommitSectorBatchParams{
+ Sectors: batch,
+ }
+ enc, err := actors.SerializeParams(¶ms)
+ if err != nil {
+ return added, false, err
+ }
+ // NOTE: just in-case, sendAndFund will "fund" and re-try for any message
+ // that fails due to "insufficient funds".
+ if _, err := stage.funding.SendAndFund(bb, &types.Message{
+ To: minerAddr,
+ From: minerInfo.Worker,
+ Value: abi.NewTokenAmount(0),
+ Method: miner.Methods.PreCommitSectorBatch,
+ Params: enc,
+ }); blockbuilder.IsOutOfGas(err) {
+ // try again with a smaller batch.
+ targetBatchSize /= 2
+ continue
+ } else if aerr, ok := err.(aerrors.ActorError); ok && !aerr.IsFatal() {
+ // Log the error and move on. No reason to stop.
+ log.Errorw("failed to pre-commit for unknown reasons",
+ "error", aerr,
+ "sectors", batch,
+ )
+ return added, false, nil
+ } else if err != nil {
+ return added, false, err
+ }
+
+ for _, info := range batch {
+ if err := stage.committer.EnqueueProveCommit(minerAddr, epoch, info); err != nil {
+ return added, false, err
+ }
+ added++
+ }
+ infos = infos[len(batch):]
+ }
+ }
+ for _, info := range infos {
+ enc, err := actors.SerializeParams(&info) //nolint
+ if err != nil {
+ return 0, false, err
+ }
+ if _, err := stage.funding.SendAndFund(bb, &types.Message{
+ To: minerAddr,
+ From: minerInfo.Worker,
+ Value: abi.NewTokenAmount(0),
+ Method: miner.Methods.PreCommitSector,
+ Params: enc,
+ }); blockbuilder.IsOutOfGas(err) {
+ return added, true, nil
+ } else if err != nil {
+ return added, false, err
+ }
+
+ if err := stage.committer.EnqueueProveCommit(minerAddr, epoch, info); err != nil {
+ return added, false, err
+ }
+ added++
+ }
+ return added, false, nil
+}
+
+// load builds the top1/top10/rest miner tiers from the power table's claims.
+func (stage *PreCommitStage) load(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) {
+ bb.L().Infow("loading miner power for pre-commits")
+ start := time.Now()
+ defer func() {
+ if _err != nil {
+ return
+ }
+ bb.L().Infow("loaded miner power for pre-commits",
+ "duration", time.Since(start),
+ "top1", stage.top1.len(),
+ "top10", stage.top10.len(),
+ "rest", stage.rest.len(),
+ )
+ }()
+
+ store := bb.ActorStore()
+ st := bb.ParentStateTree()
+ powerState, err := loadPower(store, st)
+ if err != nil {
+ return xerrors.Errorf("failed to load power actor: %w", err)
+ }
+
+ type onboardingInfo struct {
+ addr address.Address
+ sectorCount uint64
+ }
+ var sealList []onboardingInfo
+ err = powerState.ForEachClaim(func(addr address.Address, claim power.Claim) error {
+ if claim.RawBytePower.IsZero() {
+ return nil
+ }
+
+ minerState, err := loadMiner(store, st, addr)
+ if err != nil {
+ return err
+ }
+ info, err := minerState.Info()
+ if err != nil {
+ return err
+ }
+
+ sectorCount := sectorsFromClaim(info.SectorSize, claim)
+
+ if sectorCount > 0 {
+ sealList = append(sealList, onboardingInfo{addr, uint64(sectorCount)})
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if len(sealList) == 0 {
+ return xerrors.Errorf("simulation has no miners")
+ }
+
+ // Now that we have a list of sealing miners, sort them into percentiles.
+ sort.Slice(sealList, func(i, j int) bool {
+ return sealList[i].sectorCount > sealList[j].sectorCount // BUG FIX: descending, so the first 1%/10% really are the largest miners
+ })
+
+ // reset, just in case.
+ stage.top1 = actorIter{}
+ stage.top10 = actorIter{}
+ stage.rest = actorIter{}
+
+ for i, oi := range sealList {
+ var dist *actorIter
+ if i < len(sealList)/100 {
+ dist = &stage.top1
+ } else if i < len(sealList)/10 {
+ dist = &stage.top10
+ } else {
+ dist = &stage.rest
+ }
+ dist.add(oi.addr)
+ }
+
+ stage.top1.shuffle()
+ stage.top10.shuffle()
+ stage.rest.shuffle()
+
+ stage.initialized = true
+ return nil
+}
diff --git a/cmd/lotus-sim/simulation/stages/provecommit_stage.go b/cmd/lotus-sim/simulation/stages/provecommit_stage.go
new file mode 100644
index 00000000000..6cbca7de9fb
--- /dev/null
+++ b/cmd/lotus-sim/simulation/stages/provecommit_stage.go
@@ -0,0 +1,372 @@
+package stages
+
+import (
+ "context"
+ "time"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/exitcode"
+ "github.com/filecoin-project/go-state-types/network"
+
+ miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
+ power5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/power"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/aerrors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock"
+)
+
+const (
+ minProveCommitBatchSize = 4
+ maxProveCommitBatchSize = miner5.MaxAggregatedSectors
+)
+
+type ProveCommitStage struct {
+ funding Funding
+ // We track the set of pending commits. On simulation load, and when a new pre-commit is
+ // added to the chain, we put the commit in this queue. advanceEpoch(currentEpoch) should be
+ // called on this queue at every epoch before using it.
+ commitQueue commitQueue
+ initialized bool
+}
+
+func NewProveCommitStage(funding Funding) (*ProveCommitStage, error) {
+ return &ProveCommitStage{
+ funding: funding,
+ }, nil
+}
+
+func (*ProveCommitStage) Name() string {
+ return "prove-commit"
+}
+
+func (stage *ProveCommitStage) EnqueueProveCommit(
+ minerAddr address.Address, preCommitEpoch abi.ChainEpoch, info miner.SectorPreCommitInfo,
+) error {
+ return stage.commitQueue.enqueueProveCommit(minerAddr, preCommitEpoch, info)
+}
+
+// PackMessages packs all prove-commits for all "ready to be proven" sectors until it fills the
+// block or runs out.
+func (stage *ProveCommitStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) {
+ if !stage.initialized {
+ if err := stage.load(ctx, bb); err != nil {
+ return err
+ }
+ }
+ // Roll the commitQueue forward.
+ stage.commitQueue.advanceEpoch(bb.Height())
+
+ start := time.Now()
+ var failed, done, unbatched, count int
+ defer func() {
+ if _err != nil {
+ return
+ }
+ remaining := stage.commitQueue.ready()
+ bb.L().Debugw("packed prove commits",
+ "remaining", remaining,
+ "done", done,
+ "failed", failed,
+ "unbatched", unbatched,
+ "miners-processed", count,
+ "duration", time.Since(start),
+ )
+ }()
+
+ for {
+ addr, pending, ok := stage.commitQueue.nextMiner()
+ if !ok {
+ return nil
+ }
+
+ res, err := stage.packProveCommitsMiner(ctx, bb, addr, pending)
+ if err != nil {
+ return err
+ }
+ failed += res.failed
+ done += res.done
+ unbatched += res.unbatched
+ count++
+ if res.full {
+ return nil
+ }
+ }
+}
+
+type proveCommitResult struct {
+ done, failed, unbatched int
+ full bool
+}
+
+// packProveCommitsMiner enqueues prove-commits from the given miner until it runs out of
+// available prove-commits, batching as much as possible.
+//
+// This function will fund as necessary from the "burnt funds actor" (look, it's convenient).
+func (stage *ProveCommitStage) packProveCommitsMiner(
+ ctx context.Context, bb *blockbuilder.BlockBuilder, minerAddr address.Address,
+ pending minerPendingCommits,
+) (res proveCommitResult, _err error) {
+ minerActor, err := bb.StateTree().GetActor(minerAddr)
+ if err != nil {
+ return res, err
+ }
+ minerState, err := miner.Load(bb.ActorStore(), minerActor)
+ if err != nil {
+ return res, err
+ }
+ info, err := minerState.Info()
+ if err != nil {
+ return res, err
+ }
+
+ log := bb.L().With("miner", minerAddr)
+
+ nv := bb.NetworkVersion()
+ for sealType, snos := range pending {
+ if nv >= network.Version13 {
+ for len(snos) > minProveCommitBatchSize {
+ batchSize := maxProveCommitBatchSize
+ if len(snos) < batchSize {
+ batchSize = len(snos)
+ }
+ batch := snos[:batchSize]
+
+ proof, err := mock.MockAggregateSealProof(sealType, minerAddr, batchSize)
+ if err != nil {
+ return res, err
+ }
+
+ params := miner5.ProveCommitAggregateParams{
+ SectorNumbers: bitfield.New(),
+ AggregateProof: proof,
+ }
+ for _, sno := range batch {
+ params.SectorNumbers.Set(uint64(sno))
+ }
+
+ enc, err := actors.SerializeParams(¶ms)
+ if err != nil {
+ return res, err
+ }
+
+ if _, err := stage.funding.SendAndFund(bb, &types.Message{
+ From: info.Worker,
+ To: minerAddr,
+ Value: abi.NewTokenAmount(0),
+ Method: miner.Methods.ProveCommitAggregate,
+ Params: enc,
+ }); err == nil {
+ res.done += len(batch)
+ } else if blockbuilder.IsOutOfGas(err) {
+ res.full = true
+ return res, nil
+ } else if aerr, ok := err.(aerrors.ActorError); !ok || aerr.IsFatal() {
+ // If we get a random error, or a fatal actor error, bail.
+ return res, err
+ } else if aerr.RetCode() == exitcode.ErrNotFound || aerr.RetCode() == exitcode.ErrIllegalArgument {
+ // If we get a "not-found" or illegal argument error, try to
+ // remove any missing prove-commits and continue. This can
+ // happen either because:
+ //
+ // 1. The pre-commit failed on execution (but not when
+ // packing). This shouldn't happen, but we might as well
+ // gracefully handle it.
+ // 2. The pre-commit has expired. We'd have to be really
+ // backlogged to hit this case, but we might as well handle
+ // it.
+ // First, split into "good" and "missing"
+ good, err := stage.filterProveCommits(ctx, bb, minerAddr, batch)
+ if err != nil {
+ log.Errorw("failed to filter prove commits", "error", err)
+ // fail with the original error.
+ return res, aerr
+ }
+ removed := len(batch) - len(good)
+ if removed == 0 {
+ log.Errorw("failed to prove-commit for unknown reasons",
+ "error", aerr,
+ "sectors", batch,
+ )
+ res.failed += len(batch)
+ } else if len(good) == 0 {
+ log.Errorw("failed to prove commit missing pre-commits",
+ "error", aerr,
+ "discarded", removed,
+ )
+ res.failed += len(batch)
+ } else {
+ // update the pending sector numbers in-place to remove the expired ones.
+ snos = snos[removed:]
+ copy(snos, good)
+ pending.finish(sealType, removed)
+
+ log.Errorw("failed to prove commit expired/missing pre-commits",
+ "error", aerr,
+ "discarded", removed,
+ "kept", len(good),
+ )
+ res.failed += removed
+
+ // Then try again.
+ continue
+ }
+ } else {
+ log.Errorw("failed to prove commit sector(s)",
+ "error", err,
+ "sectors", batch,
+ )
+ res.failed += len(batch)
+ }
+ pending.finish(sealType, len(batch))
+ snos = snos[len(batch):]
+ }
+ }
+ for len(snos) > 0 && res.unbatched < power5.MaxMinerProveCommitsPerEpoch {
+ sno := snos[0]
+ snos = snos[1:]
+
+ proof, err := mock.MockSealProof(sealType, minerAddr)
+ if err != nil {
+ return res, err
+ }
+ params := miner.ProveCommitSectorParams{
+ SectorNumber: sno,
+ Proof: proof,
+ }
+ enc, err := actors.SerializeParams(¶ms)
+ if err != nil {
+ return res, err
+ }
+ if _, err := stage.funding.SendAndFund(bb, &types.Message{
+ From: info.Worker,
+ To: minerAddr,
+ Value: abi.NewTokenAmount(0),
+ Method: miner.Methods.ProveCommitSector,
+ Params: enc,
+ }); err == nil {
+ res.unbatched++
+ res.done++
+ } else if blockbuilder.IsOutOfGas(err) {
+ res.full = true
+ return res, nil
+ } else if aerr, ok := err.(aerrors.ActorError); !ok || aerr.IsFatal() {
+ return res, err
+ } else {
+ log.Errorw("failed to prove commit sector(s)",
+ "error", err,
+ "sectors", []abi.SectorNumber{sno},
+ )
+ res.failed++
+ }
+ // mark it as "finished" regardless so we skip it.
+ pending.finish(sealType, 1)
+ }
+ // if we get here, we can't pre-commit anything more.
+ }
+ return res, nil
+}
+
+// loadMiner enqueues all pending prove-commits for the given miner. This is called on load to
+// populate the commitQueue and should not need to be called later.
+//
+// It will drop any pre-commits that have already expired.
+func (stage *ProveCommitStage) loadMiner(ctx context.Context, bb *blockbuilder.BlockBuilder, addr address.Address) error {
+ epoch := bb.Height()
+ av := bb.ActorsVersion()
+ minerState, err := loadMiner(bb.ActorStore(), bb.ParentStateTree(), addr)
+ if err != nil {
+ return err
+ }
+
+ // Find all pending prove commits and group by proof type. Really, there should never
+ // (except during upgrades) be more than one type.
+ var total, dropped int
+ err = minerState.ForEachPrecommittedSector(func(info miner.SectorPreCommitOnChainInfo) error {
+ total++
+ msd := policy.GetMaxProveCommitDuration(av, info.Info.SealProof)
+ if epoch > info.PreCommitEpoch+msd {
+ dropped++
+ return nil
+ }
+ return stage.commitQueue.enqueueProveCommit(addr, info.PreCommitEpoch, info.Info)
+ })
+ if err != nil {
+ return err
+ }
+ if dropped > 0 {
+ bb.L().Warnw("dropped expired pre-commits on load",
+ "miner", addr,
+ "total", total,
+ "expired", dropped,
+ )
+ }
+ return nil
+}
+
+// filterProveCommits filters out expired and/or missing pre-commits.
+func (stage *ProveCommitStage) filterProveCommits(
+ ctx context.Context, bb *blockbuilder.BlockBuilder,
+ minerAddr address.Address, snos []abi.SectorNumber,
+) ([]abi.SectorNumber, error) {
+ act, err := bb.StateTree().GetActor(minerAddr)
+ if err != nil {
+ return nil, err
+ }
+
+ minerState, err := miner.Load(bb.ActorStore(), act)
+ if err != nil {
+ return nil, err
+ }
+
+ nextEpoch := bb.Height()
+ av := bb.ActorsVersion()
+
+ good := make([]abi.SectorNumber, 0, len(snos))
+ for _, sno := range snos {
+ info, err := minerState.GetPrecommittedSector(sno)
+ if err != nil {
+ return nil, err
+ }
+ if info == nil {
+ continue
+ }
+ msd := policy.GetMaxProveCommitDuration(av, info.Info.SealProof)
+ if nextEpoch > info.PreCommitEpoch+msd {
+ continue
+ }
+ good = append(good, sno)
+ }
+ return good, nil
+}
+
+func (stage *ProveCommitStage) load(ctx context.Context, bb *blockbuilder.BlockBuilder) error {
+ stage.initialized = false // in case something fails while we're doing this.
+ stage.commitQueue = commitQueue{offset: bb.Height()}
+ powerState, err := loadPower(bb.ActorStore(), bb.ParentStateTree())
+ if err != nil {
+ return err
+ }
+
+ err = powerState.ForEachClaim(func(minerAddr address.Address, claim power.Claim) error {
+ // TODO: If we want to finish pre-commits for "new" miners, we'll need to change
+ // this.
+ if claim.RawBytePower.IsZero() {
+ return nil
+ }
+ return stage.loadMiner(ctx, bb, minerAddr)
+ })
+ if err != nil {
+ return err
+ }
+
+ stage.initialized = true
+ return nil
+}
diff --git a/cmd/lotus-sim/simulation/stages/util.go b/cmd/lotus-sim/simulation/stages/util.go
new file mode 100644
index 00000000000..97c1e57af83
--- /dev/null
+++ b/cmd/lotus-sim/simulation/stages/util.go
@@ -0,0 +1,51 @@
+package stages
+
+import (
+ "context"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
+)
+
+func loadMiner(store adt.Store, st types.StateTree, addr address.Address) (miner.State, error) {
+ minerActor, err := st.GetActor(addr)
+ if err != nil {
+ return nil, err
+ }
+ return miner.Load(store, minerActor)
+}
+
+func loadPower(store adt.Store, st types.StateTree) (power.State, error) {
+ powerActor, err := st.GetActor(power.Address)
+ if err != nil {
+ return nil, err
+ }
+ return power.Load(store, powerActor)
+}
+
+// Compute the number of sectors a miner has from their power claim.
+func sectorsFromClaim(sectorSize abi.SectorSize, c power.Claim) int64 {
+ if c.RawBytePower.Int == nil {
+ return 0
+ }
+ sectorCount := big.Div(c.RawBytePower, big.NewIntUnsigned(uint64(sectorSize)))
+ if !sectorCount.IsInt64() {
+ panic("impossible number of sectors")
+ }
+ return sectorCount.Int64()
+}
+
+func postChainCommitInfo(ctx context.Context, bb *blockbuilder.BlockBuilder, epoch abi.ChainEpoch) (abi.Randomness, error) {
+ cs := bb.StateManager().ChainStore()
+ ts := bb.ParentTipSet()
+ commitRand, err := cs.GetChainRandomness(ctx, ts.Cids(), crypto.DomainSeparationTag_PoStChainCommit, epoch, nil, true)
+ return commitRand, err
+}
diff --git a/cmd/lotus-sim/simulation/stages/windowpost_stage.go b/cmd/lotus-sim/simulation/stages/windowpost_stage.go
new file mode 100644
index 00000000000..68f8ea179b3
--- /dev/null
+++ b/cmd/lotus-sim/simulation/stages/windowpost_stage.go
@@ -0,0 +1,317 @@
+package stages
+
+import (
+ "context"
+ "math"
+ "time"
+
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+
+ proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/aerrors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/power"
+ "github.com/filecoin-project/lotus/chain/actors/policy"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock"
+)
+
+type WindowPoStStage struct {
+ // We track the window post periods per miner and assume that no new miners are ever added.
+
+ // We record all pending window post messages, and the epoch up through which we've
+ // generated window post messages.
+ pendingWposts []*types.Message
+ wpostPeriods [][]address.Address // (epoch % (epochs in a deadline)) -> miner
+ nextWpostEpoch abi.ChainEpoch
+}
+
+func NewWindowPoStStage() (*WindowPoStStage, error) {
+ return new(WindowPoStStage), nil
+}
+
+func (*WindowPoStStage) Name() string {
+ return "window-post"
+}
+
+// PackMessages packs window posts until either the block is full or all healthy sectors
+// have been proven. It does not recover sectors.
+func (stage *WindowPoStStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) {
+ // Push any new window posts into the queue.
+ if err := stage.tick(ctx, bb); err != nil {
+ return err
+ }
+ done := 0
+ failed := 0
+ defer func() {
+ if _err != nil {
+ return
+ }
+
+ bb.L().Debugw("packed window posts",
+ "done", done,
+ "failed", failed,
+ "remaining", len(stage.pendingWposts),
+ )
+ }()
+ // Then pack as many as we can.
+ for len(stage.pendingWposts) > 0 {
+ next := stage.pendingWposts[0]
+ if _, err := bb.PushMessage(next); err != nil {
+ if blockbuilder.IsOutOfGas(err) {
+ return nil
+ }
+ if aerr, ok := err.(aerrors.ActorError); !ok || aerr.IsFatal() {
+ return err
+ }
+ bb.L().Errorw("failed to submit windowed post",
+ "error", err,
+ "miner", next.To,
+ )
+ failed++
+ } else {
+ done++
+ }
+
+ stage.pendingWposts = stage.pendingWposts[1:]
+ }
+ stage.pendingWposts = nil
+ return nil
+}
+
+// queueMiner enqueues all missing window posts for the current epoch for the given miner.
+func (stage *WindowPoStStage) queueMiner(
+ ctx context.Context, bb *blockbuilder.BlockBuilder,
+ addr address.Address, minerState miner.State,
+ commitEpoch abi.ChainEpoch, commitRand abi.Randomness,
+) error {
+
+ if active, err := minerState.DeadlineCronActive(); err != nil {
+ return err
+ } else if !active {
+ return nil
+ }
+
+ minerInfo, err := minerState.Info()
+ if err != nil {
+ return err
+ }
+
+ di, err := minerState.DeadlineInfo(bb.Height())
+ if err != nil {
+ return err
+ }
+ di = di.NextNotElapsed()
+
+ dl, err := minerState.LoadDeadline(di.Index)
+ if err != nil {
+ return err
+ }
+
+ provenBf, err := dl.PartitionsPoSted()
+ if err != nil {
+ return err
+ }
+ proven, err := provenBf.AllMap(math.MaxUint64)
+ if err != nil {
+ return err
+ }
+
+ poStBatchSize, err := policy.GetMaxPoStPartitions(bb.NetworkVersion(), minerInfo.WindowPoStProofType)
+ if err != nil {
+ return err
+ }
+
+ var (
+ partitions []miner.PoStPartition
+ partitionGroups [][]miner.PoStPartition
+ )
+ // Only prove partitions with live sectors.
+ err = dl.ForEachPartition(func(idx uint64, part miner.Partition) error {
+ if proven[idx] {
+ return nil
+ }
+ // NOTE: We're mimicking the behavior of wdpost_run.go here.
+ if len(partitions) > 0 && idx%uint64(poStBatchSize) == 0 {
+ partitionGroups = append(partitionGroups, partitions)
+ partitions = nil
+
+ }
+ live, err := part.LiveSectors()
+ if err != nil {
+ return err
+ }
+ liveCount, err := live.Count()
+ if err != nil {
+ return err
+ }
+ faulty, err := part.FaultySectors()
+ if err != nil {
+ return err
+ }
+ faultyCount, err := faulty.Count()
+ if err != nil {
+ return err
+ }
+ if liveCount-faultyCount > 0 {
+ partitions = append(partitions, miner.PoStPartition{Index: idx})
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ if len(partitions) > 0 {
+ partitionGroups = append(partitionGroups, partitions)
+ partitions = nil
+ }
+
+ proof, err := mock.MockWindowPoStProof(minerInfo.WindowPoStProofType, addr)
+ if err != nil {
+ return err
+ }
+ for _, group := range partitionGroups {
+ params := miner.SubmitWindowedPoStParams{
+ Deadline: di.Index,
+ Partitions: group,
+ Proofs: []proof5.PoStProof{{
+ PoStProof: minerInfo.WindowPoStProofType,
+ ProofBytes: proof,
+ }},
+ ChainCommitEpoch: commitEpoch,
+ ChainCommitRand: commitRand,
+ }
+ enc, aerr := actors.SerializeParams(¶ms)
+ if aerr != nil {
+ return xerrors.Errorf("could not serialize submit window post parameters: %w", aerr)
+ }
+ msg := &types.Message{
+ To: addr,
+ From: minerInfo.Worker,
+ Method: miner.Methods.SubmitWindowedPoSt,
+ Params: enc,
+ Value: types.NewInt(0),
+ }
+ stage.pendingWposts = append(stage.pendingWposts, msg)
+ }
+ return nil
+}
+
+func (stage *WindowPoStStage) load(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) {
+ bb.L().Info("loading window post info")
+
+ start := time.Now()
+ defer func() {
+ if _err != nil {
+ return
+ }
+
+ bb.L().Infow("loaded window post info", "duration", time.Since(start))
+ }()
+
+ // reset
+ stage.wpostPeriods = make([][]address.Address, miner.WPoStChallengeWindow)
+ stage.pendingWposts = nil
+ stage.nextWpostEpoch = bb.Height() + 1
+
+ st := bb.ParentStateTree()
+ store := bb.ActorStore()
+
+ powerState, err := loadPower(store, st)
+ if err != nil {
+ return err
+ }
+
+ commitEpoch := bb.ParentTipSet().Height()
+ commitRand, err := postChainCommitInfo(ctx, bb, commitEpoch)
+ if err != nil {
+ return err
+ }
+
+ return powerState.ForEachClaim(func(minerAddr address.Address, claim power.Claim) error {
+ // TODO: If we start recovering power, we'll need to change this.
+ if claim.RawBytePower.IsZero() {
+ return nil
+ }
+
+ minerState, err := loadMiner(store, st, minerAddr)
+ if err != nil {
+ return err
+ }
+
+ // Shouldn't be necessary if the miner has power, but we might as well be safe.
+ if active, err := minerState.DeadlineCronActive(); err != nil {
+ return err
+ } else if !active {
+ return nil
+ }
+
+ // Record when we need to prove for this miner.
+ dinfo, err := minerState.DeadlineInfo(bb.Height())
+ if err != nil {
+ return err
+ }
+ dinfo = dinfo.NextNotElapsed()
+
+ ppOffset := int(dinfo.PeriodStart % miner.WPoStChallengeWindow)
+ stage.wpostPeriods[ppOffset] = append(stage.wpostPeriods[ppOffset], minerAddr)
+
+ return stage.queueMiner(ctx, bb, minerAddr, minerState, commitEpoch, commitRand)
+ })
+}
+
+func (stage *WindowPoStStage) tick(ctx context.Context, bb *blockbuilder.BlockBuilder) error {
+ // If this is our first time, load from scratch.
+ if stage.wpostPeriods == nil {
+ return stage.load(ctx, bb)
+ }
+
+ targetHeight := bb.Height()
+ now := time.Now()
+ was := len(stage.pendingWposts)
+ count := 0
+ defer func() {
+ bb.L().Debugw("computed window posts",
+ "miners", count,
+ "count", len(stage.pendingWposts)-was,
+ "duration", time.Since(now),
+ )
+ }()
+
+ st := bb.ParentStateTree()
+ store := bb.ActorStore()
+
+ // Perform a bit of catch up. This lets us do things like skip blocks at upgrades then catch
+ // up to make the simulation easier.
+ for ; stage.nextWpostEpoch <= targetHeight; stage.nextWpostEpoch++ {
+ if stage.nextWpostEpoch+miner.WPoStChallengeWindow < targetHeight {
+ bb.L().Warnw("skipping old window post", "deadline-open", stage.nextWpostEpoch)
+ continue
+ }
+ commitEpoch := stage.nextWpostEpoch - 1
+ commitRand, err := postChainCommitInfo(ctx, bb, commitEpoch)
+ if err != nil {
+ return err
+ }
+
+ for _, addr := range stage.wpostPeriods[int(stage.nextWpostEpoch%miner.WPoStChallengeWindow)] {
+ minerState, err := loadMiner(store, st, addr)
+ if err != nil {
+ return err
+ }
+
+ if err := stage.queueMiner(ctx, bb, addr, minerState, commitEpoch, commitRand); err != nil {
+ return err
+ }
+ count++
+ }
+
+ }
+ return nil
+}
diff --git a/cmd/lotus-sim/simulation/step.go b/cmd/lotus-sim/simulation/step.go
new file mode 100644
index 00000000000..902f2ad6ca6
--- /dev/null
+++ b/cmd/lotus-sim/simulation/step.go
@@ -0,0 +1,71 @@
+package simulation
+
+import (
+ "context"
+
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
+)
+
+// Step steps the simulation forward one step. This may move forward by more than one epoch.
+func (sim *Simulation) Step(ctx context.Context) (*types.TipSet, error) {
+ log.Infow("step", "epoch", sim.head.Height()+1)
+ messages, err := sim.popNextMessages(ctx)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to select messages for block: %w", err)
+ }
+ head, err := sim.makeTipSet(ctx, messages)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to make tipset: %w", err)
+ }
+ if err := sim.SetHead(head); err != nil {
+ return nil, xerrors.Errorf("failed to update head: %w", err)
+ }
+ return head, nil
+}
+
+// popNextMessages generates/picks a set of messages to be included in the next block.
+//
+// - This function is destructive and should only be called once per epoch.
+// - This function does not store anything in the repo.
+// - This function handles all gas estimation. The returned messages should all fit in a single
+// block.
+func (sim *Simulation) popNextMessages(ctx context.Context) ([]*types.Message, error) {
+ parentTs := sim.head
+
+ // First we make sure we don't have an upgrade at this epoch. If we do, we return no
+ // messages so we can just create an empty block at that epoch.
+ //
+ // This isn't what the network does, but it makes things easier. Otherwise, we'd need to run
+ // migrations before this epoch and I'd rather not deal with that.
+ nextHeight := parentTs.Height() + 1
+ prevVer := sim.StateManager.GetNtwkVersion(ctx, nextHeight-1)
+ nextVer := sim.StateManager.GetNtwkVersion(ctx, nextHeight)
+ if nextVer != prevVer {
+ log.Warnw("packing no messages for version upgrade block",
+ "old", prevVer,
+ "new", nextVer,
+ "epoch", nextHeight,
+ )
+ return nil, nil
+ }
+
+ bb, err := blockbuilder.NewBlockBuilder(
+ ctx, log.With("simulation", sim.name),
+ sim.StateManager, parentTs,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ for _, stage := range sim.stages {
+ // We're intentionally ignoring the "full" signal so we can try to pack a few more
+ // messages.
+ if err := stage.PackMessages(ctx, bb); err != nil && !blockbuilder.IsOutOfGas(err) {
+ return nil, xerrors.Errorf("when packing messages with %s: %w", stage.Name(), err)
+ }
+ }
+ return bb.Messages(), nil
+}
diff --git a/cmd/lotus-sim/upgrade.go b/cmd/lotus-sim/upgrade.go
new file mode 100644
index 00000000000..dfc726d6b01
--- /dev/null
+++ b/cmd/lotus-sim/upgrade.go
@@ -0,0 +1,109 @@
+package main
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "text/tabwriter"
+
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/network"
+)
+
+var upgradeCommand = &cli.Command{
+ Name: "upgrade",
+ Description: "Modifies network upgrade heights.",
+ Subcommands: []*cli.Command{
+ upgradeSetCommand,
+ upgradeList,
+ },
+}
+
+var upgradeList = &cli.Command{
+ Name: "list",
+ Description: "Lists all pending upgrades.",
+ Subcommands: []*cli.Command{
+ upgradeSetCommand,
+ },
+ Action: func(cctx *cli.Context) (err error) {
+ node, err := open(cctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := node.Close(); err == nil {
+ err = cerr
+ }
+ }()
+
+ sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
+ if err != nil {
+ return err
+ }
+ upgrades, err := sim.ListUpgrades()
+ if err != nil {
+ return err
+ }
+
+ tw := tabwriter.NewWriter(cctx.App.Writer, 8, 8, 0, ' ', 0)
+ fmt.Fprintf(tw, "version\theight\tepochs\tmigration\texpensive")
+ epoch := sim.GetHead().Height()
+ for _, upgrade := range upgrades {
+ fmt.Fprintf(
+ tw, "%d\t%d\t%+d\t%t\t%t",
+ upgrade.Network, upgrade.Height, upgrade.Height-epoch,
+ upgrade.Migration != nil,
+ upgrade.Expensive,
+ )
+ }
+ return nil
+ },
+}
+
+var upgradeSetCommand = &cli.Command{
+ Name: "set",
+ ArgsUsage: " [+]",
+ Description: "Set a network upgrade height. Prefix with '+' to set it relative to the last epoch.",
+ Action: func(cctx *cli.Context) (err error) {
+ args := cctx.Args()
+ if args.Len() != 2 {
+ return fmt.Errorf("expected 2 arguments")
+ }
+ nvString := args.Get(0)
+ networkVersion, err := strconv.ParseUint(nvString, 10, 32)
+ if err != nil {
+ return fmt.Errorf("failed to parse network version %q: %w", nvString, err)
+ }
+ heightString := args.Get(1)
+ relative := false
+ if strings.HasPrefix(heightString, "+") {
+ heightString = heightString[1:]
+ relative = true
+ }
+ height, err := strconv.ParseInt(heightString, 10, 64)
+ if err != nil {
+ return fmt.Errorf("failed to parse height version %q: %w", heightString, err)
+ }
+
+ node, err := open(cctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := node.Close(); err == nil {
+ err = cerr
+ }
+ }()
+
+ sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
+ if err != nil {
+ return err
+ }
+ if relative {
+ height += int64(sim.GetHead().Height())
+ }
+ return sim.SetUpgradeHeight(network.Version(networkVersion), abi.ChainEpoch(height))
+ },
+}
diff --git a/cmd/lotus-sim/util.go b/cmd/lotus-sim/util.go
new file mode 100644
index 00000000000..cd15cca0dd8
--- /dev/null
+++ b/cmd/lotus-sim/util.go
@@ -0,0 +1,18 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation"
+ "github.com/filecoin-project/lotus/lib/ulimit"
+)
+
+func open(cctx *cli.Context) (*simulation.Node, error) {
+ _, _, err := ulimit.ManageFdLimit()
+ if err != nil {
+ fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to raise ulimit: %s\n", err)
+ }
+ return simulation.OpenNode(cctx.Context, cctx.String("repo"))
+}
diff --git a/cmd/lotus-stats/chain.dashboard.json b/cmd/lotus-stats/chain.dashboard.json
index 5ff7654d016..8083c96b183 100644
--- a/cmd/lotus-stats/chain.dashboard.json
+++ b/cmd/lotus-stats/chain.dashboard.json
@@ -1,20 +1,11 @@
{
- "__inputs": [
- {
- "name": "DS_INFLUXDB",
- "label": "InfluxDB",
- "description": "",
- "type": "datasource",
- "pluginId": "influxdb",
- "pluginName": "InfluxDB"
- }
- ],
+ "__inputs": [],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
- "version": "6.5.0-pre"
+ "version": "7.3.0"
},
{
"type": "panel",
@@ -36,8 +27,8 @@
},
{
"type": "panel",
- "id": "table",
- "name": "Table",
+ "id": "table-old",
+ "name": "Table (old)",
"version": ""
}
],
@@ -58,6 +49,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": null,
+ "iteration": 1604018016916,
"links": [],
"panels": [
{
@@ -65,8 +57,15 @@
"bars": true,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
"decimals": 2,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 3,
"fillGradient": 0,
"gridPos": {
@@ -75,6 +74,7 @@
"x": 0,
"y": 0
},
+ "hiddenSeries": false,
"hideTimeOverride": false,
"id": 38,
"interval": "",
@@ -93,15 +93,25 @@
},
"lines": false,
"linewidth": 1,
- "nullPointMode": "null",
+ "nullPointMode": "null as zero",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
- "seriesOverrides": [],
+ "seriesOverrides": [
+ {
+ "alias": "all",
+ "bars": false,
+ "color": "rgb(99, 99, 99)",
+ "fill": 1,
+ "lines": true,
+ "stack": false
+ }
+ ],
"spaceLength": 10,
"stack": true,
"steppedLine": false,
@@ -128,10 +138,11 @@
"type": "fill"
}
],
+ "hide": false,
"measurement": "chain.election",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT count(\"value\") FROM \"chain.election\" WHERE $timeFilter -10m GROUP BY time($__interval), \"miner\" fill(null)",
+ "query": "SELECT sum(\"value\") FROM \"chain.election\" WHERE $timeFilter GROUP BY time($blockInterval), \"miner\" fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -156,13 +167,52 @@
]
],
"tags": []
+ },
+ {
+ "alias": "all",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "hide": false,
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT TRIPLE_EXPONENTIAL_MOVING_AVERAGE(sum(\"value\"), 40) FROM \"chain.election\" WHERE $timeFilter -$blockInterval*40 AND time < now() - $blockInterval*3 GROUP BY time($blockInterval) fill(0)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Blocks Won",
+ "title": "Blocks and Win Counts",
"tooltip": {
"shared": true,
"sort": 2,
@@ -207,7 +257,14 @@
"cacheTimeout": null,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -216,6 +273,7 @@
"x": 0,
"y": 9
},
+ "hiddenSeries": false,
"id": 22,
"interval": "",
"legend": {
@@ -232,9 +290,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -318,7 +377,13 @@
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"format": "s",
"gauge": {
"maxValue": 100,
@@ -350,7 +415,6 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
- "options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
@@ -422,7 +486,13 @@
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"format": "bytes",
"gauge": {
"maxValue": 100,
@@ -454,7 +524,6 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
- "options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
@@ -493,7 +562,7 @@
],
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT sum(\"value\") FROM \"chain.miner_power\" WHERE $timeFilter GROUP BY time(45s)",
+ "query": "SELECT sum(\"value\") FROM \"chain.power\" WHERE $timeFilter GROUP BY time(25s)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -538,7 +607,13 @@
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"format": "none",
"gauge": {
"maxValue": 100,
@@ -570,7 +645,6 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
- "options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
@@ -596,7 +670,7 @@
"groupBy": [
{
"params": [
- "$interval"
+ "$blockInterval"
],
"type": "time"
}
@@ -616,7 +690,7 @@
},
{
"params": [],
- "type": "sum"
+ "type": "count"
}
]
],
@@ -648,7 +722,13 @@
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"format": "none",
"gauge": {
"maxValue": 100,
@@ -680,7 +760,6 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
- "options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
@@ -746,7 +825,13 @@
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"format": "s",
"gauge": {
"maxValue": 100,
@@ -778,7 +863,6 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
- "options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
@@ -848,7 +932,13 @@
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"format": "none",
"gauge": {
"maxValue": 100,
@@ -880,7 +970,6 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
- "options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
@@ -906,7 +995,7 @@
"groupBy": [
{
"params": [
- "$__interval"
+ "$blockInterval"
],
"type": "time"
},
@@ -917,7 +1006,7 @@
"type": "fill"
}
],
- "measurement": "chain.message_gasprice",
+ "measurement": "chain.message_gaspremium",
"orderByTime": "ASC",
"policy": "default",
"refId": "A",
@@ -932,7 +1021,7 @@
},
{
"params": [],
- "type": "mean"
+ "type": "median"
}
]
],
@@ -942,7 +1031,7 @@
"thresholds": "",
"timeFrom": null,
"timeShift": null,
- "title": "Avg Gas Price",
+ "title": "Avg Gas Premium",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
@@ -963,7 +1052,13 @@
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"format": "decbytes",
"gauge": {
"maxValue": 100,
@@ -995,7 +1090,6 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
- "options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
@@ -1021,7 +1115,7 @@
"groupBy": [
{
"params": [
- "$__interval"
+ "$blockInterval"
],
"type": "time"
},
@@ -1078,7 +1172,13 @@
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"format": "bytes",
"gauge": {
"maxValue": 100,
@@ -1110,7 +1210,6 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
- "options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
@@ -1136,7 +1235,7 @@
"groupBy": [
{
"params": [
- "$__interval"
+ "$blockInterval"
],
"type": "time"
},
@@ -1193,7 +1292,13 @@
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"format": "none",
"gauge": {
"maxValue": 100,
@@ -1225,7 +1330,6 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
- "options": {},
"pluginVersion": "6.4.2",
"postfix": "",
"postfixFontSize": "50%",
@@ -1252,7 +1356,7 @@
"groupBy": [
{
"params": [
- "$__interval"
+ "$blockInterval"
],
"type": "time"
},
@@ -1311,8 +1415,14 @@
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
"decimals": 0,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"format": "dateTimeFromNow",
"gauge": {
"maxValue": 100,
@@ -1344,7 +1454,6 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
- "options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
@@ -1413,7 +1522,14 @@
"cacheTimeout": null,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -1422,6 +1538,7 @@
"x": 4,
"y": 16
},
+ "hiddenSeries": false,
"id": 2,
"legend": {
"alignAsTable": true,
@@ -1441,9 +1558,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -1569,7 +1687,13 @@
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"format": "none",
"gauge": {
"maxValue": 100,
@@ -1601,7 +1725,6 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
- "options": {},
"postfix": "FIL",
"postfixFontSize": "50%",
"prefix": "",
@@ -1660,7 +1783,13 @@
},
{
"columns": [],
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"fontSize": "100%",
"gridPos": {
"h": 21,
@@ -1669,7 +1798,6 @@
"y": 19
},
"id": 28,
- "options": {},
"pageSize": null,
"showHeader": true,
"sort": {
@@ -1679,12 +1807,14 @@
"styles": [
{
"alias": "Time",
+ "align": "auto",
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"pattern": "Time",
"type": "hidden"
},
{
"alias": "",
+ "align": "auto",
"colorMode": null,
"colors": [
"rgba(245, 54, 54, 0.9)",
@@ -1701,6 +1831,7 @@
},
{
"alias": "",
+ "align": "auto",
"colorMode": null,
"colors": [
"rgba(245, 54, 54, 0.9)",
@@ -1741,7 +1872,7 @@
"timeShift": null,
"title": "Top Power Table",
"transform": "table",
- "type": "table"
+ "type": "table-old"
},
{
"aliasColors": {},
@@ -1749,7 +1880,14 @@
"cacheTimeout": null,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 5,
"fillGradient": 0,
"gridPos": {
@@ -1758,8 +1896,9 @@
"x": 4,
"y": 19
},
+ "hiddenSeries": false,
"id": 40,
- "interval": "",
+ "interval": "300s",
"legend": {
"alignAsTable": true,
"avg": false,
@@ -1778,11 +1917,12 @@
"lines": true,
"linewidth": 1,
"links": [],
- "nullPointMode": "null",
+ "nullPointMode": "connected",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": true,
+ "pluginVersion": "7.3.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -1817,7 +1957,7 @@
"measurement": "chain.miner_power",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"value\") FROM \"chain.miner_power\" WHERE $timeFilter GROUP BY time($__interval), \"miner\" fill(previous)",
+ "query": "SELECT mean(\"value\") FROM \"chain.miner_power\" WHERE $timeFilter GROUP BY time($__interval), \"miner\" fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -1885,7 +2025,13 @@
},
{
"columns": [],
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"fontSize": "100%",
"gridPos": {
"h": 21,
@@ -1894,7 +2040,6 @@
"y": 19
},
"id": 18,
- "options": {},
"pageSize": null,
"showHeader": true,
"sort": {
@@ -1904,6 +2049,7 @@
"styles": [
{
"alias": "Height",
+ "align": "auto",
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"link": false,
"mappingType": 1,
@@ -1914,6 +2060,7 @@
},
{
"alias": "Tipset",
+ "align": "auto",
"colorMode": null,
"colors": [
"rgba(245, 54, 54, 0.9)",
@@ -1930,6 +2077,7 @@
},
{
"alias": "",
+ "align": "auto",
"colorMode": null,
"colors": [
"rgba(245, 54, 54, 0.9)",
@@ -1973,74 +2121,77 @@
"timeShift": null,
"title": "Chain Table",
"transform": "timeseries_to_columns",
- "type": "table"
+ "type": "table-old"
},
{
"aliasColors": {},
"bars": false,
- "cacheTimeout": null,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
- "h": 6,
+ "h": 7,
"w": 12,
"x": 4,
"y": 27
},
- "id": 24,
+ "hiddenSeries": false,
+ "id": 50,
"legend": {
- "alignAsTable": false,
"avg": false,
"current": false,
"max": false,
"min": false,
- "rightSide": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
- "links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "/.*/",
- "color": "rgb(31, 120, 193)"
- }
- ],
+ "seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
+ "alias": "Total GasLimit",
"groupBy": [
{
"params": [
- "$__interval"
+ "$blockInterval"
],
"type": "time"
},
{
"params": [
- "previous"
+ "null"
],
"type": "fill"
}
],
- "measurement": "chain.pledge_collateral",
+ "measurement": "chain.gas_limit_total",
"orderByTime": "ASC",
"policy": "default",
+ "query": "SELECT max(\"value\") FROM \"chain.gas_limit_total\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)",
+ "rawQuery": false,
"refId": "A",
"resultFormat": "time_series",
"select": [
@@ -2053,18 +2204,107 @@
},
{
"params": [],
- "type": "mean"
+ "type": "max"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "Total GasUsed",
+ "groupBy": [
+ {
+ "params": [
+ "$blockInterval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "chain.gas_used_total",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT max(\"value\") FROM \"chain.gas_used_total\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)",
+ "rawQuery": false,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "max"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "Total Unique GasLimit",
+ "groupBy": [
+ {
+ "params": [
+ "$blockInterval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "chain.gas_limit_uniq_total",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT max(\"value\") FROM \"chain.gas_limit_total\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)",
+ "rawQuery": false,
+ "refId": "C",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "max"
}
]
],
"tags": []
}
],
- "thresholds": [],
+ "thresholds": [
+ {
+ "colorMode": "custom",
+ "fill": false,
+ "fillColor": "rgba(50, 116, 217, 0.2)",
+ "line": true,
+ "lineColor": "rgba(31, 96, 196, 0.6)",
+ "op": "gt",
+ "value": 25000000000,
+ "yaxis": "left"
+ }
+ ],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Pledge Collateral",
+ "title": "Network Gas",
"tooltip": {
"shared": true,
"sort": 0,
@@ -2081,7 +2321,7 @@
"yaxes": [
{
"format": "short",
- "label": "FIL",
+ "label": null,
"logBase": 1,
"max": null,
"min": null,
@@ -2107,15 +2347,23 @@
"cacheTimeout": null,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
- "h": 7,
+ "h": 6,
"w": 12,
"x": 4,
- "y": 33
+ "y": 34
},
+ "hiddenSeries": false,
"id": 44,
"legend": {
"avg": false,
@@ -2131,9 +2379,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -2146,7 +2395,7 @@
"groupBy": [
{
"params": [
- "$__interval"
+ "$blockInterval"
],
"type": "time"
},
@@ -2228,7 +2477,14 @@
"bars": true,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -2237,6 +2493,7 @@
"x": 0,
"y": 40
},
+ "hiddenSeries": false,
"id": 34,
"legend": {
"alignAsTable": true,
@@ -2251,11 +2508,12 @@
},
"lines": false,
"linewidth": 1,
- "nullPointMode": "null",
+ "nullPointMode": "null as zero",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -2269,7 +2527,7 @@
"groupBy": [
{
"params": [
- "$__interval"
+ "$blockInterval"
],
"type": "time"
},
@@ -2360,7 +2618,14 @@
"bars": true,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_INFLUXDB}",
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -2369,6 +2634,7 @@
"x": 12,
"y": 40
},
+ "hiddenSeries": false,
"id": 36,
"legend": {
"alignAsTable": true,
@@ -2387,11 +2653,12 @@
},
"lines": false,
"linewidth": 1,
- "nullPointMode": "null",
+ "nullPointMode": "null as zero",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -2437,7 +2704,7 @@
"measurement": "chain.message_count",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT sum(\"value\") FROM \"chain.message_count\" WHERE $timeFilter GROUP BY time($__interval), \"method\", \"exitcode\", \"actor\" fill(null)",
+ "query": "SELECT sum(\"value\") FROM \"chain.message_count\" WHERE $timeFilter GROUP BY time($blockInterval), \"method\", \"exitcode\", \"actor\" fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -2498,14 +2765,701 @@
"align": false,
"alignLevel": null
}
- }
- ],
- "refresh": "45s",
- "schemaVersion": 20,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": []
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 49
+ },
+ "hiddenSeries": false,
+ "id": 48,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.0",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "Transfer Fee",
+ "groupBy": [
+ {
+ "params": [
+ "$blockInterval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "chain.basefee",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"value\")*1000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Cost of simple transfer [FIL]",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 2,
+ "format": "sci",
+ "label": "",
+ "logBase": 10,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 49
+ },
+ "hiddenSeries": false,
+ "id": 46,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.0",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "Transfer Fee",
+ "groupBy": [
+ {
+ "params": [
+ "$blockInterval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "chain.basefee",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"value\") FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Base Fee[FIL]",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$network",
+ "decimals": null,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 57
+ },
+ "hiddenSeries": false,
+ "id": 51,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.0",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "Precommit Transfer Fee",
+ "groupBy": [
+ {
+ "params": [
+ "$blockInterval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "chain.basefee",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"value\")*24000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "Commit Transfer Fee",
+ "groupBy": [
+ {
+ "params": [
+ "$blockInterval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "chain.basefee",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"value\")*56000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Message Gas fees [FIL]",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 2,
+ "format": "none",
+ "label": null,
+ "logBase": 10,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$network",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 57
+ },
+ "hiddenSeries": false,
+ "id": 52,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.0",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "10 PIB PoSt Fee",
+ "groupBy": [
+ {
+ "params": [
+ "$blockInterval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "chain.basefee",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"value\")*940000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "750TiB miner PoSt Fee",
+ "groupBy": [
+ {
+ "params": [
+ "$blockInterval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "chain.basefee",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"value\")*580000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "10TiB miner PoSt Fee",
+ "groupBy": [
+ {
+ "params": [
+ "$blockInterval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "chain.basefee",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"value\")*380000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)",
+ "rawQuery": true,
+ "refId": "C",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Message Gas fees [FIL]",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": 2,
+ "format": "none",
+ "label": null,
+ "logBase": 10,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": false,
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "filecoin-ntwk-testnet",
+ "value": "filecoin-ntwk-testnet"
+ },
+ "error": null,
+ "hide": 0,
+ "includeAll": false,
+ "label": "Network",
+ "multi": false,
+ "name": "network",
+ "options": [],
+ "query": "influxdb",
+ "queryValue": "",
+ "refresh": 1,
+ "regex": "/^filecoin-ntwk-/",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "auto": false,
+ "auto_count": 30,
+ "auto_min": "10s",
+ "current": {
+ "selected": false,
+ "text": "30s",
+ "value": "30s"
+ },
+ "error": null,
+ "hide": 2,
+ "label": null,
+ "name": "blockInterval",
+ "options": [
+ {
+ "selected": true,
+ "text": "30s",
+ "value": "30s"
+ }
+ ],
+ "query": "30s",
+ "refresh": 2,
+ "skipUrlSync": false,
+ "type": "interval"
+ }
+ ]
},
"time": {
"from": "now-30m",
@@ -2515,6 +3469,7 @@
"refresh_intervals": [
"5s",
"10s",
+ "25s",
"30s",
"45s",
"1m",
@@ -2527,7 +3482,7 @@
]
},
"timezone": "",
- "title": "Chain",
+ "title": "Filecoin Chain Stats",
"uid": "z6FtI92Zz",
- "version": 9
+ "version": 4
}
diff --git a/cmd/lotus-stats/docker-compose.yml b/cmd/lotus-stats/docker-compose.yml
index 03d573b94ab..b08a2157eea 100644
--- a/cmd/lotus-stats/docker-compose.yml
+++ b/cmd/lotus-stats/docker-compose.yml
@@ -4,10 +4,10 @@ services:
influxdb:
image: influxdb:latest
container_name: influxdb
+ ports:
+ - "18086:8086"
environment:
- INFLUXDB_DB=lotus
- ports:
- - "8086:8086"
volumes:
- influxdb:/var/lib/influxdb
@@ -15,7 +15,7 @@ services:
image: grafana/grafana:latest
container_name: grafana
ports:
- - "3000:3000"
+ - "13000:3000"
links:
- influxdb
volumes:
diff --git a/cmd/lotus-stats/env.stats b/cmd/lotus-stats/env.stats
index a76e7554aa2..ad5ec1619ee 100644
--- a/cmd/lotus-stats/env.stats
+++ b/cmd/lotus-stats/env.stats
@@ -1,3 +1,3 @@
-export INFLUX_ADDR="http://localhost:8086"
+export INFLUX_ADDR="http://localhost:18086"
export INFLUX_USER=""
export INFLUX_PASS=""
diff --git a/cmd/lotus-stats/main.go b/cmd/lotus-stats/main.go
index 3ca139b7dc0..b4c13ea8c26 100644
--- a/cmd/lotus-stats/main.go
+++ b/cmd/lotus-stats/main.go
@@ -2,71 +2,160 @@ package main
import (
"context"
- "flag"
"os"
+ "github.com/filecoin-project/lotus/build"
+ lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/tools/stats"
+
logging "github.com/ipfs/go-log/v2"
+ "github.com/urfave/cli/v2"
)
var log = logging.Logger("stats")
-const (
- influxAddrEnvVar = "INFLUX_ADDR"
- influxUserEnvVar = "INFLUX_USER"
- influxPassEnvVar = "INFLUX_PASS"
-)
-
func main() {
- var repo string = "~/.lotus"
- var database string = "lotus"
- var reset bool = false
- var nosync bool = false
- var height int64 = 0
- var headlag int = 3
-
- flag.StringVar(&repo, "repo", repo, "lotus repo path")
- flag.StringVar(&database, "database", database, "influx database")
- flag.Int64Var(&height, "height", height, "block height to start syncing from (0 will resume)")
- flag.IntVar(&headlag, "head-lag", headlag, "number of head events to hold to protect against small reorgs")
- flag.BoolVar(&reset, "reset", reset, "truncate database before starting stats gathering")
- flag.BoolVar(&nosync, "nosync", nosync, "skip waiting for sync")
-
- flag.Parse()
-
- ctx := context.Background()
-
- influx, err := stats.InfluxClient(os.Getenv(influxAddrEnvVar), os.Getenv(influxUserEnvVar), os.Getenv(influxPassEnvVar))
- if err != nil {
- log.Fatal(err)
+ local := []*cli.Command{
+ runCmd,
+ versionCmd,
}
- if reset {
- if err := stats.ResetDatabase(influx, database); err != nil {
- log.Fatal(err)
- }
+ app := &cli.App{
+ Name: "lotus-stats",
+ Usage: "Collect basic information about a filecoin network using lotus",
+ Version: build.UserVersion(),
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "lotus-path",
+ EnvVars: []string{"LOTUS_PATH"},
+ Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME
+ },
+ &cli.StringFlag{
+ Name: "log-level",
+ EnvVars: []string{"LOTUS_STATS_LOG_LEVEL"},
+ Value: "info",
+ },
+ },
+ Before: func(cctx *cli.Context) error {
+ return logging.SetLogLevel("stats", cctx.String("log-level"))
+ },
+ Commands: local,
+ }
+
+ if err := app.Run(os.Args); err != nil {
+ log.Errorw("exit in error", "err", err)
+ os.Exit(1)
+ return
}
+}
+
+var versionCmd = &cli.Command{
+ Name: "version",
+ Usage: "Print version",
+ Action: func(cctx *cli.Context) error {
+ cli.VersionPrinter(cctx)
+ return nil
+ },
+}
+
+var runCmd = &cli.Command{
+ Name: "run",
+ Usage: "",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "influx-database",
+ EnvVars: []string{"LOTUS_STATS_INFLUX_DATABASE"},
+ Usage: "influx database",
+ Value: "",
+ },
+ &cli.StringFlag{
+ Name: "influx-hostname",
+ EnvVars: []string{"LOTUS_STATS_INFLUX_HOSTNAME"},
+ Value: "http://localhost:8086",
+ Usage: "influx hostname",
+ },
+ &cli.StringFlag{
+ Name: "influx-username",
+ EnvVars: []string{"LOTUS_STATS_INFLUX_USERNAME"},
+ Usage: "influx username",
+ Value: "",
+ },
+ &cli.StringFlag{
+ Name: "influx-password",
+ EnvVars: []string{"LOTUS_STATS_INFLUX_PASSWORD"},
+ Usage: "influx password",
+ Value: "",
+ },
+ &cli.IntFlag{
+ Name: "height",
+ EnvVars: []string{"LOTUS_STATS_HEIGHT"},
+ Usage: "tipset height to start processing from",
+ Value: 0,
+ },
+ &cli.IntFlag{
+ Name: "head-lag",
+ EnvVars: []string{"LOTUS_STATS_HEAD_LAG"},
+ Usage: "the number of tipsets to delay processing on to smooth chain reorgs",
+ Value: int(build.MessageConfidence),
+ },
+ &cli.BoolFlag{
+ Name: "no-sync",
+ EnvVars: []string{"LOTUS_STATS_NO_SYNC"},
+ Usage: "do not wait for chain sync to complete",
+ Value: false,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ ctx := context.Background()
+
+ resetFlag := cctx.Bool("reset")
+ noSyncFlag := cctx.Bool("no-sync")
+ heightFlag := cctx.Int("height")
+ headLagFlag := cctx.Int("head-lag")
+
+ influxHostnameFlag := cctx.String("influx-hostname")
+ influxUsernameFlag := cctx.String("influx-username")
+ influxPasswordFlag := cctx.String("influx-password")
+ influxDatabaseFlag := cctx.String("influx-database")
+
+ log.Infow("opening influx client", "hostname", influxHostnameFlag, "username", influxUsernameFlag, "database", influxDatabaseFlag)
- if !reset && height == 0 {
- h, err := stats.GetLastRecordedHeight(influx, database)
+ influx, err := stats.InfluxClient(influxHostnameFlag, influxUsernameFlag, influxPasswordFlag)
if err != nil {
- log.Info(err)
+ log.Fatal(err)
}
- height = h
- }
+ if resetFlag {
+ if err := stats.ResetDatabase(influx, influxDatabaseFlag); err != nil {
+ log.Fatal(err)
+ }
+ }
- api, closer, err := stats.GetFullNodeAPI(ctx, repo)
- if err != nil {
- log.Fatal(err)
- }
- defer closer()
+ height := int64(heightFlag)
- if !nosync {
- if err := stats.WaitForSyncComplete(ctx, api); err != nil {
- log.Fatal(err)
+ if !resetFlag && height == 0 {
+ h, err := stats.GetLastRecordedHeight(influx, influxDatabaseFlag)
+ if err != nil {
+ log.Info(err)
+ }
+
+ height = h
}
- }
- stats.Collect(ctx, api, influx, database, height, headlag)
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ if !noSyncFlag {
+ if err := stats.WaitForSyncComplete(ctx, api); err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ stats.Collect(ctx, api, influx, influxDatabaseFlag, height, headLagFlag)
+
+ return nil
+ },
}
diff --git a/cmd/lotus-stats/setup.bash b/cmd/lotus-stats/setup.bash
index e2812b93a61..6510c2fc6d7 100755
--- a/cmd/lotus-stats/setup.bash
+++ b/cmd/lotus-stats/setup.bash
@@ -1,10 +1,10 @@
#!/usr/bin/env bash
-GRAFANA_HOST="localhost:3000"
+GRAFANA_HOST="http://localhost:13000"
curl -s -XPOST http://admin:admin@$GRAFANA_HOST/api/datasources -H 'Content-Type: text/json' --data-binary @- > /dev/null << EOF
{
- "name":"InfluxDB",
+ "name":"filecoin-ntwk-localstats",
"type":"influxdb",
"database":"lotus",
"url": "http://influxdb:8086",
diff --git a/cmd/lotus-storage-miner/actor.go b/cmd/lotus-storage-miner/actor.go
index 0027ceb73b4..6ba57366376 100644
--- a/cmd/lotus-storage-miner/actor.go
+++ b/cmd/lotus-storage-miner/actor.go
@@ -21,7 +21,7 @@ import (
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
- "github.com/filecoin-project/lotus/api/apibstore"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
@@ -29,7 +29,6 @@ import (
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/lib/tablewriter"
- "github.com/filecoin-project/lotus/storage"
)
var actorCmd = &cli.Command{
@@ -56,8 +55,22 @@ var actorSetAddrsCmd = &cli.Command{
Usage: "set gas limit",
Value: 0,
},
+ &cli.BoolFlag{
+ Name: "unset",
+ Usage: "unset address",
+ Value: false,
+ },
},
Action: func(cctx *cli.Context) error {
+ args := cctx.Args().Slice()
+ unset := cctx.Bool("unset")
+ if len(args) == 0 && !unset {
+ return cli.ShowSubcommandHelp(cctx)
+ }
+ if len(args) > 0 && unset {
+ return fmt.Errorf("unset can only be used with no arguments")
+ }
+
nodeAPI, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
@@ -73,7 +86,7 @@ var actorSetAddrsCmd = &cli.Command{
ctx := lcli.ReqContext(cctx)
var addrs []abi.Multiaddrs
- for _, a := range cctx.Args().Slice() {
+ for _, a := range args {
maddr, err := ma.NewMultiaddr(a)
if err != nil {
return fmt.Errorf("failed to parse %q as a multiaddr: %w", a, err)
@@ -310,7 +323,7 @@ var actorRepayDebtCmd = &cli.Command{
return err
}
- store := adt.WrapStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(api)))
+ store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api)))
mst, err := miner.Load(store, mact)
if err != nil {
@@ -377,12 +390,15 @@ var actorControlList = &cli.Command{
Name: "verbose",
},
&cli.BoolFlag{
- Name: "color",
- Value: true,
+ Name: "color",
+ Usage: "use color in display output",
+ DefaultText: "depends on output being a TTY",
},
},
Action: func(cctx *cli.Context) error {
- color.NoColor = !cctx.Bool("color")
+ if cctx.IsSet("color") {
+ color.NoColor = !cctx.Bool("color")
+ }
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
@@ -416,9 +432,59 @@ var actorControlList = &cli.Command{
tablewriter.Col("balance"),
)
- postAddr, err := storage.AddressFor(ctx, api, mi, storage.PoStAddr, types.FromFil(1))
+ ac, err := nodeApi.ActorAddressConfig(ctx)
if err != nil {
- return xerrors.Errorf("getting address for post: %w", err)
+ return err
+ }
+
+ commit := map[address.Address]struct{}{}
+ precommit := map[address.Address]struct{}{}
+ terminate := map[address.Address]struct{}{}
+ dealPublish := map[address.Address]struct{}{}
+ post := map[address.Address]struct{}{}
+
+ for _, ca := range mi.ControlAddresses {
+ post[ca] = struct{}{}
+ }
+
+ for _, ca := range ac.PreCommitControl {
+ ca, err := api.StateLookupID(ctx, ca, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ delete(post, ca)
+ precommit[ca] = struct{}{}
+ }
+
+ for _, ca := range ac.CommitControl {
+ ca, err := api.StateLookupID(ctx, ca, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ delete(post, ca)
+ commit[ca] = struct{}{}
+ }
+
+ for _, ca := range ac.TerminateControl {
+ ca, err := api.StateLookupID(ctx, ca, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ delete(post, ca)
+ terminate[ca] = struct{}{}
+ }
+
+ for _, ca := range ac.DealPublishControl {
+ ca, err := api.StateLookupID(ctx, ca, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ delete(post, ca)
+ dealPublish[ca] = struct{}{}
}
printKey := func(name string, a address.Address) {
@@ -453,9 +519,21 @@ var actorControlList = &cli.Command{
if a == mi.Worker {
uses = append(uses, color.YellowString("other"))
}
- if a == postAddr {
+ if _, ok := post[a]; ok {
uses = append(uses, color.GreenString("post"))
}
+ if _, ok := precommit[a]; ok {
+ uses = append(uses, color.CyanString("precommit"))
+ }
+ if _, ok := commit[a]; ok {
+ uses = append(uses, color.BlueString("commit"))
+ }
+ if _, ok := terminate[a]; ok {
+ uses = append(uses, color.YellowString("terminate"))
+ }
+ if _, ok := dealPublish[a]; ok {
+ uses = append(uses, color.MagentaString("deals"))
+ }
tw.Write(map[string]interface{}{
"name": name,
@@ -591,8 +669,8 @@ var actorControlSet = &cli.Command{
var actorSetOwnerCmd = &cli.Command{
Name: "set-owner",
- Usage: "Set owner address",
- ArgsUsage: "[address]",
+ Usage: "Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner)",
+ ArgsUsage: "[newOwnerAddress senderAddress]",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "really-do-it",
@@ -606,8 +684,8 @@ var actorSetOwnerCmd = &cli.Command{
return nil
}
- if !cctx.Args().Present() {
- return fmt.Errorf("must pass address of new owner address")
+ if cctx.NArg() != 2 {
+ return fmt.Errorf("must pass new owner address and sender address")
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
@@ -629,53 +707,42 @@ var actorSetOwnerCmd = &cli.Command{
return err
}
- newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK)
+ newAddrId, err := api.StateLookupID(ctx, na, types.EmptyTSK)
if err != nil {
return err
}
- maddr, err := nodeApi.ActorAddress(ctx)
+ fa, err := address.NewFromString(cctx.Args().Get(1))
if err != nil {
return err
}
- mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ fromAddrId, err := api.StateLookupID(ctx, fa, types.EmptyTSK)
if err != nil {
return err
}
- sp, err := actors.SerializeParams(&newAddr)
+ maddr, err := nodeApi.ActorAddress(ctx)
if err != nil {
- return xerrors.Errorf("serializing params: %w", err)
+ return err
}
- smsg, err := api.MpoolPushMessage(ctx, &types.Message{
- From: mi.Owner,
- To: maddr,
- Method: miner.Methods.ChangeOwnerAddress,
- Value: big.Zero(),
- Params: sp,
- }, nil)
+ mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
- return xerrors.Errorf("mpool push: %w", err)
+ return err
}
- fmt.Println("Propose Message CID:", smsg.Cid())
-
- // wait for it to get mined into a block
- wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
- if err != nil {
- return err
+ if fromAddrId != mi.Owner && fromAddrId != newAddrId {
+ return xerrors.New("from address must either be the old owner or the new owner")
}
- // check it executed successfully
- if wait.Receipt.ExitCode != 0 {
- fmt.Println("Propose owner change failed!")
- return err
+ sp, err := actors.SerializeParams(&newAddrId)
+ if err != nil {
+ return xerrors.Errorf("serializing params: %w", err)
}
- smsg, err = api.MpoolPushMessage(ctx, &types.Message{
- From: newAddr,
+ smsg, err := api.MpoolPushMessage(ctx, &types.Message{
+ From: fromAddrId,
To: maddr,
Method: miner.Methods.ChangeOwnerAddress,
Value: big.Zero(),
@@ -685,20 +752,22 @@ var actorSetOwnerCmd = &cli.Command{
return xerrors.Errorf("mpool push: %w", err)
}
- fmt.Println("Approve Message CID:", smsg.Cid())
+ fmt.Println("Message CID:", smsg.Cid())
// wait for it to get mined into a block
- wait, err = api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
+ wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
if err != nil {
return err
}
// check it executed successfully
if wait.Receipt.ExitCode != 0 {
- fmt.Println("Approve owner change failed!")
+ fmt.Println("owner change failed!")
return err
}
+ fmt.Println("message succeeded!")
+
return nil
},
}
diff --git a/cmd/lotus-storage-miner/actor_test.go b/cmd/lotus-storage-miner/actor_test.go
index 949171699e0..073a8305988 100644
--- a/cmd/lotus-storage-miner/actor_test.go
+++ b/cmd/lotus-storage-miner/actor_test.go
@@ -7,23 +7,20 @@ import (
"fmt"
"regexp"
"strconv"
- "sync/atomic"
"testing"
"time"
- logging "github.com/ipfs/go-log/v2"
+ "github.com/filecoin-project/go-state-types/network"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/lotus/api/test"
- "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/lib/lotuslog"
+ "github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/node/repo"
- builder "github.com/filecoin-project/lotus/node/test"
)
func TestWorkerKeyChange(t *testing.T) {
@@ -34,43 +31,24 @@ func TestWorkerKeyChange(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- _ = logging.SetLogLevel("*", "INFO")
-
- policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
- policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
- policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
-
- lotuslog.SetupLogLevels()
- logging.SetLogLevel("miner", "ERROR")
- logging.SetLogLevel("chainstore", "ERROR")
- logging.SetLogLevel("chain", "ERROR")
- logging.SetLogLevel("pubsub", "ERROR")
- logging.SetLogLevel("sub", "ERROR")
- logging.SetLogLevel("storageminer", "ERROR")
+ kit.QuietMiningLogs()
blocktime := 1 * time.Millisecond
-
- n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithUpgradeAt(1), test.FullNodeWithUpgradeAt(1)}, test.OneMiner)
-
- client1 := n[0]
- client2 := n[1]
-
- // Connect the nodes.
- addrinfo, err := client1.NetAddrsListen(ctx)
- require.NoError(t, err)
- err = client2.NetConnect(ctx, addrinfo)
- require.NoError(t, err)
+ client1, client2, miner, ens := kit.EnsembleTwoOne(t, kit.MockProofs(),
+ kit.ConstructorOpts(kit.InstantaneousNetworkVersion(network.Version13)),
+ )
+ ens.InterconnectAll().BeginMining(blocktime)
output := bytes.NewBuffer(nil)
run := func(cmd *cli.Command, args ...string) error {
app := cli.NewApp()
app.Metadata = map[string]interface{}{
"repoType": repo.StorageMiner,
- "testnode-full": n[0],
- "testnode-storage": sn[0],
+ "testnode-full": client1,
+ "testnode-storage": miner,
}
app.Writer = output
- build.RunningNodeType = build.NodeMiner
+ api.RunningNodeType = api.NodeMiner
fs := flag.NewFlagSet("", flag.ContinueOnError)
for _, f := range cmd.Flags {
@@ -84,29 +62,11 @@ func TestWorkerKeyChange(t *testing.T) {
return cmd.Action(cctx)
}
- // setup miner
- mine := int64(1)
- done := make(chan struct{})
- go func() {
- defer close(done)
- for atomic.LoadInt64(&mine) == 1 {
- time.Sleep(blocktime)
- if err := sn[0].MineOne(ctx, test.MineNext); err != nil {
- t.Error(err)
- }
- }
- }()
- defer func() {
- atomic.AddInt64(&mine, -1)
- fmt.Println("shutting down mining")
- <-done
- }()
-
newKey, err := client1.WalletNew(ctx, types.KTBLS)
require.NoError(t, err)
// Initialize wallet.
- test.SendFunds(ctx, t, client1, newKey, abi.NewTokenAmount(0))
+ kit.SendFunds(ctx, t, client1, newKey, abi.NewTokenAmount(0))
require.NoError(t, run(actorProposeChangeWorker, "--really-do-it", newKey.String()))
@@ -126,14 +86,8 @@ func TestWorkerKeyChange(t *testing.T) {
require.Error(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String()))
output.Reset()
- for {
- head, err := client1.ChainHead(ctx)
- require.NoError(t, err)
- if head.Height() >= abi.ChainEpoch(targetEpoch) {
- break
- }
- build.Clock.Sleep(10 * blocktime)
- }
+ client1.WaitTillChain(ctx, kit.HeightAtLeast(abi.ChainEpoch(targetEpoch)))
+
require.NoError(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String()))
output.Reset()
@@ -142,23 +96,8 @@ func TestWorkerKeyChange(t *testing.T) {
// Wait for finality (worker key switch).
targetHeight := head.Height() + policy.ChainFinality
- for {
- head, err := client1.ChainHead(ctx)
- require.NoError(t, err)
- if head.Height() >= targetHeight {
- break
- }
- build.Clock.Sleep(10 * blocktime)
- }
+ client1.WaitTillChain(ctx, kit.HeightAtLeast(targetHeight))
// Make sure the other node can catch up.
- for i := 0; i < 20; i++ {
- head, err := client2.ChainHead(ctx)
- require.NoError(t, err)
- if head.Height() >= targetHeight {
- return
- }
- build.Clock.Sleep(10 * blocktime)
- }
- t.Fatal("failed to reach target epoch on the second miner")
+ client2.WaitTillChain(ctx, kit.HeightAtLeast(targetHeight))
}
diff --git a/cmd/lotus-storage-miner/allinfo_test.go b/cmd/lotus-storage-miner/allinfo_test.go
index a458c024b55..5f30b4fec3d 100644
--- a/cmd/lotus-storage-miner/allinfo_test.go
+++ b/cmd/lotus-storage-miner/allinfo_test.go
@@ -1,22 +1,18 @@
package main
import (
+ "context"
"flag"
"testing"
"time"
- logging "github.com/ipfs/go-log/v2"
+ "github.com/filecoin-project/lotus/itests/kit"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
- "github.com/filecoin-project/go-state-types/abi"
-
- "github.com/filecoin-project/lotus/api/test"
- "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/policy"
- "github.com/filecoin-project/lotus/lib/lotuslog"
"github.com/filecoin-project/lotus/node/repo"
- builder "github.com/filecoin-project/lotus/node/test"
)
func TestMinerAllInfo(t *testing.T) {
@@ -24,20 +20,9 @@ func TestMinerAllInfo(t *testing.T) {
t.Skip("skipping test in short mode")
}
- _ = logging.SetLogLevel("*", "INFO")
-
- policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
- policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
- policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
-
_test = true
- lotuslog.SetupLogLevels()
- logging.SetLogLevel("miner", "ERROR")
- logging.SetLogLevel("chainstore", "ERROR")
- logging.SetLogLevel("chain", "ERROR")
- logging.SetLogLevel("sub", "ERROR")
- logging.SetLogLevel("storageminer", "ERROR")
+ kit.QuietMiningLogs()
oldDelay := policy.GetPreCommitChallengeDelay()
policy.SetPreCommitChallengeDelay(5)
@@ -45,32 +30,29 @@ func TestMinerAllInfo(t *testing.T) {
policy.SetPreCommitChallengeDelay(oldDelay)
})
- var n []test.TestNode
- var sn []test.TestStorageNode
+ client, miner, ens := kit.EnsembleMinimal(t)
+ ens.InterconnectAll().BeginMining(time.Second)
run := func(t *testing.T) {
app := cli.NewApp()
app.Metadata = map[string]interface{}{
"repoType": repo.StorageMiner,
- "testnode-full": n[0],
- "testnode-storage": sn[0],
+ "testnode-full": client,
+ "testnode-storage": miner,
}
- build.RunningNodeType = build.NodeMiner
+ api.RunningNodeType = api.NodeMiner
cctx := cli.NewContext(app, flag.NewFlagSet("", flag.ContinueOnError), nil)
require.NoError(t, infoAllCmd.Action(cctx))
}
- bp := func(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) {
- n, sn = builder.Builder(t, fullOpts, storage)
-
- t.Run("pre-info-all", run)
-
- return n, sn
- }
+ t.Run("pre-info-all", run)
- test.TestDealFlow(t, bp, time.Second, false, false)
+ dh := kit.NewDealHarness(t, client, miner, miner)
+ deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6})
+ outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
+ kit.AssertFilesEqual(t, inPath, outPath)
t.Run("post-info-all", run)
}
diff --git a/cmd/lotus-storage-miner/info.go b/cmd/lotus-storage-miner/info.go
index 213d62e6e93..3941ce5632f 100644
--- a/cmd/lotus-storage-miner/info.go
+++ b/cmd/lotus-storage-miner/info.go
@@ -3,7 +3,12 @@ package main
import (
"context"
"fmt"
+ "math"
+ corebig "math/big"
+ "os"
"sort"
+ "strings"
+ "text/tabwriter"
"time"
"github.com/fatih/color"
@@ -12,19 +17,20 @@ import (
cbor "github.com/ipfs/go-ipld-cbor"
+ "github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/api/apibstore"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
- "github.com/filecoin-project/lotus/lib/blockstore"
- "github.com/filecoin-project/lotus/lib/bufbstore"
)
var infoCmd = &cli.Command{
@@ -43,8 +49,6 @@ var infoCmd = &cli.Command{
}
func infoCmdAct(cctx *cli.Context) error {
- color.NoColor = !cctx.Bool("color")
-
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
@@ -59,7 +63,39 @@ func infoCmdAct(cctx *cli.Context) error {
ctx := lcli.ReqContext(cctx)
- maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor"))
+ fmt.Print("Chain: ")
+
+ head, err := api.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*3/2): // within 1.5 epochs
+ fmt.Printf("[%s]", color.GreenString("sync ok"))
+ case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*5): // within 5 epochs
+ fmt.Printf("[%s]", color.YellowString("sync slow (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)))
+ default:
+ fmt.Printf("[%s]", color.RedString("sync behind! (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)))
+ }
+
+ basefee := head.MinTicketBlock().ParentBaseFee
+ gasCol := []color.Attribute{color.FgBlue}
+ switch {
+ case basefee.GreaterThan(big.NewInt(7000_000_000)): // 7 nFIL
+ gasCol = []color.Attribute{color.BgRed, color.FgBlack}
+ case basefee.GreaterThan(big.NewInt(3000_000_000)): // 3 nFIL
+ gasCol = []color.Attribute{color.FgRed}
+ case basefee.GreaterThan(big.NewInt(750_000_000)): // 750 uFIL
+ gasCol = []color.Attribute{color.FgYellow}
+ case basefee.GreaterThan(big.NewInt(100_000_000)): // 100 uFIL
+ gasCol = []color.Attribute{color.FgGreen}
+ }
+ fmt.Printf(" [basefee %s]", color.New(gasCol...).Sprint(types.FIL(basefee).Short()))
+
+ fmt.Println()
+
+ maddr, err := getActorAddress(ctx, cctx)
if err != nil {
return err
}
@@ -69,40 +105,43 @@ func infoCmdAct(cctx *cli.Context) error {
return err
}
- tbs := bufbstore.NewTieredBstore(apibstore.NewAPIBlockstore(api), blockstore.NewTemporary())
+ tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(api), blockstore.NewMemory())
mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact)
if err != nil {
return err
}
- fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr))
-
// Sector size
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
- fmt.Printf("Sector Size: %s\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))))
+ ssize := types.SizeStr(types.NewInt(uint64(mi.SectorSize)))
+ fmt.Printf("Miner: %s (%s sectors)\n", color.BlueString("%s", maddr), ssize)
pow, err := api.StateMinerPower(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
- rpercI := types.BigDiv(types.BigMul(pow.MinerPower.RawBytePower, types.NewInt(1000000)), pow.TotalPower.RawBytePower)
- qpercI := types.BigDiv(types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(1000000)), pow.TotalPower.QualityAdjPower)
-
- fmt.Printf("Byte Power: %s / %s (%0.4f%%)\n",
- color.BlueString(types.SizeStr(pow.MinerPower.RawBytePower)),
- types.SizeStr(pow.TotalPower.RawBytePower),
- float64(rpercI.Int64())/10000)
-
- fmt.Printf("Actual Power: %s / %s (%0.4f%%)\n",
+ fmt.Printf("Power: %s / %s (%0.4f%%)\n",
color.GreenString(types.DeciStr(pow.MinerPower.QualityAdjPower)),
types.DeciStr(pow.TotalPower.QualityAdjPower),
- float64(qpercI.Int64())/10000)
+ types.BigDivFloat(
+ types.BigMul(pow.MinerPower.QualityAdjPower, big.NewInt(100)),
+ pow.TotalPower.QualityAdjPower,
+ ),
+ )
+ fmt.Printf("\tRaw: %s / %s (%0.4f%%)\n",
+ color.BlueString(types.SizeStr(pow.MinerPower.RawBytePower)),
+ types.SizeStr(pow.TotalPower.RawBytePower),
+ types.BigDivFloat(
+ types.BigMul(pow.MinerPower.RawBytePower, big.NewInt(100)),
+ pow.TotalPower.RawBytePower,
+ ),
+ )
secCounts, err := api.StateMinerSectorCount(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
@@ -116,7 +155,7 @@ func infoCmdAct(cctx *cli.Context) error {
} else {
var faultyPercentage float64
if secCounts.Live != 0 {
- faultyPercentage = float64(10000*nfaults/secCounts.Live) / 100.
+ faultyPercentage = float64(100*nfaults) / float64(secCounts.Live)
}
fmt.Printf("\tProving: %s (%s Faulty, %.2f%%)\n",
types.SizeStr(types.BigMul(types.NewInt(proving), types.NewInt(uint64(mi.SectorSize)))),
@@ -127,16 +166,54 @@ func infoCmdAct(cctx *cli.Context) error {
if !pow.HasMinPower {
fmt.Print("Below minimum power threshold, no blocks will be won")
} else {
- expWinChance := float64(types.BigMul(qpercI, types.NewInt(build.BlocksPerEpoch)).Int64()) / 1000000
- if expWinChance > 0 {
- if expWinChance > 1 {
- expWinChance = 1
+
+ winRatio := new(corebig.Rat).SetFrac(
+ types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(build.BlocksPerEpoch)).Int,
+ pow.TotalPower.QualityAdjPower.Int,
+ )
+
+ if winRatioFloat, _ := winRatio.Float64(); winRatioFloat > 0 {
+
+ // if the corresponding poisson distribution isn't infinitely small then
+ // throw it into the mix as well, accounting for multi-wins
+ winRationWithPoissonFloat := -math.Expm1(-winRatioFloat)
+ winRationWithPoisson := new(corebig.Rat).SetFloat64(winRationWithPoissonFloat)
+ if winRationWithPoisson != nil {
+ winRatio = winRationWithPoisson
+ winRatioFloat = winRationWithPoissonFloat
}
- winRate := time.Duration(float64(time.Second*time.Duration(build.BlockDelaySecs)) / expWinChance)
- winPerDay := float64(time.Hour*24) / float64(winRate)
- fmt.Print("Expected block win rate: ")
- color.Blue("%.4f/day (every %s)", winPerDay, winRate.Truncate(time.Second))
+ weekly, _ := new(corebig.Rat).Mul(
+ winRatio,
+ new(corebig.Rat).SetInt64(7*builtin.EpochsInDay),
+ ).Float64()
+
+ avgDuration, _ := new(corebig.Rat).Mul(
+ new(corebig.Rat).SetInt64(builtin.EpochDurationSeconds),
+ new(corebig.Rat).Inv(winRatio),
+ ).Float64()
+
+ fmt.Print("Projected average block win rate: ")
+ color.Blue(
+ "%.02f/week (every %s)",
+ weekly,
+ (time.Second * time.Duration(avgDuration)).Truncate(time.Second).String(),
+ )
+
+ // Geometric distribution of P(Y < k) calculated as described in https://en.wikipedia.org/wiki/Geometric_distribution#Probability_Outcomes_Examples
+ // https://www.wolframalpha.com/input/?i=t+%3E+0%3B+p+%3E+0%3B+p+%3C+1%3B+c+%3E+0%3B+c+%3C1%3B+1-%281-p%29%5E%28t%29%3Dc%3B+solve+t
+ // t == how many dice-rolls (epochs) before win
+ // p == winRate == ( minerPower / netPower )
+ // c == target probability of win ( 99.9% in this case )
+ fmt.Print("Projected block win with ")
+ color.Green(
+ "99.9%% probability every %s",
+ (time.Second * time.Duration(
+ builtin.EpochDurationSeconds*math.Log(1-0.999)/
+ math.Log(1-winRatioFloat),
+ )).Truncate(time.Second).String(),
+ )
+ fmt.Println("(projections DO NOT account for future network and miner growth)")
}
}
@@ -147,27 +224,93 @@ func infoCmdAct(cctx *cli.Context) error {
return err
}
- var nactiveDeals, nVerifDeals, ndeals uint64
- var activeDealBytes, activeVerifDealBytes, dealBytes abi.PaddedPieceSize
+ type dealStat struct {
+ count, verifCount int
+ bytes, verifBytes uint64
+ }
+ dsAdd := func(ds *dealStat, deal storagemarket.MinerDeal) {
+ ds.count++
+ ds.bytes += uint64(deal.Proposal.PieceSize)
+ if deal.Proposal.VerifiedDeal {
+ ds.verifCount++
+ ds.verifBytes += uint64(deal.Proposal.PieceSize)
+ }
+ }
+
+ showDealStates := map[storagemarket.StorageDealStatus]struct{}{
+ storagemarket.StorageDealActive: {},
+ storagemarket.StorageDealTransferring: {},
+ storagemarket.StorageDealStaged: {},
+ storagemarket.StorageDealAwaitingPreCommit: {},
+ storagemarket.StorageDealSealing: {},
+ storagemarket.StorageDealPublish: {},
+ storagemarket.StorageDealCheckForAcceptance: {},
+ storagemarket.StorageDealPublishing: {},
+ }
+
+ var total dealStat
+ perState := map[storagemarket.StorageDealStatus]*dealStat{}
for _, deal := range deals {
- ndeals++
- dealBytes += deal.Proposal.PieceSize
+ if _, ok := showDealStates[deal.State]; !ok {
+ continue
+ }
+ if perState[deal.State] == nil {
+ perState[deal.State] = new(dealStat)
+ }
- if deal.State == storagemarket.StorageDealActive {
- nactiveDeals++
- activeDealBytes += deal.Proposal.PieceSize
+ dsAdd(&total, deal)
+ dsAdd(perState[deal.State], deal)
+ }
- if deal.Proposal.VerifiedDeal {
- nVerifDeals++
- activeVerifDealBytes += deal.Proposal.PieceSize
- }
+ type wstr struct {
+ str string
+ status storagemarket.StorageDealStatus
+ }
+ sorted := make([]wstr, 0, len(perState))
+ for status, stat := range perState {
+ st := strings.TrimPrefix(storagemarket.DealStates[status], "StorageDeal")
+ sorted = append(sorted, wstr{
+ str: fmt.Sprintf(" %s:\t%d\t\t%s\t(Verified: %d\t%s)\n", st, stat.count, types.SizeStr(types.NewInt(stat.bytes)), stat.verifCount, types.SizeStr(types.NewInt(stat.verifBytes))),
+ status: status,
+ },
+ )
+ }
+ sort.Slice(sorted, func(i, j int) bool {
+ if sorted[i].status == storagemarket.StorageDealActive || sorted[j].status == storagemarket.StorageDealActive {
+ return sorted[i].status == storagemarket.StorageDealActive
+ }
+ return sorted[i].status > sorted[j].status
+ })
+
+ fmt.Printf("Storage Deals: %d, %s\n", total.count, types.SizeStr(types.NewInt(total.bytes)))
+
+ tw := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', 0)
+ for _, e := range sorted {
+ _, _ = tw.Write([]byte(e.str))
+ }
+
+ _ = tw.Flush()
+ fmt.Println()
+
+ retrievals, err := nodeApi.MarketListRetrievalDeals(ctx)
+ if err != nil {
+ return xerrors.Errorf("getting retrieval deal list: %w", err)
+ }
+
+ var retrComplete dealStat
+ for _, retrieval := range retrievals {
+ if retrieval.Status == retrievalmarket.DealStatusCompleted {
+ retrComplete.count++
+ retrComplete.bytes += retrieval.TotalSent
}
}
- fmt.Printf("Deals: %d, %s\n", ndeals, types.SizeStr(types.NewInt(uint64(dealBytes))))
- fmt.Printf("\tActive: %d, %s (Verified: %d, %s)\n", nactiveDeals, types.SizeStr(types.NewInt(uint64(activeDealBytes))), nVerifDeals, types.SizeStr(types.NewInt(uint64(activeVerifDealBytes))))
+ fmt.Printf("Retrieval Deals (complete): %d, %s\n", retrComplete.count, types.SizeStr(types.NewInt(retrComplete.bytes)))
+
fmt.Println()
+ spendable := big.Zero()
+
// NOTE: there's no need to unlock anything here. Funds only
// vest on deadline boundaries, and they're unlocked by cron.
lockedFunds, err := mas.LockedFunds()
@@ -178,32 +321,46 @@ func infoCmdAct(cctx *cli.Context) error {
if err != nil {
return xerrors.Errorf("getting available balance: %w", err)
}
- fmt.Printf("Miner Balance: %s\n", color.YellowString("%s", types.FIL(mact.Balance)))
- fmt.Printf("\tPreCommit: %s\n", types.FIL(lockedFunds.PreCommitDeposits))
- fmt.Printf("\tPledge: %s\n", types.FIL(lockedFunds.InitialPledgeRequirement))
- fmt.Printf("\tVesting: %s\n", types.FIL(lockedFunds.VestingFunds))
- color.Green("\tAvailable: %s", types.FIL(availBalance))
- wb, err := api.WalletBalance(ctx, mi.Worker)
- if err != nil {
- return xerrors.Errorf("getting worker balance: %w", err)
- }
- color.Cyan("Worker Balance: %s", types.FIL(wb))
+ spendable = big.Add(spendable, availBalance)
+
+ fmt.Printf("Miner Balance: %s\n", color.YellowString("%s", types.FIL(mact.Balance).Short()))
+ fmt.Printf(" PreCommit: %s\n", types.FIL(lockedFunds.PreCommitDeposits).Short())
+ fmt.Printf(" Pledge: %s\n", types.FIL(lockedFunds.InitialPledgeRequirement).Short())
+ fmt.Printf(" Vesting: %s\n", types.FIL(lockedFunds.VestingFunds).Short())
+ colorTokenAmount(" Available: %s\n", availBalance)
mb, err := api.StateMarketBalance(ctx, maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting market balance: %w", err)
}
- fmt.Printf("Market (Escrow): %s\n", types.FIL(mb.Escrow))
- fmt.Printf("Market (Locked): %s\n", types.FIL(mb.Locked))
+ spendable = big.Add(spendable, big.Sub(mb.Escrow, mb.Locked))
- fmt.Println()
+ fmt.Printf("Market Balance: %s\n", types.FIL(mb.Escrow).Short())
+ fmt.Printf(" Locked: %s\n", types.FIL(mb.Locked).Short())
+ colorTokenAmount(" Available: %s\n", big.Sub(mb.Escrow, mb.Locked))
- sealdur, err := nodeApi.SectorGetExpectedSealDuration(ctx)
+ wb, err := api.WalletBalance(ctx, mi.Worker)
if err != nil {
- return err
+ return xerrors.Errorf("getting worker balance: %w", err)
}
+ spendable = big.Add(spendable, wb)
+ color.Cyan("Worker Balance: %s", types.FIL(wb).Short())
+ if len(mi.ControlAddresses) > 0 {
+ cbsum := big.Zero()
+ for _, ca := range mi.ControlAddresses {
+ b, err := api.WalletBalance(ctx, ca)
+ if err != nil {
+ return xerrors.Errorf("getting control address balance: %w", err)
+ }
+ cbsum = big.Add(cbsum, b)
+ }
+ spendable = big.Add(spendable, cbsum)
+
+ fmt.Printf(" Control: %s\n", types.FIL(cbsum).Short())
+ }
+ colorTokenAmount("Total Spendable: %s\n", spendable)
- fmt.Printf("Expected Seal Duration: %s\n\n", sealdur)
+ fmt.Println()
if !cctx.Bool("hide-sectors-info") {
fmt.Println("Sectors:")
@@ -232,28 +389,41 @@ var stateList = []stateMeta{
{col: color.FgBlue, state: sealing.Empty},
{col: color.FgBlue, state: sealing.WaitDeals},
+ {col: color.FgBlue, state: sealing.AddPiece},
{col: color.FgRed, state: sealing.UndefinedSectorState},
{col: color.FgYellow, state: sealing.Packing},
+ {col: color.FgYellow, state: sealing.GetTicket},
{col: color.FgYellow, state: sealing.PreCommit1},
{col: color.FgYellow, state: sealing.PreCommit2},
{col: color.FgYellow, state: sealing.PreCommitting},
{col: color.FgYellow, state: sealing.PreCommitWait},
+ {col: color.FgYellow, state: sealing.SubmitPreCommitBatch},
+ {col: color.FgYellow, state: sealing.PreCommitBatchWait},
{col: color.FgYellow, state: sealing.WaitSeed},
{col: color.FgYellow, state: sealing.Committing},
+ {col: color.FgYellow, state: sealing.CommitFinalize},
{col: color.FgYellow, state: sealing.SubmitCommit},
{col: color.FgYellow, state: sealing.CommitWait},
+ {col: color.FgYellow, state: sealing.SubmitCommitAggregate},
+ {col: color.FgYellow, state: sealing.CommitAggregateWait},
{col: color.FgYellow, state: sealing.FinalizeSector},
+ {col: color.FgCyan, state: sealing.Terminating},
+ {col: color.FgCyan, state: sealing.TerminateWait},
+ {col: color.FgCyan, state: sealing.TerminateFinality},
+ {col: color.FgCyan, state: sealing.TerminateFailed},
{col: color.FgCyan, state: sealing.Removing},
{col: color.FgCyan, state: sealing.Removed},
{col: color.FgRed, state: sealing.FailedUnrecoverable},
+ {col: color.FgRed, state: sealing.AddPieceFailed},
{col: color.FgRed, state: sealing.SealPreCommit1Failed},
{col: color.FgRed, state: sealing.SealPreCommit2Failed},
{col: color.FgRed, state: sealing.PreCommitFailed},
{col: color.FgRed, state: sealing.ComputeProofFailed},
{col: color.FgRed, state: sealing.CommitFailed},
+ {col: color.FgRed, state: sealing.CommitFinalizeFailed},
{col: color.FgRed, state: sealing.PackingFailed},
{col: color.FgRed, state: sealing.FinalizeFailed},
{col: color.FgRed, state: sealing.Faulty},
@@ -274,22 +444,18 @@ func init() {
}
func sectorsInfo(ctx context.Context, napi api.StorageMiner) error {
- sectors, err := napi.SectorsList(ctx)
+ summary, err := napi.SectorsSummary(ctx)
if err != nil {
return err
}
- buckets := map[sealing.SectorState]int{
- "Total": len(sectors),
- }
- for _, s := range sectors {
- st, err := napi.SectorsStatus(ctx, s, false)
- if err != nil {
- return err
- }
-
- buckets[sealing.SectorState(st.State)]++
+ buckets := make(map[sealing.SectorState]int)
+ var total int
+ for s, c := range summary {
+ buckets[sealing.SectorState(s)] = c
+ total += c
}
+ buckets["Total"] = total
var sorted []stateMeta
for state, i := range buckets {
@@ -306,3 +472,13 @@ func sectorsInfo(ctx context.Context, napi api.StorageMiner) error {
return nil
}
+
+func colorTokenAmount(format string, amount abi.TokenAmount) {
+ if amount.GreaterThan(big.Zero()) {
+ color.Green(format, types.FIL(amount).Short())
+ } else if amount.Equals(big.Zero()) {
+ color.Yellow(format, types.FIL(amount).Short())
+ } else {
+ color.Red(format, types.FIL(amount).Short())
+ }
+}
diff --git a/cmd/lotus-storage-miner/info_all.go b/cmd/lotus-storage-miner/info_all.go
index 408f9b5c750..e5e08a56911 100644
--- a/cmd/lotus-storage-miner/info_all.go
+++ b/cmd/lotus-storage-miner/info_all.go
@@ -35,80 +35,80 @@ var infoAllCmd = &cli.Command{
fmt.Println("#: Version")
if err := lcli.VersionCmd.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: Miner Info")
if err := infoCmdAct(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
// Verbose info
fmt.Println("\n#: Storage List")
if err := storageListCmd.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: Worker List")
if err := sealingWorkersCmd.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: PeerID")
if err := lcli.NetId.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: Listen Addresses")
if err := lcli.NetListen.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: Reachability")
if err := lcli.NetReachability.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
// Very Verbose info
fmt.Println("\n#: Peers")
if err := lcli.NetPeers.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: Sealing Jobs")
if err := sealingJobsCmd.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: Sched Diag")
if err := sealingSchedDiagCmd.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: Storage Ask")
if err := getAskCmd.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: Storage Deals")
if err := dealsListCmd.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: Retrieval Deals")
if err := retrievalDealsListCmd.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: Sector List")
if err := sectorsListCmd.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: Sector Refs")
if err := sectorsRefsCmd.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
// Very Very Verbose info
@@ -116,7 +116,7 @@ var infoAllCmd = &cli.Command{
list, err := nodeApi.SectorsList(ctx)
if err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
sort.Slice(list, func(i, j int) bool {
@@ -129,11 +129,11 @@ var infoAllCmd = &cli.Command{
fs := &flag.FlagSet{}
for _, f := range sectorsStatusCmd.Flags {
if err := f.Apply(fs); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
}
if err := fs.Parse([]string{"--log", "--on-chain-info", fmt.Sprint(s)}); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
if err := sectorsStatusCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil {
@@ -144,7 +144,7 @@ var infoAllCmd = &cli.Command{
fs = &flag.FlagSet{}
if err := fs.Parse([]string{fmt.Sprint(s)}); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
if err := storageFindCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil {
@@ -155,7 +155,7 @@ var infoAllCmd = &cli.Command{
if !_test {
fmt.Println("\n#: Goroutines")
if err := lcli.PprofGoroutines.Action(cctx); err != nil {
- return err
+ fmt.Println("ERROR: ", err)
}
}
diff --git a/cmd/lotus-storage-miner/init.go b/cmd/lotus-storage-miner/init.go
index a7fcd722a9e..1cce52a41a1 100644
--- a/cmd/lotus-storage-miner/init.go
+++ b/cmd/lotus-storage-miner/init.go
@@ -8,6 +8,7 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
+ "net/http"
"os"
"path/filepath"
"strconv"
@@ -37,6 +38,8 @@ import (
power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power"
lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v0api"
+ "github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@@ -118,7 +121,8 @@ var initCmd = &cli.Command{
},
},
Subcommands: []*cli.Command{
- initRestoreCmd,
+ restoreCmd,
+ serviceCmd,
},
Action: func(cctx *cli.Context) error {
log.Info("Initializing lotus miner")
@@ -143,13 +147,17 @@ var initCmd = &cli.Command{
log.Info("Checking proof parameters")
- if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(ssize)); err != nil {
+ if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
log.Info("Trying to connect to full node RPC")
- api, closer, err := lcli.GetFullNodeAPI(cctx) // TODO: consider storing full node address in config
+ if err := checkV1ApiSupport(ctx, cctx); err != nil {
+ return err
+ }
+
+ api, closer, err := lcli.GetFullNodeAPIV1(cctx) // TODO: consider storing full node address in config
if err != nil {
return err
}
@@ -158,7 +166,7 @@ var initCmd = &cli.Command{
log.Info("Checking full node sync status")
if !cctx.Bool("genesis-miner") && !cctx.Bool("nosync") {
- if err := lcli.SyncWait(ctx, api, false); err != nil {
+ if err := lcli.SyncWait(ctx, &v0api.WrapperV1Full{FullNode: api}, false); err != nil {
return xerrors.Errorf("sync wait: %w", err)
}
}
@@ -186,8 +194,8 @@ var initCmd = &cli.Command{
return err
}
- if !v.APIVersion.EqMajorMinor(build.FullAPIVersion) {
- return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", build.FullAPIVersion, v.APIVersion)
+ if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion1) {
+ return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion1, v.APIVersion)
}
log.Info("Initializing repo")
@@ -269,7 +277,7 @@ var initCmd = &cli.Command{
},
}
-func migratePreSealMeta(ctx context.Context, api lapi.FullNode, metadata string, maddr address.Address, mds dtypes.MetadataDS) error {
+func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string, maddr address.Address, mds dtypes.MetadataDS) error {
metadata, err := homedir.Expand(metadata)
if err != nil {
return xerrors.Errorf("expanding preseal dir: %w", err)
@@ -310,9 +318,10 @@ func migratePreSealMeta(ctx context.Context, api lapi.FullNode, metadata string,
Size: abi.PaddedPieceSize(meta.SectorSize),
PieceCID: commD,
},
- DealInfo: &sealing.DealInfo{
- DealID: dealID,
- DealSchedule: sealing.DealSchedule{
+ DealInfo: &lapi.PieceDealInfo{
+ DealID: dealID,
+ DealProposal: &sector.Deal,
+ DealSchedule: lapi.DealSchedule{
StartEpoch: sector.Deal.StartEpoch,
EndEpoch: sector.Deal.EndEpoch,
},
@@ -378,7 +387,7 @@ func migratePreSealMeta(ctx context.Context, api lapi.FullNode, metadata string,
return mds.Put(datastore.NewKey(modules.StorageCounterDSPrefix), buf[:size])
}
-func findMarketDealID(ctx context.Context, api lapi.FullNode, deal market2.DealProposal) (abi.DealID, error) {
+func findMarketDealID(ctx context.Context, api v1api.FullNode, deal market2.DealProposal) (abi.DealID, error) {
// TODO: find a better way
// (this is only used by genesis miners)
@@ -397,7 +406,7 @@ func findMarketDealID(ctx context.Context, api lapi.FullNode, deal market2.DealP
return 0, xerrors.New("deal not found")
}
-func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode, r repo.Repo, ssize abi.SectorSize, gasPrice types.BigInt) error {
+func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode, r repo.Repo, ssize abi.SectorSize, gasPrice types.BigInt) error {
lr, err := r.Lock(repo.StorageMiner)
if err != nil {
return err
@@ -416,7 +425,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
return xerrors.Errorf("peer ID from private key: %w", err)
}
- mds, err := lr.Datastore("/metadata")
+ mds, err := lr.Datastore(context.TODO(), "/metadata")
if err != nil {
return err
}
@@ -433,11 +442,6 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
return err
}
- spt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
- if err != nil {
- return err
- }
-
mid, err := address.IDFromAddress(a)
if err != nil {
return xerrors.Errorf("getting id address: %w", err)
@@ -451,16 +455,22 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
wsts := statestore.New(namespace.Wrap(mds, modules.WorkerCallsPrefix))
smsts := statestore.New(namespace.Wrap(mds, modules.ManagerWorkPrefix))
- smgr, err := sectorstorage.New(ctx, lr, stores.NewIndex(), &ffiwrapper.Config{
- SealProofType: spt,
- }, sectorstorage.SealerConfig{
+ si := stores.NewIndex()
+
+ lstor, err := stores.NewLocal(ctx, lr, si, nil)
+ if err != nil {
+ return err
+ }
+ stor := stores.NewRemote(lstor, si, http.Header(sa), 10, &stores.DefaultPartialFileHandler{})
+
+ smgr, err := sectorstorage.New(ctx, lstor, stor, lr, si, sectorstorage.SealerConfig{
ParallelFetchLimit: 10,
AllowAddPiece: true,
AllowPreCommit1: true,
AllowPreCommit2: true,
AllowCommit: true,
AllowUnseal: true,
- }, nil, sa, wsts, smsts)
+ }, wsts, smsts)
if err != nil {
return err
}
@@ -568,7 +578,7 @@ func makeHostKey(lr repo.LockedRepo) (crypto.PrivKey, error) {
return pk, nil
}
-func configureStorageMiner(ctx context.Context, api lapi.FullNode, addr address.Address, peerid peer.ID, gasPrice types.BigInt) error {
+func configureStorageMiner(ctx context.Context, api v1api.FullNode, addr address.Address, peerid peer.ID, gasPrice types.BigInt) error {
mi, err := api.StateMinerInfo(ctx, addr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getWorkerAddr returned bad address: %w", err)
@@ -594,7 +604,7 @@ func configureStorageMiner(ctx context.Context, api lapi.FullNode, addr address.
}
log.Info("Waiting for message: ", smsg.Cid())
- ret, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
+ ret, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true)
if err != nil {
return err
}
@@ -606,7 +616,7 @@ func configureStorageMiner(ctx context.Context, api lapi.FullNode, addr address.
return nil
}
-func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID, gasPrice types.BigInt, cctx *cli.Context) (address.Address, error) {
+func createStorageMiner(ctx context.Context, api v1api.FullNode, peerid peer.ID, gasPrice types.BigInt, cctx *cli.Context) (address.Address, error) {
var err error
var owner address.Address
if cctx.String("owner") != "" {
@@ -648,7 +658,7 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID,
log.Infof("Initializing worker account %s, message: %s", worker, signed.Cid())
log.Infof("Waiting for confirmation")
- mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence)
+ mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true)
if err != nil {
return address.Undef, xerrors.Errorf("waiting for worker init: %w", err)
}
@@ -657,9 +667,14 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID,
}
}
- spt, err := ffiwrapper.SealProofTypeFromSectorSize(abi.SectorSize(ssize))
+ nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK)
if err != nil {
- return address.Undef, err
+ return address.Undef, xerrors.Errorf("getting network version: %w", err)
+ }
+
+ spt, err := miner.SealProofTypeFromSectorSize(abi.SectorSize(ssize), nv)
+ if err != nil {
+ return address.Undef, xerrors.Errorf("getting seal proof type: %w", err)
}
params, err := actors.SerializeParams(&power2.CreateMinerParams{
@@ -701,7 +716,7 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID,
log.Infof("Pushed CreateMiner message: %s", signed.Cid())
log.Infof("Waiting for confirmation")
- mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence)
+ mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true)
if err != nil {
return address.Undef, xerrors.Errorf("waiting for createMiner message: %w", err)
}
@@ -718,3 +733,26 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID,
log.Infof("New miners address is: %s (%s)", retval.IDAddress, retval.RobustAddress)
return retval.IDAddress, nil
}
+
+// checkV1ApiSupport uses v0 api version to signal support for v1 API
+// trying to query the v1 api on older lotus versions would get a 404, which can happen for any number of other reasons
+func checkV1ApiSupport(ctx context.Context, cctx *cli.Context) error {
+ // check v0 api version to make sure it supports v1 api
+ api0, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+
+ v, err := api0.Version(ctx)
+ closer()
+
+ if err != nil {
+ return err
+ }
+
+ if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion0) {
+ return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion0, v.APIVersion)
+ }
+
+ return nil
+}
diff --git a/cmd/lotus-storage-miner/init_restore.go b/cmd/lotus-storage-miner/init_restore.go
index 83a9ad87c53..3b4e2b26d2e 100644
--- a/cmd/lotus-storage-miner/init_restore.go
+++ b/cmd/lotus-storage-miner/init_restore.go
@@ -1,10 +1,13 @@
package main
import (
+ "context"
"encoding/json"
"io/ioutil"
"os"
+ "github.com/filecoin-project/lotus/api/v0api"
+
"github.com/docker/go-units"
"github.com/ipfs/go-datastore"
"github.com/libp2p/go-libp2p-core/peer"
@@ -17,7 +20,9 @@ import (
paramfetch "github.com/filecoin-project/go-paramfetch"
"github.com/filecoin-project/go-state-types/big"
+ lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
@@ -26,7 +31,7 @@ import (
"github.com/filecoin-project/lotus/node/repo"
)
-var initRestoreCmd = &cli.Command{
+var restoreCmd = &cli.Command{
Name: "restore",
Usage: "Initialize a lotus miner repo from a backup",
Flags: []cli.Flag{
@@ -45,230 +50,248 @@ var initRestoreCmd = &cli.Command{
},
ArgsUsage: "[backupFile]",
Action: func(cctx *cli.Context) error {
- log.Info("Initializing lotus miner using a backup")
- if cctx.Args().Len() != 1 {
- return xerrors.Errorf("expected 1 argument")
- }
-
- log.Info("Trying to connect to full node RPC")
-
- api, closer, err := lcli.GetFullNodeAPI(cctx) // TODO: consider storing full node address in config
- if err != nil {
- return err
- }
- defer closer()
-
- log.Info("Checking full node version")
-
ctx := lcli.ReqContext(cctx)
+ log.Info("Initializing lotus miner using a backup")
- v, err := api.Version(ctx)
- if err != nil {
- return err
- }
-
- if !v.APIVersion.EqMajorMinor(build.FullAPIVersion) {
- return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", build.FullAPIVersion, v.APIVersion)
- }
+ var storageCfg *stores.StorageConfig
+ if cctx.IsSet("storage-config") {
+ cf, err := homedir.Expand(cctx.String("storage-config"))
+ if err != nil {
+ return xerrors.Errorf("expanding storage config path: %w", err)
+ }
- if !cctx.Bool("nosync") {
- if err := lcli.SyncWait(ctx, api, false); err != nil {
- return xerrors.Errorf("sync wait: %w", err)
+ cfb, err := ioutil.ReadFile(cf)
+ if err != nil {
+ return xerrors.Errorf("reading storage config: %w", err)
}
- }
- bf, err := homedir.Expand(cctx.Args().First())
- if err != nil {
- return xerrors.Errorf("expand backup file path: %w", err)
+ storageCfg = &stores.StorageConfig{}
+ err = json.Unmarshal(cfb, storageCfg)
+ if err != nil {
+ return xerrors.Errorf("cannot unmarshal json for storage config: %w", err)
+ }
}
- st, err := os.Stat(bf)
- if err != nil {
- return xerrors.Errorf("stat backup file (%s): %w", bf, err)
- }
+ if err := restore(ctx, cctx, storageCfg, nil, func(api lapi.FullNode, maddr address.Address, peerid peer.ID, mi miner.MinerInfo) error {
+ log.Info("Checking proof parameters")
- f, err := os.Open(bf)
- if err != nil {
- return xerrors.Errorf("opening backup file: %w", err)
- }
- defer f.Close() // nolint:errcheck
+ if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(mi.SectorSize)); err != nil {
+ return xerrors.Errorf("fetching proof parameters: %w", err)
+ }
- log.Info("Checking if repo exists")
+ log.Info("Configuring miner actor")
- repoPath := cctx.String(FlagMinerRepo)
- r, err := repo.NewFS(repoPath)
- if err != nil {
- return err
- }
+ if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil {
+ return err
+ }
- ok, err := r.Exists()
- if err != nil {
+ return nil
+ }); err != nil {
return err
}
- if ok {
- return xerrors.Errorf("repo at '%s' is already initialized", cctx.String(FlagMinerRepo))
- }
- log.Info("Initializing repo")
+ return nil
+ },
+}
- if err := r.Init(repo.StorageMiner); err != nil {
- return err
- }
+func restore(ctx context.Context, cctx *cli.Context, strConfig *stores.StorageConfig, manageConfig func(*config.StorageMiner) error, after func(api lapi.FullNode, addr address.Address, peerid peer.ID, mi miner.MinerInfo) error) error {
+ if cctx.Args().Len() != 1 {
+ return xerrors.Errorf("expected 1 argument")
+ }
- lr, err := r.Lock(repo.StorageMiner)
- if err != nil {
- return err
- }
- defer lr.Close() //nolint:errcheck
+ log.Info("Trying to connect to full node RPC")
- if cctx.IsSet("config") {
- log.Info("Restoring config")
+ api, closer, err := lcli.GetFullNodeAPIV1(cctx) // TODO: consider storing full node address in config
+ if err != nil {
+ return err
+ }
+ defer closer()
- cf, err := homedir.Expand(cctx.String("config"))
- if err != nil {
- return xerrors.Errorf("expanding config path: %w", err)
- }
+ log.Info("Checking full node version")
- _, err = os.Stat(cf)
- if err != nil {
- return xerrors.Errorf("stat config file (%s): %w", cf, err)
- }
+ v, err := api.Version(ctx)
+ if err != nil {
+ return err
+ }
- var cerr error
- err = lr.SetConfig(func(raw interface{}) {
- rcfg, ok := raw.(*config.StorageMiner)
- if !ok {
- cerr = xerrors.New("expected miner config")
- return
- }
-
- ff, err := config.FromFile(cf, rcfg)
- if err != nil {
- cerr = xerrors.Errorf("loading config: %w", err)
- return
- }
-
- *rcfg = *ff.(*config.StorageMiner)
- })
- if cerr != nil {
- return cerr
- }
- if err != nil {
- return xerrors.Errorf("setting config: %w", err)
- }
+ if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion1) {
+ return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion1, v.APIVersion)
+ }
- } else {
- log.Warn("--config NOT SET, WILL USE DEFAULT VALUES")
+ if !cctx.Bool("nosync") {
+ if err := lcli.SyncWait(ctx, &v0api.WrapperV1Full{FullNode: api}, false); err != nil {
+ return xerrors.Errorf("sync wait: %w", err)
+ }
+ }
+
+ bf, err := homedir.Expand(cctx.Args().First())
+ if err != nil {
+ return xerrors.Errorf("expand backup file path: %w", err)
+ }
+
+ st, err := os.Stat(bf)
+ if err != nil {
+ return xerrors.Errorf("stat backup file (%s): %w", bf, err)
+ }
+
+ f, err := os.Open(bf)
+ if err != nil {
+ return xerrors.Errorf("opening backup file: %w", err)
+ }
+ defer f.Close() // nolint:errcheck
+
+ log.Info("Checking if repo exists")
+
+ repoPath := cctx.String(FlagMinerRepo)
+ r, err := repo.NewFS(repoPath)
+ if err != nil {
+ return err
+ }
+
+ ok, err := r.Exists()
+ if err != nil {
+ return err
+ }
+ if ok {
+ return xerrors.Errorf("repo at '%s' is already initialized", cctx.String(FlagMinerRepo))
+ }
+
+ log.Info("Initializing repo")
+
+ if err := r.Init(repo.StorageMiner); err != nil {
+ return err
+ }
+
+ lr, err := r.Lock(repo.StorageMiner)
+ if err != nil {
+ return err
+ }
+ defer lr.Close() //nolint:errcheck
+
+ if cctx.IsSet("config") {
+ log.Info("Restoring config")
+
+ cf, err := homedir.Expand(cctx.String("config"))
+ if err != nil {
+ return xerrors.Errorf("expanding config path: %w", err)
}
- if cctx.IsSet("storage-config") {
- log.Info("Restoring storage path config")
+ _, err = os.Stat(cf)
+ if err != nil {
+ return xerrors.Errorf("stat config file (%s): %w", cf, err)
+ }
- cf, err := homedir.Expand(cctx.String("storage-config"))
- if err != nil {
- return xerrors.Errorf("expanding storage config path: %w", err)
+ var cerr error
+ err = lr.SetConfig(func(raw interface{}) {
+ rcfg, ok := raw.(*config.StorageMiner)
+ if !ok {
+ cerr = xerrors.New("expected miner config")
+ return
}
- cfb, err := ioutil.ReadFile(cf)
+ ff, err := config.FromFile(cf, rcfg)
if err != nil {
- return xerrors.Errorf("reading storage config: %w", err)
+ cerr = xerrors.Errorf("loading config: %w", err)
+ return
}
- var cerr error
- err = lr.SetStorage(func(scfg *stores.StorageConfig) {
- cerr = json.Unmarshal(cfb, scfg)
- })
- if cerr != nil {
- return xerrors.Errorf("unmarshalling storage config: %w", cerr)
- }
- if err != nil {
- return xerrors.Errorf("setting storage config: %w", err)
+ *rcfg = *ff.(*config.StorageMiner)
+ if manageConfig != nil {
+ cerr = manageConfig(rcfg)
}
- } else {
- log.Warn("--storage-config NOT SET. NO SECTOR PATHS WILL BE CONFIGURED")
+ })
+ if cerr != nil {
+ return cerr
}
-
- log.Info("Restoring metadata backup")
-
- mds, err := lr.Datastore("/metadata")
if err != nil {
- return err
+ return xerrors.Errorf("setting config: %w", err)
}
- bar := pb.New64(st.Size())
- br := bar.NewProxyReader(f)
- bar.ShowTimeLeft = true
- bar.ShowPercent = true
- bar.ShowSpeed = true
- bar.Units = pb.U_BYTES
+ } else {
+ log.Warn("--config NOT SET, WILL USE DEFAULT VALUES")
+ }
- bar.Start()
- err = backupds.RestoreInto(br, mds)
- bar.Finish()
+ if strConfig != nil {
+ log.Info("Restoring storage path config")
+ err = lr.SetStorage(func(scfg *stores.StorageConfig) {
+ *scfg = *strConfig
+ })
if err != nil {
- return xerrors.Errorf("restoring metadata: %w", err)
+ return xerrors.Errorf("setting storage config: %w", err)
}
+ } else {
+ log.Warn("--storage-config NOT SET. NO SECTOR PATHS WILL BE CONFIGURED")
+ }
- log.Info("Checking actor metadata")
+ log.Info("Restoring metadata backup")
- abytes, err := mds.Get(datastore.NewKey("miner-address"))
- if err != nil {
- return xerrors.Errorf("getting actor address from metadata datastore: %w", err)
- }
+ mds, err := lr.Datastore(context.TODO(), "/metadata")
+ if err != nil {
+ return err
+ }
- maddr, err := address.NewFromBytes(abytes)
- if err != nil {
- return xerrors.Errorf("parsing actor address: %w", err)
- }
+ bar := pb.New64(st.Size())
+ br := bar.NewProxyReader(f)
+ bar.ShowTimeLeft = true
+ bar.ShowPercent = true
+ bar.ShowSpeed = true
+ bar.Units = pb.U_BYTES
- log.Info("ACTOR ADDRESS: ", maddr.String())
+ bar.Start()
+ err = backupds.RestoreInto(br, mds)
+ bar.Finish()
- mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
- if err != nil {
- return xerrors.Errorf("getting miner info: %w", err)
- }
+ if err != nil {
+ return xerrors.Errorf("restoring metadata: %w", err)
+ }
- log.Info("SECTOR SIZE: ", units.BytesSize(float64(mi.SectorSize)))
+ log.Info("Checking actor metadata")
- wk, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK)
- if err != nil {
- return xerrors.Errorf("resolving worker key: %w", err)
- }
+ abytes, err := mds.Get(datastore.NewKey("miner-address"))
+ if err != nil {
+ return xerrors.Errorf("getting actor address from metadata datastore: %w", err)
+ }
- has, err := api.WalletHas(ctx, wk)
- if err != nil {
- return xerrors.Errorf("checking worker address: %w", err)
- }
+ maddr, err := address.NewFromBytes(abytes)
+ if err != nil {
+ return xerrors.Errorf("parsing actor address: %w", err)
+ }
- if !has {
- return xerrors.Errorf("worker address %s for miner actor %s not present in full node wallet", mi.Worker, maddr)
- }
+ log.Info("ACTOR ADDRESS: ", maddr.String())
- log.Info("Checking proof parameters")
+ mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("getting miner info: %w", err)
+ }
- if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(mi.SectorSize)); err != nil {
- return xerrors.Errorf("fetching proof parameters: %w", err)
- }
+ log.Info("SECTOR SIZE: ", units.BytesSize(float64(mi.SectorSize)))
- log.Info("Initializing libp2p identity")
+ wk, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("resolving worker key: %w", err)
+ }
- p2pSk, err := makeHostKey(lr)
- if err != nil {
- return xerrors.Errorf("make host key: %w", err)
- }
+ has, err := api.WalletHas(ctx, wk)
+ if err != nil {
+ return xerrors.Errorf("checking worker address: %w", err)
+ }
- peerid, err := peer.IDFromPrivateKey(p2pSk)
- if err != nil {
- return xerrors.Errorf("peer ID from private key: %w", err)
- }
+ if !has {
+ return xerrors.Errorf("worker address %s for miner actor %s not present in full node wallet", mi.Worker, maddr)
+ }
- log.Info("Configuring miner actor")
+ log.Info("Initializing libp2p identity")
- if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil {
- return err
- }
+ p2pSk, err := makeHostKey(lr)
+ if err != nil {
+ return xerrors.Errorf("make host key: %w", err)
+ }
- return nil
- },
+ peerid, err := peer.IDFromPrivateKey(p2pSk)
+ if err != nil {
+ return xerrors.Errorf("peer ID from private key: %w", err)
+ }
+
+ return after(api, maddr, peerid, mi)
}
diff --git a/cmd/lotus-storage-miner/init_service.go b/cmd/lotus-storage-miner/init_service.go
new file mode 100644
index 00000000000..ad803a83040
--- /dev/null
+++ b/cmd/lotus-storage-miner/init_service.go
@@ -0,0 +1,152 @@
+package main
+
+import (
+ "context"
+ "strings"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/big"
+ lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/client"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+ lcli "github.com/filecoin-project/lotus/cli"
+ cliutil "github.com/filecoin-project/lotus/cli/util"
+ "github.com/filecoin-project/lotus/extern/sector-storage/stores"
+ "github.com/filecoin-project/lotus/node/config"
+ "github.com/libp2p/go-libp2p-core/peer"
+ "github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+)
+
+const (
+ MarketsService = "markets"
+)
+
+var serviceCmd = &cli.Command{
+ Name: "service",
+ Usage: "Initialize a lotus miner sub-service",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "config",
+ Usage: "config file (config.toml)",
+ Required: true,
+ },
+ &cli.BoolFlag{
+ Name: "nosync",
+ Usage: "don't check full-node sync status",
+ },
+ &cli.StringSliceFlag{
+ Name: "type",
+ Usage: "type of service to be enabled",
+ },
+ &cli.StringFlag{
+ Name: "api-sealer",
+ Usage: "sealer API info (lotus-miner auth api-info --perm=admin)",
+ },
+ &cli.StringFlag{
+ Name: "api-sector-index",
+ Usage: "sector Index API info (lotus-miner auth api-info --perm=admin)",
+ },
+ },
+ ArgsUsage: "[backupFile]",
+ Action: func(cctx *cli.Context) error {
+ ctx := lcli.ReqContext(cctx)
+ log.Info("Initializing lotus miner service")
+
+ es := EnabledServices(cctx.StringSlice("type"))
+
+ if len(es) == 0 {
+ return xerrors.Errorf("at least one module must be enabled")
+ }
+
+ // we should remove this as soon as we have more service types and not just `markets`
+ if !es.Contains(MarketsService) {
+ return xerrors.Errorf("markets module must be enabled")
+ }
+
+ if !cctx.IsSet("api-sealer") {
+ return xerrors.Errorf("--api-sealer is required without the sealer module enabled")
+ }
+ if !cctx.IsSet("api-sector-index") {
+ return xerrors.Errorf("--api-sector-index is required without the sector storage module enabled")
+ }
+
+ if err := restore(ctx, cctx, &stores.StorageConfig{}, func(cfg *config.StorageMiner) error {
+ cfg.Subsystems.EnableMarkets = es.Contains(MarketsService)
+ cfg.Subsystems.EnableMining = false
+ cfg.Subsystems.EnableSealing = false
+ cfg.Subsystems.EnableSectorStorage = false
+
+ if !cfg.Subsystems.EnableSealing {
+ ai, err := checkApiInfo(ctx, cctx.String("api-sealer"))
+ if err != nil {
+ return xerrors.Errorf("checking sealer API: %w", err)
+ }
+ cfg.Subsystems.SealerApiInfo = ai
+ }
+
+ if !cfg.Subsystems.EnableSectorStorage {
+ ai, err := checkApiInfo(ctx, cctx.String("api-sector-index"))
+ if err != nil {
+ return xerrors.Errorf("checking sector index API: %w", err)
+ }
+ cfg.Subsystems.SectorIndexApiInfo = ai
+ }
+
+ return nil
+ }, func(api lapi.FullNode, maddr address.Address, peerid peer.ID, mi miner.MinerInfo) error {
+ if es.Contains(MarketsService) {
+ log.Info("Configuring miner actor")
+
+ if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ return nil
+ },
+}
+
+type EnabledServices []string
+
+func (es EnabledServices) Contains(name string) bool {
+ for _, s := range es {
+ if s == name {
+ return true
+ }
+ }
+ return false
+}
+
+func checkApiInfo(ctx context.Context, ai string) (string, error) {
+ ai = strings.TrimPrefix(strings.TrimSpace(ai), "MINER_API_INFO=")
+ info := cliutil.ParseApiInfo(ai)
+ addr, err := info.DialArgs("v0")
+ if err != nil {
+ return "", xerrors.Errorf("could not get DialArgs: %w", err)
+ }
+
+ log.Infof("Checking api version of %s", addr)
+
+ api, closer, err := client.NewStorageMinerRPCV0(ctx, addr, info.AuthHeader())
+ if err != nil {
+ return "", err
+ }
+ defer closer()
+
+ v, err := api.Version(ctx)
+ if err != nil {
+ return "", xerrors.Errorf("checking version: %w", err)
+ }
+
+ if !v.APIVersion.EqMajorMinor(lapi.MinerAPIVersion0) {
+ return "", xerrors.Errorf("remote service API version didn't match (expected %s, remote %s)", lapi.MinerAPIVersion0, v.APIVersion)
+ }
+
+ return ai, nil
+}
diff --git a/cmd/lotus-storage-miner/main.go b/cmd/lotus-storage-miner/main.go
index 671f75cf0fc..c555531d66a 100644
--- a/cmd/lotus-storage-miner/main.go
+++ b/cmd/lotus-storage-miner/main.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
+ "github.com/fatih/color"
logging "github.com/ipfs/go-log/v2"
"github.com/urfave/cli/v2"
"go.opencensus.io/trace"
@@ -26,7 +27,7 @@ const FlagMinerRepo = "miner-repo"
const FlagMinerRepoDeprecation = "storagerepo"
func main() {
- build.RunningNodeType = build.NodeMiner
+ api.RunningNodeType = api.NodeMiner
lotuslog.SetupLogLevels()
@@ -61,9 +62,14 @@ func main() {
trace.UnregisterExporter(jaeger)
jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name)
+ if cctx.IsSet("color") {
+ color.NoColor = !cctx.Bool("color")
+ }
+
if originBefore != nil {
return originBefore(cctx)
}
+
return nil
}
}
@@ -81,7 +87,10 @@ func main() {
Aliases: []string{"a"},
},
&cli.BoolFlag{
- Name: "color",
+ // examined in the Before above
+ Name: "color",
+ Usage: "use color in display output",
+ DefaultText: "depends on output being a TTY",
},
&cli.StringFlag{
Name: "repo",
@@ -106,15 +115,21 @@ func main() {
lcli.RunApp(app)
}
-func getActorAddress(ctx context.Context, nodeAPI api.StorageMiner, overrideMaddr string) (maddr address.Address, err error) {
- if overrideMaddr != "" {
- maddr, err = address.NewFromString(overrideMaddr)
+func getActorAddress(ctx context.Context, cctx *cli.Context) (maddr address.Address, err error) {
+ if cctx.IsSet("actor") {
+ maddr, err = address.NewFromString(cctx.String("actor"))
if err != nil {
return maddr, err
}
return
}
+ nodeAPI, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return address.Undef, err
+ }
+ defer closer()
+
maddr, err = nodeAPI.ActorAddress(ctx)
if err != nil {
return maddr, xerrors.Errorf("getting actor address: %w", err)
diff --git a/cmd/lotus-storage-miner/market.go b/cmd/lotus-storage-miner/market.go
index be4a529e982..b216d24fcd4 100644
--- a/cmd/lotus-storage-miner/market.go
+++ b/cmd/lotus-storage-miner/market.go
@@ -2,6 +2,7 @@ package main
import (
"bufio"
+ "context"
"errors"
"fmt"
"io"
@@ -14,7 +15,7 @@ import (
tm "github.com/buger/goterm"
"github.com/docker/go-units"
- datatransfer "github.com/filecoin-project/go-data-transfer"
+ "github.com/fatih/color"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-cidutil/cidenc"
"github.com/libp2p/go-libp2p-core/peer"
@@ -22,6 +23,8 @@ import (
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
+ cborutil "github.com/filecoin-project/go-cbor-util"
+ datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
@@ -113,6 +116,16 @@ var storageDealSelectionResetCmd = &cli.Command{
return err
}
+ err = smapi.DealsSetConsiderVerifiedStorageDeals(lcli.DaemonContext(cctx), true)
+ if err != nil {
+ return err
+ }
+
+ err = smapi.DealsSetConsiderUnverifiedStorageDeals(lcli.DaemonContext(cctx), true)
+ if err != nil {
+ return err
+ }
+
return nil
},
}
@@ -127,6 +140,12 @@ var storageDealSelectionRejectCmd = &cli.Command{
&cli.BoolFlag{
Name: "offline",
},
+ &cli.BoolFlag{
+ Name: "verified",
+ },
+ &cli.BoolFlag{
+ Name: "unverified",
+ },
},
Action: func(cctx *cli.Context) error {
smapi, closer, err := lcli.GetStorageMinerAPI(cctx)
@@ -149,6 +168,20 @@ var storageDealSelectionRejectCmd = &cli.Command{
}
}
+ if cctx.Bool("verified") {
+ err = smapi.DealsSetConsiderVerifiedStorageDeals(lcli.DaemonContext(cctx), false)
+ if err != nil {
+ return err
+ }
+ }
+
+ if cctx.Bool("unverified") {
+ err = smapi.DealsSetConsiderUnverifiedStorageDeals(lcli.DaemonContext(cctx), false)
+ if err != nil {
+ return err
+ }
+ }
+
return nil
},
}
@@ -310,6 +343,7 @@ var storageDealsCmd = &cli.Command{
getBlocklistCmd,
resetBlocklistCmd,
setSealDurationCmd,
+ dealsPendingPublish,
},
}
@@ -420,7 +454,7 @@ func outputStorageDeals(out io.Writer, deals []storagemarket.MinerDeal, verbose
w := tabwriter.NewWriter(out, 2, 4, 2, ' ', 0)
if verbose {
- _, _ = fmt.Fprintf(w, "Creation\tProposalCid\tDealId\tState\tClient\tSize\tPrice\tDuration\tMessage\n")
+ _, _ = fmt.Fprintf(w, "Creation\tVerified\tProposalCid\tDealId\tState\tClient\tSize\tPrice\tDuration\tTransferChannelID\tMessage\n")
} else {
_, _ = fmt.Fprintf(w, "ProposalCid\tDealId\tState\tClient\tSize\tPrice\tDuration\n")
}
@@ -434,11 +468,16 @@ func outputStorageDeals(out io.Writer, deals []storagemarket.MinerDeal, verbose
fil := types.FIL(types.BigMul(deal.Proposal.StoragePricePerEpoch, types.NewInt(uint64(deal.Proposal.Duration()))))
if verbose {
- _, _ = fmt.Fprintf(w, "%s\t", deal.CreationTime.Time().Format(time.Stamp))
+ _, _ = fmt.Fprintf(w, "%s\t%t\t", deal.CreationTime.Time().Format(time.Stamp), deal.Proposal.VerifiedDeal)
}
_, _ = fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\t%s", propcid, deal.DealID, storagemarket.DealStates[deal.State], deal.Proposal.Client, units.BytesSize(float64(deal.Proposal.PieceSize)), fil, deal.Proposal.Duration())
if verbose {
+ tchid := ""
+ if deal.TransferChannelId != nil {
+ tchid = deal.TransferChannelId.String()
+ }
+ _, _ = fmt.Fprintf(w, "\t%s", tchid)
_, _ = fmt.Fprintf(w, "\t%s", deal.Message)
}
@@ -650,6 +689,11 @@ var marketCancelTransfer = &cli.Command{
Usage: "specify only transfers where peer is/is not initiator",
Value: false,
},
+ &cli.DurationFlag{
+ Name: "cancel-timeout",
+ Usage: "time to wait for cancel to be sent to client",
+ Value: 5 * time.Second,
+ },
},
Action: func(cctx *cli.Context) error {
if !cctx.Args().Present() {
@@ -693,7 +737,9 @@ var marketCancelTransfer = &cli.Command{
}
}
- return nodeApi.MarketCancelDataTransfer(ctx, transferID, other, initiator)
+ timeoutCtx, cancel := context.WithTimeout(ctx, cctx.Duration("cancel-timeout"))
+ defer cancel()
+ return nodeApi.MarketCancelDataTransfer(timeoutCtx, transferID, other, initiator)
},
}
@@ -702,9 +748,14 @@ var transfersListCmd = &cli.Command{
Usage: "List ongoing data transfers for this miner",
Flags: []cli.Flag{
&cli.BoolFlag{
- Name: "color",
- Usage: "use color in display output",
- Value: true,
+ Name: "verbose",
+ Aliases: []string{"v"},
+ Usage: "print verbose transfer details",
+ },
+ &cli.BoolFlag{
+ Name: "color",
+ Usage: "use color in display output",
+ DefaultText: "depends on output being a TTY",
},
&cli.BoolFlag{
Name: "completed",
@@ -720,6 +771,10 @@ var transfersListCmd = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
+ if cctx.IsSet("color") {
+ color.NoColor = !cctx.Bool("color")
+ }
+
api, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
@@ -732,8 +787,8 @@ var transfersListCmd = &cli.Command{
return err
}
+ verbose := cctx.Bool("verbose")
completed := cctx.Bool("completed")
- color := cctx.Bool("color")
watch := cctx.Bool("watch")
showFailed := cctx.Bool("show-failed")
if watch {
@@ -747,7 +802,7 @@ var transfersListCmd = &cli.Command{
tm.MoveCursor(1, 1)
- lcli.OutputDataTransferChannels(tm.Screen, channels, completed, color, showFailed)
+ lcli.OutputDataTransferChannels(tm.Screen, channels, verbose, completed, showFailed)
tm.Flush()
@@ -772,7 +827,61 @@ var transfersListCmd = &cli.Command{
}
}
}
- lcli.OutputDataTransferChannels(os.Stdout, channels, completed, color, showFailed)
+ lcli.OutputDataTransferChannels(os.Stdout, channels, verbose, completed, showFailed)
+ return nil
+ },
+}
+
+var dealsPendingPublish = &cli.Command{
+ Name: "pending-publish",
+ Usage: "list deals waiting in publish queue",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "publish-now",
+ Usage: "send a publish message now",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ if cctx.Bool("publish-now") {
+ if err := api.MarketPublishPendingDeals(ctx); err != nil {
+ return xerrors.Errorf("publishing deals: %w", err)
+ }
+ fmt.Println("triggered deal publishing")
+ return nil
+ }
+
+ pending, err := api.MarketPendingDeals(ctx)
+ if err != nil {
+ return xerrors.Errorf("getting pending deals: %w", err)
+ }
+
+ if len(pending.Deals) > 0 {
+ endsIn := pending.PublishPeriodStart.Add(pending.PublishPeriod).Sub(time.Now())
+ w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
+ _, _ = fmt.Fprintf(w, "Publish period: %s (ends in %s)\n", pending.PublishPeriod, endsIn.Round(time.Second))
+ _, _ = fmt.Fprintf(w, "First deal queued at: %s\n", pending.PublishPeriodStart)
+ _, _ = fmt.Fprintf(w, "Deals will be published at: %s\n", pending.PublishPeriodStart.Add(pending.PublishPeriod))
+ _, _ = fmt.Fprintf(w, "%d deals queued to be published:\n", len(pending.Deals))
+ _, _ = fmt.Fprintf(w, "ProposalCID\tClient\tSize\n")
+ for _, deal := range pending.Deals {
+ proposalNd, err := cborutil.AsIpld(&deal) // nolint
+ if err != nil {
+ return err
+ }
+
+ _, _ = fmt.Fprintf(w, "%s\t%s\t%s\n", proposalNd.Cid(), deal.Proposal.Client, units.BytesSize(float64(deal.Proposal.PieceSize)))
+ }
+ return w.Flush()
+ }
+
+ fmt.Println("No deals queued to be published")
return nil
},
}
diff --git a/cmd/lotus-storage-miner/proving.go b/cmd/lotus-storage-miner/proving.go
index 377b81d328f..5dfe5d4ceda 100644
--- a/cmd/lotus-storage-miner/proving.go
+++ b/cmd/lotus-storage-miner/proving.go
@@ -10,11 +10,14 @@ import (
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
- "github.com/filecoin-project/lotus/api/apibstore"
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/specs-storage/storage"
)
var provingCmd = &cli.Command{
@@ -25,6 +28,7 @@ var provingCmd = &cli.Command{
provingDeadlinesCmd,
provingDeadlineInfoCmd,
provingFaultsCmd,
+ provingCheckProvableCmd,
},
}
@@ -32,14 +36,6 @@ var provingFaultsCmd = &cli.Command{
Name: "faults",
Usage: "View the currently known proving faulty sectors information",
Action: func(cctx *cli.Context) error {
- color.NoColor = !cctx.Bool("color")
-
- nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
- if err != nil {
- return err
- }
- defer closer()
-
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
@@ -48,9 +44,9 @@ var provingFaultsCmd = &cli.Command{
ctx := lcli.ReqContext(cctx)
- stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(api))
+ stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(api))
- maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor"))
+ maddr, err := getActorAddress(ctx, cctx)
if err != nil {
return err
}
@@ -92,14 +88,6 @@ var provingInfoCmd = &cli.Command{
Name: "info",
Usage: "View current state information",
Action: func(cctx *cli.Context) error {
- color.NoColor = !cctx.Bool("color")
-
- nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
- if err != nil {
- return err
- }
- defer closer()
-
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
@@ -108,7 +96,7 @@ var provingInfoCmd = &cli.Command{
ctx := lcli.ReqContext(cctx)
- maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor"))
+ maddr, err := getActorAddress(ctx, cctx)
if err != nil {
return err
}
@@ -123,7 +111,7 @@ var provingInfoCmd = &cli.Command{
return err
}
- stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(api))
+ stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(api))
mas, err := miner.Load(stor, mact)
if err != nil {
@@ -179,7 +167,7 @@ var provingInfoCmd = &cli.Command{
var faultPerc float64
if proving > 0 {
- faultPerc = float64(faults*10000/proving) / 100
+ faultPerc = float64(faults * 100 / proving)
}
fmt.Printf("Current Epoch: %d\n", cd.CurrentEpoch)
@@ -205,14 +193,6 @@ var provingDeadlinesCmd = &cli.Command{
Name: "deadlines",
Usage: "View the current proving period deadlines information",
Action: func(cctx *cli.Context) error {
- color.NoColor = !cctx.Bool("color")
-
- nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
- if err != nil {
- return err
- }
- defer closer()
-
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
@@ -221,7 +201,7 @@ var provingDeadlinesCmd = &cli.Command{
ctx := lcli.ReqContext(cctx)
- maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor"))
+ maddr, err := getActorAddress(ctx, cctx)
if err != nil {
return err
}
@@ -297,12 +277,6 @@ var provingDeadlineInfoCmd = &cli.Command{
return xerrors.Errorf("could not parse deadline index: %w", err)
}
- nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
- if err != nil {
- return err
- }
- defer closer()
-
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
@@ -311,7 +285,7 @@ var provingDeadlineInfoCmd = &cli.Command{
ctx := lcli.ReqContext(cctx)
- maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor"))
+ maddr, err := getActorAddress(ctx, cctx)
if err != nil {
return err
}
@@ -371,3 +345,103 @@ var provingDeadlineInfoCmd = &cli.Command{
return nil
},
}
+
+var provingCheckProvableCmd = &cli.Command{
+ Name: "check",
+ Usage: "Check sectors provable",
+ ArgsUsage: "",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "only-bad",
+ Usage: "print only bad sectors",
+ Value: false,
+ },
+ &cli.BoolFlag{
+ Name: "slow",
+ Usage: "run slower checks",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 1 {
+ return xerrors.Errorf("must pass deadline index")
+ }
+
+ dlIdx, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
+ if err != nil {
+ return xerrors.Errorf("could not parse deadline index: %w", err)
+ }
+
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ sapi, scloser, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer scloser()
+
+ ctx := lcli.ReqContext(cctx)
+
+ addr, err := sapi.ActorAddress(ctx)
+ if err != nil {
+ return err
+ }
+
+ mid, err := address.IDFromAddress(addr)
+ if err != nil {
+ return err
+ }
+
+ info, err := api.StateMinerInfo(ctx, addr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ partitions, err := api.StateMinerPartitions(ctx, addr, dlIdx, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
+ _, _ = fmt.Fprintln(tw, "deadline\tpartition\tsector\tstatus")
+
+ for parIdx, par := range partitions {
+ sectors := make(map[abi.SectorNumber]struct{})
+
+ sectorInfos, err := api.StateMinerSectors(ctx, addr, &par.LiveSectors, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ var tocheck []storage.SectorRef
+ for _, info := range sectorInfos {
+ sectors[info.SectorNumber] = struct{}{}
+ tocheck = append(tocheck, storage.SectorRef{
+ ProofType: info.SealProof,
+ ID: abi.SectorID{
+ Miner: abi.ActorID(mid),
+ Number: info.SectorNumber,
+ },
+ })
+ }
+
+ bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, cctx.Bool("slow"))
+ if err != nil {
+ return err
+ }
+
+ for s := range sectors {
+ if err, exist := bad[s]; exist {
+ _, _ = fmt.Fprintf(tw, "%d\t%d\t%d\t%s\n", dlIdx, parIdx, s, color.RedString("bad")+fmt.Sprintf(" (%s)", err))
+ } else if !cctx.Bool("only-bad") {
+ _, _ = fmt.Fprintf(tw, "%d\t%d\t%d\t%s\n", dlIdx, parIdx, s, color.GreenString("good"))
+ }
+ }
+ }
+
+ return tw.Flush()
+ },
+}
diff --git a/cmd/lotus-storage-miner/retrieval-deals.go b/cmd/lotus-storage-miner/retrieval-deals.go
index 03d397852d8..0411f7f130a 100644
--- a/cmd/lotus-storage-miner/retrieval-deals.go
+++ b/cmd/lotus-storage-miner/retrieval-deals.go
@@ -235,7 +235,7 @@ var retrievalSetAskCmd = &cli.Command{
var retrievalGetAskCmd = &cli.Command{
Name: "get-ask",
- Usage: "Get the provider's current retrieval ask",
+ Usage: "Get the provider's current retrieval ask configured by the provider in the ask-store using the set-ask CLI command",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
ctx := lcli.DaemonContext(cctx)
diff --git a/cmd/lotus-storage-miner/run.go b/cmd/lotus-storage-miner/run.go
index 0c2fba8b387..f276f319c9b 100644
--- a/cmd/lotus-storage-miner/run.go
+++ b/cmd/lotus-storage-miner/run.go
@@ -1,33 +1,28 @@
package main
import (
- "context"
- "net"
- "net/http"
+ "fmt"
_ "net/http/pprof"
"os"
- "os/signal"
- "syscall"
- mux "github.com/gorilla/mux"
+ "github.com/filecoin-project/lotus/api/v1api"
+
+ "github.com/filecoin-project/lotus/api/v0api"
+
"github.com/multiformats/go-multiaddr"
- manet "github.com/multiformats/go-multiaddr/net"
"github.com/urfave/cli/v2"
+ "go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"golang.org/x/xerrors"
- "github.com/filecoin-project/go-jsonrpc"
- "github.com/filecoin-project/go-jsonrpc/auth"
-
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/api/apistruct"
"github.com/filecoin-project/lotus/build"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/lib/ulimit"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node"
- "github.com/filecoin-project/lotus/node/impl"
+ "github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo"
)
@@ -63,19 +58,29 @@ var runCmd = &cli.Command{
}
}
- nodeApi, ncloser, err := lcli.GetFullNodeAPI(cctx)
- if err != nil {
- return xerrors.Errorf("getting full node api: %w", err)
- }
- defer ncloser()
- ctx := lcli.DaemonContext(cctx)
-
+ ctx, _ := tag.New(lcli.DaemonContext(cctx),
+ tag.Insert(metrics.Version, build.BuildVersion),
+ tag.Insert(metrics.Commit, build.CurrentCommit),
+ tag.Insert(metrics.NodeType, "miner"),
+ )
// Register all metric views
if err := view.Register(
- metrics.DefaultViews...,
+ metrics.MinerNodeViews...,
); err != nil {
log.Fatalf("Cannot register the view: %v", err)
}
+ // Set the metric to one so it is published to the exporter
+ stats.Record(ctx, metrics.LotusInfo.M(1))
+
+ if err := checkV1ApiSupport(ctx, cctx); err != nil {
+ return err
+ }
+
+ nodeApi, ncloser, err := lcli.GetFullNodeAPIV1(cctx)
+ if err != nil {
+ return xerrors.Errorf("getting full node api: %w", err)
+ }
+ defer ncloser()
v, err := nodeApi.Version(ctx)
if err != nil {
@@ -88,14 +93,14 @@ var runCmd = &cli.Command{
}
}
- if v.APIVersion != build.FullAPIVersion {
- return xerrors.Errorf("lotus-daemon API version doesn't match: expected: %s", api.Version{APIVersion: build.FullAPIVersion})
+ if v.APIVersion != api.FullAPIVersion1 {
+ return xerrors.Errorf("lotus-daemon API version doesn't match: expected: %s", api.APIVersion{APIVersion: api.FullAPIVersion1})
}
log.Info("Checking full node sync status")
if !cctx.Bool("nosync") {
- if err := lcli.SyncWait(ctx, nodeApi, false); err != nil {
+ if err := lcli.SyncWait(ctx, &v0api.WrapperV1Full{FullNode: nodeApi}, false); err != nil {
return xerrors.Errorf("sync wait: %w", err)
}
}
@@ -114,20 +119,40 @@ var runCmd = &cli.Command{
return xerrors.Errorf("repo at '%s' is not initialized, run 'lotus-miner init' to set it up", minerRepoPath)
}
+ lr, err := r.Lock(repo.StorageMiner)
+ if err != nil {
+ return err
+ }
+ c, err := lr.Config()
+ if err != nil {
+ return err
+ }
+ cfg, ok := c.(*config.StorageMiner)
+ if !ok {
+ return xerrors.Errorf("invalid config for repo, got: %T", c)
+ }
+
+ bootstrapLibP2P := cfg.Subsystems.EnableMarkets
+
+ err = lr.Close()
+ if err != nil {
+ return err
+ }
+
shutdownChan := make(chan struct{})
var minerapi api.StorageMiner
stop, err := node.New(ctx,
- node.StorageMiner(&minerapi),
+ node.StorageMiner(&minerapi, cfg.Subsystems),
node.Override(new(dtypes.ShutdownChan), shutdownChan),
- node.Online(),
+ node.Base(),
node.Repo(r),
node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("miner-api") },
node.Override(new(dtypes.APIEndpoint), func() (dtypes.APIEndpoint, error) {
return multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/" + cctx.String("miner-api"))
})),
- node.Override(new(api.FullNode), nodeApi),
+ node.Override(new(v1api.FullNode), nodeApi),
)
if err != nil {
return xerrors.Errorf("creating node: %w", err)
@@ -138,65 +163,41 @@ var runCmd = &cli.Command{
return xerrors.Errorf("getting API endpoint: %w", err)
}
- // Bootstrap with full node
- remoteAddrs, err := nodeApi.NetAddrsListen(ctx)
- if err != nil {
- return xerrors.Errorf("getting full node libp2p address: %w", err)
- }
+ if bootstrapLibP2P {
+ log.Infof("Bootstrapping libp2p network with full node")
+
+ // Bootstrap with full node
+ remoteAddrs, err := nodeApi.NetAddrsListen(ctx)
+ if err != nil {
+ return xerrors.Errorf("getting full node libp2p address: %w", err)
+ }
- if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil {
- return xerrors.Errorf("connecting to full node (libp2p): %w", err)
+ if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil {
+ return xerrors.Errorf("connecting to full node (libp2p): %w", err)
+ }
}
log.Infof("Remote version %s", v)
- lst, err := manet.Listen(endpoint)
+ // Instantiate the miner node handler.
+ handler, err := node.MinerHandler(minerapi, true)
if err != nil {
- return xerrors.Errorf("could not listen: %w", err)
- }
-
- mux := mux.NewRouter()
-
- rpcServer := jsonrpc.NewServer()
- rpcServer.Register("Filecoin", apistruct.PermissionedStorMinerAPI(metrics.MetricedStorMinerAPI(minerapi)))
-
- mux.Handle("/rpc/v0", rpcServer)
- mux.PathPrefix("/remote").HandlerFunc(minerapi.(*impl.StorageMinerAPI).ServeRemote)
- mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
-
- ah := &auth.Handler{
- Verify: minerapi.AuthVerify,
- Next: mux.ServeHTTP,
+ return xerrors.Errorf("failed to instantiate rpc handler: %w", err)
}
- srv := &http.Server{
- Handler: ah,
- BaseContext: func(listener net.Listener) context.Context {
- ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-miner"))
- return ctx
- },
+ // Serve the RPC.
+ rpcStopper, err := node.ServeRPC(handler, "lotus-miner", endpoint)
+ if err != nil {
+ return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
}
- sigChan := make(chan os.Signal, 2)
- go func() {
- select {
- case sig := <-sigChan:
- log.Warnw("received shutdown", "signal", sig)
- case <-shutdownChan:
- log.Warn("received shutdown")
- }
-
- log.Warn("Shutting down...")
- if err := stop(context.TODO()); err != nil {
- log.Errorf("graceful shutting down failed: %s", err)
- }
- if err := srv.Shutdown(context.TODO()); err != nil {
- log.Errorf("shutting down RPC server failed: %s", err)
- }
- log.Warn("Graceful shutdown successful")
- }()
- signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT)
+ // Monitor for shutdown.
+ finishCh := node.MonitorShutdown(shutdownChan,
+ node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
+ node.ShutdownHandler{Component: "miner", StopFunc: stop},
+ )
- return srv.Serve(manet.NetListener(lst))
+ <-finishCh
+ return nil
},
}
diff --git a/cmd/lotus-storage-miner/sealing.go b/cmd/lotus-storage-miner/sealing.go
index 440d4aaea76..3bf4c675fd7 100644
--- a/cmd/lotus-storage-miner/sealing.go
+++ b/cmd/lotus-storage-miner/sealing.go
@@ -28,6 +28,7 @@ var sealingCmd = &cli.Command{
sealingJobsCmd,
sealingWorkersCmd,
sealingSchedDiagCmd,
+ sealingAbortCmd,
},
}
@@ -35,10 +36,16 @@ var sealingWorkersCmd = &cli.Command{
Name: "workers",
Usage: "list workers",
Flags: []cli.Flag{
- &cli.BoolFlag{Name: "color"},
+ &cli.BoolFlag{
+ Name: "color",
+ Usage: "use color in display output",
+ DefaultText: "depends on output being a TTY",
+ },
},
Action: func(cctx *cli.Context) error {
- color.NoColor = !cctx.Bool("color")
+ if cctx.IsSet("color") {
+ color.NoColor = !cctx.Bool("color")
+ }
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
@@ -124,12 +131,22 @@ var sealingWorkersCmd = &cli.Command{
var sealingJobsCmd = &cli.Command{
Name: "jobs",
- Usage: "list workers",
+ Usage: "list running jobs",
Flags: []cli.Flag{
- &cli.BoolFlag{Name: "color"},
+ &cli.BoolFlag{
+ Name: "color",
+ Usage: "use color in display output",
+ DefaultText: "depends on output being a TTY",
+ },
+ &cli.BoolFlag{
+ Name: "show-ret-done",
+ Usage: "show returned but not consumed calls",
+ },
},
Action: func(cctx *cli.Context) error {
- color.NoColor = !cctx.Bool("color")
+ if cctx.IsSet("color") {
+ color.NoColor = !cctx.Bool("color")
+ }
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
@@ -187,10 +204,17 @@ var sealingJobsCmd = &cli.Command{
for _, l := range lines {
state := "running"
- if l.RunWait > 0 {
+ switch {
+ case l.RunWait > 0:
state = fmt.Sprintf("assigned(%d)", l.RunWait-1)
- }
- if l.RunWait == -1 {
+ case l.RunWait == storiface.RWRetDone:
+ if !cctx.Bool("show-ret-done") {
+ continue
+ }
+ state = "ret-done"
+ case l.RunWait == storiface.RWReturned:
+ state = "returned"
+ case l.RunWait == storiface.RWRetWait:
state = "ret-wait"
}
dur := "n/a"
@@ -198,11 +222,16 @@ var sealingJobsCmd = &cli.Command{
dur = time.Now().Sub(l.Start).Truncate(time.Millisecond * 100).String()
}
+ hostname, ok := workerHostnames[l.wid]
+ if !ok {
+ hostname = l.Hostname
+ }
+
_, _ = fmt.Fprintf(tw, "%s\t%d\t%s\t%s\t%s\t%s\t%s\n",
- hex.EncodeToString(l.ID.ID[10:]),
+ hex.EncodeToString(l.ID.ID[:4]),
l.Sector.Number,
- hex.EncodeToString(l.wid[5:]),
- workerHostnames[l.wid],
+ hex.EncodeToString(l.wid[:4]),
+ hostname,
l.Task.Short(),
state,
dur)
@@ -215,6 +244,11 @@ var sealingJobsCmd = &cli.Command{
var sealingSchedDiagCmd = &cli.Command{
Name: "sched-diag",
Usage: "Dump internal scheduler state",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "force-sched",
+ },
+ },
Action: func(cctx *cli.Context) error {
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
@@ -224,7 +258,7 @@ var sealingSchedDiagCmd = &cli.Command{
ctx := lcli.ReqContext(cctx)
- st, err := nodeApi.SealingSchedDiag(ctx)
+ st, err := nodeApi.SealingSchedDiag(ctx, cctx.Bool("force-sched"))
if err != nil {
return err
}
@@ -239,3 +273,47 @@ var sealingSchedDiagCmd = &cli.Command{
return nil
},
}
+
+var sealingAbortCmd = &cli.Command{
+ Name: "abort",
+ Usage: "Abort a running job",
+ ArgsUsage: "[callid]",
+ Action: func(cctx *cli.Context) error {
+ if cctx.Args().Len() != 1 {
+ return xerrors.Errorf("expected 1 argument")
+ }
+
+ nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := lcli.ReqContext(cctx)
+
+ jobs, err := nodeApi.WorkerJobs(ctx)
+ if err != nil {
+ return xerrors.Errorf("getting worker jobs: %w", err)
+ }
+
+ var job *storiface.WorkerJob
+ outer:
+ for _, workerJobs := range jobs {
+ for _, j := range workerJobs {
+ if strings.HasPrefix(j.ID.ID.String(), cctx.Args().First()) {
+ j := j
+ job = &j
+ break outer
+ }
+ }
+ }
+
+ if job == nil {
+ return xerrors.Errorf("job with specified id prefix not found")
+ }
+
+ fmt.Printf("aborting job %s, task %s, sector %d, running on host %s\n", job.ID.String(), job.Task.Short(), job.Sector.Number, job.Hostname)
+
+ return nodeApi.SealingAbort(ctx, job.ID)
+ },
+}
diff --git a/cmd/lotus-storage-miner/sectors.go b/cmd/lotus-storage-miner/sectors.go
index 967e2d413b7..5c4581bbc53 100644
--- a/cmd/lotus-storage-miner/sectors.go
+++ b/cmd/lotus-storage-miner/sectors.go
@@ -5,6 +5,7 @@ import (
"os"
"sort"
"strconv"
+ "strings"
"time"
"github.com/docker/go-units"
@@ -12,10 +13,13 @@ import (
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
+ "github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
+ miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
"github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
@@ -34,11 +38,14 @@ var sectorsCmd = &cli.Command{
sectorsRefsCmd,
sectorsUpdateCmd,
sectorsPledgeCmd,
+ sectorsExtendCmd,
+ sectorsTerminateCmd,
sectorsRemoveCmd,
sectorsMarkForUpgradeCmd,
sectorsStartSealCmd,
sectorsSealDelayCmd,
sectorsCapacityCollateralCmd,
+ sectorsBatching,
},
}
@@ -53,7 +60,14 @@ var sectorsPledgeCmd = &cli.Command{
defer closer()
ctx := lcli.ReqContext(cctx)
- return nodeApi.PledgeSector(ctx)
+ id, err := nodeApi.PledgeSector(ctx)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("Created CC sector: ", id.Number)
+
+ return nil
},
}
@@ -147,17 +161,32 @@ var sectorsListCmd = &cli.Command{
Usage: "show removed sectors",
},
&cli.BoolFlag{
- Name: "color",
- Aliases: []string{"c"},
- Value: true,
+ Name: "color",
+ Usage: "use color in display output",
+ DefaultText: "depends on output being a TTY",
+ Aliases: []string{"c"},
},
&cli.BoolFlag{
Name: "fast",
Usage: "don't show on-chain info for better performance",
},
+ &cli.BoolFlag{
+ Name: "events",
+ Usage: "display number of events the sector has received",
+ },
+ &cli.BoolFlag{
+ Name: "seal-time",
+ Usage: "display how long it took for the sector to be sealed",
+ },
+ &cli.StringFlag{
+ Name: "states",
+ Usage: "filter sectors by a comma-separated list of states",
+ },
},
Action: func(cctx *cli.Context) error {
- color.NoColor = !cctx.Bool("color")
+ if cctx.IsSet("color") {
+ color.NoColor = !cctx.Bool("color")
+ }
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
@@ -173,7 +202,22 @@ var sectorsListCmd = &cli.Command{
ctx := lcli.ReqContext(cctx)
- list, err := nodeApi.SectorsList(ctx)
+ var list []abi.SectorNumber
+
+ showRemoved := cctx.Bool("show-removed")
+ states := cctx.String("states")
+ if len(states) == 0 {
+ list, err = nodeApi.SectorsList(ctx)
+ } else {
+ showRemoved = true
+ sList := strings.Split(states, ",")
+ ss := make([]api.SectorState, len(sList))
+ for i := range sList {
+ ss[i] = api.SectorState(sList[i])
+ }
+ list, err = nodeApi.SectorsListInStates(ctx, ss)
+ }
+
if err != nil {
return err
}
@@ -201,7 +245,7 @@ var sectorsListCmd = &cli.Command{
if err != nil {
return err
}
- commitedIDs := make(map[abi.SectorNumber]struct{}, len(activeSet))
+ commitedIDs := make(map[abi.SectorNumber]struct{}, len(sset))
for _, info := range sset {
commitedIDs[info.SectorNumber] = struct{}{}
}
@@ -216,8 +260,11 @@ var sectorsListCmd = &cli.Command{
tablewriter.Col("OnChain"),
tablewriter.Col("Active"),
tablewriter.Col("Expiration"),
+ tablewriter.Col("SealTime"),
+ tablewriter.Col("Events"),
tablewriter.Col("Deals"),
tablewriter.Col("DealWeight"),
+ tablewriter.Col("VerifiedPower"),
tablewriter.NewLineCol("Error"),
tablewriter.NewLineCol("RecoveryTimeout"))
@@ -233,13 +280,15 @@ var sectorsListCmd = &cli.Command{
continue
}
- if cctx.Bool("show-removed") || st.State != api.SectorState(sealing.Removed) {
+ if showRemoved || st.State != api.SectorState(sealing.Removed) {
_, inSSet := commitedIDs[s]
_, inASet := activeIDs[s]
- dw := .0
+ dw, vp := .0, .0
if st.Expiration-st.Activation > 0 {
- dw = float64(big.Div(st.DealWeight, big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
+ rdw := big.Add(st.DealWeight, st.VerifiedDealWeight)
+ dw = float64(big.Div(rdw, big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
+ vp = float64(big.Div(big.Mul(st.VerifiedDealWeight, big.NewInt(9)), big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
}
var deals int
@@ -278,6 +327,9 @@ var sectorsListCmd = &cli.Command{
if !fast && deals > 0 {
m["DealWeight"] = units.BytesSize(dw)
+ if vp > 0 {
+ m["VerifiedPower"] = color.GreenString(units.BytesSize(vp))
+ }
}
if st.Early > 0 {
@@ -286,6 +338,52 @@ var sectorsListCmd = &cli.Command{
}
}
+ if cctx.Bool("events") {
+ var events int
+ for _, sectorLog := range st.Log {
+ if !strings.HasPrefix(sectorLog.Kind, "event") {
+ continue
+ }
+ if sectorLog.Kind == "event;sealing.SectorRestart" {
+ continue
+ }
+ events++
+ }
+
+ pieces := len(st.Deals)
+
+ switch {
+ case events < 12+pieces:
+ m["Events"] = color.GreenString("%d", events)
+ case events < 20+pieces:
+ m["Events"] = color.YellowString("%d", events)
+ default:
+ m["Events"] = color.RedString("%d", events)
+ }
+ }
+
+ if cctx.Bool("seal-time") && len(st.Log) > 1 {
+ start := time.Unix(int64(st.Log[0].Timestamp), 0)
+
+ for _, sectorLog := range st.Log {
+ if sectorLog.Kind == "event;sealing.SectorProving" {
+ end := time.Unix(int64(sectorLog.Timestamp), 0)
+ dur := end.Sub(start)
+
+ switch {
+ case dur < 12*time.Hour:
+ m["SealTime"] = color.GreenString("%s", dur)
+ case dur < 24*time.Hour:
+ m["SealTime"] = color.YellowString("%s", dur)
+ default:
+ m["SealTime"] = color.RedString("%s", dur)
+ }
+
+ break
+ }
+ }
+ }
+
tw.Write(m)
}
}
@@ -320,9 +418,360 @@ var sectorsRefsCmd = &cli.Command{
},
}
+var sectorsExtendCmd = &cli.Command{
+ Name: "extend",
+ Usage: "Extend sector expiration",
+ ArgsUsage: "",
+ Flags: []cli.Flag{
+ &cli.Int64Flag{
+ Name: "new-expiration",
+ Usage: "new expiration epoch",
+ Required: false,
+ },
+ &cli.BoolFlag{
+ Name: "v1-sectors",
+ Usage: "renews all v1 sectors up to the maximum possible lifetime",
+ Required: false,
+ },
+ &cli.Int64Flag{
+ Name: "tolerance",
+ Value: 20160,
+ Usage: "when extending v1 sectors, don't try to extend sectors by fewer than this number of epochs",
+ Required: false,
+ },
+ &cli.Int64Flag{
+ Name: "expiration-ignore",
+ Value: 120,
+ Usage: "when extending v1 sectors, skip sectors whose current expiration is less than epochs from now",
+ Required: false,
+ },
+ &cli.Int64Flag{
+ Name: "expiration-cutoff",
+ Usage: "when extending v1 sectors, skip sectors whose current expiration is more than epochs from now (infinity if unspecified)",
+ Required: false,
+ },
+ &cli.StringFlag{},
+ },
+ Action: func(cctx *cli.Context) error {
+
+ api, nCloser, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer nCloser()
+
+ ctx := lcli.ReqContext(cctx)
+
+ maddr, err := getActorAddress(ctx, cctx)
+ if err != nil {
+ return err
+ }
+
+ var params []miner3.ExtendSectorExpirationParams
+
+ if cctx.Bool("v1-sectors") {
+
+ head, err := api.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+
+ nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ extensions := map[miner.SectorLocation]map[abi.ChainEpoch][]uint64{}
+
+ // are given durations within tolerance epochs
+ withinTolerance := func(a, b abi.ChainEpoch) bool {
+ diff := a - b
+ if diff < 0 {
+ diff = b - a
+ }
+
+ return diff <= abi.ChainEpoch(cctx.Int64("tolerance"))
+ }
+
+ sis, err := api.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("getting miner sector infos: %w", err)
+ }
+
+ for _, si := range sis {
+ if si.SealProof >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 {
+ continue
+ }
+
+ if si.Expiration < (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-ignore"))) {
+ continue
+ }
+
+ if cctx.IsSet("expiration-cutoff") {
+ if si.Expiration > (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-cutoff"))) {
+ continue
+ }
+ }
+
+ ml := policy.GetSectorMaxLifetime(si.SealProof, nv)
+ // if the sector's missing less than "tolerance" of its maximum possible lifetime, don't bother extending it
+ if withinTolerance(si.Expiration-si.Activation, ml) {
+ continue
+ }
+
+ // Set the new expiration to 48 hours less than the theoretical maximum lifetime
+ newExp := ml - (miner3.WPoStProvingPeriod * 2) + si.Activation
+ if withinTolerance(si.Expiration, newExp) || si.Expiration >= newExp {
+ continue
+ }
+
+ p, err := api.StateSectorPartition(ctx, maddr, si.SectorNumber, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("getting sector location for sector %d: %w", si.SectorNumber, err)
+ }
+
+ if p == nil {
+ return xerrors.Errorf("sector %d not found in any partition", si.SectorNumber)
+ }
+
+ es, found := extensions[*p]
+ if !found {
+ ne := make(map[abi.ChainEpoch][]uint64)
+ ne[newExp] = []uint64{uint64(si.SectorNumber)}
+ extensions[*p] = ne
+ } else {
+ added := false
+ for exp := range es {
+ if withinTolerance(exp, newExp) && newExp >= exp && exp > si.Expiration {
+ es[exp] = append(es[exp], uint64(si.SectorNumber))
+ added = true
+ break
+ }
+ }
+
+ if !added {
+ es[newExp] = []uint64{uint64(si.SectorNumber)}
+ }
+ }
+ }
+
+ p := miner3.ExtendSectorExpirationParams{}
+ scount := 0
+
+ for l, exts := range extensions {
+ for newExp, numbers := range exts {
+ scount += len(numbers)
+ if scount > policy.GetAddressedSectorsMax(nv) || len(p.Extensions) == policy.GetDeclarationsMax(nv) {
+ params = append(params, p)
+ p = miner3.ExtendSectorExpirationParams{}
+ scount = len(numbers)
+ }
+
+ p.Extensions = append(p.Extensions, miner3.ExpirationExtension{
+ Deadline: l.Deadline,
+ Partition: l.Partition,
+ Sectors: bitfield.NewFromSet(numbers),
+ NewExpiration: newExp,
+ })
+ }
+ }
+
+ // if we have any sectors, then one last append is needed here
+ if scount != 0 {
+ params = append(params, p)
+ }
+
+ } else {
+ if !cctx.Args().Present() || !cctx.IsSet("new-expiration") {
+ return xerrors.Errorf("must pass at least one sector number and new expiration")
+ }
+ sectors := map[miner.SectorLocation][]uint64{}
+
+ for i, s := range cctx.Args().Slice() {
+ id, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return xerrors.Errorf("could not parse sector %d: %w", i, err)
+ }
+
+ p, err := api.StateSectorPartition(ctx, maddr, abi.SectorNumber(id), types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("getting sector location for sector %d: %w", id, err)
+ }
+
+ if p == nil {
+ return xerrors.Errorf("sector %d not found in any partition", id)
+ }
+
+ sectors[*p] = append(sectors[*p], id)
+ }
+
+ p := miner3.ExtendSectorExpirationParams{}
+ for l, numbers := range sectors {
+
+ // TODO: Dedup with above loop
+ p.Extensions = append(p.Extensions, miner3.ExpirationExtension{
+ Deadline: l.Deadline,
+ Partition: l.Partition,
+ Sectors: bitfield.NewFromSet(numbers),
+ NewExpiration: abi.ChainEpoch(cctx.Int64("new-expiration")),
+ })
+ }
+
+ params = append(params, p)
+ }
+
+ if len(params) == 0 {
+ fmt.Println("nothing to extend")
+ return nil
+ }
+
+ mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("getting miner info: %w", err)
+ }
+
+ for i := range params {
+ sp, aerr := actors.SerializeParams(¶ms[i])
+ if aerr != nil {
+ return xerrors.Errorf("serializing params: %w", err)
+ }
+
+ smsg, err := api.MpoolPushMessage(ctx, &types.Message{
+ From: mi.Worker,
+ To: maddr,
+ Method: miner.Methods.ExtendSectorExpiration,
+
+ Value: big.Zero(),
+ Params: sp,
+ }, nil)
+ if err != nil {
+ return xerrors.Errorf("mpool push message: %w", err)
+ }
+
+ fmt.Println(smsg.Cid())
+ }
+
+ return nil
+ },
+}
+
+var sectorsTerminateCmd = &cli.Command{
+ Name: "terminate",
+ Usage: "Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector)",
+ ArgsUsage: "",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "really-do-it",
+ Usage: "pass this flag if you know what you are doing",
+ },
+ },
+ Subcommands: []*cli.Command{
+ sectorsTerminateFlushCmd,
+ sectorsTerminatePendingCmd,
+ },
+ Action: func(cctx *cli.Context) error {
+ if !cctx.Bool("really-do-it") {
+ return xerrors.Errorf("pass --really-do-it to confirm this action")
+ }
+ nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+ if cctx.Args().Len() != 1 {
+ return xerrors.Errorf("must pass sector number")
+ }
+
+ id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
+ if err != nil {
+ return xerrors.Errorf("could not parse sector number: %w", err)
+ }
+
+ return nodeApi.SectorTerminate(ctx, abi.SectorNumber(id))
+ },
+}
+
+var sectorsTerminateFlushCmd = &cli.Command{
+ Name: "flush",
+ Usage: "Send a terminate message if there are sectors queued for termination",
+ Action: func(cctx *cli.Context) error {
+ nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ mcid, err := nodeApi.SectorTerminateFlush(ctx)
+ if err != nil {
+ return err
+ }
+
+ if mcid == nil {
+ return xerrors.New("no sectors were queued for termination")
+ }
+
+ fmt.Println(mcid)
+
+ return nil
+ },
+}
+
+var sectorsTerminatePendingCmd = &cli.Command{
+ Name: "pending",
+ Usage: "List sector numbers of sectors pending termination",
+ Action: func(cctx *cli.Context) error {
+ nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ api, nCloser, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer nCloser()
+ ctx := lcli.ReqContext(cctx)
+
+ pending, err := nodeApi.SectorTerminatePending(ctx)
+ if err != nil {
+ return err
+ }
+
+ maddr, err := nodeApi.ActorAddress(ctx)
+ if err != nil {
+ return err
+ }
+
+ dl, err := api.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("getting proving deadline info failed: %w", err)
+ }
+
+ for _, id := range pending {
+ loc, err := api.StateSectorPartition(ctx, maddr, id.Number, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("finding sector partition: %w", err)
+ }
+
+ fmt.Print(id.Number)
+
+ if loc.Deadline == (dl.Index+1)%miner.WPoStPeriodDeadlines || // not in next (in case the terminate message takes a while to get on chain)
+ loc.Deadline == dl.Index || // not in current
+ (loc.Deadline+1)%miner.WPoStPeriodDeadlines == dl.Index { // not in previous
+ fmt.Print(" (in proving window)")
+ }
+ fmt.Println()
+ }
+
+ return nil
+ },
+}
+
var sectorsRemoveCmd = &cli.Command{
Name: "remove",
- Usage: "Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector)",
+ Usage: "Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty))",
ArgsUsage: "",
Flags: []cli.Flag{
&cli.BoolFlag{
@@ -439,37 +888,58 @@ var sectorsCapacityCollateralCmd = &cli.Command{
},
Action: func(cctx *cli.Context) error {
- mApi, mCloser, err := lcli.GetStorageMinerAPI(cctx)
+ nApi, nCloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
- defer mCloser()
+ defer nCloser()
- nApi, nCloser, err := lcli.GetFullNodeAPI(cctx)
+ ctx := lcli.ReqContext(cctx)
+
+ maddr, err := getActorAddress(ctx, cctx)
if err != nil {
return err
}
- defer nCloser()
- ctx := lcli.ReqContext(cctx)
+ mi, err := nApi.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
- maddr, err := mApi.ActorAddress(ctx)
+ nv, err := nApi.StateNetworkVersion(ctx, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, mi.WindowPoStProofType)
if err != nil {
return err
}
pci := miner.SectorPreCommitInfo{
+ SealProof: spt,
Expiration: abi.ChainEpoch(cctx.Uint64("expiration")),
}
if pci.Expiration == 0 {
- pci.Expiration = policy.GetMaxSectorExpirationExtension()
+ h, err := nApi.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+
+ pci.Expiration = policy.GetMaxSectorExpirationExtension() + h.Height()
}
+
pc, err := nApi.StateMinerInitialPledgeCollateral(ctx, maddr, pci, types.EmptyTSK)
if err != nil {
return err
}
- fmt.Printf("Estimated collateral: %s\n", types.FIL(pc))
+ pcd, err := nApi.StateMinerPreCommitDepositForPower(ctx, maddr, pci, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("Estimated collateral: %s\n", types.FIL(big.Max(pc, pcd)))
return nil
},
@@ -517,6 +987,135 @@ var sectorsUpdateCmd = &cli.Command{
},
}
+var sectorsBatching = &cli.Command{
+ Name: "batching",
+ Usage: "manage batch sector operations",
+ Subcommands: []*cli.Command{
+ sectorsBatchingPendingCommit,
+ sectorsBatchingPendingPreCommit,
+ },
+}
+
+var sectorsBatchingPendingCommit = &cli.Command{
+ Name: "commit",
+ Usage: "list sectors waiting in commit batch queue",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "publish-now",
+ Usage: "send a batch now",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ if cctx.Bool("publish-now") {
+ res, err := api.SectorCommitFlush(ctx)
+ if err != nil {
+ return xerrors.Errorf("flush: %w", err)
+ }
+ if res == nil {
+ return xerrors.Errorf("no sectors to publish")
+ }
+
+ for i, re := range res {
+ fmt.Printf("Batch %d:\n", i)
+ if re.Error != "" {
+ fmt.Printf("\tError: %s\n", re.Error)
+ } else {
+ fmt.Printf("\tMessage: %s\n", re.Msg)
+ }
+ fmt.Printf("\tSectors:\n")
+ for _, sector := range re.Sectors {
+ if e, found := re.FailedSectors[sector]; found {
+ fmt.Printf("\t\t%d\tERROR %s\n", sector, e)
+ } else {
+ fmt.Printf("\t\t%d\tOK\n", sector)
+ }
+ }
+ }
+ return nil
+ }
+
+ pending, err := api.SectorCommitPending(ctx)
+ if err != nil {
+ return xerrors.Errorf("getting pending deals: %w", err)
+ }
+
+ if len(pending) > 0 {
+ for _, sector := range pending {
+ fmt.Println(sector.Number)
+ }
+ return nil
+ }
+
+ fmt.Println("No sectors queued to be committed")
+ return nil
+ },
+}
+
+var sectorsBatchingPendingPreCommit = &cli.Command{
+ Name: "precommit",
+ Usage: "list sectors waiting in precommit batch queue",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "publish-now",
+ Usage: "send a batch now",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ if cctx.Bool("publish-now") {
+ res, err := api.SectorPreCommitFlush(ctx)
+ if err != nil {
+ return xerrors.Errorf("flush: %w", err)
+ }
+ if res == nil {
+ return xerrors.Errorf("no sectors to publish")
+ }
+
+ for i, re := range res {
+ fmt.Printf("Batch %d:\n", i)
+ if re.Error != "" {
+ fmt.Printf("\tError: %s\n", re.Error)
+ } else {
+ fmt.Printf("\tMessage: %s\n", re.Msg)
+ }
+ fmt.Printf("\tSectors:\n")
+ for _, sector := range re.Sectors {
+ fmt.Printf("\t\t%d\tOK\n", sector)
+ }
+ }
+ return nil
+ }
+
+ pending, err := api.SectorPreCommitPending(ctx)
+ if err != nil {
+ return xerrors.Errorf("getting pending deals: %w", err)
+ }
+
+ if len(pending) > 0 {
+ for _, sector := range pending {
+ fmt.Println(sector.Number)
+ }
+ return nil
+ }
+
+ fmt.Println("No sectors queued to be committed")
+ return nil
+ },
+}
+
func yesno(b bool) string {
if b {
return color.GreenString("YES")
diff --git a/cmd/lotus-storage-miner/storage.go b/cmd/lotus-storage-miner/storage.go
index 8b960a4bf50..e7508eb295c 100644
--- a/cmd/lotus-storage-miner/storage.go
+++ b/cmd/lotus-storage-miner/storage.go
@@ -1,6 +1,7 @@
package main
import (
+ "context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -11,6 +12,9 @@ import (
"strings"
"time"
+ "github.com/filecoin-project/lotus/api/v0api"
+
+ "github.com/docker/go-units"
"github.com/fatih/color"
"github.com/google/uuid"
"github.com/mitchellh/go-homedir"
@@ -20,11 +24,14 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
+ sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
+ "github.com/filecoin-project/lotus/lib/tablewriter"
)
const metaFile = "sectorstore.json"
@@ -40,6 +47,7 @@ stored while moving through the sealing pipeline (references as 'seal').`,
storageAttachCmd,
storageListCmd,
storageFindCmd,
+ storageCleanupCmd,
},
}
@@ -83,6 +91,10 @@ over time
Name: "store",
Usage: "(for init) use path for long-term storage",
},
+ &cli.StringFlag{
+ Name: "max-storage",
+ Usage: "(for init) limit storage space for sectors (expensive for very large paths!)",
+ },
},
Action: func(cctx *cli.Context) error {
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
@@ -116,15 +128,24 @@ over time
return err
}
+ var maxStor int64
+ if cctx.IsSet("max-storage") {
+ maxStor, err = units.RAMInBytes(cctx.String("max-storage"))
+ if err != nil {
+ return xerrors.Errorf("parsing max-storage: %w", err)
+ }
+ }
+
cfg := &stores.LocalStorageMeta{
- ID: stores.ID(uuid.New().String()),
- Weight: cctx.Uint64("weight"),
- CanSeal: cctx.Bool("seal"),
- CanStore: cctx.Bool("store"),
+ ID: stores.ID(uuid.New().String()),
+ Weight: cctx.Uint64("weight"),
+ CanSeal: cctx.Bool("seal"),
+ CanStore: cctx.Bool("store"),
+ MaxStorage: uint64(maxStor),
}
if !(cfg.CanStore || cfg.CanSeal) {
- return xerrors.Errorf("must specify at least one of --store of --seal")
+ return xerrors.Errorf("must specify at least one of --store or --seal")
}
b, err := json.MarshalIndent(cfg, "", " ")
@@ -145,10 +166,19 @@ var storageListCmd = &cli.Command{
Name: "list",
Usage: "list local storage paths",
Flags: []cli.Flag{
- &cli.BoolFlag{Name: "color"},
+ &cli.BoolFlag{
+ Name: "color",
+ Usage: "use color in display output",
+ DefaultText: "depends on output being a TTY",
+ },
+ },
+ Subcommands: []*cli.Command{
+ storageListSectorsCmd,
},
Action: func(cctx *cli.Context) error {
- color.NoColor = !cctx.Bool("color")
+ if cctx.IsSet("color") {
+ color.NoColor = !cctx.Bool("color")
+ }
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
@@ -212,26 +242,66 @@ var storageListCmd = &cli.Command{
}
ping := time.Now().Sub(pingStart)
- usedPercent := (st.Capacity - st.Available) * 100 / st.Capacity
-
- percCol := color.FgGreen
- switch {
- case usedPercent > 98:
- percCol = color.FgRed
- case usedPercent > 90:
- percCol = color.FgYellow
+ safeRepeat := func(s string, count int) string {
+ if count < 0 {
+ return ""
+ }
+ return strings.Repeat(s, count)
}
var barCols = int64(50)
- set := (st.Capacity - st.Available) * barCols / st.Capacity
- used := (st.Capacity - (st.Available + st.Reserved)) * barCols / st.Capacity
- reserved := set - used
- bar := strings.Repeat("#", int(used)) + strings.Repeat("*", int(reserved)) + strings.Repeat(" ", int(barCols-set))
-
- fmt.Printf("\t[%s] %s/%s %s\n", color.New(percCol).Sprint(bar),
- types.SizeStr(types.NewInt(uint64(st.Capacity-st.Available))),
- types.SizeStr(types.NewInt(uint64(st.Capacity))),
- color.New(percCol).Sprintf("%d%%", usedPercent))
+
+ // filesystem use bar
+ {
+ usedPercent := (st.Capacity - st.FSAvailable) * 100 / st.Capacity
+
+ percCol := color.FgGreen
+ switch {
+ case usedPercent > 98:
+ percCol = color.FgRed
+ case usedPercent > 90:
+ percCol = color.FgYellow
+ }
+
+ set := (st.Capacity - st.FSAvailable) * barCols / st.Capacity
+ used := (st.Capacity - (st.FSAvailable + st.Reserved)) * barCols / st.Capacity
+ reserved := set - used
+ bar := safeRepeat("#", int(used)) + safeRepeat("*", int(reserved)) + safeRepeat(" ", int(barCols-set))
+
+ desc := ""
+ if st.Max > 0 {
+ desc = " (filesystem)"
+ }
+
+ fmt.Printf("\t[%s] %s/%s %s%s\n", color.New(percCol).Sprint(bar),
+ types.SizeStr(types.NewInt(uint64(st.Capacity-st.FSAvailable))),
+ types.SizeStr(types.NewInt(uint64(st.Capacity))),
+ color.New(percCol).Sprintf("%d%%", usedPercent), desc)
+ }
+
+ // optional configured limit bar
+ if st.Max > 0 {
+ usedPercent := st.Used * 100 / st.Max
+
+ percCol := color.FgGreen
+ switch {
+ case usedPercent > 98:
+ percCol = color.FgRed
+ case usedPercent > 90:
+ percCol = color.FgYellow
+ }
+
+ set := st.Used * barCols / st.Max
+ used := (st.Used + st.Reserved) * barCols / st.Max
+ reserved := set - used
+ bar := safeRepeat("#", int(used)) + safeRepeat("*", int(reserved)) + safeRepeat(" ", int(barCols-set))
+
+ fmt.Printf("\t[%s] %s/%s %s (limit)\n", color.New(percCol).Sprint(bar),
+ types.SizeStr(types.NewInt(uint64(st.Used))),
+ types.SizeStr(types.NewInt(uint64(st.Max))),
+ color.New(percCol).Sprintf("%d%%", usedPercent))
+ }
+
fmt.Printf("\t%s; %s; %s; Reserved: %s\n",
color.YellowString("Unsealed: %d", cnt[0]),
color.GreenString("Sealed: %d", cnt[1]),
@@ -408,3 +478,266 @@ var storageFindCmd = &cli.Command{
return nil
},
}
+
+var storageListSectorsCmd = &cli.Command{
+ Name: "sectors",
+ Usage: "get list of all sector files",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "color",
+ Usage: "use color in display output",
+ DefaultText: "depends on output being a TTY",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ if cctx.IsSet("color") {
+ color.NoColor = !cctx.Bool("color")
+ }
+
+ nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ napi, closer2, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer2()
+
+ ctx := lcli.ReqContext(cctx)
+
+ sectors, err := nodeApi.SectorsList(ctx)
+ if err != nil {
+ return xerrors.Errorf("listing sectors: %w", err)
+ }
+
+ maddr, err := nodeApi.ActorAddress(ctx)
+ if err != nil {
+ return err
+ }
+
+ aid, err := address.IDFromAddress(maddr)
+ if err != nil {
+ return err
+ }
+
+ mi, err := napi.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ sid := func(sn abi.SectorNumber) abi.SectorID {
+ return abi.SectorID{
+ Miner: abi.ActorID(aid),
+ Number: sn,
+ }
+ }
+
+ type entry struct {
+ id abi.SectorNumber
+ storage stores.ID
+ ft storiface.SectorFileType
+ urls string
+
+ primary, seal, store bool
+
+ state api.SectorState
+ }
+
+ var list []entry
+
+ for _, sector := range sectors {
+ st, err := nodeApi.SectorsStatus(ctx, sector, false)
+ if err != nil {
+ return xerrors.Errorf("getting sector status for sector %d: %w", sector, err)
+ }
+
+ for _, ft := range storiface.PathTypes {
+ si, err := nodeApi.StorageFindSector(ctx, sid(sector), ft, mi.SectorSize, false)
+ if err != nil {
+ return xerrors.Errorf("find sector %d: %w", sector, err)
+ }
+
+ for _, info := range si {
+
+ list = append(list, entry{
+ id: sector,
+ storage: info.ID,
+ ft: ft,
+ urls: strings.Join(info.URLs, ";"),
+
+ primary: info.Primary,
+ seal: info.CanSeal,
+ store: info.CanStore,
+
+ state: st.State,
+ })
+ }
+ }
+
+ }
+
+ sort.Slice(list, func(i, j int) bool {
+ if list[i].store != list[j].store {
+ return list[i].store
+ }
+
+ if list[i].storage != list[j].storage {
+ return list[i].storage < list[j].storage
+ }
+
+ if list[i].id != list[j].id {
+ return list[i].id < list[j].id
+ }
+
+ return list[i].ft < list[j].ft
+ })
+
+ tw := tablewriter.New(
+ tablewriter.Col("Storage"),
+ tablewriter.Col("Sector"),
+ tablewriter.Col("Type"),
+ tablewriter.Col("State"),
+ tablewriter.Col("Primary"),
+ tablewriter.Col("Path use"),
+ tablewriter.Col("URLs"),
+ )
+
+ if len(list) == 0 {
+ return nil
+ }
+
+ lastS := list[0].storage
+ sc1, sc2 := color.FgBlue, color.FgCyan
+
+ for _, e := range list {
+ if e.storage != lastS {
+ lastS = e.storage
+ sc1, sc2 = sc2, sc1
+ }
+
+ m := map[string]interface{}{
+ "Storage": color.New(sc1).Sprint(e.storage),
+ "Sector": e.id,
+ "Type": e.ft.String(),
+ "State": color.New(stateOrder[sealing.SectorState(e.state)].col).Sprint(e.state),
+ "Primary": maybeStr(e.seal, color.FgGreen, "primary"),
+ "Path use": maybeStr(e.seal, color.FgMagenta, "seal ") + maybeStr(e.store, color.FgCyan, "store"),
+ "URLs": e.urls,
+ }
+ tw.Write(m)
+ }
+
+ return tw.Flush(os.Stdout)
+ },
+}
+
+func maybeStr(c bool, col color.Attribute, s string) string {
+ if !c {
+ return ""
+ }
+
+ return color.New(col).Sprint(s)
+}
+
+var storageCleanupCmd = &cli.Command{
+ Name: "cleanup",
+ Usage: "trigger cleanup actions",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "removed",
+ Usage: "cleanup remaining files from removed sectors",
+ Value: true,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetStorageMinerAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ napi, closer2, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer2()
+
+ ctx := lcli.ReqContext(cctx)
+
+ if cctx.Bool("removed") {
+ if err := cleanupRemovedSectorData(ctx, api, napi); err != nil {
+ return err
+ }
+ }
+
+ // TODO: proving sectors in sealing storage
+
+ return nil
+ },
+}
+
+func cleanupRemovedSectorData(ctx context.Context, api api.StorageMiner, napi v0api.FullNode) error {
+ sectors, err := api.SectorsList(ctx)
+ if err != nil {
+ return err
+ }
+
+ maddr, err := api.ActorAddress(ctx)
+ if err != nil {
+ return err
+ }
+
+ aid, err := address.IDFromAddress(maddr)
+ if err != nil {
+ return err
+ }
+
+ sid := func(sn abi.SectorNumber) abi.SectorID {
+ return abi.SectorID{
+ Miner: abi.ActorID(aid),
+ Number: sn,
+ }
+ }
+
+ mi, err := napi.StateMinerInfo(ctx, maddr, types.EmptyTSK)
+ if err != nil {
+ return err
+ }
+
+ toRemove := map[abi.SectorNumber]struct{}{}
+
+ for _, sector := range sectors {
+ st, err := api.SectorsStatus(ctx, sector, false)
+ if err != nil {
+ return xerrors.Errorf("getting sector status for sector %d: %w", sector, err)
+ }
+
+ if sealing.SectorState(st.State) != sealing.Removed {
+ continue
+ }
+
+ for _, ft := range storiface.PathTypes {
+ si, err := api.StorageFindSector(ctx, sid(sector), ft, mi.SectorSize, false)
+ if err != nil {
+ return xerrors.Errorf("find sector %d: %w", sector, err)
+ }
+
+ if len(si) > 0 {
+ toRemove[sector] = struct{}{}
+ }
+ }
+ }
+
+ for sn := range toRemove {
+ fmt.Printf("cleaning up data for sector %d\n", sn)
+ err := api.SectorRemove(ctx, sn)
+ if err != nil {
+ log.Error(err)
+ }
+ }
+
+ return nil
+}
diff --git a/cmd/lotus-townhall/main.go b/cmd/lotus-townhall/main.go
index 7e8f6df7ff3..1e0460deee1 100644
--- a/cmd/lotus-townhall/main.go
+++ b/cmd/lotus-townhall/main.go
@@ -15,8 +15,8 @@ import (
"github.com/libp2p/go-libp2p-core/peer"
pubsub "github.com/libp2p/go-libp2p-pubsub"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/lib/blockstore"
)
var topic = "/fil/headnotifs/"
@@ -28,7 +28,7 @@ func init() {
return
}
- bs := blockstore.NewTemporary()
+ bs := blockstore.NewMemory()
c, err := car.LoadCar(bs, bytes.NewReader(genBytes))
if err != nil {
diff --git a/cmd/lotus-wallet/interactive.go b/cmd/lotus-wallet/interactive.go
new file mode 100644
index 00000000000..e1ad2cbb292
--- /dev/null
+++ b/cmd/lotus-wallet/interactive.go
@@ -0,0 +1,245 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ gobig "math/big"
+ "strings"
+ "sync"
+
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-jsonrpc"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v0api"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
+ "github.com/filecoin-project/lotus/chain/stmgr"
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+)
+
+type InteractiveWallet struct {
+ lk sync.Mutex
+
+ apiGetter func() (v0api.FullNode, jsonrpc.ClientCloser, error)
+ under v0api.Wallet
+}
+
+func (c *InteractiveWallet) WalletNew(ctx context.Context, typ types.KeyType) (address.Address, error) {
+ err := c.accept(func() error {
+ fmt.Println("-----")
+ fmt.Println("ACTION: WalletNew - Creating new wallet")
+ fmt.Printf("TYPE: %s\n", typ)
+ return nil
+ })
+ if err != nil {
+ return address.Address{}, err
+ }
+
+ return c.under.WalletNew(ctx, typ)
+}
+
+func (c *InteractiveWallet) WalletHas(ctx context.Context, addr address.Address) (bool, error) {
+ return c.under.WalletHas(ctx, addr)
+}
+
+func (c *InteractiveWallet) WalletList(ctx context.Context) ([]address.Address, error) {
+ return c.under.WalletList(ctx)
+}
+
+func (c *InteractiveWallet) WalletSign(ctx context.Context, k address.Address, msg []byte, meta api.MsgMeta) (*crypto.Signature, error) {
+ err := c.accept(func() error {
+ fmt.Println("-----")
+ fmt.Println("ACTION: WalletSign - Sign a message/deal")
+ fmt.Printf("ADDRESS: %s\n", k)
+ fmt.Printf("TYPE: %s\n", meta.Type)
+
+ switch meta.Type {
+ case api.MTChainMsg:
+ var cmsg types.Message
+ if err := cmsg.UnmarshalCBOR(bytes.NewReader(meta.Extra)); err != nil {
+ return xerrors.Errorf("unmarshalling message: %w", err)
+ }
+
+ _, bc, err := cid.CidFromBytes(msg)
+ if err != nil {
+ return xerrors.Errorf("getting cid from signing bytes: %w", err)
+ }
+
+ if !cmsg.Cid().Equals(bc) {
+ return xerrors.Errorf("cid(meta.Extra).bytes() != msg")
+ }
+
+ jb, err := json.MarshalIndent(&cmsg, "", " ")
+ if err != nil {
+ return xerrors.Errorf("json-marshaling the message: %w", err)
+ }
+
+ fmt.Println("Message JSON:", string(jb))
+
+ fmt.Println("Value:", types.FIL(cmsg.Value))
+ fmt.Println("Max Fees:", types.FIL(cmsg.RequiredFunds()))
+ fmt.Println("Max Total Cost:", types.FIL(big.Add(cmsg.RequiredFunds(), cmsg.Value)))
+
+ if c.apiGetter != nil {
+ napi, closer, err := c.apiGetter()
+ if err != nil {
+ return xerrors.Errorf("getting node api: %w", err)
+ }
+ defer closer()
+
+ toact, err := napi.StateGetActor(ctx, cmsg.To, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("looking up dest actor: %w", err)
+ }
+
+ fmt.Println("Method:", stmgr.MethodsMap[toact.Code][cmsg.Method].Name)
+ p, err := lcli.JsonParams(toact.Code, cmsg.Method, cmsg.Params)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("Params:", p)
+
+ if builtin.IsMultisigActor(toact.Code) && cmsg.Method == multisig.Methods.Propose {
+ var mp multisig.ProposeParams
+ if err := mp.UnmarshalCBOR(bytes.NewReader(cmsg.Params)); err != nil {
+ return xerrors.Errorf("unmarshalling multisig propose params: %w", err)
+ }
+
+ fmt.Println("\tMultiSig Proposal Value:", types.FIL(mp.Value))
+ fmt.Println("\tMultiSig Proposal Hex Params:", hex.EncodeToString(mp.Params))
+
+ toact, err := napi.StateGetActor(ctx, mp.To, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("looking up msig dest actor: %w", err)
+ }
+
+ fmt.Println("\tMultiSig Proposal Method:", stmgr.MethodsMap[toact.Code][mp.Method].Name)
+ p, err := lcli.JsonParams(toact.Code, mp.Method, mp.Params)
+ if err != nil {
+ return err
+ }
+
+ fmt.Println("\tMultiSig Proposal Params:", strings.ReplaceAll(p, "\n", "\n\t"))
+ }
+ } else {
+ fmt.Println("Params: No chain node connection, can't decode params")
+ }
+
+ case api.MTDealProposal:
+ return xerrors.Errorf("TODO") // TODO
+ default:
+ log.Infow("WalletSign", "address", k, "type", meta.Type)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return c.under.WalletSign(ctx, k, msg, meta)
+}
+
+func (c *InteractiveWallet) WalletExport(ctx context.Context, a address.Address) (*types.KeyInfo, error) {
+ err := c.accept(func() error {
+ fmt.Println("-----")
+ fmt.Println("ACTION: WalletExport - Export private key")
+ fmt.Printf("ADDRESS: %s\n", a)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return c.under.WalletExport(ctx, a)
+}
+
+func (c *InteractiveWallet) WalletImport(ctx context.Context, ki *types.KeyInfo) (address.Address, error) {
+ err := c.accept(func() error {
+ fmt.Println("-----")
+ fmt.Println("ACTION: WalletImport - Import private key")
+ fmt.Printf("TYPE: %s\n", ki.Type)
+ return nil
+ })
+ if err != nil {
+ return address.Undef, err
+ }
+
+ return c.under.WalletImport(ctx, ki)
+}
+
+func (c *InteractiveWallet) WalletDelete(ctx context.Context, addr address.Address) error {
+ err := c.accept(func() error {
+ fmt.Println("-----")
+ fmt.Println("ACTION: WalletDelete - Delete a private key")
+ fmt.Printf("ADDRESS: %s\n", addr)
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ return c.under.WalletDelete(ctx, addr)
+}
+
+func (c *InteractiveWallet) accept(prompt func() error) error {
+ c.lk.Lock()
+ defer c.lk.Unlock()
+
+ if err := prompt(); err != nil {
+ return err
+ }
+
+ yes := randomYes()
+ for {
+ fmt.Printf("\nAccept the above? (%s/No): ", yes)
+ var a string
+ if _, err := fmt.Scanln(&a); err != nil {
+ return err
+ }
+ switch a {
+ case yes:
+ fmt.Println("approved")
+ return nil
+ case "No":
+ return xerrors.Errorf("action rejected")
+ }
+
+ fmt.Printf("Type EXACTLY '%s' or 'No'\n", yes)
+ }
+}
+
+var yeses = []string{
+ "yes",
+ "Yes",
+ "YES",
+ "approve",
+ "Approve",
+ "accept",
+ "Accept",
+ "authorize",
+ "Authorize",
+ "confirm",
+ "Confirm",
+}
+
+func randomYes() string {
+ i, err := rand.Int(rand.Reader, gobig.NewInt(int64(len(yeses))))
+ if err != nil {
+ panic(err)
+ }
+
+ return yeses[i.Int64()]
+}
diff --git a/cmd/lotus-wallet/logged.go b/cmd/lotus-wallet/logged.go
index 272a8d10bcf..4f07d6ae46e 100644
--- a/cmd/lotus-wallet/logged.go
+++ b/cmd/lotus-wallet/logged.go
@@ -16,7 +16,7 @@ import (
)
type LoggedWallet struct {
- under api.WalletAPI
+ under api.Wallet
}
func (c *LoggedWallet) WalletNew(ctx context.Context, typ types.KeyType) (address.Address, error) {
diff --git a/cmd/lotus-wallet/main.go b/cmd/lotus-wallet/main.go
index 25b89eb9d91..3e3aa1a585b 100644
--- a/cmd/lotus-wallet/main.go
+++ b/cmd/lotus-wallet/main.go
@@ -2,25 +2,33 @@ package main
import (
"context"
+ "fmt"
"net"
"net/http"
"os"
+ "github.com/filecoin-project/lotus/api/v0api"
+
+ "github.com/gbrlsnchs/jwt/v3"
"github.com/gorilla/mux"
logging "github.com/ipfs/go-log/v2"
"github.com/urfave/cli/v2"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
+ "golang.org/x/xerrors"
"github.com/filecoin-project/go-jsonrpc"
+ "github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet"
ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/lib/lotuslog"
"github.com/filecoin-project/lotus/metrics"
+ "github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/repo"
)
@@ -28,23 +36,45 @@ var log = logging.Logger("main")
const FlagWalletRepo = "wallet-repo"
+type jwtPayload struct {
+ Allow []auth.Permission
+}
+
func main() {
lotuslog.SetupLogLevels()
local := []*cli.Command{
runCmd,
+ getApiKeyCmd,
}
app := &cli.App{
Name: "lotus-wallet",
Usage: "Basic external wallet",
Version: build.UserVersion(),
+ Description: `
+lotus-wallet provides a remote wallet service for lotus.
+
+To configure your lotus node to use a remote wallet:
+* Run 'lotus-wallet get-api-key' to generate API key
+* Start lotus-wallet using 'lotus-wallet run' (see --help for additional flags)
+* Edit lotus config (~/.lotus/config.toml)
+ * Find the '[Wallet]' section
+ * Set 'RemoteBackend' to '[api key]:http://[wallet ip]:[wallet port]'
+ (the default port is 1777)
+* Start (or restart) the lotus daemon`,
Flags: []cli.Flag{
&cli.StringFlag{
Name: FlagWalletRepo,
EnvVars: []string{"WALLET_PATH"},
Value: "~/.lotuswallet", // TODO: Consider XDG_DATA_HOME
},
+ &cli.StringFlag{
+ Name: "repo",
+ EnvVars: []string{"LOTUS_PATH"},
+ Hidden: true,
+ Value: "~/.lotus",
+ },
},
Commands: local,
@@ -57,6 +87,35 @@ func main() {
}
}
+var getApiKeyCmd = &cli.Command{
+ Name: "get-api-key",
+ Usage: "Generate API Key",
+ Action: func(cctx *cli.Context) error {
+ lr, ks, err := openRepo(cctx)
+ if err != nil {
+ return err
+ }
+ defer lr.Close() // nolint
+
+ p := jwtPayload{
+ Allow: []auth.Permission{api.PermAdmin},
+ }
+
+ authKey, err := modules.APISecret(ks, lr)
+ if err != nil {
+ return xerrors.Errorf("setting up api secret: %w", err)
+ }
+
+ k, err := jwt.Sign(&p, (*jwt.HMACSHA)(authKey))
+ if err != nil {
+ return xerrors.Errorf("jwt sign: %w", err)
+ }
+
+ fmt.Println(string(k))
+ return nil
+ },
+}
+
var runCmd = &cli.Command{
Name: "run",
Usage: "Start lotus wallet",
@@ -70,7 +129,21 @@ var runCmd = &cli.Command{
Name: "ledger",
Usage: "use a ledger device instead of an on-disk wallet",
},
+ &cli.BoolFlag{
+ Name: "interactive",
+ Usage: "prompt before performing actions (DO NOT USE FOR MINER WORKER ADDRESS)",
+ },
+ &cli.BoolFlag{
+ Name: "offline",
+ Usage: "don't query chain state in interactive mode",
+ },
+ &cli.BoolFlag{
+ Name: "disable-auth",
+ Usage: "(insecure) disable api auth",
+ Hidden: true,
+ },
},
+ Description: "For setup instructions see 'lotus-wallet --help'",
Action: func(cctx *cli.Context) error {
log.Info("Starting lotus wallet")
@@ -85,40 +158,20 @@ var runCmd = &cli.Command{
log.Fatalf("Cannot register the view: %v", err)
}
- repoPath := cctx.String(FlagWalletRepo)
- r, err := repo.NewFS(repoPath)
- if err != nil {
- return err
- }
-
- ok, err := r.Exists()
- if err != nil {
- return err
- }
- if !ok {
- if err := r.Init(repo.Worker); err != nil {
- return err
- }
- }
-
- lr, err := r.Lock(repo.Wallet)
- if err != nil {
- return err
- }
-
- ks, err := lr.KeyStore()
+ lr, ks, err := openRepo(cctx)
if err != nil {
return err
}
+ defer lr.Close() // nolint
lw, err := wallet.NewWallet(ks)
if err != nil {
return err
}
- var w api.WalletAPI = lw
+ var w api.Wallet = lw
if cctx.Bool("ledger") {
- ds, err := lr.Datastore("/metadata")
+ ds, err := lr.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
@@ -134,19 +187,60 @@ var runCmd = &cli.Command{
log.Info("Setting up API endpoint at " + address)
+ if cctx.Bool("interactive") {
+ var ag func() (v0api.FullNode, jsonrpc.ClientCloser, error)
+
+ if !cctx.Bool("offline") {
+ ag = func() (v0api.FullNode, jsonrpc.ClientCloser, error) {
+ return lcli.GetFullNodeAPI(cctx)
+ }
+ }
+
+ w = &InteractiveWallet{
+ under: w,
+ apiGetter: ag,
+ }
+ } else {
+ w = &LoggedWallet{under: w}
+ }
+
+ rpcApi := metrics.MetricedWalletAPI(w)
+ if !cctx.Bool("disable-auth") {
+ rpcApi = api.PermissionedWalletAPI(rpcApi)
+ }
+
rpcServer := jsonrpc.NewServer()
- rpcServer.Register("Filecoin", &LoggedWallet{under: metrics.MetricedWalletAPI(w)})
+ rpcServer.Register("Filecoin", rpcApi)
mux.Handle("/rpc/v0", rpcServer)
mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
- /*ah := &auth.Handler{
- Verify: nodeApi.AuthVerify,
- Next: mux.ServeHTTP,
- }*/
+ var handler http.Handler = mux
+
+ if !cctx.Bool("disable-auth") {
+ authKey, err := modules.APISecret(ks, lr)
+ if err != nil {
+ return xerrors.Errorf("setting up api secret: %w", err)
+ }
+
+ authVerify := func(ctx context.Context, token string) ([]auth.Permission, error) {
+ var payload jwtPayload
+ if _, err := jwt.Verify([]byte(token), (*jwt.HMACSHA)(authKey), &payload); err != nil {
+ return nil, xerrors.Errorf("JWT Verification failed: %w", err)
+ }
+
+ return payload.Allow, nil
+ }
+
+ log.Info("API auth enabled, use 'lotus-wallet get-api-key' to get API key")
+ handler = &auth.Handler{
+ Verify: authVerify,
+ Next: mux.ServeHTTP,
+ }
+ }
srv := &http.Server{
- Handler: mux,
+ Handler: handler,
BaseContext: func(listener net.Listener) context.Context {
ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-wallet"))
return ctx
@@ -170,3 +264,33 @@ var runCmd = &cli.Command{
return srv.Serve(nl)
},
}
+
+func openRepo(cctx *cli.Context) (repo.LockedRepo, types.KeyStore, error) {
+ repoPath := cctx.String(FlagWalletRepo)
+ r, err := repo.NewFS(repoPath)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ ok, err := r.Exists()
+ if err != nil {
+ return nil, nil, err
+ }
+ if !ok {
+ if err := r.Init(repo.Worker); err != nil {
+ return nil, nil, err
+ }
+ }
+
+ lr, err := r.Lock(repo.Wallet)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ ks, err := lr.KeyStore()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return lr, ks, nil
+}
diff --git a/cmd/lotus/backup.go b/cmd/lotus/backup.go
index aec0000c90d..d41e0c098bf 100644
--- a/cmd/lotus/backup.go
+++ b/cmd/lotus/backup.go
@@ -1,14 +1,122 @@
package main
import (
+ "context"
+ "os"
+
+ dstore "github.com/ipfs/go-datastore"
+ "github.com/mitchellh/go-homedir"
"github.com/urfave/cli/v2"
+ "golang.org/x/xerrors"
+ "gopkg.in/cheggaaa/pb.v1"
"github.com/filecoin-project/go-jsonrpc"
+ "github.com/filecoin-project/lotus/chain/store"
lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/lib/backupds"
+ "github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/repo"
)
var backupCmd = lcli.BackupCmd("repo", repo.FullNode, func(cctx *cli.Context) (lcli.BackupAPI, jsonrpc.ClientCloser, error) {
return lcli.GetFullNodeAPI(cctx)
})
+
+func restore(cctx *cli.Context, r repo.Repo) error {
+ bf, err := homedir.Expand(cctx.Path("restore"))
+ if err != nil {
+ return xerrors.Errorf("expand backup file path: %w", err)
+ }
+
+ st, err := os.Stat(bf)
+ if err != nil {
+ return xerrors.Errorf("stat backup file (%s): %w", bf, err)
+ }
+
+ f, err := os.Open(bf)
+ if err != nil {
+ return xerrors.Errorf("opening backup file: %w", err)
+ }
+ defer f.Close() // nolint:errcheck
+
+ lr, err := r.Lock(repo.FullNode)
+ if err != nil {
+ return err
+ }
+ defer lr.Close() // nolint:errcheck
+
+ if cctx.IsSet("restore-config") {
+ log.Info("Restoring config")
+
+ cf, err := homedir.Expand(cctx.String("restore-config"))
+ if err != nil {
+ return xerrors.Errorf("expanding config path: %w", err)
+ }
+
+ _, err = os.Stat(cf)
+ if err != nil {
+ return xerrors.Errorf("stat config file (%s): %w", cf, err)
+ }
+
+ var cerr error
+ err = lr.SetConfig(func(raw interface{}) {
+ rcfg, ok := raw.(*config.FullNode)
+ if !ok {
+ cerr = xerrors.New("expected miner config")
+ return
+ }
+
+ ff, err := config.FromFile(cf, rcfg)
+ if err != nil {
+ cerr = xerrors.Errorf("loading config: %w", err)
+ return
+ }
+
+ *rcfg = *ff.(*config.FullNode)
+ })
+ if cerr != nil {
+ return cerr
+ }
+ if err != nil {
+ return xerrors.Errorf("setting config: %w", err)
+ }
+
+ } else {
+ log.Warn("--restore-config NOT SET, WILL USE DEFAULT VALUES")
+ }
+
+ log.Info("Restoring metadata backup")
+
+ mds, err := lr.Datastore(context.TODO(), "/metadata")
+ if err != nil {
+ return err
+ }
+
+ bar := pb.New64(st.Size())
+ br := bar.NewProxyReader(f)
+ bar.ShowTimeLeft = true
+ bar.ShowPercent = true
+ bar.ShowSpeed = true
+ bar.Units = pb.U_BYTES
+
+ bar.Start()
+ err = backupds.RestoreInto(br, mds)
+ bar.Finish()
+
+ if err != nil {
+ return xerrors.Errorf("restoring metadata: %w", err)
+ }
+
+ log.Info("Resetting chainstore metadata")
+
+ chainHead := dstore.NewKey("head")
+ if err := mds.Delete(chainHead); err != nil {
+ return xerrors.Errorf("clearing chain head: %w", err)
+ }
+ if err := store.FlushValidationCache(mds); err != nil {
+ return xerrors.Errorf("clearing chain validation cache: %w", err)
+ }
+
+ return nil
+}
diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go
index 4ff63be1182..0d5961aaea3 100644
--- a/cmd/lotus/daemon.go
+++ b/cmd/lotus/daemon.go
@@ -15,7 +15,9 @@ import (
"runtime/pprof"
"strings"
+ "github.com/filecoin-project/go-jsonrpc"
paramfetch "github.com/filecoin-project/go-paramfetch"
+ metricsprom "github.com/ipfs/go-metrics-prometheus"
"github.com/mitchellh/go-homedir"
"github.com/multiformats/go-multiaddr"
"github.com/urfave/cli/v2"
@@ -35,7 +37,6 @@ import (
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/journal"
- "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/lib/peermgr"
"github.com/filecoin-project/lotus/lib/ulimit"
"github.com/filecoin-project/lotus/metrics"
@@ -136,6 +137,22 @@ var DaemonCmd = &cli.Command{
Name: "config",
Usage: "specify path of config file to use",
},
+ // FIXME: This is not the correct place to put this configuration
+ // option. Ideally it would be part of `config.toml` but at the
+ // moment that only applies to the node configuration and not outside
+ // components like the RPC server.
+ &cli.IntFlag{
+ Name: "api-max-req-size",
+ Usage: "maximum API request size accepted by the JSON RPC server",
+ },
+ &cli.PathFlag{
+ Name: "restore",
+ Usage: "restore from backup file",
+ },
+ &cli.PathFlag{
+ Name: "restore-config",
+ Usage: "config file to use when restoring from backup",
+ },
},
Action: func(cctx *cli.Context) error {
isLite := cctx.Bool("lite")
@@ -176,7 +193,20 @@ var DaemonCmd = &cli.Command{
return fmt.Errorf("unrecognized profile type: %q", profile)
}
- ctx, _ := tag.New(context.Background(), tag.Insert(metrics.Version, build.BuildVersion), tag.Insert(metrics.Commit, build.CurrentCommit))
+ ctx, _ := tag.New(context.Background(),
+ tag.Insert(metrics.Version, build.BuildVersion),
+ tag.Insert(metrics.Commit, build.CurrentCommit),
+ tag.Insert(metrics.NodeType, "chain"),
+ )
+ // Register all metric views
+ if err = view.Register(
+ metrics.ChainNodeViews...,
+ ); err != nil {
+ log.Fatalf("Cannot register the view: %v", err)
+ }
+ // Set the metric to one so it is published to the exporter
+ stats.Record(ctx, metrics.LotusInfo.M(1))
+
{
dir, err := homedir.Expand(cctx.String("repo"))
if err != nil {
@@ -195,12 +225,14 @@ var DaemonCmd = &cli.Command{
r.SetConfigPath(cctx.String("config"))
}
- if err := r.Init(repo.FullNode); err != nil && err != repo.ErrRepoExists {
+ err = r.Init(repo.FullNode)
+ if err != nil && err != repo.ErrRepoExists {
return xerrors.Errorf("repo init error: %w", err)
}
+ freshRepo := err != repo.ErrRepoExists
if !isLite {
- if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), 0); err != nil {
+ if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), 0); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
}
@@ -215,6 +247,15 @@ var DaemonCmd = &cli.Command{
genBytes = build.MaybeGenesis()
}
+ if cctx.IsSet("restore") {
+ if !freshRepo {
+ return xerrors.Errorf("restoring from backup is only possible with a fresh repo!")
+ }
+ if err := restore(cctx, r); err != nil {
+ return xerrors.Errorf("restoring from backup: %w", err)
+ }
+ }
+
chainfile := cctx.String("import-chain")
snapshot := cctx.String("import-snapshot")
if chainfile != "" || snapshot != "" {
@@ -227,7 +268,7 @@ var DaemonCmd = &cli.Command{
issnapshot = true
}
- if err := ImportChain(r, chainfile, issnapshot); err != nil {
+ if err := ImportChain(ctx, r, chainfile, issnapshot); err != nil {
return err
}
if cctx.Bool("halt-after-import") {
@@ -249,7 +290,7 @@ var DaemonCmd = &cli.Command{
shutdownChan := make(chan struct{})
- // If the daemon is started in "lite mode", provide a GatewayAPI
+ // If the daemon is started in "lite mode", provide a Gateway
// for RPC calls
liteModeDeps := node.Options()
if isLite {
@@ -259,18 +300,25 @@ var DaemonCmd = &cli.Command{
}
defer closer()
- liteModeDeps = node.Override(new(api.GatewayAPI), gapi)
+ liteModeDeps = node.Override(new(api.Gateway), gapi)
}
- var api api.FullNode
+ // some libraries like ipfs/go-ds-measure and ipfs/go-ipfs-blockstore
+ // use ipfs/go-metrics-interface. This injects a Prometheus exporter
+ // for those. Metrics are exported to the default registry.
+ if err := metricsprom.Inject(); err != nil {
+ log.Warnf("unable to inject prometheus ipfs/go-metrics exporter; some metrics will be unavailable; err: %s", err)
+ }
+ var api api.FullNode
stop, err := node.New(ctx,
node.FullAPI(&api, node.Lite(isLite)),
+ node.Base(),
+ node.Repo(r),
+
node.Override(new(dtypes.Bootstrapper), isBootstrapper),
node.Override(new(dtypes.ShutdownChan), shutdownChan),
- node.Online(),
- node.Repo(r),
genesis,
liteModeDeps,
@@ -299,23 +347,42 @@ var DaemonCmd = &cli.Command{
}
}
- // Register all metric views
- if err = view.Register(
- metrics.DefaultViews...,
- ); err != nil {
- log.Fatalf("Cannot register the view: %v", err)
+ endpoint, err := r.APIEndpoint()
+ if err != nil {
+ return xerrors.Errorf("getting api endpoint: %w", err)
}
- // Set the metric to one so it is published to the exporter
- stats.Record(ctx, metrics.LotusInfo.M(1))
+ //
+ // Instantiate JSON-RPC endpoint.
+ // ----
- endpoint, err := r.APIEndpoint()
+ // Populate JSON-RPC options.
+ serverOptions := make([]jsonrpc.ServerOption, 0)
+ if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
+ serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
+ }
+
+ // Instantiate the full node handler.
+ h, err := node.FullNodeHandler(api, true, serverOptions...)
if err != nil {
- return xerrors.Errorf("getting api endpoint: %w", err)
+ return fmt.Errorf("failed to instantiate rpc handler: %s", err)
+ }
+
+ // Serve the RPC.
+ rpcStopper, err := node.ServeRPC(h, "lotus-daemon", endpoint)
+ if err != nil {
+ return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
}
+ // Monitor for shutdown.
+ finishCh := node.MonitorShutdown(shutdownChan,
+ node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
+ node.ShutdownHandler{Component: "node", StopFunc: stop},
+ )
+ <-finishCh // fires when shutdown is complete.
+
// TODO: properly parse api endpoint (or make it a URL)
- return serveRPC(api, stop, endpoint, shutdownChan)
+ return nil
},
Subcommands: []*cli.Command{
daemonStopCmd,
@@ -352,11 +419,11 @@ func importKey(ctx context.Context, api api.FullNode, f string) error {
return err
}
- log.Info("successfully imported key for %s", addr)
+ log.Infof("successfully imported key for %s", addr)
return nil
}
-func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
+func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) (err error) {
var rd io.Reader
var l int64
if strings.HasPrefix(fname, "http://") || strings.HasPrefix(fname, "https://") {
@@ -367,7 +434,7 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
defer resp.Body.Close() //nolint:errcheck
if resp.StatusCode != http.StatusOK {
- return xerrors.Errorf("non-200 response: %d", resp.StatusCode)
+ return xerrors.Errorf("fetching chain CAR failed with non-200 response: %d", resp.StatusCode)
}
rd = resp.Body
@@ -399,23 +466,23 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
}
defer lr.Close() //nolint:errcheck
- ds, err := lr.Datastore("/chain")
+ bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
- return err
+ return xerrors.Errorf("failed to open blockstore: %w", err)
}
- mds, err := lr.Datastore("/metadata")
+ mds, err := lr.Datastore(context.TODO(), "/metadata")
if err != nil {
return err
}
- bs := blockstore.NewBlockstore(ds)
-
j, err := journal.OpenFSJournal(lr, journal.EnvDisabledEvents())
if err != nil {
return xerrors.Errorf("failed to open journal: %w", err)
}
- cst := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), j)
+
+ cst := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), j)
+ defer cst.Close() //nolint:errcheck
log.Infof("importing chain from %s...", fname)
@@ -440,7 +507,7 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
return xerrors.Errorf("flushing validation cache failed: %w", err)
}
- gb, err := cst.GetTipsetByHeight(context.TODO(), 0, ts, true)
+ gb, err := cst.GetTipsetByHeight(ctx, 0, ts, true)
if err != nil {
return err
}
@@ -454,13 +521,13 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
if !snapshot {
log.Infof("validating imported chain...")
- if err := stm.ValidateChain(context.TODO(), ts); err != nil {
+ if err := stm.ValidateChain(ctx, ts); err != nil {
return xerrors.Errorf("chain validation failed: %w", err)
}
}
log.Infof("accepting %s as new head", ts.Cids())
- if err := cst.SetHead(ts); err != nil {
+ if err := cst.ForceHeadSilent(ctx, ts); err != nil {
return err
}
diff --git a/cmd/lotus/main.go b/cmd/lotus/main.go
index eb97045eeb1..63d01f89162 100644
--- a/cmd/lotus/main.go
+++ b/cmd/lotus/main.go
@@ -2,10 +2,14 @@ package main
import (
"context"
+ "os"
+ logging "github.com/ipfs/go-log/v2"
+ "github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
"go.opencensus.io/trace"
+ "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/lib/lotuslog"
@@ -13,10 +17,12 @@ import (
"github.com/filecoin-project/lotus/node/repo"
)
+var log = logging.Logger("main")
+
var AdvanceBlockCmd *cli.Command
func main() {
- build.RunningNodeType = build.NodeFull
+ api.RunningNodeType = api.NodeFull
lotuslog.SetupLogLevels()
@@ -51,6 +57,8 @@ func main() {
ctx, span := trace.StartSpan(context.Background(), "/cli")
defer span.End()
+ interactiveDef := isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd())
+
app := &cli.App{
Name: "lotus",
Usage: "Filecoin decentralized storage network client",
@@ -63,10 +71,20 @@ func main() {
Hidden: true,
Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME
},
+ &cli.BoolFlag{
+ Name: "interactive",
+ Usage: "setting to false will disable interactive functionality of commands",
+ Value: interactiveDef,
+ },
+ &cli.BoolFlag{
+ Name: "force-send",
+ Usage: "if true, will ignore pre-send checks",
+ },
},
Commands: append(local, lcli.Commands...),
}
+
app.Setup()
app.Metadata["traceContext"] = ctx
app.Metadata["repoType"] = repo.FullNode
diff --git a/cmd/lotus/rpc.go b/cmd/lotus/rpc.go
deleted file mode 100644
index 4f68ac85a12..00000000000
--- a/cmd/lotus/rpc.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package main
-
-import (
- "context"
- "encoding/json"
- "net"
- "net/http"
- _ "net/http/pprof"
- "os"
- "os/signal"
- "syscall"
-
- "github.com/ipfs/go-cid"
- logging "github.com/ipfs/go-log/v2"
- "github.com/multiformats/go-multiaddr"
- manet "github.com/multiformats/go-multiaddr/net"
- "go.opencensus.io/tag"
- "golang.org/x/xerrors"
-
- "contrib.go.opencensus.io/exporter/prometheus"
-
- "github.com/filecoin-project/go-jsonrpc"
- "github.com/filecoin-project/go-jsonrpc/auth"
-
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/api/apistruct"
- "github.com/filecoin-project/lotus/metrics"
- "github.com/filecoin-project/lotus/node"
- "github.com/filecoin-project/lotus/node/impl"
-)
-
-var log = logging.Logger("main")
-
-func serveRPC(a api.FullNode, stop node.StopFunc, addr multiaddr.Multiaddr, shutdownCh <-chan struct{}) error {
- rpcServer := jsonrpc.NewServer()
- rpcServer.Register("Filecoin", apistruct.PermissionedFullAPI(metrics.MetricedFullAPI(a)))
-
- ah := &auth.Handler{
- Verify: a.AuthVerify,
- Next: rpcServer.ServeHTTP,
- }
-
- http.Handle("/rpc/v0", ah)
-
- importAH := &auth.Handler{
- Verify: a.AuthVerify,
- Next: handleImport(a.(*impl.FullNodeAPI)),
- }
-
- http.Handle("/rest/v0/import", importAH)
-
- exporter, err := prometheus.NewExporter(prometheus.Options{
- Namespace: "lotus",
- })
- if err != nil {
- log.Fatalf("could not create the prometheus stats exporter: %v", err)
- }
-
- http.Handle("/debug/metrics", exporter)
-
- lst, err := manet.Listen(addr)
- if err != nil {
- return xerrors.Errorf("could not listen: %w", err)
- }
-
- srv := &http.Server{
- Handler: http.DefaultServeMux,
- BaseContext: func(listener net.Listener) context.Context {
- ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-daemon"))
- return ctx
- },
- }
-
- sigCh := make(chan os.Signal, 2)
- shutdownDone := make(chan struct{})
- go func() {
- select {
- case sig := <-sigCh:
- log.Warnw("received shutdown", "signal", sig)
- case <-shutdownCh:
- log.Warn("received shutdown")
- }
-
- log.Warn("Shutting down...")
- if err := srv.Shutdown(context.TODO()); err != nil {
- log.Errorf("shutting down RPC server failed: %s", err)
- }
- if err := stop(context.TODO()); err != nil {
- log.Errorf("graceful shutting down failed: %s", err)
- }
- log.Warn("Graceful shutdown successful")
- _ = log.Sync() //nolint:errcheck
- close(shutdownDone)
- }()
- signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT)
-
- err = srv.Serve(manet.NetListener(lst))
- if err == http.ErrServerClosed {
- <-shutdownDone
- return nil
- }
- return err
-}
-
-func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) {
- return func(w http.ResponseWriter, r *http.Request) {
- if r.Method != "PUT" {
- w.WriteHeader(404)
- return
- }
- if !auth.HasPerm(r.Context(), nil, apistruct.PermWrite) {
- w.WriteHeader(401)
- _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"})
- return
- }
-
- c, err := a.ClientImportLocal(r.Context(), r.Body)
- if err != nil {
- w.WriteHeader(500)
- _ = json.NewEncoder(w).Encode(struct{ Error string }{err.Error()})
- return
- }
- w.WriteHeader(200)
- err = json.NewEncoder(w).Encode(struct{ Cid cid.Cid }{c})
- if err != nil {
- log.Errorf("/rest/v0/import: Writing response failed: %+v", err)
- return
- }
- }
-}
diff --git a/cmd/tvx/codenames.go b/cmd/tvx/codenames.go
index b9f590914f1..f8da07e8d88 100644
--- a/cmd/tvx/codenames.go
+++ b/cmd/tvx/codenames.go
@@ -20,7 +20,7 @@ var ProtocolCodenames = []struct {
{build.UpgradeSmokeHeight + 1, "smoke"},
{build.UpgradeIgnitionHeight + 1, "ignition"},
{build.UpgradeRefuelHeight + 1, "refuel"},
- {build.UpgradeActorsV2Height + 1, "actorsv2"},
+ {build.UpgradeAssemblyHeight + 1, "actorsv2"},
{build.UpgradeTapeHeight + 1, "tape"},
{build.UpgradeLiftoffHeight + 1, "liftoff"},
{build.UpgradeKumquatHeight + 1, "postliftoff"},
diff --git a/cmd/tvx/codenames_test.go b/cmd/tvx/codenames_test.go
index 00d1077072d..e7136d6ccc8 100644
--- a/cmd/tvx/codenames_test.go
+++ b/cmd/tvx/codenames_test.go
@@ -18,7 +18,7 @@ func TestProtocolCodenames(t *testing.T) {
t.Fatal("expected breeze codename")
}
- if height := build.UpgradeActorsV2Height + 1; GetProtocolCodename(height) != "actorsv2" {
+ if height := build.UpgradeAssemblyHeight + 1; GetProtocolCodename(abi.ChainEpoch(height)) != "actorsv2" {
t.Fatal("expected actorsv2 codename")
}
diff --git a/cmd/tvx/exec.go b/cmd/tvx/exec.go
index 89ad2391351..15bb543a50e 100644
--- a/cmd/tvx/exec.go
+++ b/cmd/tvx/exec.go
@@ -1,63 +1,169 @@
package main
import (
+ "bufio"
"encoding/json"
"fmt"
"io"
"log"
"os"
+ "path/filepath"
+ "strings"
"github.com/fatih/color"
+ "github.com/filecoin-project/go-address"
+ cbornode "github.com/ipfs/go-ipld-cbor"
"github.com/urfave/cli/v2"
- "github.com/filecoin-project/lotus/conformance"
-
"github.com/filecoin-project/test-vectors/schema"
+
+ "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/chain/state"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/conformance"
)
var execFlags struct {
- file string
+ file string
+ out string
+ driverOpts cli.StringSlice
+ fallbackBlockstore bool
}
+const (
+ optSaveBalances = "save-balances"
+)
+
var execCmd = &cli.Command{
Name: "exec",
- Description: "execute one or many test vectors against Lotus; supplied as a single JSON file, or a ndjson stdin stream",
- Action: runExecLotus,
+ Description: "execute one or many test vectors against Lotus; supplied as a single JSON file, a directory, or a ndjson stdin stream",
+ Action: runExec,
Flags: []cli.Flag{
+ &repoFlag,
&cli.StringFlag{
Name: "file",
- Usage: "input file; if not supplied, the vector will be read from stdin",
+ Usage: "input file or directory; if not supplied, the vector will be read from stdin",
TakesFile: true,
Destination: &execFlags.file,
},
+ &cli.BoolFlag{
+ Name: "fallback-blockstore",
+ Usage: "sets the full node API as a fallback blockstore; use this if you're transplanting vectors and get block not found errors",
+ Destination: &execFlags.fallbackBlockstore,
+ },
+ &cli.StringFlag{
+ Name: "out",
+ Usage: "output directory where to save the results, only used when the input is a directory",
+ Destination: &execFlags.out,
+ },
+ &cli.StringSliceFlag{
+ Name: "driver-opt",
+ Usage: "comma-separated list of driver options (EXPERIMENTAL; will change), supported: 'save-balances=', 'pipeline-basefee' (unimplemented); only available in single-file mode",
+ Destination: &execFlags.driverOpts,
+ },
},
}
-func runExecLotus(_ *cli.Context) error {
- if file := execFlags.file; file != "" {
- // we have a single test vector supplied as a file.
- file, err := os.Open(file)
- if err != nil {
- return fmt.Errorf("failed to open test vector: %w", err)
+func runExec(c *cli.Context) error {
+ if execFlags.fallbackBlockstore {
+ if err := initialize(c); err != nil {
+ return fmt.Errorf("fallback blockstore was enabled, but could not resolve lotus API endpoint: %w", err)
}
+ defer destroy(c) //nolint:errcheck
+ conformance.FallbackBlockstoreGetter = FullAPI
+ }
- var (
- dec = json.NewDecoder(file)
- tv schema.TestVector
- )
+ path := execFlags.file
+ if path == "" {
+ return execVectorsStdin()
+ }
+
+ fi, err := os.Stat(path)
+ if err != nil {
+ return err
+ }
- if err = dec.Decode(&tv); err != nil {
- return fmt.Errorf("failed to decode test vector: %w", err)
+ if fi.IsDir() {
+ // we're in directory mode; ensure the out directory exists.
+ outdir := execFlags.out
+ if outdir == "" {
+ return fmt.Errorf("no output directory provided")
}
+ if err := ensureDir(outdir); err != nil {
+ return err
+ }
+ return execVectorDir(path, outdir)
+ }
- return executeTestVector(tv)
+ // process tipset vector options.
+ if err := processTipsetOpts(); err != nil {
+ return err
}
+ _, err = execVectorFile(new(conformance.LogReporter), path)
+ return err
+}
+
+func processTipsetOpts() error {
+ for _, opt := range execFlags.driverOpts.Value() {
+ switch ss := strings.Split(opt, "="); {
+ case ss[0] == optSaveBalances:
+ filename := ss[1]
+ log.Printf("saving balances after each tipset in: %s", filename)
+ balancesFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ w := bufio.NewWriter(balancesFile)
+ cb := func(bs blockstore.Blockstore, params *conformance.ExecuteTipsetParams, res *conformance.ExecuteTipsetResult) {
+ cst := cbornode.NewCborStore(bs)
+ st, err := state.LoadStateTree(cst, res.PostStateRoot)
+ if err != nil {
+ return
+ }
+ _ = st.ForEach(func(addr address.Address, actor *types.Actor) error {
+ _, err := fmt.Fprintln(w, params.ExecEpoch, addr, actor.Balance)
+ return err
+ })
+ _ = w.Flush()
+ }
+ conformance.TipsetVectorOpts.OnTipsetApplied = append(conformance.TipsetVectorOpts.OnTipsetApplied, cb)
+
+ }
+
+ }
+ return nil
+}
+
+func execVectorDir(path string, outdir string) error {
+ files, err := filepath.Glob(filepath.Join(path, "*"))
+ if err != nil {
+ return fmt.Errorf("failed to glob input directory %s: %w", path, err)
+ }
+ for _, f := range files {
+ outfile := strings.TrimSuffix(filepath.Base(f), filepath.Ext(f)) + ".out"
+ outpath := filepath.Join(outdir, outfile)
+ outw, err := os.Create(outpath)
+ if err != nil {
+ return fmt.Errorf("failed to create file %s: %w", outpath, err)
+ }
+
+ log.Printf("processing vector %s; sending output to %s", f, outpath)
+ log.SetOutput(io.MultiWriter(os.Stderr, outw)) // tee the output.
+ _, _ = execVectorFile(new(conformance.LogReporter), f)
+ log.SetOutput(os.Stderr)
+ _ = outw.Close()
+ }
+ return nil
+}
+
+func execVectorsStdin() error {
+ r := new(conformance.LogReporter)
for dec := json.NewDecoder(os.Stdin); ; {
var tv schema.TestVector
switch err := dec.Decode(&tv); err {
case nil:
- if err = executeTestVector(tv); err != nil {
+ if _, err = executeTestVector(r, tv); err != nil {
return err
}
case io.EOF:
@@ -70,19 +176,30 @@ func runExecLotus(_ *cli.Context) error {
}
}
-func executeTestVector(tv schema.TestVector) error {
+func execVectorFile(r conformance.Reporter, path string) (diffs []string, error error) {
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open test vector: %w", err)
+ }
+
+ var tv schema.TestVector
+ if err = json.NewDecoder(file).Decode(&tv); err != nil {
+ return nil, fmt.Errorf("failed to decode test vector: %w", err)
+ }
+ return executeTestVector(r, tv)
+}
+
+func executeTestVector(r conformance.Reporter, tv schema.TestVector) (diffs []string, err error) {
log.Println("executing test vector:", tv.Meta.ID)
for _, v := range tv.Pre.Variants {
- r := new(conformance.LogReporter)
-
switch class, v := tv.Class, v; class {
case "message":
- conformance.ExecuteMessageVector(r, &tv, &v)
+ diffs, err = conformance.ExecuteMessageVector(r, &tv, &v)
case "tipset":
- conformance.ExecuteTipsetVector(r, &tv, &v)
+ diffs, err = conformance.ExecuteTipsetVector(r, &tv, &v)
default:
- return fmt.Errorf("test vector class %s not supported", class)
+ return nil, fmt.Errorf("test vector class %s not supported", class)
}
if r.Failed() {
@@ -92,5 +209,5 @@ func executeTestVector(tv schema.TestVector) error {
}
}
- return nil
+ return diffs, err
}
diff --git a/cmd/tvx/extract.go b/cmd/tvx/extract.go
index 3dfec37d883..a3d538abd02 100644
--- a/cmd/tvx/extract.go
+++ b/cmd/tvx/extract.go
@@ -1,9 +1,6 @@
package main
import (
- "bytes"
- "compress/gzip"
- "context"
"encoding/json"
"fmt"
"io"
@@ -11,19 +8,7 @@ import (
"os"
"path/filepath"
- "github.com/fatih/color"
-
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/chain/actors/builtin"
- init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
- "github.com/filecoin-project/lotus/chain/actors/builtin/reward"
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/chain/vm"
- "github.com/filecoin-project/lotus/conformance"
-
"github.com/filecoin-project/test-vectors/schema"
-
- "github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
)
@@ -37,10 +22,12 @@ type extractOpts struct {
block string
class string
cid string
+ tsk string
file string
retain string
precursor string
ignoreSanityChecks bool
+ squash bool
}
var extractFlags extractOpts
@@ -55,7 +42,7 @@ var extractCmd = &cli.Command{
&repoFlag,
&cli.StringFlag{
Name: "class",
- Usage: "class of vector to extract; other required flags depend on the; values: 'message'",
+ Usage: "class of vector to extract; values: 'message', 'tipset'",
Value: "message",
Destination: &extractFlags.class,
},
@@ -70,16 +57,25 @@ var extractCmd = &cli.Command{
Usage: "optionally, the block CID the message was included in, to avoid expensive chain scanning",
Destination: &extractFlags.block,
},
+ &cli.StringFlag{
+ Name: "exec-block",
+ Usage: "optionally, the block CID of a block where this message was executed, to avoid expensive chain scanning",
+ Destination: &extractFlags.block,
+ },
&cli.StringFlag{
Name: "cid",
Usage: "message CID to generate test vector from",
- Required: true,
Destination: &extractFlags.cid,
},
+ &cli.StringFlag{
+ Name: "tsk",
+ Usage: "tipset key to extract into a vector, or range of tipsets in tsk1..tsk2 form",
+ Destination: &extractFlags.tsk,
+ },
&cli.StringFlag{
Name: "out",
Aliases: []string{"o"},
- Usage: "file to write test vector to",
+ Usage: "file to write test vector to, or directory to write the batch to",
Destination: &extractFlags.file,
},
&cli.StringFlag{
@@ -104,303 +100,29 @@ var extractCmd = &cli.Command{
Value: false,
Destination: &extractFlags.ignoreSanityChecks,
},
+ &cli.BoolFlag{
+ Name: "squash",
+ Usage: "when extracting a tipset range, squash all tipsets into a single vector",
+ Value: false,
+ Destination: &extractFlags.squash,
+ },
},
}
func runExtract(_ *cli.Context) error {
- return doExtract(extractFlags)
-}
-
-func doExtract(opts extractOpts) error {
- ctx := context.Background()
-
- mcid, err := cid.Decode(opts.cid)
- if err != nil {
- return err
- }
-
- msg, execTs, incTs, err := resolveFromChain(ctx, FullAPI, mcid, opts.block)
- if err != nil {
- return fmt.Errorf("failed to resolve message and tipsets from chain: %w", err)
- }
-
- // get the circulating supply before the message was executed.
- circSupplyDetail, err := FullAPI.StateVMCirculatingSupplyInternal(ctx, incTs.Key())
- if err != nil {
- return fmt.Errorf("failed while fetching circulating supply: %w", err)
- }
-
- circSupply := circSupplyDetail.FilCirculating
-
- log.Printf("message was executed in tipset: %s", execTs.Key())
- log.Printf("message was included in tipset: %s", incTs.Key())
- log.Printf("circulating supply at inclusion tipset: %d", circSupply)
- log.Printf("finding precursor messages using mode: %s", opts.precursor)
-
- // Fetch messages in canonical order from inclusion tipset.
- msgs, err := FullAPI.ChainGetParentMessages(ctx, execTs.Blocks()[0].Cid())
- if err != nil {
- return fmt.Errorf("failed to fetch messages in canonical order from inclusion tipset: %w", err)
- }
-
- related, found, err := findMsgAndPrecursors(opts.precursor, msg, msgs)
- if err != nil {
- return fmt.Errorf("failed while finding message and precursors: %w", err)
- }
-
- if !found {
- return fmt.Errorf("message not found; precursors found: %d", len(related))
- }
-
- var (
- precursors = related[:len(related)-1]
- precursorsCids []cid.Cid
- )
-
- for _, p := range precursors {
- precursorsCids = append(precursorsCids, p.Cid())
- }
-
- log.Println(color.GreenString("found message; precursors (count: %d): %v", len(precursors), precursorsCids))
-
- var (
- // create a read-through store that uses ChainGetObject to fetch unknown CIDs.
- pst = NewProxyingStores(ctx, FullAPI)
- g = NewSurgeon(ctx, FullAPI, pst)
- )
-
- driver := conformance.NewDriver(ctx, schema.Selector{}, conformance.DriverOpts{
- DisableVMFlush: true,
- })
-
- // this is the root of the state tree we start with.
- root := incTs.ParentState()
- log.Printf("base state tree root CID: %s", root)
-
- basefee := incTs.Blocks()[0].ParentBaseFee
- log.Printf("basefee: %s", basefee)
-
- // on top of that state tree, we apply all precursors.
- log.Printf("number of precursors to apply: %d", len(precursors))
- for i, m := range precursors {
- log.Printf("applying precursor %d, cid: %s", i, m.Cid())
- _, root, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{
- Preroot: root,
- Epoch: execTs.Height(),
- Message: m,
- CircSupply: circSupplyDetail.FilCirculating,
- BaseFee: basefee,
- // recorded randomness will be discarded.
- Rand: conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI),
- })
- if err != nil {
- return fmt.Errorf("failed to execute precursor message: %w", err)
- }
- }
-
- var (
- preroot cid.Cid
- postroot cid.Cid
- applyret *vm.ApplyRet
- carWriter func(w io.Writer) error
- retention = opts.retain
-
- // recordingRand will record randomness so we can embed it in the test vector.
- recordingRand = conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI)
- )
-
- log.Printf("using state retention strategy: %s", retention)
- switch retention {
- case "accessed-cids":
- tbs, ok := pst.Blockstore.(TracingBlockstore)
- if !ok {
- return fmt.Errorf("requested 'accessed-cids' state retention, but no tracing blockstore was present")
- }
-
- tbs.StartTracing()
-
- preroot = root
- applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{
- Preroot: preroot,
- Epoch: execTs.Height(),
- Message: msg,
- CircSupply: circSupplyDetail.FilCirculating,
- BaseFee: basefee,
- Rand: recordingRand,
- })
- if err != nil {
- return fmt.Errorf("failed to execute message: %w", err)
- }
- accessed := tbs.FinishTracing()
- carWriter = func(w io.Writer) error {
- return g.WriteCARIncluding(w, accessed, preroot, postroot)
- }
-
- case "accessed-actors":
- log.Printf("calculating accessed actors")
- // get actors accessed by message.
- retain, err := g.GetAccessedActors(ctx, FullAPI, mcid)
- if err != nil {
- return fmt.Errorf("failed to calculate accessed actors: %w", err)
- }
- // also append the reward actor and the burnt funds actor.
- retain = append(retain, reward.Address, builtin.BurntFundsActorAddr, init_.Address)
- log.Printf("calculated accessed actors: %v", retain)
-
- // get the masked state tree from the root,
- preroot, err = g.GetMaskedStateTree(root, retain)
- if err != nil {
- return err
- }
- applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{
- Preroot: preroot,
- Epoch: execTs.Height(),
- Message: msg,
- CircSupply: circSupplyDetail.FilCirculating,
- BaseFee: basefee,
- Rand: recordingRand,
- })
- if err != nil {
- return fmt.Errorf("failed to execute message: %w", err)
- }
- carWriter = func(w io.Writer) error {
- return g.WriteCAR(w, preroot, postroot)
- }
-
+ switch extractFlags.class {
+ case "message":
+ return doExtractMessage(extractFlags)
+ case "tipset":
+ return doExtractTipset(extractFlags)
default:
- return fmt.Errorf("unknown state retention option: %s", retention)
- }
-
- log.Printf("message applied; preroot: %s, postroot: %s", preroot, postroot)
- log.Println("performing sanity check on receipt")
-
- // TODO sometimes this returns a nil receipt and no error ¯\_(ツ)_/¯
- // ex: https://filfox.info/en/message/bafy2bzacebpxw3yiaxzy2bako62akig46x3imji7fewszen6fryiz6nymu2b2
- // This code is lenient and skips receipt comparison in case of a nil receipt.
- rec, err := FullAPI.StateGetReceipt(ctx, mcid, execTs.Key())
- if err != nil {
- return fmt.Errorf("failed to find receipt on chain: %w", err)
+ return fmt.Errorf("unsupported vector class")
}
- log.Printf("found receipt: %+v", rec)
-
- // generate the schema receipt; if we got
- var receipt *schema.Receipt
- if rec != nil {
- receipt = &schema.Receipt{
- ExitCode: int64(rec.ExitCode),
- ReturnValue: rec.Return,
- GasUsed: rec.GasUsed,
- }
-
- reporter := new(conformance.LogReporter)
- conformance.AssertMsgResult(reporter, receipt, applyret, "as locally executed")
- if reporter.Failed() {
- if opts.ignoreSanityChecks {
- log.Println(color.YellowString("receipt sanity check failed; proceeding anyway"))
- } else {
- log.Println(color.RedString("receipt sanity check failed; aborting"))
- return fmt.Errorf("vector generation aborted")
- }
- } else {
- log.Println(color.GreenString("receipt sanity check succeeded"))
- }
-
- } else {
- receipt = &schema.Receipt{
- ExitCode: int64(applyret.ExitCode),
- ReturnValue: applyret.Return,
- GasUsed: applyret.GasUsed,
- }
- log.Println(color.YellowString("skipping receipts comparison; we got back a nil receipt from lotus"))
- }
-
- log.Println("generating vector")
- msgBytes, err := msg.Serialize()
- if err != nil {
- return err
- }
-
- var (
- out = new(bytes.Buffer)
- gw = gzip.NewWriter(out)
- )
- if err := carWriter(gw); err != nil {
- return err
- }
- if err = gw.Flush(); err != nil {
- return err
- }
- if err = gw.Close(); err != nil {
- return err
- }
-
- version, err := FullAPI.Version(ctx)
- if err != nil {
- return err
- }
-
- ntwkName, err := FullAPI.StateNetworkName(ctx)
- if err != nil {
- return err
- }
-
- nv, err := FullAPI.StateNetworkVersion(ctx, execTs.Key())
- if err != nil {
- return err
- }
-
- codename := GetProtocolCodename(execTs.Height())
-
- // Write out the test vector.
- vector := schema.TestVector{
- Class: schema.ClassMessage,
- Meta: &schema.Metadata{
- ID: opts.id,
- // TODO need to replace schema.GenerationData with a more flexible
- // data structure that makes no assumption about the traceability
- // data that's being recorded; a flexible map[string]string
- // would do.
- Gen: []schema.GenerationData{
- {Source: fmt.Sprintf("network:%s", ntwkName)},
- {Source: fmt.Sprintf("message:%s", msg.Cid().String())},
- {Source: fmt.Sprintf("inclusion_tipset:%s", incTs.Key().String())},
- {Source: fmt.Sprintf("execution_tipset:%s", execTs.Key().String())},
- {Source: "github.com/filecoin-project/lotus", Version: version.String()}},
- },
- Selector: schema.Selector{
- schema.SelectorMinProtocolVersion: codename,
- },
- Randomness: recordingRand.Recorded(),
- CAR: out.Bytes(),
- Pre: &schema.Preconditions{
- Variants: []schema.Variant{
- {ID: codename, Epoch: int64(execTs.Height()), NetworkVersion: uint(nv)},
- },
- CircSupply: circSupply.Int,
- BaseFee: basefee.Int,
- StateTree: &schema.StateTree{
- RootCID: preroot,
- },
- },
- ApplyMessages: []schema.Message{{Bytes: msgBytes}},
- Post: &schema.Postconditions{
- StateTree: &schema.StateTree{
- RootCID: postroot,
- },
- Receipts: []*schema.Receipt{
- {
- ExitCode: int64(applyret.ExitCode),
- ReturnValue: applyret.Return,
- GasUsed: applyret.GasUsed,
- },
- },
- },
- }
-
- return writeVector(vector, opts.file)
}
-func writeVector(vector schema.TestVector, file string) (err error) {
+// writeVector writes the vector into the specified file, or to stdout if
+// file is empty.
+func writeVector(vector *schema.TestVector, file string) (err error) {
output := io.WriteCloser(os.Stdout)
if file := file; file != "" {
dir := filepath.Dir(file)
@@ -420,101 +142,20 @@ func writeVector(vector schema.TestVector, file string) (err error) {
return enc.Encode(&vector)
}
-// resolveFromChain queries the chain for the provided message, using the block CID to
-// speed up the query, if provided
-func resolveFromChain(ctx context.Context, api api.FullNode, mcid cid.Cid, block string) (msg *types.Message, execTs *types.TipSet, incTs *types.TipSet, err error) {
- // Extract the full message.
- msg, err = api.ChainGetMessage(ctx, mcid)
- if err != nil {
- return nil, nil, nil, err
- }
-
- log.Printf("found message with CID %s: %+v", mcid, msg)
-
- if block == "" {
- log.Printf("locating message in blockchain")
-
- // Locate the message.
- msgInfo, err := api.StateSearchMsg(ctx, mcid)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("failed to locate message: %w", err)
- }
-
- log.Printf("located message at tipset %s (height: %d) with exit code: %s", msgInfo.TipSet, msgInfo.Height, msgInfo.Receipt.ExitCode)
-
- execTs, incTs, err = fetchThisAndPrevTipset(ctx, api, msgInfo.TipSet)
- return msg, execTs, incTs, err
- }
-
- bcid, err := cid.Decode(block)
- if err != nil {
- return nil, nil, nil, err
- }
-
- log.Printf("message inclusion block CID was provided; scanning around it: %s", bcid)
-
- blk, err := api.ChainGetBlock(ctx, bcid)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("failed to get block: %w", err)
- }
-
- // types.EmptyTSK hints to use the HEAD.
- execTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height+1, types.EmptyTSK)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("failed to get message execution tipset: %w", err)
- }
-
- // walk back from the execTs instead of HEAD, to save time.
- incTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height, execTs.Key())
- if err != nil {
- return nil, nil, nil, fmt.Errorf("failed to get message inclusion tipset: %w", err)
- }
-
- return msg, execTs, incTs, nil
-}
-
-// fetchThisAndPrevTipset returns the full tipset identified by the key, as well
-// as the previous tipset. In the context of vector generation, the target
-// tipset is the one where a message was executed, and the previous tipset is
-// the one where the message was included.
-func fetchThisAndPrevTipset(ctx context.Context, api api.FullNode, target types.TipSetKey) (targetTs *types.TipSet, prevTs *types.TipSet, err error) {
- // get the tipset on which this message was "executed" on.
- // https://github.com/filecoin-project/lotus/issues/2847
- targetTs, err = api.ChainGetTipSet(ctx, target)
- if err != nil {
- return nil, nil, err
- }
- // get the previous tipset, on which this message was mined,
- // i.e. included on-chain.
- prevTs, err = api.ChainGetTipSet(ctx, targetTs.Parents())
- if err != nil {
- return nil, nil, err
+// writeVectors writes each vector to a different file under the specified
+// directory.
+func writeVectors(dir string, vectors ...*schema.TestVector) error {
+ // verify the output directory exists.
+ if err := ensureDir(dir); err != nil {
+ return err
}
- return targetTs, prevTs, nil
-}
-
-// findMsgAndPrecursors ranges through the canonical messages slice, locating
-// the target message and returning precursors in accordance to the supplied
-// mode.
-func findMsgAndPrecursors(mode string, target *types.Message, msgs []api.Message) (related []*types.Message, found bool, err error) {
- // Range through canonicalised messages, selecting only the precursors based
- // on selection mode.
- for _, other := range msgs {
- switch {
- case mode == PrecursorSelectAll:
- fallthrough
- case mode == PrecursorSelectSender && other.Message.From == target.From:
- related = append(related, other.Message)
- }
-
- // this message is the target; we're done.
- if other.Cid == target.Cid() {
- return related, true, nil
+ // write each vector to its file.
+ for _, v := range vectors {
+ id := v.Meta.ID
+ path := filepath.Join(dir, fmt.Sprintf("%s.json", id))
+ if err := writeVector(v, path); err != nil {
+ return err
}
}
-
- // this could happen because a block contained related messages, but not
- // the target (that is, messages with a lower nonce, but ultimately not the
- // target).
- return related, false, nil
+ return nil
}
diff --git a/cmd/tvx/extract_many.go b/cmd/tvx/extract_many.go
index 048271456e3..081678a1726 100644
--- a/cmd/tvx/extract_many.go
+++ b/cmd/tvx/extract_many.go
@@ -189,7 +189,7 @@ func runExtractMany(c *cli.Context) error {
precursor: PrecursorSelectSender,
}
- if err := doExtract(opts); err != nil {
+ if err := doExtractMessage(opts); err != nil {
log.Println(color.RedString("failed to extract vector for message %s: %s; queuing for 'all' precursor selection", mcid, err))
retry = append(retry, opts)
continue
@@ -206,7 +206,7 @@ func runExtractMany(c *cli.Context) error {
log.Printf("retrying %s: %s", r.cid, r.id)
r.precursor = PrecursorSelectAll
- if err := doExtract(r); err != nil {
+ if err := doExtractMessage(r); err != nil {
merr = multierror.Append(merr, fmt.Errorf("failed to extract vector for message %s: %w", r.cid, err))
continue
}
diff --git a/cmd/tvx/extract_message.go b/cmd/tvx/extract_message.go
new file mode 100644
index 00000000000..71035867f29
--- /dev/null
+++ b/cmd/tvx/extract_message.go
@@ -0,0 +1,421 @@
+package main
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "fmt"
+ "io"
+ "log"
+
+ "github.com/filecoin-project/lotus/api/v0api"
+
+ "github.com/fatih/color"
+ "github.com/filecoin-project/go-address"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/reward"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
+ "github.com/filecoin-project/lotus/conformance"
+
+ "github.com/filecoin-project/test-vectors/schema"
+
+ "github.com/ipfs/go-cid"
+)
+
+func doExtractMessage(opts extractOpts) error {
+ ctx := context.Background()
+
+ if opts.cid == "" {
+ return fmt.Errorf("missing message CID")
+ }
+
+ mcid, err := cid.Decode(opts.cid)
+ if err != nil {
+ return err
+ }
+
+ msg, execTs, incTs, err := resolveFromChain(ctx, FullAPI, mcid, opts.block)
+ if err != nil {
+ return fmt.Errorf("failed to resolve message and tipsets from chain: %w", err)
+ }
+
+ // get the circulating supply before the message was executed.
+ circSupplyDetail, err := FullAPI.StateVMCirculatingSupplyInternal(ctx, incTs.Key())
+ if err != nil {
+ return fmt.Errorf("failed while fetching circulating supply: %w", err)
+ }
+
+ circSupply := circSupplyDetail.FilCirculating
+
+ log.Printf("message was executed in tipset: %s", execTs.Key())
+ log.Printf("message was included in tipset: %s", incTs.Key())
+ log.Printf("circulating supply at inclusion tipset: %d", circSupply)
+ log.Printf("finding precursor messages using mode: %s", opts.precursor)
+
+ // Fetch messages in canonical order from inclusion tipset.
+ msgs, err := FullAPI.ChainGetParentMessages(ctx, execTs.Blocks()[0].Cid())
+ if err != nil {
+ return fmt.Errorf("failed to fetch messages in canonical order from inclusion tipset: %w", err)
+ }
+
+ related, found, err := findMsgAndPrecursors(opts.precursor, mcid, msg.From, msgs)
+ if err != nil {
+ return fmt.Errorf("failed while finding message and precursors: %w", err)
+ }
+
+ if !found {
+ return fmt.Errorf("message not found; precursors found: %d", len(related))
+ }
+
+ var (
+ precursors = related[:len(related)-1]
+ precursorsCids []cid.Cid
+ )
+
+ for _, p := range precursors {
+ precursorsCids = append(precursorsCids, p.Cid())
+ }
+
+ log.Println(color.GreenString("found message; precursors (count: %d): %v", len(precursors), precursorsCids))
+
+ var (
+ // create a read-through store that uses ChainGetObject to fetch unknown CIDs.
+ pst = NewProxyingStores(ctx, FullAPI)
+ g = NewSurgeon(ctx, FullAPI, pst)
+ )
+
+ driver := conformance.NewDriver(ctx, schema.Selector{}, conformance.DriverOpts{
+ DisableVMFlush: true,
+ })
+
+ // this is the root of the state tree we start with.
+ root := incTs.ParentState()
+ log.Printf("base state tree root CID: %s", root)
+
+ basefee := incTs.Blocks()[0].ParentBaseFee
+ log.Printf("basefee: %s", basefee)
+
+ // on top of that state tree, we apply all precursors.
+ log.Printf("number of precursors to apply: %d", len(precursors))
+ for i, m := range precursors {
+ log.Printf("applying precursor %d, cid: %s", i, m.Cid())
+ _, root, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{
+ Preroot: root,
+ Epoch: execTs.Height(),
+ Message: m,
+ CircSupply: circSupplyDetail.FilCirculating,
+ BaseFee: basefee,
+ // recorded randomness will be discarded.
+ Rand: conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI),
+ })
+ if err != nil {
+ return fmt.Errorf("failed to execute precursor message: %w", err)
+ }
+ }
+
+ var (
+ preroot cid.Cid
+ postroot cid.Cid
+ applyret *vm.ApplyRet
+ carWriter func(w io.Writer) error
+ retention = opts.retain
+
+ // recordingRand will record randomness so we can embed it in the test vector.
+ recordingRand = conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI)
+ )
+
+ log.Printf("using state retention strategy: %s", retention)
+ switch retention {
+ case "accessed-cids":
+ tbs, ok := pst.Blockstore.(TracingBlockstore)
+ if !ok {
+ return fmt.Errorf("requested 'accessed-cids' state retention, but no tracing blockstore was present")
+ }
+
+ tbs.StartTracing()
+
+ preroot = root
+ applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{
+ Preroot: preroot,
+ Epoch: execTs.Height(),
+ Message: msg,
+ CircSupply: circSupplyDetail.FilCirculating,
+ BaseFee: basefee,
+ Rand: recordingRand,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to execute message: %w", err)
+ }
+ accessed := tbs.FinishTracing()
+ carWriter = func(w io.Writer) error {
+ return g.WriteCARIncluding(w, accessed, preroot, postroot)
+ }
+
+ case "accessed-actors":
+ log.Printf("calculating accessed actors")
+ // get actors accessed by message.
+ retain, err := g.GetAccessedActors(ctx, FullAPI, mcid)
+ if err != nil {
+ return fmt.Errorf("failed to calculate accessed actors: %w", err)
+ }
+ // also append the reward actor and the burnt funds actor.
+ retain = append(retain, reward.Address, builtin.BurntFundsActorAddr, init_.Address)
+ log.Printf("calculated accessed actors: %v", retain)
+
+ // get the masked state tree from the root,
+ preroot, err = g.GetMaskedStateTree(root, retain)
+ if err != nil {
+ return err
+ }
+ applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{
+ Preroot: preroot,
+ Epoch: execTs.Height(),
+ Message: msg,
+ CircSupply: circSupplyDetail.FilCirculating,
+ BaseFee: basefee,
+ Rand: recordingRand,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to execute message: %w", err)
+ }
+ carWriter = func(w io.Writer) error {
+ return g.WriteCAR(w, preroot, postroot)
+ }
+
+ default:
+ return fmt.Errorf("unknown state retention option: %s", retention)
+ }
+
+ log.Printf("message applied; preroot: %s, postroot: %s", preroot, postroot)
+ log.Println("performing sanity check on receipt")
+
+ // TODO sometimes this returns a nil receipt and no error ¯\_(ツ)_/¯
+ // ex: https://filfox.info/en/message/bafy2bzacebpxw3yiaxzy2bako62akig46x3imji7fewszen6fryiz6nymu2b2
+ // This code is lenient and skips receipt comparison in case of a nil receipt.
+ rec, err := FullAPI.StateGetReceipt(ctx, mcid, execTs.Key())
+ if err != nil {
+ return fmt.Errorf("failed to find receipt on chain: %w", err)
+ }
+ log.Printf("found receipt: %+v", rec)
+
+ // generate the schema receipt; if we got a nil receipt from lotus, fall back to the locally computed result.
+ var receipt *schema.Receipt
+ if rec != nil {
+ receipt = &schema.Receipt{
+ ExitCode: int64(rec.ExitCode),
+ ReturnValue: rec.Return,
+ GasUsed: rec.GasUsed,
+ }
+
+ reporter := new(conformance.LogReporter)
+ conformance.AssertMsgResult(reporter, receipt, applyret, "as locally executed")
+ if reporter.Failed() {
+ if opts.ignoreSanityChecks {
+ log.Println(color.YellowString("receipt sanity check failed; proceeding anyway"))
+ } else {
+ log.Println(color.RedString("receipt sanity check failed; aborting"))
+ return fmt.Errorf("vector generation aborted")
+ }
+ } else {
+ log.Println(color.GreenString("receipt sanity check succeeded"))
+ }
+
+ } else {
+ receipt = &schema.Receipt{
+ ExitCode: int64(applyret.ExitCode),
+ ReturnValue: applyret.Return,
+ GasUsed: applyret.GasUsed,
+ }
+ log.Println(color.YellowString("skipping receipts comparison; we got back a nil receipt from lotus"))
+ }
+
+ log.Println("generating vector")
+ msgBytes, err := msg.Serialize()
+ if err != nil {
+ return err
+ }
+
+ var (
+ out = new(bytes.Buffer)
+ gw = gzip.NewWriter(out)
+ )
+ if err := carWriter(gw); err != nil {
+ return err
+ }
+ if err = gw.Flush(); err != nil {
+ return err
+ }
+ if err = gw.Close(); err != nil {
+ return err
+ }
+
+ version, err := FullAPI.Version(ctx)
+ if err != nil {
+ return err
+ }
+
+ ntwkName, err := FullAPI.StateNetworkName(ctx)
+ if err != nil {
+ return err
+ }
+
+ nv, err := FullAPI.StateNetworkVersion(ctx, execTs.Key())
+ if err != nil {
+ return err
+ }
+
+ codename := GetProtocolCodename(execTs.Height())
+
+ // Write out the test vector.
+ vector := schema.TestVector{
+ Class: schema.ClassMessage,
+ Meta: &schema.Metadata{
+ ID: opts.id,
+ // TODO need to replace schema.GenerationData with a more flexible
+ // data structure that makes no assumption about the traceability
+ // data that's being recorded; a flexible map[string]string
+ // would do.
+ Gen: []schema.GenerationData{
+ {Source: fmt.Sprintf("network:%s", ntwkName)},
+ {Source: fmt.Sprintf("message:%s", msg.Cid().String())},
+ {Source: fmt.Sprintf("inclusion_tipset:%s", incTs.Key().String())},
+ {Source: fmt.Sprintf("execution_tipset:%s", execTs.Key().String())},
+ {Source: "github.com/filecoin-project/lotus", Version: version.String()}},
+ },
+ Selector: schema.Selector{
+ schema.SelectorMinProtocolVersion: codename,
+ },
+ Randomness: recordingRand.Recorded(),
+ CAR: out.Bytes(),
+ Pre: &schema.Preconditions{
+ Variants: []schema.Variant{
+ {ID: codename, Epoch: int64(execTs.Height()), NetworkVersion: uint(nv)},
+ },
+ CircSupply: circSupply.Int,
+ BaseFee: basefee.Int,
+ StateTree: &schema.StateTree{
+ RootCID: preroot,
+ },
+ },
+ ApplyMessages: []schema.Message{{Bytes: msgBytes}},
+ Post: &schema.Postconditions{
+ StateTree: &schema.StateTree{
+ RootCID: postroot,
+ },
+ Receipts: []*schema.Receipt{
+ {
+ ExitCode: int64(applyret.ExitCode),
+ ReturnValue: applyret.Return,
+ GasUsed: applyret.GasUsed,
+ },
+ },
+ },
+ }
+ return writeVector(&vector, opts.file)
+}
+
+// resolveFromChain queries the chain for the provided message, using the block CID to
+// speed up the query, if provided
+func resolveFromChain(ctx context.Context, api v0api.FullNode, mcid cid.Cid, block string) (msg *types.Message, execTs *types.TipSet, incTs *types.TipSet, err error) {
+ // Extract the full message.
+ msg, err = api.ChainGetMessage(ctx, mcid)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ log.Printf("found message with CID %s: %+v", mcid, msg)
+
+ if block == "" {
+ log.Printf("locating message in blockchain")
+
+ // Locate the message.
+ msgInfo, err := api.StateSearchMsg(ctx, mcid)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to locate message: %w", err)
+ }
+ if msgInfo == nil {
+ return nil, nil, nil, fmt.Errorf("failed to locate message: not found")
+ }
+
+ log.Printf("located message at tipset %s (height: %d) with exit code: %s", msgInfo.TipSet, msgInfo.Height, msgInfo.Receipt.ExitCode)
+
+ execTs, incTs, err = fetchThisAndPrevTipset(ctx, api, msgInfo.TipSet)
+ return msg, execTs, incTs, err
+ }
+
+ bcid, err := cid.Decode(block)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ log.Printf("message inclusion block CID was provided; scanning around it: %s", bcid)
+
+ blk, err := api.ChainGetBlock(ctx, bcid)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to get block: %w", err)
+ }
+
+ // types.EmptyTSK hints to use the HEAD.
+ execTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height+1, types.EmptyTSK)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to get message execution tipset: %w", err)
+ }
+
+ // walk back from the execTs instead of HEAD, to save time.
+ incTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height, execTs.Key())
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to get message inclusion tipset: %w", err)
+ }
+
+ return msg, execTs, incTs, nil
+}
+
+// fetchThisAndPrevTipset returns the full tipset identified by the key, as well
+// as the previous tipset. In the context of vector generation, the target
+// tipset is the one where a message was executed, and the previous tipset is
+// the one where the message was included.
+func fetchThisAndPrevTipset(ctx context.Context, api v0api.FullNode, target types.TipSetKey) (targetTs *types.TipSet, prevTs *types.TipSet, err error) {
+ // get the tipset on which this message was "executed" on.
+ // https://github.com/filecoin-project/lotus/issues/2847
+ targetTs, err = api.ChainGetTipSet(ctx, target)
+ if err != nil {
+ return nil, nil, err
+ }
+ // get the previous tipset, on which this message was mined,
+ // i.e. included on-chain.
+ prevTs, err = api.ChainGetTipSet(ctx, targetTs.Parents())
+ if err != nil {
+ return nil, nil, err
+ }
+ return targetTs, prevTs, nil
+}
+
+// findMsgAndPrecursors ranges through the canonical messages slice, locating
+// the target message and returning precursors in accordance to the supplied
+// mode.
+func findMsgAndPrecursors(mode string, msgCid cid.Cid, sender address.Address, msgs []api.Message) (related []*types.Message, found bool, err error) {
+ // Range through canonicalised messages, selecting only the precursors based
+ // on selection mode.
+ for _, other := range msgs {
+ switch {
+ case mode == PrecursorSelectAll:
+ fallthrough
+ case mode == PrecursorSelectSender && other.Message.From == sender:
+ related = append(related, other.Message)
+ }
+
+ // this message is the target; we're done.
+ if other.Cid == msgCid {
+ return related, true, nil
+ }
+ }
+
+ // this could happen because a block contained related messages, but not
+ // the target (that is, messages with a lower nonce, but ultimately not the
+ // target).
+ return related, false, nil
+}
diff --git a/cmd/tvx/extract_tipset.go b/cmd/tvx/extract_tipset.go
new file mode 100644
index 00000000000..05e856aa1a0
--- /dev/null
+++ b/cmd/tvx/extract_tipset.go
@@ -0,0 +1,277 @@
+package main
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/filecoin-project/test-vectors/schema"
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/conformance"
+)
+
+func doExtractTipset(opts extractOpts) error {
+ ctx := context.Background()
+
+ if opts.retain != "accessed-cids" {
+ return fmt.Errorf("tipset extraction only supports 'accessed-cids' state retention")
+ }
+
+ if opts.tsk == "" {
+ return fmt.Errorf("tipset key cannot be empty")
+ }
+
+ ss := strings.Split(opts.tsk, "..")
+ switch len(ss) {
+ case 1: // extracting a single tipset.
+ ts, err := lcli.ParseTipSetRef(ctx, FullAPI, opts.tsk)
+ if err != nil {
+ return fmt.Errorf("failed to fetch tipset: %w", err)
+ }
+ v, err := extractTipsets(ctx, ts)
+ if err != nil {
+ return err
+ }
+ return writeVector(v, opts.file)
+
+ case 2: // extracting a range of tipsets.
+ left, err := lcli.ParseTipSetRef(ctx, FullAPI, ss[0])
+ if err != nil {
+ return fmt.Errorf("failed to fetch tipset %s: %w", ss[0], err)
+ }
+ right, err := lcli.ParseTipSetRef(ctx, FullAPI, ss[1])
+ if err != nil {
+ return fmt.Errorf("failed to fetch tipset %s: %w", ss[1], err)
+ }
+
+ // resolve the tipset range.
+ tss, err := resolveTipsetRange(ctx, left, right)
+ if err != nil {
+ return err
+ }
+
+		// are we squashing all tipsets into a single multi-tipset vector?
+ if opts.squash {
+ vector, err := extractTipsets(ctx, tss...)
+ if err != nil {
+ return err
+ }
+ return writeVector(vector, opts.file)
+ }
+
+ // we are generating a single-tipset vector per tipset.
+ vectors, err := extractIndividualTipsets(ctx, tss...)
+ if err != nil {
+ return err
+ }
+ return writeVectors(opts.file, vectors...)
+
+ default:
+ return fmt.Errorf("unrecognized tipset format")
+ }
+}
+
+func resolveTipsetRange(ctx context.Context, left *types.TipSet, right *types.TipSet) (tss []*types.TipSet, err error) {
+ // start from the right tipset and walk back the chain until the left tipset, inclusive.
+ for curr := right; curr.Key() != left.Parents(); {
+ tss = append(tss, curr)
+ curr, err = FullAPI.ChainGetTipSet(ctx, curr.Parents())
+ if err != nil {
+ return nil, fmt.Errorf("failed to get tipset %s (height: %d): %w", curr.Parents(), curr.Height()-1, err)
+ }
+ }
+ // reverse the slice.
+ for i, j := 0, len(tss)-1; i < j; i, j = i+1, j-1 {
+ tss[i], tss[j] = tss[j], tss[i]
+ }
+ return tss, nil
+}
+
+func extractIndividualTipsets(ctx context.Context, tss ...*types.TipSet) (vectors []*schema.TestVector, err error) {
+ for _, ts := range tss {
+ v, err := extractTipsets(ctx, ts)
+ if err != nil {
+ return nil, err
+ }
+ vectors = append(vectors, v)
+ }
+ return vectors, nil
+}
+
+func extractTipsets(ctx context.Context, tss ...*types.TipSet) (*schema.TestVector, error) {
+ var (
+		// create a read-through store that uses ChainReadObj to fetch unknown CIDs.
+ pst = NewProxyingStores(ctx, FullAPI)
+ g = NewSurgeon(ctx, FullAPI, pst)
+
+ // recordingRand will record randomness so we can embed it in the test vector.
+ recordingRand = conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI)
+ )
+
+ tbs, ok := pst.Blockstore.(TracingBlockstore)
+ if !ok {
+ return nil, fmt.Errorf("requested 'accessed-cids' state retention, but no tracing blockstore was present")
+ }
+
+ driver := conformance.NewDriver(ctx, schema.Selector{}, conformance.DriverOpts{
+ DisableVMFlush: true,
+ })
+
+ base := tss[0]
+ last := tss[len(tss)-1]
+
+ // this is the root of the state tree we start with.
+ root := base.ParentState()
+ log.Printf("base state tree root CID: %s", root)
+
+ codename := GetProtocolCodename(base.Height())
+ nv, err := FullAPI.StateNetworkVersion(ctx, base.Key())
+ if err != nil {
+ return nil, err
+ }
+
+ version, err := FullAPI.Version(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ ntwkName, err := FullAPI.StateNetworkName(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ vector := schema.TestVector{
+ Class: schema.ClassTipset,
+ Meta: &schema.Metadata{
+ ID: fmt.Sprintf("@%d..@%d", base.Height(), last.Height()),
+ Gen: []schema.GenerationData{
+ {Source: fmt.Sprintf("network:%s", ntwkName)},
+ {Source: "github.com/filecoin-project/lotus", Version: version.String()}},
+ // will be completed by extra tipset stamps.
+ },
+ Selector: schema.Selector{
+ schema.SelectorMinProtocolVersion: codename,
+ },
+ Pre: &schema.Preconditions{
+ Variants: []schema.Variant{
+ {ID: codename, Epoch: int64(base.Height()), NetworkVersion: uint(nv)},
+ },
+ StateTree: &schema.StateTree{
+ RootCID: base.ParentState(),
+ },
+ },
+ Post: &schema.Postconditions{
+ StateTree: new(schema.StateTree),
+ },
+ }
+
+ tbs.StartTracing()
+
+ roots := []cid.Cid{base.ParentState()}
+ for i, ts := range tss {
+ log.Printf("tipset %s block count: %d", ts.Key(), len(ts.Blocks()))
+
+ var blocks []schema.Block
+ for _, b := range ts.Blocks() {
+ msgs, err := FullAPI.ChainGetBlockMessages(ctx, b.Cid())
+ if err != nil {
+ return nil, fmt.Errorf("failed to get block messages (cid: %s): %w", b.Cid(), err)
+ }
+
+ log.Printf("block %s has %d messages", b.Cid(), len(msgs.Cids))
+
+ packed := make([]schema.Base64EncodedBytes, 0, len(msgs.Cids))
+ for _, m := range msgs.BlsMessages {
+ b, err := m.Serialize()
+ if err != nil {
+ return nil, fmt.Errorf("failed to serialize message: %w", err)
+ }
+ packed = append(packed, b)
+ }
+ for _, m := range msgs.SecpkMessages {
+ b, err := m.Message.Serialize()
+ if err != nil {
+ return nil, fmt.Errorf("failed to serialize message: %w", err)
+ }
+ packed = append(packed, b)
+ }
+ blocks = append(blocks, schema.Block{
+ MinerAddr: b.Miner,
+ WinCount: b.ElectionProof.WinCount,
+ Messages: packed,
+ })
+ }
+
+ basefee := base.Blocks()[0].ParentBaseFee
+ log.Printf("tipset basefee: %s", basefee)
+
+ tipset := schema.Tipset{
+ BaseFee: *basefee.Int,
+ Blocks: blocks,
+ EpochOffset: int64(i),
+ }
+
+ params := conformance.ExecuteTipsetParams{
+ Preroot: roots[len(roots)-1],
+ ParentEpoch: ts.Height() - 1,
+ Tipset: &tipset,
+ ExecEpoch: ts.Height(),
+ Rand: recordingRand,
+ }
+
+ result, err := driver.ExecuteTipset(pst.Blockstore, pst.Datastore, params)
+ if err != nil {
+ return nil, fmt.Errorf("failed to execute tipset: %w", err)
+ }
+
+ roots = append(roots, result.PostStateRoot)
+
+ // update the vector.
+ vector.ApplyTipsets = append(vector.ApplyTipsets, tipset)
+ vector.Post.ReceiptsRoots = append(vector.Post.ReceiptsRoots, result.ReceiptsRoot)
+
+ for _, res := range result.AppliedResults {
+ vector.Post.Receipts = append(vector.Post.Receipts, &schema.Receipt{
+ ExitCode: int64(res.ExitCode),
+ ReturnValue: res.Return,
+ GasUsed: res.GasUsed,
+ })
+ }
+
+ vector.Meta.Gen = append(vector.Meta.Gen, schema.GenerationData{
+ Source: "tipset:" + ts.Key().String(),
+ })
+ }
+
+ accessed := tbs.FinishTracing()
+
+ //
+ // ComputeBaseFee(ctx, baseTs)
+
+ // write a CAR with the accessed state into a buffer.
+ var (
+ out = new(bytes.Buffer)
+ gw = gzip.NewWriter(out)
+ )
+ if err := g.WriteCARIncluding(gw, accessed, roots...); err != nil {
+ return nil, err
+ }
+ if err = gw.Flush(); err != nil {
+ return nil, err
+ }
+ if err = gw.Close(); err != nil {
+ return nil, err
+ }
+
+ vector.Randomness = recordingRand.Recorded()
+ vector.Post.StateTree.RootCID = roots[len(roots)-1]
+ vector.CAR = out.Bytes()
+
+ return &vector, nil
+}
diff --git a/cmd/tvx/main.go b/cmd/tvx/main.go
index 8de851ed59d..0fed8fad4b1 100644
--- a/cmd/tvx/main.go
+++ b/cmd/tvx/main.go
@@ -9,13 +9,13 @@ import (
"github.com/filecoin-project/go-jsonrpc"
"github.com/urfave/cli/v2"
- "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v0api"
lcli "github.com/filecoin-project/lotus/cli"
)
// FullAPI is a JSON-RPC client targeting a full node. It's initialized in a
// cli.BeforeFunc.
-var FullAPI api.FullNode
+var FullAPI v0api.FullNode
// Closer is the closer for the JSON-RPC client, which must be called on
// cli.AfterFunc.
@@ -102,7 +102,7 @@ func initialize(c *cli.Context) error {
// Make the API client.
var err error
if FullAPI, Closer, err = lcli.GetFullNodeAPI(c); err != nil {
- err = fmt.Errorf("failed to locate Lotus node; ")
+ err = fmt.Errorf("failed to locate Lotus node; err: %w", err)
}
return err
}
@@ -113,3 +113,19 @@ func destroy(_ *cli.Context) error {
}
return nil
}
+
+func ensureDir(path string) error {
+ switch fi, err := os.Stat(path); {
+ case os.IsNotExist(err):
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return fmt.Errorf("failed to create directory %s: %w", path, err)
+ }
+ case err == nil:
+ if !fi.IsDir() {
+ return fmt.Errorf("path %s is not a directory: %w", path, err)
+ }
+ default:
+ return fmt.Errorf("failed to stat directory %s: %w", path, err)
+ }
+ return nil
+}
diff --git a/cmd/tvx/simulate.go b/cmd/tvx/simulate.go
index 82b2bc118c7..da9a034e923 100644
--- a/cmd/tvx/simulate.go
+++ b/cmd/tvx/simulate.go
@@ -154,7 +154,7 @@ func runSimulateCmd(_ *cli.Context) error {
version, err := FullAPI.Version(ctx)
if err != nil {
log.Printf("failed to get node version: %s; falling back to unknown", err)
- version = api.Version{}
+ version = api.APIVersion{}
}
nv, err := FullAPI.StateNetworkVersion(ctx, ts.Key())
@@ -202,7 +202,7 @@ func runSimulateCmd(_ *cli.Context) error {
},
}
- if err := writeVector(vector, simulateFlags.out); err != nil {
+ if err := writeVector(&vector, simulateFlags.out); err != nil {
return fmt.Errorf("failed to write vector: %w", err)
}
diff --git a/cmd/tvx/state.go b/cmd/tvx/state.go
index bff5cbd6ecb..f2d25300adb 100644
--- a/cmd/tvx/state.go
+++ b/cmd/tvx/state.go
@@ -6,6 +6,8 @@ import (
"io"
"log"
+ "github.com/filecoin-project/lotus/api/v0api"
+
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
@@ -13,7 +15,6 @@ import (
"github.com/ipld/go-car"
cbg "github.com/whyrusleeping/cbor-gen"
- "github.com/filecoin-project/lotus/api"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
@@ -23,13 +24,13 @@ import (
// StateSurgeon is an object used to fetch and manipulate state.
type StateSurgeon struct {
ctx context.Context
- api api.FullNode
+ api v0api.FullNode
stores *Stores
}
// NewSurgeon returns a state surgeon, an object used to fetch and manipulate
// state.
-func NewSurgeon(ctx context.Context, api api.FullNode, stores *Stores) *StateSurgeon {
+func NewSurgeon(ctx context.Context, api v0api.FullNode, stores *Stores) *StateSurgeon {
return &StateSurgeon{
ctx: ctx,
api: api,
@@ -85,7 +86,7 @@ func (sg *StateSurgeon) GetMaskedStateTree(previousRoot cid.Cid, retain []addres
// GetAccessedActors identifies the actors that were accessed during the
// execution of a message.
-func (sg *StateSurgeon) GetAccessedActors(ctx context.Context, a api.FullNode, mid cid.Cid) ([]address.Address, error) {
+func (sg *StateSurgeon) GetAccessedActors(ctx context.Context, a v0api.FullNode, mid cid.Cid) ([]address.Address, error) {
log.Printf("calculating accessed actors during execution of message: %s", mid)
msgInfo, err := a.StateSearchMsg(ctx, mid)
if err != nil {
diff --git a/cmd/tvx/stores.go b/cmd/tvx/stores.go
index 4f574c1752d..04000564178 100644
--- a/cmd/tvx/stores.go
+++ b/cmd/tvx/stores.go
@@ -5,11 +5,12 @@ import (
"log"
"sync"
+ "github.com/filecoin-project/lotus/api/v0api"
+
"github.com/fatih/color"
dssync "github.com/ipfs/go-datastore/sync"
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/lib/blockstore"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/actors/adt"
@@ -40,12 +41,12 @@ type Stores struct {
// NewProxyingStores is a set of Stores backed by a proxying Blockstore that
// proxies Get requests for unknown CIDs to a Filecoin node, via the
// ChainReadObj RPC.
-func NewProxyingStores(ctx context.Context, api api.FullNode) *Stores {
+func NewProxyingStores(ctx context.Context, api v0api.FullNode) *Stores {
ds := dssync.MutexWrap(ds.NewMapDatastore())
bs := &proxyingBlockstore{
ctx: ctx,
api: api,
- Blockstore: blockstore.NewBlockstore(ds),
+ Blockstore: blockstore.FromDatastore(ds),
}
return NewStores(ctx, ds, bs)
}
@@ -85,7 +86,7 @@ type TracingBlockstore interface {
// a Filecoin node via JSON-RPC.
type proxyingBlockstore struct {
ctx context.Context
- api api.FullNode
+ api v0api.FullNode
lk sync.Mutex
tracing bool
@@ -149,3 +150,14 @@ func (pb *proxyingBlockstore) Put(block blocks.Block) error {
pb.lk.Unlock()
return pb.Blockstore.Put(block)
}
+
+func (pb *proxyingBlockstore) PutMany(blocks []blocks.Block) error {
+ pb.lk.Lock()
+ if pb.tracing {
+ for _, b := range blocks {
+ pb.traced[b.Cid()] = struct{}{}
+ }
+ }
+ pb.lk.Unlock()
+ return pb.Blockstore.PutMany(blocks)
+}
diff --git a/conformance/chaos/actor.go b/conformance/chaos/actor.go
index cdda1db83ef..f5a94989de9 100644
--- a/conformance/chaos/actor.go
+++ b/conformance/chaos/actor.go
@@ -73,6 +73,8 @@ const (
// MethodInspectRuntime is the identifier for the method that returns the
// current runtime values.
MethodInspectRuntime
+ // MethodCreateState is the identifier for the method that creates the chaos actor's state.
+ MethodCreateState
)
// Exports defines the methods this actor exposes publicly.
@@ -87,6 +89,7 @@ func (a Actor) Exports() []interface{} {
MethodMutateState: a.MutateState,
MethodAbortWith: a.AbortWith,
MethodInspectRuntime: a.InspectRuntime,
+ MethodCreateState: a.CreateState,
}
}
@@ -227,6 +230,14 @@ type MutateStateArgs struct {
Branch MutateStateBranch
}
+// CreateState creates the chaos actor's state
+func (a Actor) CreateState(rt runtime2.Runtime, _ *abi.EmptyValue) *abi.EmptyValue {
+ rt.ValidateImmediateCallerAcceptAny()
+ rt.StateCreate(&State{})
+
+ return nil
+}
+
// MutateState attempts to mutate a state value in the actor.
func (a Actor) MutateState(rt runtime2.Runtime, args *MutateStateArgs) *abi.EmptyValue {
rt.ValidateImmediateCallerAcceptAny()
diff --git a/conformance/chaos/actor_test.go b/conformance/chaos/actor_test.go
index dbce4f4c552..e68b9a4df76 100644
--- a/conformance/chaos/actor_test.go
+++ b/conformance/chaos/actor_test.go
@@ -129,8 +129,9 @@ func TestMutateStateInTransaction(t *testing.T) {
var a Actor
rt.ExpectValidateCallerAny()
- rt.StateCreate(&State{})
+ rt.Call(a.CreateState, nil)
+ rt.ExpectValidateCallerAny()
val := "__mutstat test"
rt.Call(a.MutateState, &MutateStateArgs{
Value: val,
@@ -155,23 +156,30 @@ func TestMutateStateAfterTransaction(t *testing.T) {
var a Actor
rt.ExpectValidateCallerAny()
- rt.StateCreate(&State{})
+ rt.Call(a.CreateState, nil)
+ rt.ExpectValidateCallerAny()
val := "__mutstat test"
+ defer func() {
+ if r := recover(); r == nil {
+ t.Fatal("The code did not panic")
+ } else {
+ var st State
+ rt.GetState(&st)
+
+ // state should be updated successfully _in_ the transaction but not outside
+ if st.Value != val+"-in" {
+ t.Fatal("state was not updated")
+ }
+
+ rt.Verify()
+ }
+ }()
rt.Call(a.MutateState, &MutateStateArgs{
Value: val,
Branch: MutateAfterTransaction,
})
- var st State
- rt.GetState(&st)
-
- // state should be updated successfully _in_ the transaction but not outside
- if st.Value != val+"-in" {
- t.Fatal("state was not updated")
- }
-
- rt.Verify()
}
func TestMutateStateReadonly(t *testing.T) {
@@ -182,22 +190,30 @@ func TestMutateStateReadonly(t *testing.T) {
var a Actor
rt.ExpectValidateCallerAny()
- rt.StateCreate(&State{})
+ rt.Call(a.CreateState, nil)
+ rt.ExpectValidateCallerAny()
val := "__mutstat test"
+ defer func() {
+ if r := recover(); r == nil {
+ t.Fatal("The code did not panic")
+ } else {
+ var st State
+ rt.GetState(&st)
+
+ if st.Value != "" {
+ t.Fatal("state was not expected to be updated")
+ }
+
+ rt.Verify()
+ }
+ }()
+
rt.Call(a.MutateState, &MutateStateArgs{
Value: val,
Branch: MutateReadonly,
})
- var st State
- rt.GetState(&st)
-
- if st.Value != "" {
- t.Fatal("state was not expected to be updated")
- }
-
- rt.Verify()
}
func TestMutateStateInvalidBranch(t *testing.T) {
@@ -254,11 +270,13 @@ func TestInspectRuntime(t *testing.T) {
receiver := atesting2.NewIDAddr(t, 101)
builder := mock2.NewBuilder(context.Background(), receiver)
- rt := builder.Build(t)
- rt.SetCaller(caller, builtin2.AccountActorCodeID)
- rt.StateCreate(&State{})
var a Actor
+ rt := builder.Build(t)
+ rt.ExpectValidateCallerAny()
+ rt.Call(a.CreateState, nil)
+
+ rt.SetCaller(caller, builtin2.AccountActorCodeID)
rt.ExpectValidateCallerAny()
ret := rt.Call(a.InspectRuntime, abi.Empty)
rtr, ok := ret.(*InspectRuntimeReturn)
diff --git a/conformance/chaos/cbor_gen.go b/conformance/chaos/cbor_gen.go
index 876d6a893de..5bf85606f77 100644
--- a/conformance/chaos/cbor_gen.go
+++ b/conformance/chaos/cbor_gen.go
@@ -5,6 +5,7 @@ package chaos
import (
"fmt"
"io"
+ "sort"
address "github.com/filecoin-project/go-address"
abi "github.com/filecoin-project/go-state-types/abi"
@@ -15,6 +16,8 @@ import (
)
var _ = xerrors.Errorf
+var _ = cid.Undef
+var _ = sort.Sort
var lengthBufState = []byte{130}
diff --git a/conformance/corpus_test.go b/conformance/corpus_test.go
index a09f9a8d336..b9ba062ccba 100644
--- a/conformance/corpus_test.go
+++ b/conformance/corpus_test.go
@@ -11,7 +11,7 @@ import (
"github.com/filecoin-project/test-vectors/schema"
)
-var invokees = map[schema.Class]func(Reporter, *schema.TestVector, *schema.Variant){
+var invokees = map[schema.Class]func(Reporter, *schema.TestVector, *schema.Variant) ([]string, error){
schema.ClassMessage: ExecuteMessageVector,
schema.ClassTipset: ExecuteTipsetVector,
}
@@ -133,7 +133,7 @@ func TestConformance(t *testing.T) {
for _, variant := range vector.Pre.Variants {
variant := variant
t.Run(variant.ID, func(t *testing.T) {
- invokee(t, &vector, &variant)
+ _, _ = invokee(t, &vector, &variant) //nolint:errcheck
})
}
})
diff --git a/conformance/driver.go b/conformance/driver.go
index 95b6f2659ea..c7fc0d6c43a 100644
--- a/conformance/driver.go
+++ b/conformance/driver.go
@@ -5,6 +5,7 @@ import (
gobig "math/big"
"os"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@@ -12,7 +13,6 @@ import (
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/conformance/chaos"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
- "github.com/filecoin-project/lotus/lib/blockstore"
_ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures
_ "github.com/filecoin-project/lotus/lib/sigs/secp" // enable secp signatures
@@ -71,26 +71,50 @@ type ExecuteTipsetResult struct {
AppliedMessages []*types.Message
// AppliedResults stores the results of AppliedMessages, in the same order.
AppliedResults []*vm.ApplyRet
+
+ // PostBaseFee returns the basefee after applying this tipset.
+ PostBaseFee abi.TokenAmount
+}
+
+type ExecuteTipsetParams struct {
+ Preroot cid.Cid
+ // ParentEpoch is the last epoch in which an actual tipset was processed. This
+ // is used by Lotus for null block counting and cron firing.
+ ParentEpoch abi.ChainEpoch
+ Tipset *schema.Tipset
+ ExecEpoch abi.ChainEpoch
+ // Rand is an optional vm.Rand implementation to use. If nil, the driver
+ // will use a vm.Rand that returns a fixed value for all calls.
+ Rand vm.Rand
+ // BaseFee if not nil or zero, will override the basefee of the tipset.
+ BaseFee abi.TokenAmount
}
// ExecuteTipset executes the supplied tipset on top of the state represented
// by the preroot CID.
//
-// parentEpoch is the last epoch in which an actual tipset was processed. This
-// is used by Lotus for null block counting and cron firing.
-//
// This method returns the the receipts root, the poststate root, and the VM
// message results. The latter _include_ implicit messages, such as cron ticks
// and reward withdrawal per miner.
-func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot cid.Cid, parentEpoch abi.ChainEpoch, tipset *schema.Tipset, execEpoch abi.ChainEpoch) (*ExecuteTipsetResult, error) {
+func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params ExecuteTipsetParams) (*ExecuteTipsetResult, error) {
var (
+ tipset = params.Tipset
syscalls = vm.Syscalls(ffiwrapper.ProofVerifier)
- vmRand = NewFixedRand()
- cs = store.NewChainStore(bs, ds, syscalls, nil)
+ cs = store.NewChainStore(bs, bs, ds, syscalls, nil)
sm = stmgr.NewStateManager(cs)
)
+ if params.Rand == nil {
+ params.Rand = NewFixedRand()
+ }
+
+ if params.BaseFee.NilOrZero() {
+ params.BaseFee = abi.NewTokenAmount(tipset.BaseFee.Int64())
+ }
+
+ defer cs.Close() //nolint:errcheck
+
blocks := make([]store.BlockMessages, 0, len(tipset.Blocks))
for _, b := range tipset.Blocks {
sb := store.BlockMessages{
@@ -117,19 +141,22 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot
blocks = append(blocks, sb)
}
- var (
- messages []*types.Message
- results []*vm.ApplyRet
+ recordOutputs := &outputRecorder{
+ messages: []*types.Message{},
+ results: []*vm.ApplyRet{},
+ }
- basefee = abi.NewTokenAmount(tipset.BaseFee.Int64())
+ postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(),
+ params.ParentEpoch,
+ params.Preroot,
+ blocks,
+ params.ExecEpoch,
+ params.Rand,
+ recordOutputs,
+ params.BaseFee,
+ nil,
)
- postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(), parentEpoch, preroot, blocks, execEpoch, vmRand, func(_ cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
- messages = append(messages, msg)
- results = append(results, ret)
- return nil
- }, basefee, nil)
-
if err != nil {
return nil, err
}
@@ -137,8 +164,8 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot
ret := &ExecuteTipsetResult{
ReceiptsRoot: receiptsroot,
PostStateRoot: postcid,
- AppliedMessages: messages,
- AppliedResults: results,
+ AppliedMessages: recordOutputs.messages,
+ AppliedResults: recordOutputs.results,
}
return ret, nil
}
@@ -252,3 +279,14 @@ func CircSupplyOrDefault(circSupply *gobig.Int) abi.TokenAmount {
}
return big.NewFromGo(circSupply)
}
+
+type outputRecorder struct {
+ messages []*types.Message
+ results []*vm.ApplyRet
+}
+
+func (o *outputRecorder) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error {
+ o.messages = append(o.messages, msg)
+ o.results = append(o.results, ret)
+ return nil
+}
diff --git a/conformance/rand_fixed.go b/conformance/rand_fixed.go
index d356b53d049..f15910e1d6d 100644
--- a/conformance/rand_fixed.go
+++ b/conformance/rand_fixed.go
@@ -19,10 +19,18 @@ func NewFixedRand() vm.Rand {
return &fixedRand{}
}
-func (r *fixedRand) GetChainRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
+func (r *fixedRand) GetChainRandomnessLookingForward(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
}
-func (r *fixedRand) GetBeaconRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
+func (r *fixedRand) GetChainRandomnessLookingBack(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
+ return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
+}
+
+func (r *fixedRand) GetBeaconRandomnessLookingForward(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
+ return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
+}
+
+func (r *fixedRand) GetBeaconRandomnessLookingBack(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
}
diff --git a/conformance/rand_record.go b/conformance/rand_record.go
index 6f6d064dc74..906d6b73dd1 100644
--- a/conformance/rand_record.go
+++ b/conformance/rand_record.go
@@ -10,14 +10,14 @@ import (
"github.com/filecoin-project/test-vectors/schema"
- "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
)
type RecordingRand struct {
reporter Reporter
- api api.FullNode
+ api v0api.FullNode
// once guards the loading of the head tipset.
// can be removed when https://github.com/filecoin-project/lotus/issues/4223
@@ -33,7 +33,7 @@ var _ vm.Rand = (*RecordingRand)(nil)
// NewRecordingRand returns a vm.Rand implementation that proxies calls to a
// full Lotus node via JSON-RPC, and records matching rules and responses so
// they can later be embedded in test vectors.
-func NewRecordingRand(reporter Reporter, api api.FullNode) *RecordingRand {
+func NewRecordingRand(reporter Reporter, api v0api.FullNode) *RecordingRand {
return &RecordingRand{reporter: reporter, api: api}
}
@@ -45,8 +45,17 @@ func (r *RecordingRand) loadHead() {
r.head = head.Key()
}
-func (r *RecordingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (r *RecordingRand) GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return r.getChainRandomness(ctx, pers, round, entropy)
+}
+
+func (r *RecordingRand) GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return r.getChainRandomness(ctx, pers, round, entropy)
+}
+
+func (r *RecordingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
r.once.Do(r.loadHead)
+ // FullNode's ChainGetRandomnessFromTickets handles whether we should be looking forward or back
ret, err := r.api.ChainGetRandomnessFromTickets(ctx, r.head, pers, round, entropy)
if err != nil {
return ret, err
@@ -70,7 +79,15 @@ func (r *RecordingRand) GetChainRandomness(ctx context.Context, pers crypto.Doma
return ret, err
}
-func (r *RecordingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (r *RecordingRand) GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return r.getBeaconRandomness(ctx, pers, round, entropy)
+}
+
+func (r *RecordingRand) GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return r.getBeaconRandomness(ctx, pers, round, entropy)
+}
+
+func (r *RecordingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
r.once.Do(r.loadHead)
ret, err := r.api.ChainGetRandomnessFromBeacon(ctx, r.head, pers, round, entropy)
if err != nil {
diff --git a/conformance/rand_replay.go b/conformance/rand_replay.go
index 1b73e5a08af..faae1d090a7 100644
--- a/conformance/rand_replay.go
+++ b/conformance/rand_replay.go
@@ -43,7 +43,15 @@ func (r *ReplayingRand) match(requested schema.RandomnessRule) ([]byte, bool) {
return nil, false
}
-func (r *ReplayingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (r *ReplayingRand) GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return r.getChainRandomness(ctx, pers, round, entropy, false)
+}
+
+func (r *ReplayingRand) GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return r.getChainRandomness(ctx, pers, round, entropy, true)
+}
+
+func (r *ReplayingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) {
rule := schema.RandomnessRule{
Kind: schema.RandomnessChain,
DomainSeparationTag: int64(pers),
@@ -57,10 +65,23 @@ func (r *ReplayingRand) GetChainRandomness(ctx context.Context, pers crypto.Doma
}
r.reporter.Logf("returning fallback chain randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy)
- return r.fallback.GetChainRandomness(ctx, pers, round, entropy)
+
+ if lookback {
+ return r.fallback.GetChainRandomnessLookingBack(ctx, pers, round, entropy)
+ }
+
+ return r.fallback.GetChainRandomnessLookingForward(ctx, pers, round, entropy)
}
-func (r *ReplayingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (r *ReplayingRand) GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return r.getBeaconRandomness(ctx, pers, round, entropy, false)
+}
+
+func (r *ReplayingRand) GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return r.getBeaconRandomness(ctx, pers, round, entropy, true)
+}
+
+func (r *ReplayingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) {
rule := schema.RandomnessRule{
Kind: schema.RandomnessBeacon,
DomainSeparationTag: int64(pers),
@@ -74,6 +95,10 @@ func (r *ReplayingRand) GetBeaconRandomness(ctx context.Context, pers crypto.Dom
}
r.reporter.Logf("returning fallback beacon randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy)
- return r.fallback.GetBeaconRandomness(ctx, pers, round, entropy)
+ if lookback {
+ return r.fallback.GetBeaconRandomnessLookingBack(ctx, pers, round, entropy)
+ }
+
+ return r.fallback.GetBeaconRandomnessLookingForward(ctx, pers, round, entropy)
}
diff --git a/conformance/runner.go b/conformance/runner.go
index 6f9d73305a3..1044bb329e8 100644
--- a/conformance/runner.go
+++ b/conformance/runner.go
@@ -14,6 +14,8 @@ import (
"github.com/fatih/color"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/exitcode"
+ "github.com/hashicorp/go-multierror"
+ blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
@@ -24,13 +26,32 @@ import (
"github.com/filecoin-project/test-vectors/schema"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
- "github.com/filecoin-project/lotus/lib/blockstore"
)
+// FallbackBlockstoreGetter is a fallback blockstore to use for resolving CIDs
+// unknown to the test vector. This is rarely used, usually only needed
+// when transplanting vectors across versions. This is an interface tighter
+// than ChainModuleAPI. It can be backed by a FullAPI client.
+var FallbackBlockstoreGetter interface {
+ ChainReadObj(context.Context, cid.Cid) ([]byte, error)
+}
+
+var TipsetVectorOpts struct {
+ // PipelineBaseFee pipelines the basefee in multi-tipset vectors from one
+ // tipset to another. Basefees in the vector are ignored, except for that of
+ // the first tipset. UNUSED.
+ PipelineBaseFee bool
+
+ // OnTipsetApplied contains callback functions called after a tipset has been
+ // applied.
+ OnTipsetApplied []func(bs blockstore.Blockstore, params *ExecuteTipsetParams, res *ExecuteTipsetResult)
+}
+
// ExecuteMessageVector executes a message-class test vector.
-func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) {
+func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) (diffs []string, err error) {
var (
ctx = context.Background()
baseEpoch = variant.Epoch
@@ -38,7 +59,7 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema
)
// Load the CAR into a new temporary Blockstore.
- bs, err := LoadVectorCAR(vector.CAR)
+ bs, err := LoadBlockstore(vector.CAR)
if err != nil {
r.Fatalf("failed to load the vector CAR: %w", err)
}
@@ -79,14 +100,16 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema
// Once all messages are applied, assert that the final state root matches
// the expected postcondition root.
if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual {
- r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual)
- dumpThreeWayStateDiff(r, vector, bs, root)
- r.FailNow()
+ ierr := fmt.Errorf("wrong post root cid; expected %v, but got %v", expected, actual)
+ r.Errorf(ierr.Error())
+ err = multierror.Append(err, ierr)
+ diffs = dumpThreeWayStateDiff(r, vector, bs, root)
}
+ return diffs, err
}
// ExecuteTipsetVector executes a tipset-class test vector.
-func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) {
+func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) (diffs []string, err error) {
var (
ctx = context.Background()
baseEpoch = abi.ChainEpoch(variant.Epoch)
@@ -95,9 +118,10 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.
)
// Load the vector CAR into a new temporary Blockstore.
- bs, err := LoadVectorCAR(vector.CAR)
+ bs, err := LoadBlockstore(vector.CAR)
if err != nil {
r.Fatalf("failed to load the vector CAR: %w", err)
+ return nil, err
}
// Create a new Driver.
@@ -109,9 +133,22 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.
for i, ts := range vector.ApplyTipsets {
ts := ts // capture
execEpoch := baseEpoch + abi.ChainEpoch(ts.EpochOffset)
- ret, err := driver.ExecuteTipset(bs, tmpds, root, prevEpoch, &ts, execEpoch)
+ params := ExecuteTipsetParams{
+ Preroot: root,
+ ParentEpoch: prevEpoch,
+ Tipset: &ts,
+ ExecEpoch: execEpoch,
+ Rand: NewReplayingRand(r, vector.Randomness),
+ }
+ ret, err := driver.ExecuteTipset(bs, tmpds, params)
if err != nil {
- r.Fatalf("failed to apply tipset %d message: %s", i, err)
+ r.Fatalf("failed to apply tipset %d: %s", i, err)
+ return nil, err
+ }
+
+ // invoke callbacks.
+ for _, cb := range TipsetVectorOpts.OnTipsetApplied {
+ cb(bs, &params, ret)
}
for j, v := range ret.AppliedResults {
@@ -121,7 +158,9 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.
// Compare the receipts root.
if expected, actual := vector.Post.ReceiptsRoots[i], ret.ReceiptsRoot; expected != actual {
- r.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual)
+ ierr := fmt.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual)
+ r.Errorf(ierr.Error())
+ err = multierror.Append(err, ierr)
}
prevEpoch = execEpoch
@@ -131,10 +170,12 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.
// Once all messages are applied, assert that the final state root matches
// the expected postcondition root.
if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual {
- r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual)
- dumpThreeWayStateDiff(r, vector, bs, root)
- r.FailNow()
+ ierr := fmt.Errorf("wrong post root cid; expected %v, but got %v", expected, actual)
+ r.Errorf(ierr.Error())
+ err = multierror.Append(err, ierr)
+ diffs = dumpThreeWayStateDiff(r, vector, bs, root)
}
+ return diffs, err
}
// AssertMsgResult compares a message result. It takes the expected receipt
@@ -154,7 +195,7 @@ func AssertMsgResult(r Reporter, expected *schema.Receipt, actual *vm.ApplyRet,
}
}
-func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.Blockstore, actual cid.Cid) {
+func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.Blockstore, actual cid.Cid) []string {
// check if statediff exists; if not, skip.
if err := exec.Command("statediff", "--help").Run(); err != nil {
r.Log("could not dump 3-way state tree diff upon test failure: statediff command not found")
@@ -163,7 +204,7 @@ func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.
r.Log("$ cd statediff")
r.Log("$ go generate ./...")
r.Log("$ go install ./cmd/statediff")
- return
+ return nil
}
tmpCar, err := writeStateToTempCAR(bs,
@@ -173,6 +214,7 @@ func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.
)
if err != nil {
r.Fatalf("failed to write temporary state CAR: %s", err)
+ return nil
}
defer os.RemoveAll(tmpCar) //nolint:errcheck
@@ -187,28 +229,43 @@ func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.
d3 = color.New(color.FgGreen, color.Bold).Sprint("[Δ3]")
)
- printDiff := func(left, right cid.Cid) {
+ diff := func(left, right cid.Cid) string {
cmd := exec.Command("statediff", "car", "--file", tmpCar, left.String(), right.String())
b, err := cmd.CombinedOutput()
if err != nil {
r.Fatalf("statediff failed: %s", err)
}
- r.Log(string(b))
+ return string(b)
}
bold := color.New(color.Bold).SprintfFunc()
+ r.Log(bold("-----BEGIN STATEDIFF-----"))
+
// run state diffs.
r.Log(bold("=== dumping 3-way diffs between %s, %s, %s ===", a, b, c))
r.Log(bold("--- %s left: %s; right: %s ---", d1, a, b))
- printDiff(vector.Post.StateTree.RootCID, actual)
+ diffA := diff(vector.Post.StateTree.RootCID, actual)
+ r.Log(bold("----------BEGIN STATEDIFF A----------"))
+ r.Log(diffA)
+ r.Log(bold("----------END STATEDIFF A----------"))
r.Log(bold("--- %s left: %s; right: %s ---", d2, c, b))
- printDiff(vector.Pre.StateTree.RootCID, actual)
+ diffB := diff(vector.Pre.StateTree.RootCID, actual)
+ r.Log(bold("----------BEGIN STATEDIFF B----------"))
+ r.Log(diffB)
+ r.Log(bold("----------END STATEDIFF B----------"))
r.Log(bold("--- %s left: %s; right: %s ---", d3, c, a))
- printDiff(vector.Pre.StateTree.RootCID, vector.Post.StateTree.RootCID)
+ diffC := diff(vector.Pre.StateTree.RootCID, vector.Post.StateTree.RootCID)
+ r.Log(bold("----------BEGIN STATEDIFF C----------"))
+ r.Log(diffC)
+ r.Log(bold("----------END STATEDIFF C----------"))
+
+ r.Log(bold("-----END STATEDIFF-----"))
+
+ return []string{diffA, diffB, diffC}
}
// writeStateToTempCAR writes the provided roots to a temporary CAR that'll be
@@ -248,8 +305,8 @@ func writeStateToTempCAR(bs blockstore.Blockstore, roots ...cid.Cid) (string, er
return tmp.Name(), nil
}
-func LoadVectorCAR(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore, error) {
- bs := blockstore.NewTemporary()
+func LoadBlockstore(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore, error) {
+ bs := blockstore.Blockstore(blockstore.NewMemory())
// Read the base64-encoded CAR from the vector, and inflate the gzip.
buf := bytes.NewReader(vectorCAR)
@@ -264,5 +321,18 @@ func LoadVectorCAR(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore,
if err != nil {
return nil, fmt.Errorf("failed to load state tree car from test vector: %s", err)
}
+
+ if FallbackBlockstoreGetter != nil {
+ fbs := &blockstore.FallbackStore{Blockstore: bs}
+ fbs.SetFallback(func(ctx context.Context, c cid.Cid) (blocks.Block, error) {
+ b, err := FallbackBlockstoreGetter.ChainReadObj(ctx, c)
+ if err != nil {
+ return nil, err
+ }
+ return blocks.NewBlockWithCid(b, c)
+ })
+ bs = fbs
+ }
+
return bs, nil
}
diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md
new file mode 100644
index 00000000000..b488e8996fe
--- /dev/null
+++ b/documentation/en/api-v0-methods-miner.md
@@ -0,0 +1,2302 @@
+# Groups
+* [](#)
+ * [Closing](#Closing)
+ * [Discover](#Discover)
+ * [Session](#Session)
+ * [Shutdown](#Shutdown)
+ * [Version](#Version)
+* [Actor](#Actor)
+ * [ActorAddress](#ActorAddress)
+ * [ActorAddressConfig](#ActorAddressConfig)
+ * [ActorSectorSize](#ActorSectorSize)
+* [Auth](#Auth)
+ * [AuthNew](#AuthNew)
+ * [AuthVerify](#AuthVerify)
+* [Check](#Check)
+ * [CheckProvable](#CheckProvable)
+* [Compute](#Compute)
+ * [ComputeProof](#ComputeProof)
+* [Create](#Create)
+ * [CreateBackup](#CreateBackup)
+* [Deals](#Deals)
+ * [DealsConsiderOfflineRetrievalDeals](#DealsConsiderOfflineRetrievalDeals)
+ * [DealsConsiderOfflineStorageDeals](#DealsConsiderOfflineStorageDeals)
+ * [DealsConsiderOnlineRetrievalDeals](#DealsConsiderOnlineRetrievalDeals)
+ * [DealsConsiderOnlineStorageDeals](#DealsConsiderOnlineStorageDeals)
+ * [DealsConsiderUnverifiedStorageDeals](#DealsConsiderUnverifiedStorageDeals)
+ * [DealsConsiderVerifiedStorageDeals](#DealsConsiderVerifiedStorageDeals)
+ * [DealsImportData](#DealsImportData)
+ * [DealsList](#DealsList)
+ * [DealsPieceCidBlocklist](#DealsPieceCidBlocklist)
+ * [DealsSetConsiderOfflineRetrievalDeals](#DealsSetConsiderOfflineRetrievalDeals)
+ * [DealsSetConsiderOfflineStorageDeals](#DealsSetConsiderOfflineStorageDeals)
+ * [DealsSetConsiderOnlineRetrievalDeals](#DealsSetConsiderOnlineRetrievalDeals)
+ * [DealsSetConsiderOnlineStorageDeals](#DealsSetConsiderOnlineStorageDeals)
+ * [DealsSetConsiderUnverifiedStorageDeals](#DealsSetConsiderUnverifiedStorageDeals)
+ * [DealsSetConsiderVerifiedStorageDeals](#DealsSetConsiderVerifiedStorageDeals)
+ * [DealsSetPieceCidBlocklist](#DealsSetPieceCidBlocklist)
+* [I](#I)
+ * [ID](#ID)
+* [Log](#Log)
+ * [LogList](#LogList)
+ * [LogSetLevel](#LogSetLevel)
+* [Market](#Market)
+ * [MarketCancelDataTransfer](#MarketCancelDataTransfer)
+ * [MarketDataTransferUpdates](#MarketDataTransferUpdates)
+ * [MarketGetAsk](#MarketGetAsk)
+ * [MarketGetDealUpdates](#MarketGetDealUpdates)
+ * [MarketGetRetrievalAsk](#MarketGetRetrievalAsk)
+ * [MarketImportDealData](#MarketImportDealData)
+ * [MarketListDataTransfers](#MarketListDataTransfers)
+ * [MarketListDeals](#MarketListDeals)
+ * [MarketListIncompleteDeals](#MarketListIncompleteDeals)
+ * [MarketListRetrievalDeals](#MarketListRetrievalDeals)
+ * [MarketPendingDeals](#MarketPendingDeals)
+ * [MarketPublishPendingDeals](#MarketPublishPendingDeals)
+ * [MarketRestartDataTransfer](#MarketRestartDataTransfer)
+ * [MarketSetAsk](#MarketSetAsk)
+ * [MarketSetRetrievalAsk](#MarketSetRetrievalAsk)
+* [Mining](#Mining)
+ * [MiningBase](#MiningBase)
+* [Net](#Net)
+ * [NetAddrsListen](#NetAddrsListen)
+ * [NetAgentVersion](#NetAgentVersion)
+ * [NetAutoNatStatus](#NetAutoNatStatus)
+ * [NetBandwidthStats](#NetBandwidthStats)
+ * [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer)
+ * [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol)
+ * [NetBlockAdd](#NetBlockAdd)
+ * [NetBlockList](#NetBlockList)
+ * [NetBlockRemove](#NetBlockRemove)
+ * [NetConnect](#NetConnect)
+ * [NetConnectedness](#NetConnectedness)
+ * [NetDisconnect](#NetDisconnect)
+ * [NetFindPeer](#NetFindPeer)
+ * [NetPeerInfo](#NetPeerInfo)
+ * [NetPeers](#NetPeers)
+ * [NetPubsubScores](#NetPubsubScores)
+* [Pieces](#Pieces)
+ * [PiecesGetCIDInfo](#PiecesGetCIDInfo)
+ * [PiecesGetPieceInfo](#PiecesGetPieceInfo)
+ * [PiecesListCidInfos](#PiecesListCidInfos)
+ * [PiecesListPieces](#PiecesListPieces)
+* [Pledge](#Pledge)
+ * [PledgeSector](#PledgeSector)
+* [Return](#Return)
+ * [ReturnAddPiece](#ReturnAddPiece)
+ * [ReturnFetch](#ReturnFetch)
+ * [ReturnFinalizeSector](#ReturnFinalizeSector)
+ * [ReturnMoveStorage](#ReturnMoveStorage)
+ * [ReturnReadPiece](#ReturnReadPiece)
+ * [ReturnReleaseUnsealed](#ReturnReleaseUnsealed)
+ * [ReturnSealCommit1](#ReturnSealCommit1)
+ * [ReturnSealCommit2](#ReturnSealCommit2)
+ * [ReturnSealPreCommit1](#ReturnSealPreCommit1)
+ * [ReturnSealPreCommit2](#ReturnSealPreCommit2)
+ * [ReturnUnsealPiece](#ReturnUnsealPiece)
+* [Sealing](#Sealing)
+ * [SealingAbort](#SealingAbort)
+ * [SealingSchedDiag](#SealingSchedDiag)
+* [Sector](#Sector)
+ * [SectorAddPieceToAny](#SectorAddPieceToAny)
+ * [SectorCommitFlush](#SectorCommitFlush)
+ * [SectorCommitPending](#SectorCommitPending)
+ * [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration)
+ * [SectorGetSealDelay](#SectorGetSealDelay)
+ * [SectorMarkForUpgrade](#SectorMarkForUpgrade)
+ * [SectorPreCommitFlush](#SectorPreCommitFlush)
+ * [SectorPreCommitPending](#SectorPreCommitPending)
+ * [SectorRemove](#SectorRemove)
+ * [SectorSetExpectedSealDuration](#SectorSetExpectedSealDuration)
+ * [SectorSetSealDelay](#SectorSetSealDelay)
+ * [SectorStartSealing](#SectorStartSealing)
+ * [SectorTerminate](#SectorTerminate)
+ * [SectorTerminateFlush](#SectorTerminateFlush)
+ * [SectorTerminatePending](#SectorTerminatePending)
+* [Sectors](#Sectors)
+ * [SectorsList](#SectorsList)
+ * [SectorsListInStates](#SectorsListInStates)
+ * [SectorsRefs](#SectorsRefs)
+ * [SectorsStatus](#SectorsStatus)
+ * [SectorsSummary](#SectorsSummary)
+ * [SectorsUnsealPiece](#SectorsUnsealPiece)
+ * [SectorsUpdate](#SectorsUpdate)
+* [Storage](#Storage)
+ * [StorageAddLocal](#StorageAddLocal)
+ * [StorageAttach](#StorageAttach)
+ * [StorageBestAlloc](#StorageBestAlloc)
+ * [StorageDeclareSector](#StorageDeclareSector)
+ * [StorageDropSector](#StorageDropSector)
+ * [StorageFindSector](#StorageFindSector)
+ * [StorageInfo](#StorageInfo)
+ * [StorageList](#StorageList)
+ * [StorageLocal](#StorageLocal)
+ * [StorageLock](#StorageLock)
+ * [StorageReportHealth](#StorageReportHealth)
+ * [StorageStat](#StorageStat)
+ * [StorageTryLock](#StorageTryLock)
+* [Worker](#Worker)
+ * [WorkerConnect](#WorkerConnect)
+ * [WorkerJobs](#WorkerJobs)
+ * [WorkerStats](#WorkerStats)
+##
+
+
+### Closing
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `{}`
+
+### Discover
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "info": {
+ "title": "Lotus RPC API",
+ "version": "1.2.1/generated=2020-11-22T08:22:42-06:00"
+ },
+ "methods": [],
+ "openrpc": "1.2.6"
+}
+```
+
+### Session
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `"07070707-0707-0707-0707-070707070707"`
+
+### Shutdown
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `{}`
+
+### Version
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Version": "string value",
+ "APIVersion": 131328,
+ "BlockDelay": 42
+}
+```
+
+## Actor
+
+
+### ActorAddress
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `"f01234"`
+
+### ActorAddressConfig
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "PreCommitControl": null,
+ "CommitControl": null,
+ "TerminateControl": null,
+ "DealPublishControl": null,
+ "DisableOwnerFallback": true,
+ "DisableWorkerFallback": true
+}
+```
+
+### ActorSectorSize
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "f01234"
+]
+```
+
+Response: `34359738368`
+
+## Auth
+
+
+### AuthNew
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ null
+]
+```
+
+Response: `"Ynl0ZSBhcnJheQ=="`
+
+### AuthVerify
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "string value"
+]
+```
+
+Response: `null`
+
+## Check
+
+
+### CheckProvable
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ 8,
+ null,
+ true
+]
+```
+
+Response:
+```json
+{
+ "123": "can't acquire read lock"
+}
+```
+
+## Compute
+
+
+### ComputeProof
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ null,
+ null
+]
+```
+
+Response: `null`
+
+## Create
+
+
+### CreateBackup
+CreateBackup creates node backup under the specified file name. The
+method requires that the lotus-miner is running with the
+LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
+the path specified when calling CreateBackup is within the base path
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ "string value"
+]
+```
+
+Response: `{}`
+
+## Deals
+
+
+### DealsConsiderOfflineRetrievalDeals
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `true`
+
+### DealsConsiderOfflineStorageDeals
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `true`
+
+### DealsConsiderOnlineRetrievalDeals
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `true`
+
+### DealsConsiderOnlineStorageDeals
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `true`
+
+### DealsConsiderUnverifiedStorageDeals
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `true`
+
+### DealsConsiderVerifiedStorageDeals
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `true`
+
+### DealsImportData
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "string value"
+]
+```
+
+Response: `{}`
+
+### DealsList
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `null`
+
+### DealsPieceCidBlocklist
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `null`
+
+### DealsSetConsiderOfflineRetrievalDeals
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ true
+]
+```
+
+Response: `{}`
+
+### DealsSetConsiderOfflineStorageDeals
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ true
+]
+```
+
+Response: `{}`
+
+### DealsSetConsiderOnlineRetrievalDeals
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ true
+]
+```
+
+Response: `{}`
+
+### DealsSetConsiderOnlineStorageDeals
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ true
+]
+```
+
+Response: `{}`
+
+### DealsSetConsiderUnverifiedStorageDeals
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ true
+]
+```
+
+Response: `{}`
+
+### DealsSetConsiderVerifiedStorageDeals
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ true
+]
+```
+
+Response: `{}`
+
+### DealsSetPieceCidBlocklist
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ null
+]
+```
+
+Response: `{}`
+
+## I
+
+
+### ID
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"`
+
+## Log
+
+
+### LogList
+
+
+Perms: write
+
+Inputs: `null`
+
+Response: `null`
+
+### LogSetLevel
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ "string value",
+ "string value"
+]
+```
+
+Response: `{}`
+
+## Market
+
+
+### MarketCancelDataTransfer
+MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ 3,
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ true
+]
+```
+
+Response: `{}`
+
+### MarketDataTransferUpdates
+
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": null
+ }
+}
+```
+
+### MarketGetAsk
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Ask": {
+ "Price": "0",
+ "VerifiedPrice": "0",
+ "MinPieceSize": 1032,
+ "MaxPieceSize": 1032,
+ "Miner": "f01234",
+ "Timestamp": 10101,
+ "Expiry": 10101,
+ "SeqNo": 42
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ }
+}
+```
+
+### MarketGetDealUpdates
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Proposal": {
+ "PieceCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceSize": 1032,
+ "VerifiedDeal": true,
+ "Client": "f01234",
+ "Provider": "f01234",
+ "Label": "string value",
+ "StartEpoch": 10101,
+ "EndEpoch": 10101,
+ "StoragePricePerEpoch": "0",
+ "ProviderCollateral": "0",
+ "ClientCollateral": "0"
+ },
+ "ClientSignature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "ProposalCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "AddFundsCid": null,
+ "PublishCid": null,
+ "Miner": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Client": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "State": 42,
+ "PiecePath": ".lotusminer/fstmp123",
+ "MetadataPath": ".lotusminer/fstmp123",
+ "SlashEpoch": 10101,
+ "FastRetrieval": true,
+ "Message": "string value",
+ "StoreID": 12,
+ "FundsReserved": "0",
+ "Ref": {
+ "TransferType": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceCid": null,
+ "PieceSize": 1024,
+ "RawBlockSize": 42
+ },
+ "AvailableForRetrieval": true,
+ "DealID": 5432,
+ "CreationTime": "0001-01-01T00:00:00Z",
+ "TransferChannelId": {
+ "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "ID": 3
+ },
+ "SectorNumber": 9
+}
+```
+
+### MarketGetRetrievalAsk
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "PricePerByte": "0",
+ "UnsealPrice": "0",
+ "PaymentInterval": 42,
+ "PaymentIntervalIncrease": 42
+}
+```
+
+### MarketImportDealData
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "string value"
+]
+```
+
+Response: `{}`
+
+### MarketListDataTransfers
+
+
+Perms: write
+
+Inputs: `null`
+
+Response: `null`
+
+### MarketListDeals
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `null`
+
+### MarketListIncompleteDeals
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `null`
+
+### MarketListRetrievalDeals
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `null`
+
+### MarketPendingDeals
+
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Deals": null,
+ "PublishPeriodStart": "0001-01-01T00:00:00Z",
+ "PublishPeriod": 60000000000
+}
+```
+
+### MarketPublishPendingDeals
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `{}`
+
+### MarketRestartDataTransfer
+MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ 3,
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ true
+]
+```
+
+Response: `{}`
+
+### MarketSetAsk
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ "0",
+ "0",
+ 10101,
+ 1032,
+ 1032
+]
+```
+
+Response: `{}`
+
+### MarketSetRetrievalAsk
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "PricePerByte": "0",
+ "UnsealPrice": "0",
+ "PaymentInterval": 42,
+ "PaymentIntervalIncrease": 42
+ }
+]
+```
+
+Response: `{}`
+
+## Mining
+
+
+### MiningBase
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Cids": null,
+ "Blocks": null,
+ "Height": 0
+}
+```
+
+## Net
+
+
+### NetAddrsListen
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Addrs": []
+}
+```
+
+### NetAgentVersion
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+]
+```
+
+Response: `"string value"`
+
+### NetAutoNatStatus
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Reachability": 1,
+ "PublicAddr": "string value"
+}
+```
+
+### NetBandwidthStats
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "TotalIn": 9,
+ "TotalOut": 9,
+ "RateIn": 12.3,
+ "RateOut": 12.3
+}
+```
+
+### NetBandwidthStatsByPeer
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "12D3KooWSXmXLJmBR1M7i9RW9GQPNUhZSzXKzxDHWtAgNuJAbyEJ": {
+ "TotalIn": 174000,
+ "TotalOut": 12500,
+ "RateIn": 100,
+ "RateOut": 50
+ }
+}
+```
+
+### NetBandwidthStatsByProtocol
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "/fil/hello/1.0.0": {
+ "TotalIn": 174000,
+ "TotalOut": 12500,
+ "RateIn": 100,
+ "RateOut": 50
+ }
+}
+```
+
+### NetBlockAdd
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Peers": null,
+ "IPAddrs": null,
+ "IPSubnets": null
+ }
+]
+```
+
+Response: `{}`
+
+### NetBlockList
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Peers": null,
+ "IPAddrs": null,
+ "IPSubnets": null
+}
+```
+
+### NetBlockRemove
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Peers": null,
+ "IPAddrs": null,
+ "IPSubnets": null
+ }
+]
+```
+
+Response: `{}`
+
+### NetConnect
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ {
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Addrs": []
+ }
+]
+```
+
+Response: `{}`
+
+### NetConnectedness
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+]
+```
+
+Response: `1`
+
+### NetDisconnect
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+]
+```
+
+Response: `{}`
+
+### NetFindPeer
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+]
+```
+
+Response:
+```json
+{
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Addrs": []
+}
+```
+
+### NetPeerInfo
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+]
+```
+
+Response:
+```json
+{
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Agent": "string value",
+ "Addrs": null,
+ "Protocols": null,
+ "ConnMgrMeta": {
+ "FirstSeen": "0001-01-01T00:00:00Z",
+ "Value": 123,
+ "Tags": {
+ "name": 42
+ },
+ "Conns": {
+ "name": "2021-03-08T22:52:18Z"
+ }
+ }
+}
+```
+
+### NetPeers
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `null`
+
+### NetPubsubScores
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `null`
+
+## Pieces
+
+
+### PiecesGetCIDInfo
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+```json
+{
+ "CID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceBlockLocations": null
+}
+```
+
+### PiecesGetPieceInfo
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+```json
+{
+ "PieceCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Deals": null
+}
+```
+
+### PiecesListCidInfos
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `null`
+
+### PiecesListPieces
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `null`
+
+## Pledge
+
+
+### PledgeSector
+Temp api for testing
+
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Miner": 1000,
+ "Number": 9
+}
+```
+
+## Return
+
+
+### ReturnAddPiece
+storiface.WorkerReturn
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+ },
+ {
+ "Size": 1032,
+ "PieceCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+ },
+ {
+ "Code": 0,
+ "Message": "string value"
+ }
+]
+```
+
+Response: `{}`
+
+### ReturnFetch
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+ },
+ {
+ "Code": 0,
+ "Message": "string value"
+ }
+]
+```
+
+Response: `{}`
+
+### ReturnFinalizeSector
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+ },
+ {
+ "Code": 0,
+ "Message": "string value"
+ }
+]
+```
+
+Response: `{}`
+
+### ReturnMoveStorage
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+ },
+ {
+ "Code": 0,
+ "Message": "string value"
+ }
+]
+```
+
+Response: `{}`
+
+### ReturnReadPiece
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+ },
+ true,
+ {
+ "Code": 0,
+ "Message": "string value"
+ }
+]
+```
+
+Response: `{}`
+
+### ReturnReleaseUnsealed
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+ },
+ {
+ "Code": 0,
+ "Message": "string value"
+ }
+]
+```
+
+Response: `{}`
+
+### ReturnSealCommit1
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+ },
+ null,
+ {
+ "Code": 0,
+ "Message": "string value"
+ }
+]
+```
+
+Response: `{}`
+
+### ReturnSealCommit2
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+ },
+ null,
+ {
+ "Code": 0,
+ "Message": "string value"
+ }
+]
+```
+
+Response: `{}`
+
+### ReturnSealPreCommit1
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+ },
+ null,
+ {
+ "Code": 0,
+ "Message": "string value"
+ }
+]
+```
+
+Response: `{}`
+
+### ReturnSealPreCommit2
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+ },
+ {
+ "Unsealed": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Sealed": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+ },
+ {
+ "Code": 0,
+ "Message": "string value"
+ }
+]
+```
+
+Response: `{}`
+
+### ReturnUnsealPiece
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+ },
+ {
+ "Code": 0,
+ "Message": "string value"
+ }
+]
+```
+
+Response: `{}`
+
+## Sealing
+
+
+### SealingAbort
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+ }
+]
+```
+
+Response: `{}`
+
+### SealingSchedDiag
+SealingSchedDiag dumps internal sealing scheduler state
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ true
+]
+```
+
+Response: `{}`
+
+## Sector
+
+
+### SectorAddPieceToAny
+Add piece to an open sector. If no sectors with enough space are open,
+either a new sector will be created, or this call will block until more
+sectors can be created.
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ 1024,
+ {},
+ {
+ "PublishCid": null,
+ "DealID": 5432,
+ "DealProposal": {
+ "PieceCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceSize": 1032,
+ "VerifiedDeal": true,
+ "Client": "f01234",
+ "Provider": "f01234",
+ "Label": "string value",
+ "StartEpoch": 10101,
+ "EndEpoch": 10101,
+ "StoragePricePerEpoch": "0",
+ "ProviderCollateral": "0",
+ "ClientCollateral": "0"
+ },
+ "DealSchedule": {
+ "StartEpoch": 10101,
+ "EndEpoch": 10101
+ },
+ "KeepUnsealed": true
+ }
+]
+```
+
+Response:
+```json
+{
+ "Sector": 9,
+ "Offset": 1032
+}
+```
+
+### SectorCommitFlush
+SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit.
+Returns null if message wasn't sent
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `null`
+
+### SectorCommitPending
+SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `null`
+
+### SectorGetExpectedSealDuration
+SectorGetExpectedSealDuration gets the expected time for a sector to seal
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `60000000000`
+
+### SectorGetSealDelay
+SectorGetSealDelay gets the time that a newly-created sector
+waits for more deals before it starts sealing
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `60000000000`
+
+### SectorMarkForUpgrade
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ 9
+]
+```
+
+Response: `{}`
+
+### SectorPreCommitFlush
+SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
+Returns null if message wasn't sent
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `null`
+
+### SectorPreCommitPending
+SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `null`
+
+### SectorRemove
+SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can
+be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties.
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ 9
+]
+```
+
+Response: `{}`
+
+### SectorSetExpectedSealDuration
+SectorSetExpectedSealDuration sets the expected time for a sector to seal
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ 60000000000
+]
+```
+
+Response: `{}`
+
+### SectorSetSealDelay
+SectorSetSealDelay sets the time that a newly-created sector
+waits for more deals before it starts sealing
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ 60000000000
+]
+```
+
+Response: `{}`
+
+### SectorStartSealing
+SectorStartSealing can be called on sectors in Empty or WaitDeals states
+to trigger sealing early
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ 9
+]
+```
+
+Response: `{}`
+
+### SectorTerminate
+SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then
+automatically removes it from storage
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ 9
+]
+```
+
+Response: `{}`
+
+### SectorTerminateFlush
+SectorTerminateFlush immediately sends a terminate message with sectors batched for termination.
+Returns null if message wasn't sent
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `null`
+
+### SectorTerminatePending
+SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `null`
+
+## Sectors
+
+
+### SectorsList
+List all staged sectors
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+[
+ 123,
+ 124
+]
+```
+
+### SectorsListInStates
+List sectors in particular states
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ null
+]
+```
+
+Response:
+```json
+[
+ 123,
+ 124
+]
+```
+
+### SectorsRefs
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "98000": [
+ {
+ "SectorID": 100,
+ "Offset": 10485760,
+ "Size": 1048576
+ }
+ ]
+}
+```
+
+### SectorsStatus
+Get the status of a given sector by ID
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ 9,
+ true
+]
+```
+
+Response:
+```json
+{
+ "SectorID": 9,
+ "State": "Proving",
+ "CommD": null,
+ "CommR": null,
+ "Proof": "Ynl0ZSBhcnJheQ==",
+ "Deals": null,
+ "Ticket": {
+ "Value": null,
+ "Epoch": 10101
+ },
+ "Seed": {
+ "Value": null,
+ "Epoch": 10101
+ },
+ "PreCommitMsg": null,
+ "CommitMsg": null,
+ "Retries": 42,
+ "ToUpgrade": true,
+ "LastErr": "string value",
+ "Log": null,
+ "SealProof": 8,
+ "Activation": 10101,
+ "Expiration": 10101,
+ "DealWeight": "0",
+ "VerifiedDealWeight": "0",
+ "InitialPledge": "0",
+ "OnTime": 10101,
+ "Early": 10101
+}
+```
+
+### SectorsSummary
+Get summary info of sectors
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Proving": 120
+}
+```
+
+### SectorsUnsealPiece
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "ID": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ProofType": 8
+ },
+ 1040384,
+ 1024,
+ null,
+ null
+]
+```
+
+Response: `{}`
+
+### SectorsUpdate
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ 9,
+ "Proving"
+]
+```
+
+Response: `{}`
+
+## Storage
+
+
+### StorageAddLocal
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ "string value"
+]
+```
+
+Response: `{}`
+
+### StorageAttach
+stores.SectorIndex
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8",
+ "URLs": null,
+ "Weight": 42,
+ "MaxStorage": 42,
+ "CanSeal": true,
+ "CanStore": true
+ },
+ {
+ "Capacity": 9,
+ "Available": 9,
+ "FSAvailable": 9,
+ "Reserved": 9,
+ "Max": 9,
+ "Used": 9
+ }
+]
+```
+
+Response: `{}`
+
+### StorageBestAlloc
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ 1,
+ 34359738368,
+ "sealing"
+]
+```
+
+Response: `null`
+
+### StorageDeclareSector
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8",
+ {
+ "Miner": 1000,
+ "Number": 9
+ },
+ 1,
+ true
+]
+```
+
+Response: `{}`
+
+### StorageDropSector
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8",
+ {
+ "Miner": 1000,
+ "Number": 9
+ },
+ 1
+]
+```
+
+Response: `{}`
+
+### StorageFindSector
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Miner": 1000,
+ "Number": 9
+ },
+ 1,
+ 34359738368,
+ true
+]
+```
+
+Response: `null`
+
+### StorageInfo
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8"
+]
+```
+
+Response:
+```json
+{
+ "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8",
+ "URLs": null,
+ "Weight": 42,
+ "MaxStorage": 42,
+ "CanSeal": true,
+ "CanStore": true
+}
+```
+
+### StorageList
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": [
+ {
+ "Miner": 1000,
+ "Number": 100,
+ "SectorFileType": 2
+ }
+ ]
+}
+```
+
+### StorageLocal
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path"
+}
+```
+
+### StorageLock
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Miner": 1000,
+ "Number": 9
+ },
+ 1,
+ 1
+]
+```
+
+Response: `{}`
+
+### StorageReportHealth
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8",
+ {
+ "Stat": {
+ "Capacity": 9,
+ "Available": 9,
+ "FSAvailable": 9,
+ "Reserved": 9,
+ "Max": 9,
+ "Used": 9
+ },
+ "Err": "string value"
+ }
+]
+```
+
+Response: `{}`
+
+### StorageStat
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8"
+]
+```
+
+Response:
+```json
+{
+ "Capacity": 9,
+ "Available": 9,
+ "FSAvailable": 9,
+ "Reserved": 9,
+ "Max": 9,
+ "Used": 9
+}
+```
+
+### StorageTryLock
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Miner": 1000,
+ "Number": 9
+ },
+ 1,
+ 1
+]
+```
+
+Response: `true`
+
+## Worker
+
+
+### WorkerConnect
+WorkerConnect tells the node to connect to workers RPC
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ "string value"
+]
+```
+
+Response: `{}`
+
+### WorkerJobs
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "ef8d99a2-6865-4189-8ffa-9fef0f806eee": [
+ {
+ "ID": {
+ "Sector": {
+ "Miner": 1000,
+ "Number": 100
+ },
+ "ID": "76081ba0-61bd-45a5-bc08-af05f1c26e5d"
+ },
+ "Sector": {
+ "Miner": 1000,
+ "Number": 100
+ },
+ "Task": "seal/v0/precommit/2",
+ "RunWait": 0,
+ "Start": "2020-11-12T09:22:07Z",
+ "Hostname": "host"
+ }
+ ]
+}
+```
+
+### WorkerStats
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "ef8d99a2-6865-4189-8ffa-9fef0f806eee": {
+ "Info": {
+ "Hostname": "host",
+ "IgnoreResources": false,
+ "Resources": {
+ "MemPhysical": 274877906944,
+ "MemSwap": 128849018880,
+ "MemReserved": 2147483648,
+ "CPUs": 64,
+ "GPUs": [
+ "aGPU 1337"
+ ]
+ }
+ },
+ "Enabled": true,
+ "MemUsedMin": 0,
+ "MemUsedMax": 0,
+ "GpuUsed": false,
+ "CpuUse": 0
+ }
+}
+```
+
diff --git a/documentation/en/api-v0-methods-worker.md b/documentation/en/api-v0-methods-worker.md
new file mode 100644
index 00000000000..c620113f489
--- /dev/null
+++ b/documentation/en/api-v0-methods-worker.md
@@ -0,0 +1,564 @@
+# Groups
+* [](#)
+ * [Enabled](#Enabled)
+ * [Fetch](#Fetch)
+ * [Info](#Info)
+ * [Paths](#Paths)
+ * [Remove](#Remove)
+ * [Session](#Session)
+ * [Version](#Version)
+* [Add](#Add)
+ * [AddPiece](#AddPiece)
+* [Finalize](#Finalize)
+ * [FinalizeSector](#FinalizeSector)
+* [Move](#Move)
+ * [MoveStorage](#MoveStorage)
+* [Process](#Process)
+ * [ProcessSession](#ProcessSession)
+* [Release](#Release)
+ * [ReleaseUnsealed](#ReleaseUnsealed)
+* [Seal](#Seal)
+ * [SealCommit1](#SealCommit1)
+ * [SealCommit2](#SealCommit2)
+ * [SealPreCommit1](#SealPreCommit1)
+ * [SealPreCommit2](#SealPreCommit2)
+* [Set](#Set)
+ * [SetEnabled](#SetEnabled)
+* [Storage](#Storage)
+ * [StorageAddLocal](#StorageAddLocal)
+* [Task](#Task)
+ * [TaskDisable](#TaskDisable)
+ * [TaskEnable](#TaskEnable)
+ * [TaskTypes](#TaskTypes)
+* [Unseal](#Unseal)
+ * [UnsealPiece](#UnsealPiece)
+* [Wait](#Wait)
+ * [WaitQuiet](#WaitQuiet)
+##
+
+
+### Enabled
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `true`
+
+### Fetch
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "ID": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ProofType": 8
+ },
+ 1,
+ "sealing",
+ "move"
+]
+```
+
+Response:
+```json
+{
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+}
+```
+
+### Info
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Hostname": "string value",
+ "IgnoreResources": true,
+ "Resources": {
+ "MemPhysical": 42,
+ "MemSwap": 42,
+ "MemReserved": 42,
+ "CPUs": 42,
+ "GPUs": null
+ }
+}
+```
+
+### Paths
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `null`
+
+### Remove
+Storage / Other
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Miner": 1000,
+ "Number": 9
+ }
+]
+```
+
+Response: `{}`
+
+### Session
+Like ProcessSession, but returns an error when worker is disabled
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `"07070707-0707-0707-0707-070707070707"`
+
+### Version
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `131328`
+
+## Add
+
+
+### AddPiece
+storiface.WorkerCalls
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "ID": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ProofType": 8
+ },
+ null,
+ 1024,
+ {}
+]
+```
+
+Response:
+```json
+{
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+}
+```
+
+## Finalize
+
+
+### FinalizeSector
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "ID": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ProofType": 8
+ },
+ null
+]
+```
+
+Response:
+```json
+{
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+}
+```
+
+## Move
+
+
+### MoveStorage
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "ID": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ProofType": 8
+ },
+ 1
+]
+```
+
+Response:
+```json
+{
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+}
+```
+
+## Process
+
+
+### ProcessSession
+returns the UUID of the worker session, generated randomly when the
+worker process starts
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `"07070707-0707-0707-0707-070707070707"`
+
+## Release
+
+
+### ReleaseUnsealed
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "ID": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ProofType": 8
+ },
+ null
+]
+```
+
+Response:
+```json
+{
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+}
+```
+
+## Seal
+
+
+### SealCommit1
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "ID": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ProofType": 8
+ },
+ null,
+ null,
+ null,
+ {
+ "Unsealed": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Sealed": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+ }
+]
+```
+
+Response:
+```json
+{
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+}
+```
+
+### SealCommit2
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "ID": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ProofType": 8
+ },
+ null
+]
+```
+
+Response:
+```json
+{
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+}
+```
+
+### SealPreCommit1
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "ID": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ProofType": 8
+ },
+ null,
+ null
+]
+```
+
+Response:
+```json
+{
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+}
+```
+
+### SealPreCommit2
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "ID": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ProofType": 8
+ },
+ null
+]
+```
+
+Response:
+```json
+{
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+}
+```
+
+## Set
+
+
+### SetEnabled
+SetEnabled marks the worker as enabled/disabled. Note that this setting
+may take a few seconds to propagate to the task scheduler
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ true
+]
+```
+
+Response: `{}`
+
+## Storage
+
+
+### StorageAddLocal
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ "string value"
+]
+```
+
+Response: `{}`
+
+## Task
+
+
+### TaskDisable
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ "seal/v0/commit/2"
+]
+```
+
+Response: `{}`
+
+### TaskEnable
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ "seal/v0/commit/2"
+]
+```
+
+Response: `{}`
+
+### TaskTypes
+TaskType -> Weight
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "seal/v0/precommit/2": {}
+}
+```
+
+## Unseal
+
+
+### UnsealPiece
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "ID": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ProofType": 8
+ },
+ 1040384,
+ 1024,
+ null,
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+```json
+{
+ "Sector": {
+ "Miner": 1000,
+ "Number": 9
+ },
+ "ID": "07070707-0707-0707-0707-070707070707"
+}
+```
+
+## Wait
+
+
+### WaitQuiet
+WaitQuiet blocks until there are no tasks running
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `{}`
+
diff --git a/documentation/en/api-methods.md b/documentation/en/api-v0-methods.md
similarity index 84%
rename from documentation/en/api-methods.md
rename to documentation/en/api-v0-methods.md
index 4e18d85c919..4466cde8c88 100644
--- a/documentation/en/api-methods.md
+++ b/documentation/en/api-v0-methods.md
@@ -1,6 +1,7 @@
# Groups
* [](#)
* [Closing](#Closing)
+ * [Discover](#Discover)
* [Session](#Session)
* [Shutdown](#Shutdown)
* [Version](#Version)
@@ -16,6 +17,7 @@
* [ChainGetBlockMessages](#ChainGetBlockMessages)
* [ChainGetGenesis](#ChainGetGenesis)
* [ChainGetMessage](#ChainGetMessage)
+ * [ChainGetMessagesInTipset](#ChainGetMessagesInTipset)
* [ChainGetNode](#ChainGetNode)
* [ChainGetParentMessages](#ChainGetParentMessages)
* [ChainGetParentReceipts](#ChainGetParentReceipts)
@@ -34,6 +36,7 @@
* [Client](#Client)
* [ClientCalcCommP](#ClientCalcCommP)
* [ClientCancelDataTransfer](#ClientCancelDataTransfer)
+ * [ClientCancelRetrievalDeal](#ClientCancelRetrievalDeal)
* [ClientDataTransferUpdates](#ClientDataTransferUpdates)
* [ClientDealPieceCID](#ClientDealPieceCID)
* [ClientDealSize](#ClientDealSize)
@@ -42,11 +45,13 @@
* [ClientGetDealInfo](#ClientGetDealInfo)
* [ClientGetDealStatus](#ClientGetDealStatus)
* [ClientGetDealUpdates](#ClientGetDealUpdates)
+ * [ClientGetRetrievalUpdates](#ClientGetRetrievalUpdates)
* [ClientHasLocal](#ClientHasLocal)
* [ClientImport](#ClientImport)
* [ClientListDataTransfers](#ClientListDataTransfers)
* [ClientListDeals](#ClientListDeals)
* [ClientListImports](#ClientListImports)
+ * [ClientListRetrievals](#ClientListRetrievals)
* [ClientMinerQueryOffer](#ClientMinerQueryOffer)
* [ClientQueryAsk](#ClientQueryAsk)
* [ClientRemoveImport](#ClientRemoveImport)
@@ -55,6 +60,7 @@
* [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds)
* [ClientRetrieveWithEvents](#ClientRetrieveWithEvents)
* [ClientStartDeal](#ClientStartDeal)
+ * [ClientStatelessDeal](#ClientStatelessDeal)
* [Create](#Create)
* [CreateBackup](#CreateBackup)
* [Gas](#Gas)
@@ -68,7 +74,11 @@
* [LogList](#LogList)
* [LogSetLevel](#LogSetLevel)
* [Market](#Market)
- * [MarketEnsureAvailable](#MarketEnsureAvailable)
+ * [MarketAddBalance](#MarketAddBalance)
+ * [MarketGetReserved](#MarketGetReserved)
+ * [MarketReleaseFunds](#MarketReleaseFunds)
+ * [MarketReserveFunds](#MarketReserveFunds)
+ * [MarketWithdraw](#MarketWithdraw)
* [Miner](#Miner)
* [MinerCreateBlock](#MinerCreateBlock)
* [MinerGetBaseInfo](#MinerGetBaseInfo)
@@ -95,6 +105,7 @@
* [MsigCancel](#MsigCancel)
* [MsigCreate](#MsigCreate)
* [MsigGetAvailableBalance](#MsigGetAvailableBalance)
+ * [MsigGetPending](#MsigGetPending)
* [MsigGetVested](#MsigGetVested)
* [MsigGetVestingSchedule](#MsigGetVestingSchedule)
* [MsigPropose](#MsigPropose)
@@ -109,10 +120,14 @@
* [NetBandwidthStats](#NetBandwidthStats)
* [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer)
* [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol)
+ * [NetBlockAdd](#NetBlockAdd)
+ * [NetBlockList](#NetBlockList)
+ * [NetBlockRemove](#NetBlockRemove)
* [NetConnect](#NetConnect)
* [NetConnectedness](#NetConnectedness)
* [NetDisconnect](#NetDisconnect)
* [NetFindPeer](#NetFindPeer)
+ * [NetPeerInfo](#NetPeerInfo)
* [NetPeers](#NetPeers)
* [NetPubsubScores](#NetPubsubScores)
* [Paych](#Paych)
@@ -140,6 +155,7 @@
* [StateCirculatingSupply](#StateCirculatingSupply)
* [StateCompute](#StateCompute)
* [StateDealProviderCollateralBounds](#StateDealProviderCollateralBounds)
+ * [StateDecodeParams](#StateDecodeParams)
* [StateGetActor](#StateGetActor)
* [StateGetReceipt](#StateGetReceipt)
* [StateListActors](#StateListActors)
@@ -169,6 +185,7 @@
* [StateReadState](#StateReadState)
* [StateReplay](#StateReplay)
* [StateSearchMsg](#StateSearchMsg)
+ * [StateSearchMsgLimited](#StateSearchMsgLimited)
* [StateSectorExpiration](#StateSectorExpiration)
* [StateSectorGetInfo](#StateSectorGetInfo)
* [StateSectorPartition](#StateSectorPartition)
@@ -215,6 +232,25 @@ Inputs: `null`
Response: `{}`
+### Discover
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "info": {
+ "title": "Lotus RPC API",
+ "version": "1.2.1/generated=2020-11-22T08:22:42-06:00"
+ },
+ "methods": [],
+ "openrpc": "1.2.6"
+}
+```
+
### Session
@@ -244,7 +280,7 @@ Response:
```json
{
"Version": "string value",
- "APIVersion": 4352,
+ "APIVersion": 131328,
"BlockDelay": 42
}
```
@@ -414,6 +450,17 @@ Response:
### ChainGetBlockMessages
ChainGetBlockMessages returns messages stored in the specified block.
+Note: If there are multiple blocks in a tipset, it's likely that some
+messages will be duplicated. It's also possible for blocks in a tipset to have
+different messages from the same sender at the same nonce. When that happens,
+only the first message (in a block with lowest ticket) will be considered
+for execution
+
+NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK
+
+DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET
+Use ChainGetParentMessages, which will perform correct message deduplication
+
Perms: read
@@ -487,8 +534,30 @@ Response:
}
```
+### ChainGetMessagesInTipset
+ChainGetMessagesInTipset returns the messages stored in the specified tipset
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `null`
+
### ChainGetNode
-There are not yet any comments for this method.
+
Perms: read
@@ -529,7 +598,8 @@ Response: `null`
### ChainGetParentReceipts
ChainGetParentReceipts returns receipts for messages in parent tipset of
-the specified block.
+the specified block. The receipts in the list returned is one-to-one with the
+messages returned by a call to ChainGetParentMessages with the same blockCid.
Perms: read
@@ -842,7 +912,7 @@ retrieval markets as a client
ClientCalcCommP calculates the CommP for a specified file
-Perms: read
+Perms: write
Inputs:
```json
@@ -878,8 +948,23 @@ Inputs:
Response: `{}`
+### ClientCancelRetrievalDeal
+ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ 5
+]
+```
+
+Response: `{}`
+
### ClientDataTransferUpdates
-There are not yet any comments for this method.
+
Perms: write
@@ -898,7 +983,10 @@ Response:
"Voucher": "string value",
"Message": "string value",
"OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
- "Transferred": 42
+ "Transferred": 42,
+ "Stages": {
+ "Stages": null
+ }
}
```
@@ -1011,6 +1099,9 @@ Response:
},
"State": 42,
"Message": "string value",
+ "DealStages": {
+ "Stages": null
+ },
"Provider": "f01234",
"DataRef": {
"TransferType": "string value",
@@ -1018,7 +1109,8 @@ Response:
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"PieceCid": null,
- "PieceSize": 1024
+ "PieceSize": 1024,
+ "RawBlockSize": 42
},
"PieceCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -1028,7 +1120,28 @@ Response:
"Duration": 42,
"DealID": 5432,
"CreationTime": "0001-01-01T00:00:00Z",
- "Verified": true
+ "Verified": true,
+ "TransferChannelID": {
+ "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "ID": 3
+ },
+ "DataTransfer": {
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": null
+ }
+ }
}
```
@@ -1051,7 +1164,7 @@ Response: `"string value"`
ClientGetDealUpdates returns the status of updated deals
-Perms: read
+Perms: write
Inputs: `null`
@@ -1063,6 +1176,9 @@ Response:
},
"State": 42,
"Message": "string value",
+ "DealStages": {
+ "Stages": null
+ },
"Provider": "f01234",
"DataRef": {
"TransferType": "string value",
@@ -1070,7 +1186,8 @@ Response:
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"PieceCid": null,
- "PieceSize": 1024
+ "PieceSize": 1024,
+ "RawBlockSize": 42
},
"PieceCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -1080,7 +1197,76 @@ Response:
"Duration": 42,
"DealID": 5432,
"CreationTime": "0001-01-01T00:00:00Z",
- "Verified": true
+ "Verified": true,
+ "TransferChannelID": {
+ "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "ID": 3
+ },
+ "DataTransfer": {
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": null
+ }
+ }
+}
+```
+
+### ClientGetRetrievalUpdates
+ClientGetRetrievalUpdates returns status of updated retrieval deals
+
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "PayloadCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "ID": 5,
+ "PieceCID": null,
+ "PricePerByte": "0",
+ "UnsealPrice": "0",
+ "Status": 0,
+ "Message": "string value",
+ "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "BytesReceived": 42,
+ "BytesPaidFor": 42,
+ "TotalPaid": "0",
+ "TransferChannelID": {
+ "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "ID": 3
+ },
+ "DataTransfer": {
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": null
+ }
+ }
}
```
@@ -1151,6 +1337,17 @@ Response: `null`
ClientListImports lists imported files and their root CIDs
+Perms: write
+
+Inputs: `null`
+
+Response: `null`
+
+### ClientListRetrievals
+ClientListRetrievals returns information about retrievals made by the local client
+
+
+
Perms: write
Inputs: `null`
@@ -1197,7 +1394,6 @@ Response:
```
### ClientQueryAsk
-ClientQueryAsk returns a signed StorageAsk from the specified miner.
Perms: read
@@ -1271,6 +1467,7 @@ Inputs:
},
"Piece": null,
"Size": 42,
+ "LocalStore": 12,
"Total": "0",
"UnsealPrice": "0",
"PaymentInterval": 42,
@@ -1324,6 +1521,7 @@ Inputs:
},
"Piece": null,
"Size": 42,
+ "LocalStore": 12,
"Total": "0",
"UnsealPrice": "0",
"PaymentInterval": 42,
@@ -1370,7 +1568,41 @@ Inputs:
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"PieceCid": null,
- "PieceSize": 1024
+ "PieceSize": 1024,
+ "RawBlockSize": 42
+ },
+ "Wallet": "f01234",
+ "Miner": "f01234",
+ "EpochPrice": "0",
+ "MinBlocksDuration": 42,
+ "ProviderCollateral": "0",
+ "DealStartEpoch": 10101,
+ "FastRetrieval": true,
+ "VerifiedDeal": true
+ }
+]
+```
+
+Response: `null`
+
+### ClientStatelessDeal
+ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ {
+ "Data": {
+ "TransferType": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceCid": null,
+ "PieceSize": 1024,
+ "RawBlockSize": 42
},
"Wallet": "f01234",
"Miner": "f01234",
@@ -1611,8 +1843,83 @@ Response: `{}`
## Market
-### MarketEnsureAvailable
-MarketFreeBalance
+### MarketAddBalance
+MarketAddBalance adds funds to the market actor
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ "f01234",
+ "0"
+]
+```
+
+Response:
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+### MarketGetReserved
+MarketGetReserved gets the amount of funds that are currently reserved for the address
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234"
+]
+```
+
+Response: `"0"`
+
+### MarketReleaseFunds
+MarketReleaseFunds releases funds reserved by MarketReserveFunds
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ "0"
+]
+```
+
+Response: `{}`
+
+### MarketReserveFunds
+MarketReserveFunds reserves funds for a deal
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ "f01234",
+ "0"
+]
+```
+
+Response:
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+### MarketWithdraw
+MarketWithdraw withdraws unlocked funds from the market actor
Perms: sign
@@ -1637,7 +1944,7 @@ Response:
### MinerCreateBlock
-There are not yet any comments for this method.
+
Perms: write
@@ -2053,7 +2360,7 @@ Response: `null`
MpoolSetConfig sets the mpool config to (a copy of) the supplied config
-Perms: write
+Perms: admin
Inputs:
```json
@@ -2072,7 +2379,7 @@ Inputs:
Response: `{}`
### MpoolSub
-There are not yet any comments for this method.
+
Perms: read
@@ -2221,7 +2528,7 @@ using both transaction ID and a hash of the parameters used in the
proposal. This method of approval can be used to ensure you only approve
exactly the transaction you think you are.
It takes the following params: , , , , ,
-, ,
+, ,
Perms: sign
@@ -2325,6 +2632,31 @@ Inputs:
Response: `"0"`
+### MsigGetPending
+MsigGetPending returns pending transactions for the given multisig
+wallet. Once pending transactions are fully approved, they will no longer
+appear here.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `null`
+
### MsigGetVested
MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
It takes the following params: , ,
@@ -2532,8 +2864,8 @@ Inputs: `null`
Response:
```json
{
- "Addrs": null,
- "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Addrs": []
}
```
@@ -2621,6 +2953,58 @@ Response:
}
```
+### NetBlockAdd
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Peers": null,
+ "IPAddrs": null,
+ "IPSubnets": null
+ }
+]
+```
+
+Response: `{}`
+
+### NetBlockList
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Peers": null,
+ "IPAddrs": null,
+ "IPSubnets": null
+}
+```
+
+### NetBlockRemove
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Peers": null,
+ "IPAddrs": null,
+ "IPSubnets": null
+ }
+]
+```
+
+Response: `{}`
+
### NetConnect
@@ -2630,8 +3014,8 @@ Inputs:
```json
[
{
- "Addrs": null,
- "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Addrs": []
}
]
```
@@ -2681,8 +3065,40 @@ Inputs:
Response:
```json
{
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Addrs": []
+}
+```
+
+### NetPeerInfo
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+]
+```
+
+Response:
+```json
+{
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Agent": "string value",
"Addrs": null,
- "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"
+ "Protocols": null,
+ "ConnMgrMeta": {
+ "FirstSeen": "0001-01-01T00:00:00Z",
+ "Value": 123,
+ "Tags": {
+ "name": 42
+ },
+ "Conns": {
+ "name": "2021-03-08T22:52:18Z"
+ }
+ }
}
```
@@ -2709,7 +3125,7 @@ The Paych methods are for interacting with and managing payment channels
### PaychAllocateLane
-There are not yet any comments for this method.
+
Perms: sign
@@ -2723,7 +3139,7 @@ Inputs:
Response: `42`
### PaychAvailableFunds
-There are not yet any comments for this method.
+
Perms: sign
@@ -2749,7 +3165,7 @@ Response:
```
### PaychAvailableFundsByFromTo
-There are not yet any comments for this method.
+
Perms: sign
@@ -2776,7 +3192,7 @@ Response:
```
### PaychCollect
-There are not yet any comments for this method.
+
Perms: sign
@@ -2819,7 +3235,7 @@ Response:
```
### PaychGetWaitReady
-There are not yet any comments for this method.
+
Perms: sign
@@ -2835,7 +3251,7 @@ Inputs:
Response: `"f01234"`
### PaychList
-There are not yet any comments for this method.
+
Perms: read
@@ -2844,7 +3260,7 @@ Inputs: `null`
Response: `null`
### PaychNewPayment
-There are not yet any comments for this method.
+
Perms: sign
@@ -2869,7 +3285,7 @@ Response:
```
### PaychSettle
-There are not yet any comments for this method.
+
Perms: sign
@@ -2888,7 +3304,7 @@ Response:
```
### PaychStatus
-There are not yet any comments for this method.
+
Perms: read
@@ -2908,7 +3324,7 @@ Response:
```
### PaychVoucherAdd
-There are not yet any comments for this method.
+
Perms: write
@@ -2944,7 +3360,7 @@ Inputs:
Response: `"0"`
### PaychVoucherCheckSpendable
-There are not yet any comments for this method.
+
Perms: read
@@ -2980,7 +3396,7 @@ Inputs:
Response: `true`
### PaychVoucherCheckValid
-There are not yet any comments for this method.
+
Perms: read
@@ -3014,7 +3430,7 @@ Inputs:
Response: `{}`
### PaychVoucherCreate
-There are not yet any comments for this method.
+
Perms: sign
@@ -3055,7 +3471,7 @@ Response:
```
### PaychVoucherList
-There are not yet any comments for this method.
+
Perms: write
@@ -3069,7 +3485,7 @@ Inputs:
Response: `null`
### PaychVoucherSubmit
-There are not yet any comments for this method.
+
Perms: sign
@@ -3111,7 +3527,7 @@ Response:
## State
The State methods are used to query, inspect, and interact with chain state.
-Most methods take a TipSetKey as a parameter. The state looked up is the state at that tipset.
+Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used.
@@ -3164,6 +3580,10 @@ Response: `null`
### StateCall
StateCall runs the given message and returns its result without any persisted changes.
+StateCall applies the message to the tipset's parent state. The
+message is not applied on-top-of the messages in the passed-in
+tipset.
+
Perms: read
@@ -3327,6 +3747,36 @@ Response: `"0"`
StateCompute is a flexible command that applies the given messages on the given tipset.
The messages are run as though the VM were at the provided height.
+When called, StateCompute will:
+- Load the provided tipset, or use the current chain head if not provided
+- Compute the tipset state of the provided tipset on top of the parent state
+ - (note that this step runs before vmheight is applied to the execution)
+ - Execute state upgrade if any were scheduled at the epoch, or in null
+ blocks preceding the tipset
+ - Call the cron actor on null blocks preceding the tipset
+ - For each block in the tipset
+ - Apply messages in blocks in the specified tipset
+ - Award block reward by calling the reward actor
+ - Call the cron actor for the current epoch
+- If the specified vmheight is higher than the current epoch, apply any
+ needed state upgrades to the state
+- Apply the specified messages to the state
+
+The vmheight parameter sets VM execution epoch, and can be used to simulate
+message execution in different network versions. If the specified vmheight
+epoch is higher than the epoch of the specified tipset, any state upgrades
+until the vmheight will be executed on the state before applying messages
+specified by the user.
+
+Note that the initial tipset state computation is not affected by the
+vmheight parameter - only the messages in the `apply` set are
+
+If the caller wants to simply compute the state, vmheight should be set to
+the epoch of the specified tipset.
+
+Messages in the `apply` parameter must have the correct nonces, and gas
+values set.
+
Perms: read
@@ -3387,6 +3837,31 @@ Response:
}
```
+### StateDecodeParams
+StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "f01234",
+ 1,
+ "Ynl0ZSBhcnJheQ==",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `{}`
+
### StateGetActor
StateGetActor returns the indicated actor's nonce and balance.
@@ -3423,7 +3898,15 @@ Response:
```
### StateGetReceipt
-StateGetReceipt returns the message receipt for the given message
+StateGetReceipt returns the message receipt for the given message or for a
+matching gas-repriced replacing message
+
+NOTE: If the requested message was replaced, this method will return the receipt
+for the replacing message - if the caller needs the receipt for exactly the
+requested message, use StateSearchMsg().Receipt, and check that MsgLookup.Message
+is matching the requested CID
+
+DEPRECATED: Use StateSearchMsg, this method won't be supported in v1 API
Perms: read
@@ -3831,7 +4314,7 @@ Response:
"WorkerChangeEpoch": 10101,
"PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"Multiaddrs": null,
- "SealProofType": 3,
+ "WindowPoStProofType": 8,
"SectorSize": 34359738368,
"WindowPoStPartitionSectors": 42,
"ConsensusFaultElapsed": 10101
@@ -3849,7 +4332,7 @@ Inputs:
[
"f01234",
{
- "SealProof": 3,
+ "SealProof": 8,
"SectorNumber": 9,
"SealedCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -3946,7 +4429,7 @@ Inputs:
[
"f01234",
{
- "SealProof": 3,
+ "SealProof": 8,
"SectorNumber": 9,
"SealedCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -4151,7 +4634,7 @@ Inputs:
]
```
-Response: `6`
+Response: `13`
### StateReadState
StateReadState returns the indicated actor's state.
@@ -4178,13 +4661,31 @@ Response:
```json
{
"Balance": "0",
+ "Code": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
"State": {}
}
```
### StateReplay
StateReplay replays a given message, assuming it was included in a block in the specified tipset.
-If no tipset key is provided, the appropriate tipset is looked up.
+
+If a tipset key is provided, and a replacing message is found on chain,
+the method will return an error saying that the message wasn't found
+
+If no tipset key is provided, the appropriate tipset is looked up, and if
+the message was gas-repriced, the on-chain message will be replayed - in
+that case the returned InvocResult.MsgCid will not match the Cid param
+
+If the caller wants to ensure that exactly the requested message was executed,
+they MUST check that InvocResult.MsgCid is equal to the provided Cid.
+Without this check both the requested and original message may appear as
+successfully executed on-chain, which may look like a double-spend.
+
+A replacing message is a message with a different CID, any of Gas values, and
+different signature, but with all other parameters matching (source/destination,
+nonce, params, etc.)
Perms: read
@@ -4278,6 +4779,20 @@ Response:
### StateSearchMsg
StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
+NOTE: If a replacing message is found on chain, this method will return
+a MsgLookup for the replacing message - the MsgLookup.Message will be a different
+CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
+result of the execution of the replacing message.
+
+If the caller wants to ensure that exactly the requested message was executed,
+they MUST check that MsgLookup.Message is equal to the provided 'cid'.
+Without this check both the requested and original message may appear as
+successfully executed on-chain, which may look like a double-spend.
+
+A replacing message is a message with a different CID, any of Gas values, and
+different signature, but with all other parameters matching (source/destination,
+nonce, params, etc.)
+
Perms: read
@@ -4314,6 +4829,60 @@ Response:
}
```
+### StateSearchMsgLimited
+StateSearchMsgLimited looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed
+
+NOTE: If a replacing message is found on chain, this method will return
+a MsgLookup for the replacing message - the MsgLookup.Message will be a different
+CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
+result of the execution of the replacing message.
+
+If the caller wants to ensure that exactly the requested message was executed,
+they MUST check that MsgLookup.Message is equal to the provided 'cid'.
+Without this check both the requested and original message may appear as
+successfully executed on-chain, which may look like a double-spend.
+
+A replacing message is a message with a different CID, any of Gas values, and
+different signature, but with all other parameters matching (source/destination,
+nonce, params, etc.)
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ 10101
+]
+```
+
+Response:
+```json
+{
+ "Message": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Receipt": {
+ "ExitCode": 0,
+ "Return": "Ynl0ZSBhcnJheQ==",
+ "GasUsed": 9
+ },
+ "ReturnDec": {},
+ "TipSet": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ "Height": 10101
+}
+```
+
### StateSectorExpiration
StateSectorExpiration returns epoch at which given sector will expire
@@ -4372,7 +4941,7 @@ Response:
```json
{
"SectorNumber": 9,
- "SealProof": 3,
+ "SealProof": 8,
"SealedCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
@@ -4443,7 +5012,7 @@ Response:
```json
{
"Info": {
- "SealProof": 3,
+ "SealProof": 8,
"SectorNumber": 9,
"SealedCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
@@ -4491,7 +5060,8 @@ Response:
"FilMined": "0",
"FilBurnt": "0",
"FilLocked": "0",
- "FilCirculating": "0"
+ "FilCirculating": "0",
+ "FilReserveDisbursed": "0"
}
```
@@ -4571,6 +5141,20 @@ Response: `"0"`
StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
message arrives on chain, and gets to the indicated confidence depth.
+NOTE: If a replacing message is found on chain, this method will return
+a MsgLookup for the replacing message - the MsgLookup.Message will be a different
+CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
+result of the execution of the replacing message.
+
+If the caller wants to ensure that exactly the requested message was executed,
+they MUST check that MsgLookup.Message is equal to the provided 'cid'.
+Without this check both the requested and original message may appear as
+successfully executed on-chain, which may look like a double-spend.
+
+A replacing message is a message with a different CID, any of Gas values, and
+different signature, but with all other parameters matching (source/destination,
+nonce, params, etc.)
+
Perms: read
@@ -4613,6 +5197,20 @@ StateWaitMsgLimited looks back up to limit epochs in the chain for a message.
If not found, it blocks until the message arrives on chain, and gets to the
indicated confidence depth.
+NOTE: If a replacing message is found on chain, this method will return
+a MsgLookup for the replacing message - the MsgLookup.Message will be a different
+CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
+result of the execution of the replacing message.
+
+If the caller wants to ensure that exactly the requested message was executed,
+they MUST check that MsgLookup.Message is equal to the provided 'cid'.
+Without this check both the requested and original message may appear as
+successfully executed on-chain, which may look like a double-spend.
+
+A replacing message is a message with a different CID, any of Gas values, and
+different signature, but with all other parameters matching (source/destination,
+nonce, params, etc.)
+
Perms: read
@@ -4913,7 +5511,7 @@ Response: `"f01234"`
WalletDelete deletes an address from the wallet.
-Perms: write
+Perms: admin
Inputs:
```json
@@ -5009,7 +5607,7 @@ Response: `"f01234"`
WalletSetDefault marks the given address as the default one.
-Perms: admin
+Perms: write
Inputs:
```json
diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md
new file mode 100644
index 00000000000..ef151a94dde
--- /dev/null
+++ b/documentation/en/api-v1-unstable-methods.md
@@ -0,0 +1,5859 @@
+# Groups
+* [](#)
+ * [Closing](#Closing)
+ * [Discover](#Discover)
+ * [Session](#Session)
+ * [Shutdown](#Shutdown)
+ * [Version](#Version)
+* [Auth](#Auth)
+ * [AuthNew](#AuthNew)
+ * [AuthVerify](#AuthVerify)
+* [Beacon](#Beacon)
+ * [BeaconGetEntry](#BeaconGetEntry)
+* [Chain](#Chain)
+ * [ChainDeleteObj](#ChainDeleteObj)
+ * [ChainExport](#ChainExport)
+ * [ChainGetBlock](#ChainGetBlock)
+ * [ChainGetBlockMessages](#ChainGetBlockMessages)
+ * [ChainGetGenesis](#ChainGetGenesis)
+ * [ChainGetMessage](#ChainGetMessage)
+ * [ChainGetMessagesInTipset](#ChainGetMessagesInTipset)
+ * [ChainGetNode](#ChainGetNode)
+ * [ChainGetParentMessages](#ChainGetParentMessages)
+ * [ChainGetParentReceipts](#ChainGetParentReceipts)
+ * [ChainGetPath](#ChainGetPath)
+ * [ChainGetRandomnessFromBeacon](#ChainGetRandomnessFromBeacon)
+ * [ChainGetRandomnessFromTickets](#ChainGetRandomnessFromTickets)
+ * [ChainGetTipSet](#ChainGetTipSet)
+ * [ChainGetTipSetByHeight](#ChainGetTipSetByHeight)
+ * [ChainHasObj](#ChainHasObj)
+ * [ChainHead](#ChainHead)
+ * [ChainNotify](#ChainNotify)
+ * [ChainReadObj](#ChainReadObj)
+ * [ChainSetHead](#ChainSetHead)
+ * [ChainStatObj](#ChainStatObj)
+ * [ChainTipSetWeight](#ChainTipSetWeight)
+* [Client](#Client)
+ * [ClientCalcCommP](#ClientCalcCommP)
+ * [ClientCancelDataTransfer](#ClientCancelDataTransfer)
+ * [ClientCancelRetrievalDeal](#ClientCancelRetrievalDeal)
+ * [ClientDataTransferUpdates](#ClientDataTransferUpdates)
+ * [ClientDealPieceCID](#ClientDealPieceCID)
+ * [ClientDealSize](#ClientDealSize)
+ * [ClientFindData](#ClientFindData)
+ * [ClientGenCar](#ClientGenCar)
+ * [ClientGetDealInfo](#ClientGetDealInfo)
+ * [ClientGetDealStatus](#ClientGetDealStatus)
+ * [ClientGetDealUpdates](#ClientGetDealUpdates)
+ * [ClientGetRetrievalUpdates](#ClientGetRetrievalUpdates)
+ * [ClientHasLocal](#ClientHasLocal)
+ * [ClientImport](#ClientImport)
+ * [ClientListDataTransfers](#ClientListDataTransfers)
+ * [ClientListDeals](#ClientListDeals)
+ * [ClientListImports](#ClientListImports)
+ * [ClientListRetrievals](#ClientListRetrievals)
+ * [ClientMinerQueryOffer](#ClientMinerQueryOffer)
+ * [ClientQueryAsk](#ClientQueryAsk)
+ * [ClientRemoveImport](#ClientRemoveImport)
+ * [ClientRestartDataTransfer](#ClientRestartDataTransfer)
+ * [ClientRetrieve](#ClientRetrieve)
+ * [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds)
+ * [ClientRetrieveWithEvents](#ClientRetrieveWithEvents)
+ * [ClientStartDeal](#ClientStartDeal)
+ * [ClientStatelessDeal](#ClientStatelessDeal)
+* [Create](#Create)
+ * [CreateBackup](#CreateBackup)
+* [Gas](#Gas)
+ * [GasEstimateFeeCap](#GasEstimateFeeCap)
+ * [GasEstimateGasLimit](#GasEstimateGasLimit)
+ * [GasEstimateGasPremium](#GasEstimateGasPremium)
+ * [GasEstimateMessageGas](#GasEstimateMessageGas)
+* [I](#I)
+ * [ID](#ID)
+* [Log](#Log)
+ * [LogList](#LogList)
+ * [LogSetLevel](#LogSetLevel)
+* [Market](#Market)
+ * [MarketAddBalance](#MarketAddBalance)
+ * [MarketGetReserved](#MarketGetReserved)
+ * [MarketReleaseFunds](#MarketReleaseFunds)
+ * [MarketReserveFunds](#MarketReserveFunds)
+ * [MarketWithdraw](#MarketWithdraw)
+* [Miner](#Miner)
+ * [MinerCreateBlock](#MinerCreateBlock)
+ * [MinerGetBaseInfo](#MinerGetBaseInfo)
+* [Mpool](#Mpool)
+ * [MpoolBatchPush](#MpoolBatchPush)
+ * [MpoolBatchPushMessage](#MpoolBatchPushMessage)
+ * [MpoolBatchPushUntrusted](#MpoolBatchPushUntrusted)
+ * [MpoolCheckMessages](#MpoolCheckMessages)
+ * [MpoolCheckPendingMessages](#MpoolCheckPendingMessages)
+ * [MpoolCheckReplaceMessages](#MpoolCheckReplaceMessages)
+ * [MpoolClear](#MpoolClear)
+ * [MpoolGetConfig](#MpoolGetConfig)
+ * [MpoolGetNonce](#MpoolGetNonce)
+ * [MpoolPending](#MpoolPending)
+ * [MpoolPush](#MpoolPush)
+ * [MpoolPushMessage](#MpoolPushMessage)
+ * [MpoolPushUntrusted](#MpoolPushUntrusted)
+ * [MpoolSelect](#MpoolSelect)
+ * [MpoolSetConfig](#MpoolSetConfig)
+ * [MpoolSub](#MpoolSub)
+* [Msig](#Msig)
+ * [MsigAddApprove](#MsigAddApprove)
+ * [MsigAddCancel](#MsigAddCancel)
+ * [MsigAddPropose](#MsigAddPropose)
+ * [MsigApprove](#MsigApprove)
+ * [MsigApproveTxnHash](#MsigApproveTxnHash)
+ * [MsigCancel](#MsigCancel)
+ * [MsigCreate](#MsigCreate)
+ * [MsigGetAvailableBalance](#MsigGetAvailableBalance)
+ * [MsigGetPending](#MsigGetPending)
+ * [MsigGetVested](#MsigGetVested)
+ * [MsigGetVestingSchedule](#MsigGetVestingSchedule)
+ * [MsigPropose](#MsigPropose)
+ * [MsigRemoveSigner](#MsigRemoveSigner)
+ * [MsigSwapApprove](#MsigSwapApprove)
+ * [MsigSwapCancel](#MsigSwapCancel)
+ * [MsigSwapPropose](#MsigSwapPropose)
+* [Net](#Net)
+ * [NetAddrsListen](#NetAddrsListen)
+ * [NetAgentVersion](#NetAgentVersion)
+ * [NetAutoNatStatus](#NetAutoNatStatus)
+ * [NetBandwidthStats](#NetBandwidthStats)
+ * [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer)
+ * [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol)
+ * [NetBlockAdd](#NetBlockAdd)
+ * [NetBlockList](#NetBlockList)
+ * [NetBlockRemove](#NetBlockRemove)
+ * [NetConnect](#NetConnect)
+ * [NetConnectedness](#NetConnectedness)
+ * [NetDisconnect](#NetDisconnect)
+ * [NetFindPeer](#NetFindPeer)
+ * [NetPeerInfo](#NetPeerInfo)
+ * [NetPeers](#NetPeers)
+ * [NetPubsubScores](#NetPubsubScores)
+* [Node](#Node)
+ * [NodeStatus](#NodeStatus)
+* [Paych](#Paych)
+ * [PaychAllocateLane](#PaychAllocateLane)
+ * [PaychAvailableFunds](#PaychAvailableFunds)
+ * [PaychAvailableFundsByFromTo](#PaychAvailableFundsByFromTo)
+ * [PaychCollect](#PaychCollect)
+ * [PaychGet](#PaychGet)
+ * [PaychGetWaitReady](#PaychGetWaitReady)
+ * [PaychList](#PaychList)
+ * [PaychNewPayment](#PaychNewPayment)
+ * [PaychSettle](#PaychSettle)
+ * [PaychStatus](#PaychStatus)
+ * [PaychVoucherAdd](#PaychVoucherAdd)
+ * [PaychVoucherCheckSpendable](#PaychVoucherCheckSpendable)
+ * [PaychVoucherCheckValid](#PaychVoucherCheckValid)
+ * [PaychVoucherCreate](#PaychVoucherCreate)
+ * [PaychVoucherList](#PaychVoucherList)
+ * [PaychVoucherSubmit](#PaychVoucherSubmit)
+* [State](#State)
+ * [StateAccountKey](#StateAccountKey)
+ * [StateAllMinerFaults](#StateAllMinerFaults)
+ * [StateCall](#StateCall)
+ * [StateChangedActors](#StateChangedActors)
+ * [StateCirculatingSupply](#StateCirculatingSupply)
+ * [StateCompute](#StateCompute)
+ * [StateDealProviderCollateralBounds](#StateDealProviderCollateralBounds)
+ * [StateDecodeParams](#StateDecodeParams)
+ * [StateGetActor](#StateGetActor)
+ * [StateListActors](#StateListActors)
+ * [StateListMessages](#StateListMessages)
+ * [StateListMiners](#StateListMiners)
+ * [StateLookupID](#StateLookupID)
+ * [StateMarketBalance](#StateMarketBalance)
+ * [StateMarketDeals](#StateMarketDeals)
+ * [StateMarketParticipants](#StateMarketParticipants)
+ * [StateMarketStorageDeal](#StateMarketStorageDeal)
+ * [StateMinerActiveSectors](#StateMinerActiveSectors)
+ * [StateMinerAvailableBalance](#StateMinerAvailableBalance)
+ * [StateMinerDeadlines](#StateMinerDeadlines)
+ * [StateMinerFaults](#StateMinerFaults)
+ * [StateMinerInfo](#StateMinerInfo)
+ * [StateMinerInitialPledgeCollateral](#StateMinerInitialPledgeCollateral)
+ * [StateMinerPartitions](#StateMinerPartitions)
+ * [StateMinerPower](#StateMinerPower)
+ * [StateMinerPreCommitDepositForPower](#StateMinerPreCommitDepositForPower)
+ * [StateMinerProvingDeadline](#StateMinerProvingDeadline)
+ * [StateMinerRecoveries](#StateMinerRecoveries)
+ * [StateMinerSectorAllocated](#StateMinerSectorAllocated)
+ * [StateMinerSectorCount](#StateMinerSectorCount)
+ * [StateMinerSectors](#StateMinerSectors)
+ * [StateNetworkName](#StateNetworkName)
+ * [StateNetworkVersion](#StateNetworkVersion)
+ * [StateReadState](#StateReadState)
+ * [StateReplay](#StateReplay)
+ * [StateSearchMsg](#StateSearchMsg)
+ * [StateSectorExpiration](#StateSectorExpiration)
+ * [StateSectorGetInfo](#StateSectorGetInfo)
+ * [StateSectorPartition](#StateSectorPartition)
+ * [StateSectorPreCommitInfo](#StateSectorPreCommitInfo)
+ * [StateVMCirculatingSupplyInternal](#StateVMCirculatingSupplyInternal)
+ * [StateVerifiedClientStatus](#StateVerifiedClientStatus)
+ * [StateVerifiedRegistryRootKey](#StateVerifiedRegistryRootKey)
+ * [StateVerifierStatus](#StateVerifierStatus)
+ * [StateWaitMsg](#StateWaitMsg)
+* [Sync](#Sync)
+ * [SyncCheckBad](#SyncCheckBad)
+ * [SyncCheckpoint](#SyncCheckpoint)
+ * [SyncIncomingBlocks](#SyncIncomingBlocks)
+ * [SyncMarkBad](#SyncMarkBad)
+ * [SyncState](#SyncState)
+ * [SyncSubmitBlock](#SyncSubmitBlock)
+ * [SyncUnmarkAllBad](#SyncUnmarkAllBad)
+ * [SyncUnmarkBad](#SyncUnmarkBad)
+ * [SyncValidateTipset](#SyncValidateTipset)
+* [Wallet](#Wallet)
+ * [WalletBalance](#WalletBalance)
+ * [WalletDefaultAddress](#WalletDefaultAddress)
+ * [WalletDelete](#WalletDelete)
+ * [WalletExport](#WalletExport)
+ * [WalletHas](#WalletHas)
+ * [WalletImport](#WalletImport)
+ * [WalletList](#WalletList)
+ * [WalletNew](#WalletNew)
+ * [WalletSetDefault](#WalletSetDefault)
+ * [WalletSign](#WalletSign)
+ * [WalletSignMessage](#WalletSignMessage)
+ * [WalletValidateAddress](#WalletValidateAddress)
+ * [WalletVerify](#WalletVerify)
+##
+
+
+### Closing
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `{}`
+
+### Discover
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "info": {
+ "title": "Lotus RPC API",
+ "version": "1.2.1/generated=2020-11-22T08:22:42-06:00"
+ },
+ "methods": [],
+ "openrpc": "1.2.6"
+}
+```
+
+### Session
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `"07070707-0707-0707-0707-070707070707"`
+
+### Shutdown
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `{}`
+
+### Version
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Version": "string value",
+ "APIVersion": 131328,
+ "BlockDelay": 42
+}
+```
+
+## Auth
+
+
+### AuthNew
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ null
+]
+```
+
+Response: `"Ynl0ZSBhcnJheQ=="`
+
+### AuthVerify
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "string value"
+]
+```
+
+Response: `null`
+
+## Beacon
+The Beacon method group contains methods for interacting with the random beacon (DRAND)
+
+
+### BeaconGetEntry
+BeaconGetEntry returns the beacon entry for the given filecoin epoch. If
+the entry has not yet been produced, the call will block until the entry
+becomes available
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ 10101
+]
+```
+
+Response:
+```json
+{
+ "Round": 42,
+ "Data": "Ynl0ZSBhcnJheQ=="
+}
+```
+
+## Chain
+The Chain method group contains methods for interacting with the
+blockchain, but that do not require any form of state computation.
+
+
+### ChainDeleteObj
+ChainDeleteObj deletes node referenced by the given CID
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `{}`
+
+### ChainExport
+ChainExport returns a stream of bytes with CAR dump of chain data.
+The exported chain data includes the header chain from the given tipset
+back to genesis, the entire genesis state, and the most recent 'nroots'
+state trees.
+If oldmsgskip is set, messages from before the requested roots are also not included.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ 10101,
+ true,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"Ynl0ZSBhcnJheQ=="`
+
+### ChainGetBlock
+ChainGetBlock returns the block specified by the given CID.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+```json
+{
+ "Miner": "f01234",
+ "Ticket": {
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "ElectionProof": {
+ "WinCount": 9,
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "BeaconEntries": null,
+ "WinPoStProof": null,
+ "Parents": null,
+ "ParentWeight": "0",
+ "Height": 10101,
+ "ParentStateRoot": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "ParentMessageReceipts": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Messages": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "BLSAggregate": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "Timestamp": 42,
+ "BlockSig": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "ForkSignaling": 42,
+ "ParentBaseFee": "0"
+}
+```
+
+### ChainGetBlockMessages
+ChainGetBlockMessages returns messages stored in the specified block.
+
+Note: If there are multiple blocks in a tipset, it's likely that some
+messages will be duplicated. It's also possible for blocks in a tipset to have
+different messages from the same sender at the same nonce. When that happens,
+only the first message (in a block with lowest ticket) will be considered
+for execution
+
+NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK
+
+DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET
+Use ChainGetParentMessages, which will perform correct message deduplication
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+```json
+{
+ "BlsMessages": null,
+ "SecpkMessages": null,
+ "Cids": null
+}
+```
+
+### ChainGetGenesis
+ChainGetGenesis returns the genesis tipset.
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Cids": null,
+ "Blocks": null,
+ "Height": 0
+}
+```
+
+### ChainGetMessage
+ChainGetMessage reads a message referenced by the specified CID from the
+chain blockstore.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+```json
+{
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+}
+```
+
+### ChainGetMessagesInTipset
+ChainGetMessagesInTipset returns the messages stored in the current tipset
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `null`
+
+### ChainGetNode
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "string value"
+]
+```
+
+Response:
+```json
+{
+ "Cid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Obj": {}
+}
+```
+
+### ChainGetParentMessages
+ChainGetParentMessages returns messages stored in parent tipset of the
+specified block.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `null`
+
+### ChainGetParentReceipts
+ChainGetParentReceipts returns receipts for messages in parent tipset of
+the specified block. The receipts in the list returned is one-to-one with the
+messages returned by a call to ChainGetParentMessages with the same blockCid.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `null`
+
+### ChainGetPath
+ChainGetPath returns a set of revert/apply operations needed to get from
+one tipset to another, for example:
+```
+ to
+ ^
+from tAA
+ ^ ^
+tBA tAB
+ ^---*--^
+ ^
+ tRR
+```
+Would return `[revert(tBA), apply(tAB), apply(tAA)]`
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `null`
+
+### ChainGetRandomnessFromBeacon
+ChainGetRandomnessFromBeacon is used to sample the beacon for randomness.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ 2,
+ 10101,
+ "Ynl0ZSBhcnJheQ=="
+]
+```
+
+Response: `null`
+
+### ChainGetRandomnessFromTickets
+ChainGetRandomnessFromTickets is used to sample the chain for randomness.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ 2,
+ 10101,
+ "Ynl0ZSBhcnJheQ=="
+]
+```
+
+Response: `null`
+
+### ChainGetTipSet
+ChainGetTipSet returns the tipset specified by the given TipSetKey.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+```json
+{
+ "Cids": null,
+ "Blocks": null,
+ "Height": 0
+}
+```
+
+### ChainGetTipSetByHeight
+ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
+If there are no blocks at the specified epoch, a tipset at an earlier epoch
+will be returned.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ 10101,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+```json
+{
+ "Cids": null,
+ "Blocks": null,
+ "Height": 0
+}
+```
+
+### ChainHasObj
+ChainHasObj checks if a given CID exists in the chain blockstore.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `true`
+
+### ChainHead
+ChainHead returns the current head of the chain.
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Cids": null,
+ "Blocks": null,
+ "Height": 0
+}
+```
+
+### ChainNotify
+ChainNotify returns channel with chain head updates.
+First message is guaranteed to be of len == 1, and type == 'current'.
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `null`
+
+### ChainReadObj
+ChainReadObj reads ipld nodes referenced by the specified CID from chain
+blockstore and returns raw bytes.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `"Ynl0ZSBhcnJheQ=="`
+
+### ChainSetHead
+ChainSetHead forcefully sets current chain head. Use with caution.
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `{}`
+
+### ChainStatObj
+ChainStatObj returns statistics about the graph referenced by 'obj'.
+If 'base' is also specified, then the returned stat will be a diff
+between the two objects.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+```json
+{
+ "Size": 42,
+ "Links": 42
+}
+```
+
+### ChainTipSetWeight
+ChainTipSetWeight computes weight for the specified tipset.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+## Client
+The Client methods all have to do with interacting with the storage and
+retrieval markets as a client
+
+
+### ClientCalcCommP
+ClientCalcCommP calculates the CommP for a specified file
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ "string value"
+]
+```
+
+Response:
+```json
+{
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Size": 1024
+}
+```
+
+### ClientCancelDataTransfer
+ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ 3,
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ true
+]
+```
+
+Response: `{}`
+
+### ClientCancelRetrievalDeal
+ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ 5
+]
+```
+
+Response: `{}`
+
+### ClientDataTransferUpdates
+
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": null
+ }
+}
+```
+
+### ClientDealPieceCID
+ClientCalcCommP calculates the CommP and data size of the specified CID
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+```json
+{
+ "PayloadSize": 9,
+ "PieceSize": 1032,
+ "PieceCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+}
+```
+
+### ClientDealSize
+ClientDealSize calculates real deal data size
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+```json
+{
+ "PayloadSize": 9,
+ "PieceSize": 1032
+}
+```
+
+### ClientFindData
+ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ null
+]
+```
+
+Response: `null`
+
+### ClientGenCar
+ClientGenCar generates a CAR file for the specified file.
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ {
+ "Path": "string value",
+ "IsCAR": true
+ },
+ "string value"
+]
+```
+
+Response: `{}`
+
+### ClientGetDealInfo
+ClientGetDealInfo returns the latest information about a given deal.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response:
+```json
+{
+ "ProposalCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "State": 42,
+ "Message": "string value",
+ "DealStages": {
+ "Stages": null
+ },
+ "Provider": "f01234",
+ "DataRef": {
+ "TransferType": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceCid": null,
+ "PieceSize": 1024,
+ "RawBlockSize": 42
+ },
+ "PieceCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Size": 42,
+ "PricePerEpoch": "0",
+ "Duration": 42,
+ "DealID": 5432,
+ "CreationTime": "0001-01-01T00:00:00Z",
+ "Verified": true,
+ "TransferChannelID": {
+ "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "ID": 3
+ },
+ "DataTransfer": {
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": null
+ }
+ }
+}
+```
+
+### ClientGetDealStatus
+ClientGetDealStatus returns status given a code
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ 42
+]
+```
+
+Response: `"string value"`
+
+### ClientGetDealUpdates
+ClientGetDealUpdates returns the status of updated deals
+
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "ProposalCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "State": 42,
+ "Message": "string value",
+ "DealStages": {
+ "Stages": null
+ },
+ "Provider": "f01234",
+ "DataRef": {
+ "TransferType": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceCid": null,
+ "PieceSize": 1024,
+ "RawBlockSize": 42
+ },
+ "PieceCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Size": 42,
+ "PricePerEpoch": "0",
+ "Duration": 42,
+ "DealID": 5432,
+ "CreationTime": "0001-01-01T00:00:00Z",
+ "Verified": true,
+ "TransferChannelID": {
+ "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "ID": 3
+ },
+ "DataTransfer": {
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": null
+ }
+ }
+}
+```
+
+### ClientGetRetrievalUpdates
+ClientGetRetrievalUpdates returns status of updated retrieval deals
+
+
+Perms: write
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "PayloadCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "ID": 5,
+ "PieceCID": null,
+ "PricePerByte": "0",
+ "UnsealPrice": "0",
+ "Status": 0,
+ "Message": "string value",
+ "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "BytesReceived": 42,
+ "BytesPaidFor": 42,
+ "TotalPaid": "0",
+ "TransferChannelID": {
+ "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "ID": 3
+ },
+ "DataTransfer": {
+ "TransferID": 3,
+ "Status": 1,
+ "BaseCID": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "IsInitiator": true,
+ "IsSender": true,
+ "Voucher": "string value",
+ "Message": "string value",
+ "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "Transferred": 42,
+ "Stages": {
+ "Stages": null
+ }
+ }
+}
+```
+
+### ClientHasLocal
+ClientHasLocal indicates whether a certain CID is locally stored.
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
+]
+```
+
+Response: `true`
+
+### ClientImport
+ClientImport imports file under the specified path into filestore.
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Path": "string value",
+ "IsCAR": true
+ }
+]
+```
+
+Response:
+```json
+{
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "ImportID": 50
+}
+```
+
+### ClientListDataTransfers
+ClientListTransfers returns the status of all ongoing transfers of data
+
+
+Perms: write
+
+Inputs: `null`
+
+Response: `null`
+
+### ClientListDeals
+ClientListDeals returns information about the deals made by the local client.
+
+
+Perms: write
+
+Inputs: `null`
+
+Response: `null`
+
+### ClientListImports
+ClientListImports lists imported files and their root CIDs
+
+
+Perms: write
+
+Inputs: `null`
+
+Response: `null`
+
+### ClientListRetrievals
+ClientListRetrievals returns information about retrievals made by the local client
+
+
+Perms: write
+
+Inputs: `null`
+
+Response: `null`
+
+### ClientMinerQueryOffer
+ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "f01234",
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ null
+]
+```
+
+Response:
+```json
+{
+ "Err": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Piece": null,
+ "Size": 42,
+ "MinPrice": "0",
+ "UnsealPrice": "0",
+ "PaymentInterval": 42,
+ "PaymentIntervalIncrease": 42,
+ "Miner": "f01234",
+ "MinerPeer": {
+ "Address": "f01234",
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "PieceCID": null
+ }
+}
+```
+
+### ClientQueryAsk
+ClientQueryAsk returns a signed StorageAsk from the specified miner.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "f01234"
+]
+```
+
+Response:
+```json
+{
+ "Price": "0",
+ "VerifiedPrice": "0",
+ "MinPieceSize": 1032,
+ "MaxPieceSize": 1032,
+ "Miner": "f01234",
+ "Timestamp": 10101,
+ "Expiry": 10101,
+ "SeqNo": 42
+}
+```
+
+### ClientRemoveImport
+ClientRemoveImport removes file import
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ 50
+]
+```
+
+Response: `{}`
+
+### ClientRestartDataTransfer
+ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ 3,
+ "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ true
+]
+```
+
+Response: `{}`
+
+### ClientRetrieve
+ClientRetrieve initiates the retrieval of a file, as specified in the order.
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Piece": null,
+ "Size": 42,
+ "LocalStore": 12,
+ "Total": "0",
+ "UnsealPrice": "0",
+ "PaymentInterval": 42,
+ "PaymentIntervalIncrease": 42,
+ "Client": "f01234",
+ "Miner": "f01234",
+ "MinerPeer": {
+ "Address": "f01234",
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "PieceCID": null
+ }
+ },
+ {
+ "Path": "string value",
+ "IsCAR": true
+ }
+]
+```
+
+Response: `{}`
+
+### ClientRetrieveTryRestartInsufficientFunds
+ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
+which are stuck due to insufficient funds
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ "f01234"
+]
+```
+
+Response: `{}`
+
+### ClientRetrieveWithEvents
+ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
+of status updates.
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Piece": null,
+ "Size": 42,
+ "LocalStore": 12,
+ "Total": "0",
+ "UnsealPrice": "0",
+ "PaymentInterval": 42,
+ "PaymentIntervalIncrease": 42,
+ "Client": "f01234",
+ "Miner": "f01234",
+ "MinerPeer": {
+ "Address": "f01234",
+ "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
+ "PieceCID": null
+ }
+ },
+ {
+ "Path": "string value",
+ "IsCAR": true
+ }
+]
+```
+
+Response:
+```json
+{
+ "Event": 5,
+ "Status": 0,
+ "BytesReceived": 42,
+ "FundsSpent": "0",
+ "Err": "string value"
+}
+```
+
+### ClientStartDeal
+ClientStartDeal proposes a deal with a miner.
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "Data": {
+ "TransferType": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceCid": null,
+ "PieceSize": 1024,
+ "RawBlockSize": 42
+ },
+ "Wallet": "f01234",
+ "Miner": "f01234",
+ "EpochPrice": "0",
+ "MinBlocksDuration": 42,
+ "ProviderCollateral": "0",
+ "DealStartEpoch": 10101,
+ "FastRetrieval": true,
+ "VerifiedDeal": true
+ }
+]
+```
+
+Response: `null`
+
+### ClientStatelessDeal
+ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ {
+ "Data": {
+ "TransferType": "string value",
+ "Root": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "PieceCid": null,
+ "PieceSize": 1024,
+ "RawBlockSize": 42
+ },
+ "Wallet": "f01234",
+ "Miner": "f01234",
+ "EpochPrice": "0",
+ "MinBlocksDuration": 42,
+ "ProviderCollateral": "0",
+ "DealStartEpoch": 10101,
+ "FastRetrieval": true,
+ "VerifiedDeal": true
+ }
+]
+```
+
+Response: `null`
+
+## Create
+
+
+### CreateBackup
+CreateBackup creates node backup under the specified file name. The
+method requires that the lotus daemon is running with the
+LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
+the path specified when calling CreateBackup is within the base path
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ "string value"
+]
+```
+
+Response: `{}`
+
+## Gas
+
+
+### GasEstimateFeeCap
+GasEstimateFeeCap estimates gas fee cap
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ 9,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+### GasEstimateGasLimit
+GasEstimateGasLimit estimates gas used by the message and returns it.
+It fails if message fails to execute.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `9`
+
+### GasEstimateGasPremium
+GasEstimateGasPremium estimates what gas price should be used for a
+message to have high likelihood of inclusion in `nblocksincl` epochs.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ 42,
+ "f01234",
+ 9,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+### GasEstimateMessageGas
+GasEstimateMessageGas estimates gas values for unset message gas fields
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ {
+ "MaxFee": "0"
+ },
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+```json
+{
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+}
+```
+
+## I
+
+
+### ID
+
+
+Perms: read
+
+Inputs: `null`
+
+Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"`
+
+## Log
+
+
+### LogList
+
+
+Perms: write
+
+Inputs: `null`
+
+Response: `null`
+
+### LogSetLevel
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ "string value",
+ "string value"
+]
+```
+
+Response: `{}`
+
+## Market
+
+
+### MarketAddBalance
+MarketAddBalance adds funds to the market actor
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ "f01234",
+ "0"
+]
+```
+
+Response:
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+### MarketGetReserved
+MarketGetReserved gets the amount of funds that are currently reserved for the address
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234"
+]
+```
+
+Response: `"0"`
+
+### MarketReleaseFunds
+MarketReleaseFunds releases funds reserved by MarketReserveFunds
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ "0"
+]
+```
+
+Response: `{}`
+
+### MarketReserveFunds
+MarketReserveFunds reserves funds for a deal
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ "f01234",
+ "0"
+]
+```
+
+Response:
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+### MarketWithdraw
+MarketWithdraw withdraws unlocked funds from the market actor
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ "f01234",
+ "0"
+]
+```
+
+Response:
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+## Miner
+
+
+### MinerCreateBlock
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ {
+ "Miner": "f01234",
+ "Parents": [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ "Ticket": {
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "Eproof": {
+ "WinCount": 9,
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "BeaconValues": null,
+ "Messages": null,
+ "Epoch": 10101,
+ "Timestamp": 42,
+ "WinningPoStProof": null
+ }
+]
+```
+
+Response:
+```json
+{
+ "Header": {
+ "Miner": "f01234",
+ "Ticket": {
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "ElectionProof": {
+ "WinCount": 9,
+ "VRFProof": "Ynl0ZSBhcnJheQ=="
+ },
+ "BeaconEntries": null,
+ "WinPoStProof": null,
+ "Parents": null,
+ "ParentWeight": "0",
+ "Height": 10101,
+ "ParentStateRoot": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "ParentMessageReceipts": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "Messages": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ "BLSAggregate": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "Timestamp": 42,
+ "BlockSig": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "ForkSignaling": 42,
+ "ParentBaseFee": "0"
+ },
+ "BlsMessages": null,
+ "SecpkMessages": null
+}
+```
+
+### MinerGetBaseInfo
+There are not yet any comments for this method.
+
+Perms: read
+
+Inputs:
+```json
+[
+ "f01234",
+ 10101,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+```json
+{
+ "MinerPower": "0",
+ "NetworkPower": "0",
+ "Sectors": null,
+ "WorkerKey": "f01234",
+ "SectorSize": 34359738368,
+ "PrevBeaconEntry": {
+ "Round": 42,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "BeaconEntries": null,
+ "EligibleForMining": true
+}
+```
+
+## Mpool
+The Mpool methods are for interacting with the message pool. The message pool
+manages all incoming and outgoing 'messages' going over the network.
+
+
+### MpoolBatchPush
+MpoolBatchPush batch pushes a signed message to mempool.
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ null
+]
+```
+
+Response: `null`
+
+### MpoolBatchPushMessage
+MpoolBatchPushMessage batch pushes an unsigned message to mempool.
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ null,
+ {
+ "MaxFee": "0"
+ }
+]
+```
+
+Response: `null`
+
+### MpoolBatchPushUntrusted
+MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources.
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ null
+]
+```
+
+Response: `null`
+
+### MpoolCheckMessages
+MpoolCheckMessages performs logical checks on a batch of messages
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ null
+]
+```
+
+Response: `null`
+
+### MpoolCheckPendingMessages
+MpoolCheckPendingMessages performs logical checks for all pending messages from a given address
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "f01234"
+]
+```
+
+Response: `null`
+
+### MpoolCheckReplaceMessages
+MpoolCheckReplaceMessages performs logical checks on pending messages with replacement
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ null
+]
+```
+
+Response: `null`
+
+### MpoolClear
+MpoolClear clears pending messages from the mpool
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ true
+]
+```
+
+Response: `{}`
+
+### MpoolGetConfig
+MpoolGetConfig returns (a copy of) the current mpool config
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "PriorityAddrs": null,
+ "SizeLimitHigh": 123,
+ "SizeLimitLow": 123,
+ "ReplaceByFeeRatio": 12.3,
+ "PruneCooldown": 60000000000,
+ "GasLimitOverestimation": 12.3
+}
+```
+
+### MpoolGetNonce
+MpoolGetNonce gets next nonce for the specified sender.
+Note that this method may not be atomic. Use MpoolPushMessage instead.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "f01234"
+]
+```
+
+Response: `42`
+
+### MpoolPending
+MpoolPending returns pending mempool messages.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `null`
+
+### MpoolPush
+MpoolPush pushes a signed message to mempool.
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ {
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+]
+```
+
+Response:
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+### MpoolPushMessage
+MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
+to mempool.
+maxFee is only used when GasFeeCap/GasPremium fields aren't specified
+
+When maxFee is set to 0, MpoolPushMessage will guess appropriate fee
+based on current chain conditions
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ {
+ "MaxFee": "0"
+ }
+]
+```
+
+Response:
+```json
+{
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+}
+```
+
+### MpoolPushUntrusted
+MpoolPushUntrusted pushes a signed message to mempool from untrusted sources.
+
+
+Perms: write
+
+Inputs:
+```json
+[
+ {
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+]
+```
+
+Response:
+```json
+{
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
+### MpoolSelect
+MpoolSelect returns a list of pending messages for inclusion in the next block
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ 12.3
+]
+```
+
+Response: `null`
+
+### MpoolSetConfig
+MpoolSetConfig sets the mpool config to (a copy of) the supplied config
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+ {
+ "PriorityAddrs": null,
+ "SizeLimitHigh": 123,
+ "SizeLimitLow": 123,
+ "ReplaceByFeeRatio": 12.3,
+ "PruneCooldown": 60000000000,
+ "GasLimitOverestimation": 12.3
+ }
+]
+```
+
+Response: `{}`
+
+### MpoolSub
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+ "Type": 0,
+ "Message": {
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "Signature": {
+ "Type": 2,
+ "Data": "Ynl0ZSBhcnJheQ=="
+ },
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ }
+}
+```
+
+## Msig
+The Msig methods are used to interact with multisig wallets on the
+filecoin network
+
+
+### MsigAddApprove
+MsigAddApprove approves a previously proposed AddSigner message
+It takes the following params: `multisig address`, `sender address of the approve msg`, `proposed message ID`,
+`proposer address`, `new signer`, `whether the number of required signers should be increased`
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ "f01234",
+ 42,
+ "f01234",
+ "f01234",
+ true
+]
+```
+
+Response:
+```json
+{
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "ValidNonce": true
+}
+```
+
+### MsigAddCancel
+MsigAddCancel cancels a previously proposed AddSigner message
+It takes the following params: `multisig address`, `sender address of the cancel msg`, `proposed message ID`,
+`new signer`, `whether the number of required signers should be increased`
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ "f01234",
+ 42,
+ "f01234",
+ true
+]
+```
+
+Response:
+```json
+{
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "ValidNonce": true
+}
+```
+
+### MsigAddPropose
+MsigAddPropose proposes adding a signer in the multisig
+It takes the following params: `multisig address`, `sender address of the propose msg`,
+`new signer`, `whether the number of required signers should be increased`
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ "f01234",
+ "f01234",
+ true
+]
+```
+
+Response:
+```json
+{
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "ValidNonce": true
+}
+```
+
+### MsigApprove
+MsigApprove approves a previously-proposed multisig message by transaction ID
+It takes the following params: `multisig address`, `proposed transaction ID`
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ 42,
+ "f01234"
+]
+```
+
+Response:
+```json
+{
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "ValidNonce": true
+}
+```
+
+### MsigApproveTxnHash
+MsigApproveTxnHash approves a previously-proposed multisig message, specified
+using both transaction ID and a hash of the parameters used in the
+proposal. This method of approval can be used to ensure you only approve
+exactly the transaction you think you are.
+It takes the following params: `multisig address`, `proposed message ID`, `proposer address`, `recipient address`, `value to transfer`,
+`sender address of the approve msg`, `method to call in the proposed message`, `params to include in the proposed message`
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ 42,
+ "f01234",
+ "f01234",
+ "0",
+ "f01234",
+ 42,
+ "Ynl0ZSBhcnJheQ=="
+]
+```
+
+Response:
+```json
+{
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "ValidNonce": true
+}
+```
+
+### MsigCancel
+MsigCancel cancels a previously-proposed multisig message
+It takes the following params: `multisig address`, `proposed transaction ID`, `recipient address`, `value to transfer`,
+`sender address of the cancel msg`, `method to call in the proposed message`, `params to include in the proposed message`
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ "f01234",
+ 42,
+ "f01234",
+ "0",
+ "f01234",
+ 42,
+ "Ynl0ZSBhcnJheQ=="
+]
+```
+
+Response:
+```json
+{
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "ValidNonce": true
+}
+```
+
+### MsigCreate
+MsigCreate creates a multisig wallet
+It takes the following params: `required number of senders`, `approving addresses`, `unlock duration`,
+`initial balance`, `sender address of the create msg`, `gas price`
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+ 42,
+ null,
+ 10101,
+ "0",
+ "f01234",
+ "0"
+]
+```
+
+Response:
+```json
+{
+ "Message": {
+ "Version": 42,
+ "To": "f01234",
+ "From": "f01234",
+ "Nonce": 42,
+ "Value": "0",
+ "GasLimit": 9,
+ "GasFeeCap": "0",
+ "GasPremium": "0",
+ "Method": 1,
+ "Params": "Ynl0ZSBhcnJheQ==",
+ "CID": {
+ "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+ }
+ },
+ "ValidNonce": true
+}
+```
+
+### MsigGetAvailableBalance
+MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+### MsigGetPending
+MsigGetPending returns pending transactions for the given multisig
+wallet. Once pending transactions are fully approved, they will no longer
+appear here.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `null`
+
+### MsigGetVested
+MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
+It takes the following params: `multisig address`, `start epoch`, `end epoch`
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ],
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"0"`
+
+### MsigGetVestingSchedule
+MsigGetVestingSchedule returns the vesting details of a given multisig.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "f01234",
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response:
+```json
+{
+ "InitialBalance": "0",
+ "StartEpoch": 10101,
+ "UnlockDuration": 10101
+}
+```
+
+### MsigPropose
+MsigPropose proposes a multisig message
+It takes the following params: , , ,
+