diff --git a/.circleci/config.yml b/.circleci/config.yml index 0364e99fa26..4a69a4a4964 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,15 +1,17 @@ version: 2.1 orbs: go: gotest/tools@0.0.13 + aws-cli: circleci/aws-cli@1.3.2 + packer: salaxander/packer@0.0.3 executors: golang: docker: - - image: circleci/golang:1.14.6 + - image: circleci/golang:1.16.4 resource_class: 2xlarge ubuntu: docker: - - image: ubuntu:19.10 + - image: ubuntu:20.04 commands: install-deps: @@ -110,7 +112,7 @@ jobs: - run: command: make debug - test: &test + test: description: | Run tests with gotestsum. parameters: &test-params @@ -121,20 +123,20 @@ jobs: type: string default: "-timeout 30m" description: Flags passed to go test. - packages: + target: type: string default: "./..." description: Import paths of packages to be tested. - winpost-test: + proofs-log-test: type: string default: "0" - test-suite-name: + suite: type: string default: unit description: Test suite name to report to CircleCI. gotestsum-format: type: string - default: pkgname-and-test-fails + default: standard-verbose description: gotestsum format. https://github.com/gotestyourself/gotestsum#format coverage: type: string @@ -142,7 +144,7 @@ jobs: description: Coverage flag. Set to the empty string to disable. codecov-upload: type: boolean - default: false + default: true description: | Upload coverage report to https://codecov.io/. Requires the codecov API token to be set as an environment variable for private projects. 
@@ -160,24 +162,24 @@ jobs: - run: name: go test environment: - LOTUS_TEST_WINDOW_POST: << parameters.winpost-test >> + TEST_RUSTPROOFS_LOGS: << parameters.proofs-log-test >> SKIP_CONFORMANCE: "1" command: | - mkdir -p /tmp/test-reports/<< parameters.test-suite-name >> + mkdir -p /tmp/test-reports/<< parameters.suite >> mkdir -p /tmp/test-artifacts gotestsum \ --format << parameters.gotestsum-format >> \ - --junitfile /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml \ - --jsonfile /tmp/test-artifacts/<< parameters.test-suite-name >>.json \ + --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \ + --jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \ -- \ << parameters.coverage >> \ << parameters.go-test-flags >> \ - << parameters.packages >> + << parameters.target >> no_output_timeout: 30m - store_test_results: path: /tmp/test-reports - store_artifacts: - path: /tmp/test-artifacts/<< parameters.test-suite-name >>.json + path: /tmp/test-artifacts/<< parameters.suite >>.json - when: condition: << parameters.codecov-upload >> steps: @@ -188,18 +190,6 @@ jobs: command: | bash <(curl -s https://codecov.io/bash) - test-chain: - <<: *test - test-node: - <<: *test - test-storage: - <<: *test - test-cli: - <<: *test - test-short: - <<: *test - test-window-post: - <<: *test test-conformance: description: | Run tests using a corpus of interoperable test vectors for Filecoin @@ -262,24 +252,97 @@ jobs: path: /tmp/test-reports - store_artifacts: path: /tmp/test-artifacts/conformance-coverage.html + build-ntwk-calibration: + description: | + Compile lotus binaries for the calibration network + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: make calibnet + - run: mkdir linux-calibrationnet && mv lotus lotus-miner lotus-worker linux-calibrationnet + - persist_to_workspace: + root: "." 
+ paths: + - linux-calibrationnet + build-ntwk-butterfly: + description: | + Compile lotus binaries for the butterfly network + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: make butterflynet + - run: mkdir linux-butterflynet && mv lotus lotus-miner lotus-worker linux-butterflynet + - persist_to_workspace: + root: "." + paths: + - linux-butterflynet + build-ntwk-nerpa: + description: | + Compile lotus binaries for the nerpa network + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: make nerpanet + - run: mkdir linux-nerpanet && mv lotus lotus-miner lotus-worker linux-nerpanet + - persist_to_workspace: + root: "." + paths: + - linux-nerpanet build-lotus-soup: description: | - Compile `lotus-soup` Testground test plan using the current version of Lotus. + Compile `lotus-soup` Testground test plan parameters: <<: *test-params executor: << parameters.executor >> steps: - install-deps - prepare - - run: cd extern/oni && git submodule sync - - run: cd extern/oni && git submodule update --init - run: cd extern/filecoin-ffi && make - run: - name: "replace lotus, filecoin-ffi, blst and fil-blst deps" - command: cd extern/oni/lotus-soup && go mod edit -replace github.com/filecoin-project/lotus=../../../ && go mod edit -replace github.com/filecoin-project/filecoin-ffi=../../filecoin-ffi && go mod edit -replace github.com/supranational/blst=../../fil-blst/blst && go mod edit -replace github.com/filecoin-project/fil-blst=../../fil-blst + name: "go get lotus@master" + command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../.. && go mod tidy - run: name: "build lotus-soup testplan" - command: pushd extern/oni/lotus-soup && go build -tags=testground . + command: pushd testplans/lotus-soup && go build -tags=testground . 
+ trigger-testplans: + description: | + Trigger `lotus-soup` test cases on TaaS + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: + name: "download testground" + command: wget https://gist.github.com/nonsense/5fbf3167cac79945f658771aed32fc44/raw/2e17eb0debf7ec6bdf027c1bdafc2c92dd97273b/testground-d3e9603 -O ~/testground-cli && chmod +x ~/testground-cli + - run: + name: "prepare .env.toml" + command: pushd testplans/lotus-soup && mkdir -p $HOME/testground && cp env-ci.toml $HOME/testground/.env.toml && echo 'endpoint="https://ci.testground.ipfs.team"' >> $HOME/testground/.env.toml && echo 'user="circleci"' >> $HOME/testground/.env.toml + - run: + name: "prepare testground home dir and link test plans" + command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup && ln -s $(pwd)/testplans/graphsync $HOME/testground/plans/graphsync + - run: + name: "go get lotus@master" + command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master + - run: + name: "trigger deals baseline testplan on taas" + command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/baseline-k8s-3-1.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH + - run: + name: "trigger payment channel stress testplan on taas" + command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH + - run: + name: "trigger graphsync testplan on taas" + command: ~/testground-cli run composition -f $HOME/testground/plans/graphsync/_compositions/stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH build-macos: @@ -294,8 +357,8 @@ jobs: - run: name: Install go command: | - curl 
-O https://dl.google.com/go/go1.14.2.darwin-amd64.pkg && \ - sudo installer -pkg go1.14.2.darwin-amd64.pkg -target / + curl -O https://dl.google.com/go/go1.16.4.darwin-amd64.pkg && \ + sudo installer -pkg go1.16.4.darwin-amd64.pkg -target / - run: name: Install pkg-config command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config @@ -309,6 +372,15 @@ jobs: command: | curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq chmod +x /usr/local/bin/jq + - run: + name: Install hwloc + command: | + mkdir ~/hwloc + curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz + cd ~/hwloc + tar -xvzpf hwloc-2.4.1.tar.gz + cd hwloc-2.4.1 + ./configure && make && sudo make install - restore_cache: name: restore cargo cache key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }} @@ -334,6 +406,41 @@ jobs: - "~/.rustup" - "~/.cargo" + build-appimage: + machine: + image: ubuntu-2004:202104-01 + steps: + - checkout + - attach_workspace: + at: "." + - run: + name: install appimage-builder + command: | + # docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html + sudo apt update + sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace + sudo curl -Lo /usr/local/bin/appimagetool https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage + sudo chmod +x /usr/local/bin/appimagetool + sudo pip3 install appimage-builder + - run: + name: install lotus dependencies + command: sudo apt install ocl-icd-opencl-dev libhwloc-dev + - run: + name: build appimage + command: | + sed -i "s/version: latest/version: ${CIRCLE_TAG:-latest}/" AppImageBuilder.yml + make appimage + - run: + name: prepare workspace + command: | + mkdir appimage + mv Lotus-*.AppImage appimage + - persist_to_workspace: + root: "." 
+ paths: + - appimage + + gofmt: executor: golang steps: @@ -342,7 +449,7 @@ jobs: - run: command: "! go fmt ./... 2>&1 | read" - cbor-gen-check: + gen-check: executor: golang steps: - install-deps @@ -350,7 +457,10 @@ jobs: - run: make deps - run: go install golang.org/x/tools/cmd/goimports - run: go install github.com/hannahhoward/cbor-gen-for - - run: go generate ./... + - run: make gen + - run: git --no-pager diff + - run: git --no-pager diff --quiet + - run: make docsgen-cli - run: git --no-pager diff - run: git --no-pager diff --quiet @@ -359,8 +469,19 @@ jobs: steps: - install-deps - prepare + - run: go install golang.org/x/tools/cmd/goimports + - run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full + - run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner + - run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker + - run: make deps - run: make docsgen + - run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full + - run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner + - run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker - run: git --no-pager diff + - run: diff ../pre-openrpc-full ../post-openrpc-full + - run: diff ../pre-openrpc-miner ../post-openrpc-miner + - run: diff ../pre-openrpc-worker ../post-openrpc-worker - run: git --no-pager diff --quiet lint: &lint @@ -422,6 +543,198 @@ jobs: name: Publish release command: ./scripts/publish-release.sh + publish-snapcraft: + description: build and push snapcraft + machine: + image: ubuntu-2004:202104-01 + resource_class: 2xlarge + parameters: + channel: + type: string + default: "edge" + description: snapcraft channel + steps: + - checkout + - run: + name: install snapcraft + command: sudo snap install snapcraft --classic + - run: + name: create snapcraft config file + command: | + mkdir -p ~/.config/snapcraft + echo "$SNAPCRAFT_LOGIN_FILE" | base64 -d > ~/.config/snapcraft/snapcraft.cfg + - run: + name: build snap + command: snapcraft 
--use-lxd + - run: + name: publish snap + command: snapcraft push *.snap --release << parameters.channel >> + + build-and-push-image: + description: build and push docker images to public AWS ECR registry + executor: aws-cli/default + parameters: + profile-name: + type: string + default: "default" + description: AWS profile name to be configured. + + aws-access-key-id: + type: env_var_name + default: AWS_ACCESS_KEY_ID + description: > + AWS access key id for IAM role. Set this to the name of + the environment variable you will set to hold this + value, i.e. AWS_ACCESS_KEY. + + aws-secret-access-key: + type: env_var_name + default: AWS_SECRET_ACCESS_KEY + description: > + AWS secret key for IAM role. Set this to the name of + the environment variable you will set to hold this + value, i.e. AWS_SECRET_ACCESS_KEY. + + region: + type: env_var_name + default: AWS_REGION + description: > + Name of env var storing your AWS region information, + defaults to AWS_REGION + + account-url: + type: env_var_name + default: AWS_ECR_ACCOUNT_URL + description: > + Env var storing Amazon ECR account URL that maps to an AWS account, + e.g. {awsAccountNum}.dkr.ecr.us-west-2.amazonaws.com + defaults to AWS_ECR_ACCOUNT_URL + + dockerfile: + type: string + default: Dockerfile + description: Name of dockerfile to use. Defaults to Dockerfile. + + path: + type: string + default: . + description: Path to the directory containing your Dockerfile and build context. Defaults to . (working directory). + + extra-build-args: + type: string + default: "" + description: > + Extra flags to pass to docker build. 
For examples, see + https://docs.docker.com/engine/reference/commandline/build + + repo: + type: string + description: Name of an Amazon ECR repository + + tag: + type: string + default: "latest" + description: A comma-separated string containing docker image tags to build and push (default = latest) + + steps: + - run: + name: Confirm that environment variables are set + command: | + if [ -z "$AWS_ACCESS_KEY_ID" ]; then + echo "No AWS_ACCESS_KEY_ID is set. Skipping build-and-push job ..." + circleci-agent step halt + fi + + - aws-cli/setup: + profile-name: <> + aws-access-key-id: <> + aws-secret-access-key: <> + aws-region: <> + + - run: + name: Log into Amazon ECR + command: | + aws ecr-public get-login-password --region $<> --profile <> | docker login --username AWS --password-stdin $<> + + - checkout + + - setup_remote_docker: + version: 19.03.13 + docker_layer_caching: false + + - run: + name: Build docker image + command: | + registry_id=$(echo $<> | sed "s;\..*;;g") + + docker_tag_args="" + IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>" + for tag in "${DOCKER_TAGS[@]}"; do + docker_tag_args="$docker_tag_args -t $<>/<>:$tag" + done + + docker build \ + <<#parameters.extra-build-args>><><> \ + -f <>/<> \ + $docker_tag_args \ + <> + + - run: + name: Push image to Amazon ECR + command: | + IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>" + for tag in "${DOCKER_TAGS[@]}"; do + docker push $<>/<>:${tag} + done + + publish-packer-mainnet: + description: build and push AWS IAM and DigitalOcean droplet. + executor: + name: packer/default + packer-version: 1.6.6 + steps: + - checkout + - attach_workspace: + at: "." + - packer/build: + template: tools/packer/lotus.pkr.hcl + args: "-var ci_workspace_bins=./linux -var lotus_network=mainnet -var git_tag=$CIRCLE_TAG" + publish-packer-calibrationnet: + description: build and push AWS IAM and DigitalOcean droplet. 
+ executor: + name: packer/default + packer-version: 1.6.6 + steps: + - checkout + - attach_workspace: + at: "." + - packer/build: + template: tools/packer/lotus.pkr.hcl + args: "-var ci_workspace_bins=./linux-calibrationnet -var lotus_network=calibrationnet -var git_tag=$CIRCLE_TAG" + publish-packer-butterflynet: + description: build and push AWS IAM and DigitalOcean droplet. + executor: + name: packer/default + packer-version: 1.6.6 + steps: + - checkout + - attach_workspace: + at: "." + - packer/build: + template: tools/packer/lotus.pkr.hcl + args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG" + publish-packer-nerpanet: + description: build and push AWS IAM and DigitalOcean droplet. + executor: + name: packer/default + packer-version: 1.6.6 + steps: + - checkout + - attach_workspace: + at: "." + - packer/build: + template: tools/packer/lotus.pkr.hcl + args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG" workflows: version: 2.1 @@ -431,73 +744,289 @@ workflows: concurrency: "16" # expend all docker 2xlarge CPUs. - mod-tidy-check - gofmt - - cbor-gen-check + - gen-check - docs-check - test: - codecov-upload: true - test-suite-name: full - - test-chain: - codecov-upload: true - test-suite-name: chain - packages: "./chain/..." - - test-node: - codecov-upload: true - test-suite-name: node - packages: "./node/..." - - test-storage: - codecov-upload: true - test-suite-name: storage - packages: "./storage/... ./extern/..." - - test-cli: - codecov-upload: true - test-suite-name: cli - packages: "./cli/... ./cmd/... ./api/..." 
- - test-window-post: - go-test-flags: "-run=TestWindowedPost" - winpost-test: "1" - test-suite-name: window-post - - test-short: - go-test-flags: "--timeout 10m --short" - test-suite-name: short - filters: - tags: - only: - - /^v\d+\.\d+\.\d+$/ + name: test-itest-api + suite: itest-api + target: "./itests/api_test.go" + + - test: + name: test-itest-batch_deal + suite: itest-batch_deal + target: "./itests/batch_deal_test.go" + + - test: + name: test-itest-ccupgrade + suite: itest-ccupgrade + target: "./itests/ccupgrade_test.go" + + - test: + name: test-itest-cli + suite: itest-cli + target: "./itests/cli_test.go" + + - test: + name: test-itest-deadlines + suite: itest-deadlines + target: "./itests/deadlines_test.go" + + - test: + name: test-itest-deals_concurrent + suite: itest-deals_concurrent + target: "./itests/deals_concurrent_test.go" + + - test: + name: test-itest-deals_offline + suite: itest-deals_offline + target: "./itests/deals_offline_test.go" + + - test: + name: test-itest-deals_power + suite: itest-deals_power + target: "./itests/deals_power_test.go" + + - test: + name: test-itest-deals_pricing + suite: itest-deals_pricing + target: "./itests/deals_pricing_test.go" + + - test: + name: test-itest-deals_publish + suite: itest-deals_publish + target: "./itests/deals_publish_test.go" + + - test: + name: test-itest-deals + suite: itest-deals + target: "./itests/deals_test.go" + + - test: + name: test-itest-gateway + suite: itest-gateway + target: "./itests/gateway_test.go" + + - test: + name: test-itest-get_messages_in_ts + suite: itest-get_messages_in_ts + target: "./itests/get_messages_in_ts_test.go" + + - test: + name: test-itest-multisig + suite: itest-multisig + target: "./itests/multisig_test.go" + + - test: + name: test-itest-nonce + suite: itest-nonce + target: "./itests/nonce_test.go" + + - test: + name: test-itest-paych_api + suite: itest-paych_api + target: "./itests/paych_api_test.go" + + - test: + name: test-itest-paych_cli + suite: 
itest-paych_cli + target: "./itests/paych_cli_test.go" + + - test: + name: test-itest-sdr_upgrade + suite: itest-sdr_upgrade + target: "./itests/sdr_upgrade_test.go" + + - test: + name: test-itest-sector_finalize_early + suite: itest-sector_finalize_early + target: "./itests/sector_finalize_early_test.go" + + - test: + name: test-itest-sector_miner_collateral + suite: itest-sector_miner_collateral + target: "./itests/sector_miner_collateral_test.go" + + - test: + name: test-itest-sector_pledge + suite: itest-sector_pledge + target: "./itests/sector_pledge_test.go" + + - test: + name: test-itest-sector_terminate + suite: itest-sector_terminate + target: "./itests/sector_terminate_test.go" + + - test: + name: test-itest-tape + suite: itest-tape + target: "./itests/tape_test.go" + + - test: + name: test-itest-verifreg + suite: itest-verifreg + target: "./itests/verifreg_test.go" + + - test: + name: test-itest-wdpost_dispute + suite: itest-wdpost_dispute + target: "./itests/wdpost_dispute_test.go" + + - test: + name: test-itest-wdpost + suite: itest-wdpost + target: "./itests/wdpost_test.go" + + - test: + name: test-unit-cli + suite: utest-unit-cli + target: "./cli/... ./cmd/... ./api/..." + - test: + name: test-unit-node + suite: utest-unit-node + target: "./node/..." + - test: + name: test-unit-rest + suite: utest-unit-rest + target: "./api/... ./blockstore/... ./build/... ./chain/... ./cli/... ./cmd/... ./conformance/... ./extern/... ./gateway/... ./journal/... ./lib/... ./markets/... ./node/... ./paychmgr/... ./storage/... ./tools/..." + - test: + name: test-unit-storage + suite: utest-unit-storage + target: "./storage/... ./extern/..." 
+ - test: + go-test-flags: "-run=TestMulticoreSDR" + suite: multicore-sdr-check + target: "./extern/sector-storage/ffiwrapper" + proofs-log-test: "1" - test-conformance: - test-suite-name: conformance - packages: "./conformance" + suite: conformance + codecov-upload: false + target: "./conformance" - test-conformance: name: test-conformance-bleeding-edge - test-suite-name: conformance-bleeding-edge - packages: "./conformance" + codecov-upload: false + suite: conformance-bleeding-edge + target: "./conformance" vectors-branch: master - - build-lotus-soup + - trigger-testplans: + filters: + branches: + only: + - master - build-debug - build-all: - requires: - - test-short filters: tags: only: - - /^v\d+\.\d+\.\d+$/ + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-ntwk-calibration: + filters: + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-ntwk-butterfly: + filters: + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-ntwk-nerpa: + filters: + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-lotus-soup - build-macos: - requires: - - test-short filters: branches: ignore: - /.*/ tags: only: - - /^v\d+\.\d+\.\d+$/ + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-appimage: + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - publish: requires: - build-all - build-macos + - build-appimage + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-and-push-image: + dockerfile: Dockerfile.lotus + path: . 
+ repo: lotus-dev + tag: '${CIRCLE_SHA1:0:8}' + - publish-packer-mainnet: + requires: + - build-all + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-packer-calibrationnet: + requires: + - build-ntwk-calibration + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-packer-butterflynet: + requires: + - build-ntwk-butterfly + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-packer-nerpanet: + requires: + - build-ntwk-nerpa + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-snapcraft: + name: publish-snapcraft-stable + channel: stable filters: branches: ignore: - /.*/ tags: only: - - /^v\d+\.\d+\.\d+$/ + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + + nightly: + triggers: + - schedule: + cron: "0 0 * * *" + filters: + branches: + only: + - master + jobs: + - publish-snapcraft: + name: publish-snapcraft-nightly + channel: edge diff --git a/.circleci/gen.go b/.circleci/gen.go new file mode 100644 index 00000000000..844348e29ae --- /dev/null +++ b/.circleci/gen.go @@ -0,0 +1,136 @@ +package main + +import ( + "embed" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "text/template" +) + +//go:generate go run ./gen.go .. + +//go:embed template.yml +var templateFile embed.FS + +type ( + dirs = []string + suite = string +) + +// groupedUnitTests maps suite names to top-level directories that should be +// included in that suite. The program adds an implicit group "rest" that +// includes all other top-level directories. 
+var groupedUnitTests = map[suite]dirs{ + "unit-node": {"node"}, + "unit-storage": {"storage", "extern"}, + "unit-cli": {"cli", "cmd", "api"}, +} + +func main() { + if len(os.Args) != 2 { + panic("expected path to repo as argument") + } + + repo := os.Args[1] + + tmpl := template.New("template.yml") + tmpl.Delims("[[", "]]") + tmpl.Funcs(template.FuncMap{ + "stripSuffix": func(in string) string { + return strings.TrimSuffix(in, "_test.go") + }, + }) + tmpl = template.Must(tmpl.ParseFS(templateFile, "*")) + + // list all itests. + itests, err := filepath.Glob(filepath.Join(repo, "./itests/*_test.go")) + if err != nil { + panic(err) + } + + // strip the dir from all entries. + for i, f := range itests { + itests[i] = filepath.Base(f) + } + + // calculate the exclusion set of unit test directories to exclude because + // they are already included in a grouped suite. + var excluded = map[string]struct{}{} + for _, ss := range groupedUnitTests { + for _, s := range ss { + e, err := filepath.Abs(filepath.Join(repo, s)) + if err != nil { + panic(err) + } + excluded[e] = struct{}{} + } + } + + // all unit tests top-level dirs that are not itests, nor included in other suites. + var rest = map[string]struct{}{} + err = filepath.Walk(repo, func(path string, f os.FileInfo, err error) error { + // include all tests that aren't in the itests directory. + if strings.Contains(path, "itests") { + return filepath.SkipDir + } + // exclude all tests included in other suites + if f.IsDir() { + if _, ok := excluded[path]; ok { + return filepath.SkipDir + } + } + if strings.HasSuffix(path, "_test.go") { + rel, err := filepath.Rel(repo, path) + if err != nil { + panic(err) + } + // take the first directory + rest[strings.Split(rel, string(os.PathSeparator))[0]] = struct{}{} + } + return err + }) + if err != nil { + panic(err) + } + + // add other directories to a 'rest' suite. 
+ for k := range rest { + groupedUnitTests["unit-rest"] = append(groupedUnitTests["unit-rest"], k) + } + + // map iteration guarantees no order, so sort the array in-place. + sort.Strings(groupedUnitTests["unit-rest"]) + + // form the input data. + type data struct { + ItestFiles []string + UnitSuites map[string]string + } + in := data{ + ItestFiles: itests, + UnitSuites: func() map[string]string { + ret := make(map[string]string) + for name, dirs := range groupedUnitTests { + for i, d := range dirs { + dirs[i] = fmt.Sprintf("./%s/...", d) // turn into package + } + ret[name] = strings.Join(dirs, " ") + } + return ret + }(), + } + + out, err := os.Create("./config.yml") + if err != nil { + panic(err) + } + defer out.Close() + + // execute the template. + if err := tmpl.Execute(out, in); err != nil { + panic(err) + } +} diff --git a/.circleci/template.yml b/.circleci/template.yml new file mode 100644 index 00000000000..fb59f23eafe --- /dev/null +++ b/.circleci/template.yml @@ -0,0 +1,902 @@ +version: 2.1 +orbs: + go: gotest/tools@0.0.13 + aws-cli: circleci/aws-cli@1.3.2 + packer: salaxander/packer@0.0.3 + +executors: + golang: + docker: + - image: circleci/golang:1.16.4 + resource_class: 2xlarge + ubuntu: + docker: + - image: ubuntu:20.04 + +commands: + install-deps: + steps: + - go/install-ssh + - go/install: {package: git} + prepare: + parameters: + linux: + default: true + description: is a linux build environment? + type: boolean + darwin: + default: false + description: is a darwin build environment? 
+ type: boolean + steps: + - checkout + - git_fetch_all_tags + - checkout + - when: + condition: << parameters.linux >> + steps: + - run: sudo apt-get update + - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev + - run: git submodule sync + - run: git submodule update --init + download-params: + steps: + - restore_cache: + name: Restore parameters cache + keys: + - 'v25-2k-lotus-params' + paths: + - /var/tmp/filecoin-proof-parameters/ + - run: ./lotus fetch-params 2048 + - save_cache: + name: Save parameters cache + key: 'v25-2k-lotus-params' + paths: + - /var/tmp/filecoin-proof-parameters/ + install_ipfs: + steps: + - run: | + apt update + apt install -y wget + wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz + wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512 + if [ "$(sha512sum go-ipfs_v0.4.22_linux-amd64.tar.gz)" != "$(cat go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512)" ] + then + echo "ipfs failed checksum check" + exit 1 + fi + tar -xf go-ipfs_v0.4.22_linux-amd64.tar.gz + mv go-ipfs/ipfs /usr/local/bin/ipfs + chmod +x /usr/local/bin/ipfs + git_fetch_all_tags: + steps: + - run: + name: fetch all tags + command: | + git fetch --all + +jobs: + mod-tidy-check: + executor: golang + steps: + - install-deps + - prepare + - go/mod-tidy-check + + build-all: + executor: golang + steps: + - install-deps + - prepare + - run: sudo apt-get update + - run: sudo apt-get install npm + - run: + command: make buildall + - store_artifacts: + path: lotus + - store_artifacts: + path: lotus-miner + - store_artifacts: + path: lotus-worker + - run: mkdir linux && mv lotus lotus-miner lotus-worker linux/ + - persist_to_workspace: + root: "." + paths: + - linux + + build-debug: + executor: golang + steps: + - install-deps + - prepare + - run: + command: make debug + + test: + description: | + Run tests with gotestsum. 
+ parameters: &test-params + executor: + type: executor + default: golang + go-test-flags: + type: string + default: "-timeout 30m" + description: Flags passed to go test. + target: + type: string + default: "./..." + description: Import paths of packages to be tested. + proofs-log-test: + type: string + default: "0" + suite: + type: string + default: unit + description: Test suite name to report to CircleCI. + gotestsum-format: + type: string + default: standard-verbose + description: gotestsum format. https://github.com/gotestyourself/gotestsum#format + coverage: + type: string + default: -coverprofile=coverage.txt -coverpkg=github.com/filecoin-project/lotus/... + description: Coverage flag. Set to the empty string to disable. + codecov-upload: + type: boolean + default: true + description: | + Upload coverage report to https://codecov.io/. Requires the codecov API token to be + set as an environment variable for private projects. + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: + command: make deps lotus + no_output_timeout: 30m + - download-params + - go/install-gotestsum: + gobin: $HOME/.local/bin + version: 0.5.2 + - run: + name: go test + environment: + TEST_RUSTPROOFS_LOGS: << parameters.proofs-log-test >> + SKIP_CONFORMANCE: "1" + command: | + mkdir -p /tmp/test-reports/<< parameters.suite >> + mkdir -p /tmp/test-artifacts + gotestsum \ + --format << parameters.gotestsum-format >> \ + --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \ + --jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \ + -- \ + << parameters.coverage >> \ + << parameters.go-test-flags >> \ + << parameters.target >> + no_output_timeout: 30m + - store_test_results: + path: /tmp/test-reports + - store_artifacts: + path: /tmp/test-artifacts/<< parameters.suite >>.json + - when: + condition: << parameters.codecov-upload >> + steps: + - go/install: {package: bash} + - go/install: {package: curl} + - run: + shell: /bin/bash -eo pipefail 
+ command: | + bash <(curl -s https://codecov.io/bash) + + test-conformance: + description: | + Run tests using a corpus of interoperable test vectors for Filecoin + implementations to test their correctness and compliance with the Filecoin + specifications. + parameters: + <<: *test-params + vectors-branch: + type: string + default: "" + description: | + Branch on github.com/filecoin-project/test-vectors to checkout and + test with. If empty (the default) the commit defined by the git + submodule is used. + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: + command: make deps lotus + no_output_timeout: 30m + - download-params + - when: + condition: + not: + equal: [ "", << parameters.vectors-branch >> ] + steps: + - run: + name: checkout vectors branch + command: | + cd extern/test-vectors + git fetch + git checkout origin/<< parameters.vectors-branch >> + - go/install-gotestsum: + gobin: $HOME/.local/bin + version: 0.5.2 + - run: + name: install statediff globally + command: | + ## statediff is optional; we succeed even if compilation fails. + mkdir -p /tmp/statediff + git clone https://github.com/filecoin-project/statediff.git /tmp/statediff + cd /tmp/statediff + go install ./cmd/statediff || exit 0 + - run: + name: go test + environment: + SKIP_CONFORMANCE: "0" + command: | + mkdir -p /tmp/test-reports + mkdir -p /tmp/test-artifacts + gotestsum \ + --format pkgname-and-test-fails \ + --junitfile /tmp/test-reports/junit.xml \ + -- \ + -v -coverpkg ./chain/vm/,github.com/filecoin-project/specs-actors/... 
-coverprofile=/tmp/conformance.out ./conformance/ + go tool cover -html=/tmp/conformance.out -o /tmp/test-artifacts/conformance-coverage.html + no_output_timeout: 30m + - store_test_results: + path: /tmp/test-reports + - store_artifacts: + path: /tmp/test-artifacts/conformance-coverage.html + build-ntwk-calibration: + description: | + Compile lotus binaries for the calibration network + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: make calibnet + - run: mkdir linux-calibrationnet && mv lotus lotus-miner lotus-worker linux-calibrationnet + - persist_to_workspace: + root: "." + paths: + - linux-calibrationnet + build-ntwk-butterfly: + description: | + Compile lotus binaries for the butterfly network + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: make butterflynet + - run: mkdir linux-butterflynet && mv lotus lotus-miner lotus-worker linux-butterflynet + - persist_to_workspace: + root: "." + paths: + - linux-butterflynet + build-ntwk-nerpa: + description: | + Compile lotus binaries for the nerpa network + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: make nerpanet + - run: mkdir linux-nerpanet && mv lotus lotus-miner lotus-worker linux-nerpanet + - persist_to_workspace: + root: "." + paths: + - linux-nerpanet + build-lotus-soup: + description: | + Compile `lotus-soup` Testground test plan + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: cd extern/filecoin-ffi && make + - run: + name: "go get lotus@master" + command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../.. && go mod tidy + - run: + name: "build lotus-soup testplan" + command: pushd testplans/lotus-soup && go build -tags=testground . 
+ trigger-testplans: + description: | + Trigger `lotus-soup` test cases on TaaS + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: + name: "download testground" + command: wget https://gist.github.com/nonsense/5fbf3167cac79945f658771aed32fc44/raw/2e17eb0debf7ec6bdf027c1bdafc2c92dd97273b/testground-d3e9603 -O ~/testground-cli && chmod +x ~/testground-cli + - run: + name: "prepare .env.toml" + command: pushd testplans/lotus-soup && mkdir -p $HOME/testground && cp env-ci.toml $HOME/testground/.env.toml && echo 'endpoint="https://ci.testground.ipfs.team"' >> $HOME/testground/.env.toml && echo 'user="circleci"' >> $HOME/testground/.env.toml + - run: + name: "prepare testground home dir and link test plans" + command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup && ln -s $(pwd)/testplans/graphsync $HOME/testground/plans/graphsync + - run: + name: "go get lotus@master" + command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master + - run: + name: "trigger deals baseline testplan on taas" + command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/baseline-k8s-3-1.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH + - run: + name: "trigger payment channel stress testplan on taas" + command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH + - run: + name: "trigger graphsync testplan on taas" + command: ~/testground-cli run composition -f $HOME/testground/plans/graphsync/_compositions/stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH + + + build-macos: + description: build darwin lotus binary + macos: + xcode: 
"10.0.0" + working_directory: ~/go/src/github.com/filecoin-project/lotus + steps: + - prepare: + linux: false + darwin: true + - run: + name: Install go + command: | + curl -O https://dl.google.com/go/go1.16.4.darwin-amd64.pkg && \ + sudo installer -pkg go1.16.4.darwin-amd64.pkg -target / + - run: + name: Install pkg-config + command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config + - run: go version + - run: + name: Install Rust + command: | + curl https://sh.rustup.rs -sSf | sh -s -- -y + - run: + name: Install jq + command: | + curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq + chmod +x /usr/local/bin/jq + - run: + name: Install hwloc + command: | + mkdir ~/hwloc + curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz + cd ~/hwloc + tar -xvzpf hwloc-2.4.1.tar.gz + cd hwloc-2.4.1 + ./configure && make && sudo make install + - restore_cache: + name: restore cargo cache + key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }} + - install-deps + - run: + command: make build + no_output_timeout: 30m + - store_artifacts: + path: lotus + - store_artifacts: + path: lotus-miner + - store_artifacts: + path: lotus-worker + - run: mkdir darwin && mv lotus lotus-miner lotus-worker darwin/ + - persist_to_workspace: + root: "." + paths: + - darwin + - save_cache: + name: save cargo cache + key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }} + paths: + - "~/.rustup" + - "~/.cargo" + + build-appimage: + machine: + image: ubuntu-2004:202104-01 + steps: + - checkout + - attach_workspace: + at: "." 
+ - run: + name: install appimage-builder + command: | + # docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html + sudo apt update + sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace + sudo curl -Lo /usr/local/bin/appimagetool https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage + sudo chmod +x /usr/local/bin/appimagetool + sudo pip3 install appimage-builder + - run: + name: install lotus dependencies + command: sudo apt install ocl-icd-opencl-dev libhwloc-dev + - run: + name: build appimage + command: | + sed -i "s/version: latest/version: ${CIRCLE_TAG:-latest}/" AppImageBuilder.yml + make appimage + - run: + name: prepare workspace + command: | + mkdir appimage + mv Lotus-*.AppImage appimage + - persist_to_workspace: + root: "." + paths: + - appimage + + + gofmt: + executor: golang + steps: + - install-deps + - prepare + - run: + command: "! go fmt ./... 2>&1 | read" + + gen-check: + executor: golang + steps: + - install-deps + - prepare + - run: make deps + - run: go install golang.org/x/tools/cmd/goimports + - run: go install github.com/hannahhoward/cbor-gen-for + - run: make gen + - run: git --no-pager diff + - run: git --no-pager diff --quiet + - run: make docsgen-cli + - run: git --no-pager diff + - run: git --no-pager diff --quiet + + docs-check: + executor: golang + steps: + - install-deps + - prepare + - run: go install golang.org/x/tools/cmd/goimports + - run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full + - run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner + - run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker + - run: make deps + - run: make docsgen + - run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full + - run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner + - run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker + - run: git 
--no-pager diff + - run: diff ../pre-openrpc-full ../post-openrpc-full + - run: diff ../pre-openrpc-miner ../post-openrpc-miner + - run: diff ../pre-openrpc-worker ../post-openrpc-worker + - run: git --no-pager diff --quiet + + lint: &lint + description: | + Run golangci-lint. + parameters: + executor: + type: executor + default: golang + golangci-lint-version: + type: string + default: 1.27.0 + concurrency: + type: string + default: '2' + description: | + Concurrency used to run linters. Defaults to 2 because NumCPU is not + aware of container CPU limits. + args: + type: string + default: '' + description: | + Arguments to pass to golangci-lint + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: + command: make deps + no_output_timeout: 30m + - go/install-golangci-lint: + gobin: $HOME/.local/bin + version: << parameters.golangci-lint-version >> + - run: + name: Lint + command: | + $HOME/.local/bin/golangci-lint run -v --timeout 2m \ + --concurrency << parameters.concurrency >> << parameters.args >> + lint-all: + <<: *lint + + publish: + description: publish binary artifacts + executor: ubuntu + steps: + - run: + name: Install git jq curl + command: apt update && apt install -y git jq curl + - checkout + - git_fetch_all_tags + - checkout + - install_ipfs + - attach_workspace: + at: "." 
+ - run: + name: Create bundles + command: ./scripts/build-bundle.sh + - run: + name: Publish release + command: ./scripts/publish-release.sh + + publish-snapcraft: + description: build and push snapcraft + machine: + image: ubuntu-2004:202104-01 + resource_class: 2xlarge + parameters: + channel: + type: string + default: "edge" + description: snapcraft channel + steps: + - checkout + - run: + name: install snapcraft + command: sudo snap install snapcraft --classic + - run: + name: create snapcraft config file + command: | + mkdir -p ~/.config/snapcraft + echo "$SNAPCRAFT_LOGIN_FILE" | base64 -d > ~/.config/snapcraft/snapcraft.cfg + - run: + name: build snap + command: snapcraft --use-lxd + - run: + name: publish snap + command: snapcraft push *.snap --release << parameters.channel >> + + build-and-push-image: + description: build and push docker images to public AWS ECR registry + executor: aws-cli/default + parameters: + profile-name: + type: string + default: "default" + description: AWS profile name to be configured. + + aws-access-key-id: + type: env_var_name + default: AWS_ACCESS_KEY_ID + description: > + AWS access key id for IAM role. Set this to the name of + the environment variable you will set to hold this + value, i.e. AWS_ACCESS_KEY. + + aws-secret-access-key: + type: env_var_name + default: AWS_SECRET_ACCESS_KEY + description: > + AWS secret key for IAM role. Set this to the name of + the environment variable you will set to hold this + value, i.e. AWS_SECRET_ACCESS_KEY. + + region: + type: env_var_name + default: AWS_REGION + description: > + Name of env var storing your AWS region information, + defaults to AWS_REGION + + account-url: + type: env_var_name + default: AWS_ECR_ACCOUNT_URL + description: > + Env var storing Amazon ECR account URL that maps to an AWS account, + e.g. 
{awsAccountNum}.dkr.ecr.us-west-2.amazonaws.com + defaults to AWS_ECR_ACCOUNT_URL + + dockerfile: + type: string + default: Dockerfile + description: Name of dockerfile to use. Defaults to Dockerfile. + + path: + type: string + default: . + description: Path to the directory containing your Dockerfile and build context. Defaults to . (working directory). + + extra-build-args: + type: string + default: "" + description: > + Extra flags to pass to docker build. For examples, see + https://docs.docker.com/engine/reference/commandline/build + + repo: + type: string + description: Name of an Amazon ECR repository + + tag: + type: string + default: "latest" + description: A comma-separated string containing docker image tags to build and push (default = latest) + + steps: + - run: + name: Confirm that environment variables are set + command: | + if [ -z "$AWS_ACCESS_KEY_ID" ]; then + echo "No AWS_ACCESS_KEY_ID is set. Skipping build-and-push job ..." + circleci-agent step halt + fi + + - aws-cli/setup: + profile-name: <> + aws-access-key-id: <> + aws-secret-access-key: <> + aws-region: <> + + - run: + name: Log into Amazon ECR + command: | + aws ecr-public get-login-password --region $<> --profile <> | docker login --username AWS --password-stdin $<> + + - checkout + + - setup_remote_docker: + version: 19.03.13 + docker_layer_caching: false + + - run: + name: Build docker image + command: | + registry_id=$(echo $<> | sed "s;\..*;;g") + + docker_tag_args="" + IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>" + for tag in "${DOCKER_TAGS[@]}"; do + docker_tag_args="$docker_tag_args -t $<>/<>:$tag" + done + + docker build \ + <<#parameters.extra-build-args>><><> \ + -f <>/<> \ + $docker_tag_args \ + <> + + - run: + name: Push image to Amazon ECR + command: | + IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>" + for tag in "${DOCKER_TAGS[@]}"; do + docker push $<>/<>:${tag} + done + + publish-packer-mainnet: + description: build and push AWS IAM and DigitalOcean 
droplet. + executor: + name: packer/default + packer-version: 1.6.6 + steps: + - checkout + - attach_workspace: + at: "." + - packer/build: + template: tools/packer/lotus.pkr.hcl + args: "-var ci_workspace_bins=./linux -var lotus_network=mainnet -var git_tag=$CIRCLE_TAG" + publish-packer-calibrationnet: + description: build and push AWS IAM and DigitalOcean droplet. + executor: + name: packer/default + packer-version: 1.6.6 + steps: + - checkout + - attach_workspace: + at: "." + - packer/build: + template: tools/packer/lotus.pkr.hcl + args: "-var ci_workspace_bins=./linux-calibrationnet -var lotus_network=calibrationnet -var git_tag=$CIRCLE_TAG" + publish-packer-butterflynet: + description: build and push AWS IAM and DigitalOcean droplet. + executor: + name: packer/default + packer-version: 1.6.6 + steps: + - checkout + - attach_workspace: + at: "." + - packer/build: + template: tools/packer/lotus.pkr.hcl + args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG" + publish-packer-nerpanet: + description: build and push AWS IAM and DigitalOcean droplet. + executor: + name: packer/default + packer-version: 1.6.6 + steps: + - checkout + - attach_workspace: + at: "." + - packer/build: + template: tools/packer/lotus.pkr.hcl + args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG" + +workflows: + version: 2.1 + ci: + jobs: + - lint-all: + concurrency: "16" # expend all docker 2xlarge CPUs. 
+ - mod-tidy-check + - gofmt + - gen-check + - docs-check + + [[- range $file := .ItestFiles -]] + [[ with $name := $file | stripSuffix ]] + - test: + name: test-itest-[[ $name ]] + suite: itest-[[ $name ]] + target: "./itests/[[ $file ]]" + [[ end ]] + [[- end -]] + + [[range $suite, $pkgs := .UnitSuites]] + - test: + name: test-[[ $suite ]] + suite: utest-[[ $suite ]] + target: "[[ $pkgs ]]" + [[- end]] + - test: + go-test-flags: "-run=TestMulticoreSDR" + suite: multicore-sdr-check + target: "./extern/sector-storage/ffiwrapper" + proofs-log-test: "1" + - test-conformance: + suite: conformance + codecov-upload: false + target: "./conformance" + - test-conformance: + name: test-conformance-bleeding-edge + codecov-upload: false + suite: conformance-bleeding-edge + target: "./conformance" + vectors-branch: master + - trigger-testplans: + filters: + branches: + only: + - master + - build-debug + - build-all: + filters: + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-ntwk-calibration: + filters: + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-ntwk-butterfly: + filters: + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-ntwk-nerpa: + filters: + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-lotus-soup + - build-macos: + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-appimage: + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish: + requires: + - build-all + - build-macos + - build-appimage + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-and-push-image: + dockerfile: Dockerfile.lotus + path: . 
+ repo: lotus-dev + tag: '${CIRCLE_SHA1:0:8}' + - publish-packer-mainnet: + requires: + - build-all + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-packer-calibrationnet: + requires: + - build-ntwk-calibration + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-packer-butterflynet: + requires: + - build-ntwk-butterfly + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-packer-nerpanet: + requires: + - build-ntwk-nerpa + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-snapcraft: + name: publish-snapcraft-stable + channel: stable + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + + nightly: + triggers: + - schedule: + cron: "0 0 * * *" + filters: + branches: + only: + - master + jobs: + - publish-snapcraft: + name: publish-snapcraft-nightly + channel: edge diff --git a/.codecov.yml b/.codecov.yml index a53081be7fd..1967f6ecac4 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -5,5 +5,15 @@ ignore: - "api/test/*" - "gen/**/*" - "gen/*" + - "cmd/lotus-shed/*" + - "cmd/tvx/*" + - "cmd/lotus-pcr/*" + - "cmd/tvx/*" + - "cmd/lotus-chainwatch/*" + - "cmd/lotus-health/*" + - "cmd/lotus-fountain/*" + - "cmd/lotus-townhall/*" + - "cmd/lotus-stats/*" + - "cmd/lotus-pcr/*" github_checks: annotations: false diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 6d717b44d69..b8ec66f00ea 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,16 +1,6 @@ -## filecoin-project/lotus CODEOWNERS -## Refer to https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners. -## -## These users or groups will be automatically assigned as reviewers every time -## a PR is submitted that modifies code in the specified locations. 
-## -## The Lotus repo configuration requires that at least ONE codeowner approves -## the PR before merging. +# Reference +# https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-code-owners -### Global owners. -* @magik6k @whyrusleeping @Kubuxu - -### Conformance testing. -conformance/ @raulk -extern/test-vectors @raulk -cmd/tvx @raulk \ No newline at end of file +# Global owners +# Ensure maintainers team is a requested reviewer for non-draft PRs +* @filecoin-project/lotus-maintainers diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 00000000000..23c7640b782 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,33 @@ +--- +name: Bug Report +about: Create a report to help us improve +title: "[BUG] " +labels: hint/needs-triaging, kind/bug +assignees: '' + +--- + +> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). + +**Describe the bug** +A clear and concise description of what the bug is. +(If you are not sure what the bug is, try to figure it out via a [discussion](https://github.com/filecoin-project/lotus/discussions/new) first! + +**Version (run `lotus version`):** + +**To Reproduce** +Steps to reproduce the behavior: +1. Run '...' +2. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Logs** +Provide daemon/miner/worker logs, and goroutines(if available) for troubleshooting. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Additional context** +Add any other context about the problem here. 
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 1ded8c36b70..00000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: '' -assignees: '' - ---- - -**Describe the bug** -A clear and concise description of what the bug is. - -**To Reproduce** -Steps to reproduce the behavior: -1. Run '...' -2. See error - -**Expected behavior** -A clear and concise description of what you expected to happen. - -**Screenshots** -If applicable, add screenshots to help explain your problem. - -**Version (run `lotus version`):** - -**Additional context** -Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/deal-making-issues.md b/.github/ISSUE_TEMPLATE/deal-making-issues.md new file mode 100644 index 00000000000..bec800cb7ce --- /dev/null +++ b/.github/ISSUE_TEMPLATE/deal-making-issues.md @@ -0,0 +1,49 @@ +--- +name: Deal Making Issues +about: Create a report for help with deal making failures. +title: "[Deal Making Issue]" +labels: hint/needs-triaging, area/markets +assignees: '' + +--- + +> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). + +Please provide all the information requested here to help us troubleshoot "deal making failed" issues. +If the information requested is missing, we will probably have to just ask you to provide it anyway, +before we can help debug. + +**Basic Information** +Including information like, Are you the client or the miner? Is this a storage deal or a retrieval deal? Is it an offline deal? + +**Describe the problem** + +A brief description of the problem you encountered while trying to make a deal. + +**Version** + +The output of `lotus --version`. + +**Setup** + +You miner(if applicable) and daemon setup, i.e: What hardware do you use, how much ram and etc. 
+ +**To Reproduce** + Steps to reproduce the behavior: + 1. Run '...' + 2. See error + +**Deal status** + +The output of `lotus client list-deals -v` and/or `lotus-miner storage-deals|retrieval-deals|data-transfers list [-v]` commands for the deal(s) in question. + +**Lotus daemon and miner logs** + +Please go through the logs of your daemon and miner(if applicable), and include screenshots of any error/warning-like messages you find. + +Alternatively please upload full log files and share a link here + +** Code modifications ** + +If you have modified parts of lotus, please describe which areas were modified, +and the scope of those modifications diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000000..0803a6db827 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: "[Feature Request]" +labels: hint/needs-triaging +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/mining-issues.md b/.github/ISSUE_TEMPLATE/mining-issues.md new file mode 100644 index 00000000000..434e160d411 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/mining-issues.md @@ -0,0 +1,35 @@ +--- +name: Mining Issues +about: Create a report for help with mining failures. 
+title: "[Mining Issue]" +labels: hint/needs-triaging, area/mining +assignees: '' + +--- + +> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). + +Please provide all the information requested here to help us troubleshoot "mining/WinningPoSt failed" issues. +If the information requested is missing, you may be asked you to provide it. + +**Describe the problem** +A brief description of the problem you encountered while mining new blocks. + +**Version** + +The output of `lotus --version`. + +**Setup** + +You miner and daemon setup, including what hardware do you use, your environment variable settings, how do you run your miner and worker, do you use GPU and etc. + +**Lotus daemon and miner logs** + +Please go through the logs of your daemon and miner, and include screenshots of any error/warning-like messages you find, highlighting the one has "winning post" in it. + +Alternatively please upload full log files and share a link here + +** Code modifications ** + +If you have modified parts of lotus, please describe which areas were modified, +and the scope of those modifications diff --git a/.github/ISSUE_TEMPLATE/proving-issues.md b/.github/ISSUE_TEMPLATE/proving-issues.md new file mode 100644 index 00000000000..6187d546ee0 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/proving-issues.md @@ -0,0 +1,46 @@ +--- +name: Proving Issues +about: Create a report for help with proving failures. +title: "[Proving Issue]" +labels: area/proving, hint/needs-triaging +assignees: '' + +--- + +> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). + +Please provide all the information requested here to help us troubleshoot "proving/window PoSt failed" issues. +If the information requested is missing, we will probably have to just ask you to provide it anyway, +before we can help debug. 
+ +**Describe the problem** +A brief description of the problem you encountered while proving the storage. + +**Version** + +The output of `lotus --version`. + +**Setup** + +You miner and daemon setup, including what hardware do you use, your environment variable settings, how do you run your miner and worker, do you use GPU and etc. + +**Proving status** + +The output of `lotus-miner proving` info. + +**Lotus miner logs** + +Please go through the logs of your miner, and include screenshots of any error-like messages you find, highlighting the one has "window post" in it. + +Alternatively please upload full log files and share a link here + +**Lotus miner diagnostic info** + +Please collect the following diagnostic information, and share a link here + +* lotus-miner diagnostic info `lotus-miner info all > allinfo.txt` + +** Code modifications ** + +If you have modified parts of lotus, please describe which areas were modified, +and the scope of those modifications diff --git a/.github/ISSUE_TEMPLATE/sealingfailed.md b/.github/ISSUE_TEMPLATE/sealing-issues.md similarity index 67% rename from .github/ISSUE_TEMPLATE/sealingfailed.md rename to .github/ISSUE_TEMPLATE/sealing-issues.md index ae14c32622c..7511849d3db 100644 --- a/.github/ISSUE_TEMPLATE/sealingfailed.md +++ b/.github/ISSUE_TEMPLATE/sealing-issues.md @@ -1,21 +1,32 @@ --- name: Sealing Issues about: Create a report for help with sealing (commit) failures. -title: '' -labels: 'sealing' +title: "[Sealing Issue]" +labels: hint/needs-triaging, area/sealing assignees: '' --- +> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). + Please provide all the information requested here to help us troubleshoot "commit failed" issues. If the information requested is missing, we will probably have to just ask you to provide it anyway, before we can help debug. 
**Describe the problem** +A brief description of the problem you encountered while sealing a sector. + +**Version** + +The output of `lotus --version`. + +**Setup** -A brief description of the problem you encountered while proving (sealing) a sector. +You miner and daemon setup, including what hardware do you use, your environment variable settings, how do you run your miner and worker, do you use GPU and etc. -Including what commands you ran, and a description of your setup, is very helpful. +**Commands** + +Commands you ran. **Sectors status** @@ -37,7 +48,3 @@ Please collect the following diagnostic information, and share a link here If you have modified parts of lotus, please describe which areas were modified, and the scope of those modifications - -**Version** - -The output of `lotus --version`. diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 00000000000..33725d70d32 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,69 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. 
+# +name: "CodeQL" + +on: + push: + branches: [ master ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ master ] + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + language: [ 'go' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] + # Learn more: + # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - uses: actions/setup-go@v1 + with: + go-version: '1.16.4' + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v1 + + # ℹ️ Command-line programs to run using the OS shell. 
+ # 📚 https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 00000000000..20b2feb8a95 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,27 @@ +name: Close and mark stale issue + +on: + schedule: + - cron: '0 0 * * *' + +jobs: + stale: + + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + + steps: + - uses: actions/stale@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: 'Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 24 hours.' + close-issue-message: 'This issue was closed because it is missing author input.' 
+ stale-issue-label: 'kind/stale' + any-of-labels: 'hint/needs-author-input' + days-before-issue-stale: 5 + days-before-issue-close: 1 + enable-statistics: true + + diff --git a/.github/workflows/testground-on-push.yml b/.github/workflows/testground-on-push.yml new file mode 100644 index 00000000000..2a3c8af1d51 --- /dev/null +++ b/.github/workflows/testground-on-push.yml @@ -0,0 +1,29 @@ +--- +name: Testground PR Checker + +on: [push] + +jobs: + testground: + runs-on: ubuntu-latest + name: ${{ matrix.composition_file }} + strategy: + matrix: + include: + - backend_addr: ci.testground.ipfs.team + backend_proto: https + plan_directory: testplans/lotus-soup + composition_file: testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml + - backend_addr: ci.testground.ipfs.team + backend_proto: https + plan_directory: testplans/lotus-soup + composition_file: testplans/lotus-soup/_compositions/paych-stress-k8s.toml + steps: + - uses: actions/checkout@v2 + - name: testground run + uses: coryschwartz/testground-github-action@v1.1 + with: + backend_addr: ${{ matrix.backend_addr }} + backend_proto: ${{ matrix.backend_proto }} + plan_directory: ${{ matrix.plan_directory }} + composition_file: ${{ matrix.composition_file }} diff --git a/.gitignore b/.gitignore index fd51881b788..467f315b8ef 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +/AppDir +/appimage-builder-cache +*.AppImage /lotus /lotus-miner /lotus-worker @@ -5,6 +8,7 @@ /lotus-health /lotus-chainwatch /lotus-shed +/lotus-sim /lotus-pond /lotus-townhall /lotus-fountain @@ -13,6 +17,9 @@ /lotus-gateway /lotus-pcr /lotus-wallet +/lotus-keygen +/docgen-md +/docgen-openrpc /bench.json /lotuspond/front/node_modules /lotuspond/front/build diff --git a/.gitmodules b/.gitmodules index 35f5a3d3f9b..cdee35ce393 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,16 +1,9 @@ [submodule "extern/filecoin-ffi"] path = extern/filecoin-ffi url = https://github.com/filecoin-project/filecoin-ffi.git - branch = master [submodule 
"extern/serialization-vectors"] path = extern/serialization-vectors - url = https://github.com/filecoin-project/serialization-vectors + url = https://github.com/filecoin-project/serialization-vectors.git [submodule "extern/test-vectors"] path = extern/test-vectors url = https://github.com/filecoin-project/test-vectors.git -[submodule "extern/fil-blst"] - path = extern/fil-blst - url = https://github.com/filecoin-project/fil-blst.git -[submodule "extern/oni"] - path = extern/oni - url = https://github.com/filecoin-project/oni diff --git a/.golangci.yml b/.golangci.yml index 8bdba64f0b6..87db745e427 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -16,6 +16,12 @@ linters: - deadcode - scopelint +# We don't want to skip builtin/ +skip-dirs-use-default: false +skip-dirs: + - vendor$ + - testdata$ + - examples$ issues: exclude: diff --git a/AppDir/usr/share/icons/icon.svg b/AppDir/usr/share/icons/icon.svg new file mode 100644 index 00000000000..da992296a1a --- /dev/null +++ b/AppDir/usr/share/icons/icon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/AppImageBuilder.yml b/AppImageBuilder.yml new file mode 100644 index 00000000000..19c74e4a26a --- /dev/null +++ b/AppImageBuilder.yml @@ -0,0 +1,73 @@ +version: 1 +AppDir: + path: ./AppDir + app_info: + id: io.filecoin.lotus + name: Lotus + icon: icon + version: latest + exec: usr/bin/lotus + exec_args: $@ + apt: + arch: amd64 + allow_unauthenticated: true + sources: + - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal main restricted + - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-updates main restricted + - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal universe + - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-updates universe + - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal multiverse + - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-updates multiverse + - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-backports main restricted + universe 
multiverse + - sourceline: deb http://security.ubuntu.com/ubuntu focal-security main restricted + - sourceline: deb http://security.ubuntu.com/ubuntu focal-security universe + - sourceline: deb http://security.ubuntu.com/ubuntu focal-security multiverse + - sourceline: deb https://cli-assets.heroku.com/apt ./ + - sourceline: deb http://ppa.launchpad.net/openjdk-r/ppa/ubuntu focal main + - sourceline: deb http://ppa.launchpad.net/git-core/ppa/ubuntu focal main + - sourceline: deb http://archive.canonical.com/ubuntu focal partner + include: + - ocl-icd-libopencl1 + - libhwloc15 + exclude: [] + files: + include: + - /usr/lib/x86_64-linux-gnu/libgcc_s.so.1 + - /usr/lib/x86_64-linux-gnu/libpthread-2.31.so + - /usr/lib/x86_64-linux-gnu/libm-2.31.so + - /usr/lib/x86_64-linux-gnu/libdl-2.31.so + - /usr/lib/x86_64-linux-gnu/libc-2.31.so + - /usr/lib/x86_64-linux-gnu/libudev.so.1.6.17 + exclude: + - usr/share/man + - usr/share/doc/*/README.* + - usr/share/doc/*/changelog.* + - usr/share/doc/*/NEWS.* + - usr/share/doc/*/TODO.* + test: + fedora: + image: appimagecrafters/tests-env:fedora-30 + command: ./AppRun + use_host_x: true + debian: + image: appimagecrafters/tests-env:debian-stable + command: ./AppRun + use_host_x: true + arch: + image: appimagecrafters/tests-env:archlinux-latest + command: ./AppRun + use_host_x: true + centos: + image: appimagecrafters/tests-env:centos-7 + command: ./AppRun + use_host_x: true + ubuntu: + image: appimagecrafters/tests-env:ubuntu-xenial + command: ./AppRun + use_host_x: true +AppImage: + arch: x86_64 + update-information: guess + sign-key: None + diff --git a/CHANGELOG.md b/CHANGELOG.md index 88a30c91dd9..b45c6236d53 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,867 @@ # Lotus changelog +# 1.10.1 / 2021-07-05 + +This is an optional but **highly recommended** release of Lotus for lotus miners that has many bug fixes and improvements based on the feedback we got from the community since HyperDrive. 
+ +## New Features +- commit batch: AggregateAboveBaseFee config #6650 + - `AggregateAboveBaseFee` is added to miner sealing configuration for setting the network base fee to start aggregating proofs. When the network base fee is lower than this value, the prove commits will be submitted individually via `ProveCommitSector`. According to the [Batch Incentive Alignment](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0013.md#batch-incentive-alignment) introduced in FIP-0013, we recommend miners to set this value to 0.15 nanoFIL(which is the default value) to avoid unexpected aggregation fee in burn and enjoy the most benefits of aggregation! + +## Bug Fixes +- storage: Fix FinalizeSector with sectors in storage paths #6652 +- Fix tiny error in check-client-datacap #6664 +- Fix: precommit_batch method used the wrong cfg.PreCommitBatchWait #6658 +- to optimize the batchwait #6636 +- fix getTicket: sector precommitted but expired case #6635 +- handleSubmitCommitAggregate() exception handling #6595 +- remove precommit check in handleCommitFailed #6634 +- ensure agg fee is adequate +- fix: miner balance is not enough, so that ProveCommitAggregate msg exec failed #6623 +- commit batch: Initialize the FailedSectors map #6647 + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| @magik6k| 7 | +151/-56 | 21 | +| @llifezou | 4 | +59/-20 | 4 | +| @johnli-helloworld | 2 | +45/-14 | 4 | +| @wangchao | 1 | +1/-27 | 1 | +| Jerry | 2 | +9/-4 | 2 | +| @zhoutian527 | 1 | +2/-2 | 1 | +| @ribasushi| 1 | +1/-1 | 1 | + + +# 1.10.0 / 2021-06-23 + +This is a mandatory release of Lotus that introduces Filecoin network v13, codenamed the HyperDrive upgrade. The +Filecoin mainnet will upgrade, which is epoch 892800, on 2021-06-30T22:00:00Z. 
The network upgrade introduces the +following FIPs: + +- [FIP-0008](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0008.md): Add miner batched sector pre-commit method +- [FIP-0011](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0011.md): Remove reward auction from reporting consensus faults +- [FIP-0012](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0012.md): DataCap Top up for FIL+ Client Addresses +- [FIP-0013](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0013.md): Add ProveCommitSectorAggregated method to reduce on-chain congestion +- [FIP-0015](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0015.md): Revert FIP-0009(Exempt Window PoSts from BaseFee burn) + +Note that this release is built on top of Lotus v1.9.0. Enterprising users can use the `master` branch of Lotus to get the latest functionality, including all changes in this release candidate. + +## Proof batching and aggregation + +FIPs [0008](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0008.md) and [0013](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0013.md) combine to allow for a significant increase in the rate of onboarding storage on the Filecoin network. This aims to lead to more useful data being stored on the network, reduced network congestion, and lower network base fee. + +**Check out the documentation [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#precommitsectorsbatch) for details on the new Lotus miner sealing config options, [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#fees-section) for fee config options, and explanations of the new features.** + +Note: + - We recommend to keep `PreCommitSectorsBatch` as 1. + - We recommend miners to set `PreCommitBatchWait` lower than 30 hours. + - We recommend miners to set a longer `CommitBatchSlack` and `PreCommitBatchSlack` to prevent message failures + due to expirations. 
+ +### Projected state tree growth + +In order to validate the Hyperdrive changes, we wrote a simulation to seal as many sectors as quickly as possible, assuming the same number and mix of 32GiB and 64GiB miners as the current network. + +Given these assumptions: + +- We'd expect a network storage growth rate of around 530PiB per day. 😳 🎉 🥳 😅 +- We'd expect network bandwidth dedicated to `SubmitWindowedPoSt` to grow by about 0.02% per day. +- We'd expect the [state-tree](https://spec.filecoin.io/#section-systems.filecoin_vm.state_tree) (and therefore [snapshot](https://docs.filecoin.io/get-started/lotus/chain/#lightweight-snapshot)) size to grow by 1.16GiB per day. + - Nearly all of the state-tree growth is expected to come from new sector metadata. +- We'd expect the daily lotus datastore growth rate to increase by about 10-15% (from current ~21GiB/day). + - Most "growth" of the lotus datastore is due to "churn", historical data that's no longer referenced by the latest state-tree. + +### Future improvements + +Various Lotus improvements are planned moving forward to mitigate the effects of the growing state tree size. The primary improvement is the [Lotus splitstore](https://github.com/filecoin-project/lotus/discussions/5788), which will soon be enabled by default. The feature allows for [online garbage collection](https://github.com/filecoin-project/lotus/issues/6577) for nodes that do not seek to maintain full chain and state history, thus eliminating the need for users to delete their datastores and sync from snapshots. + +Other improvements including better compressed snapshots, faster pre-migrations, and improved chain exports are in the roadmap. 
+ +## WindowPost base fee burn + +Included in the HyperDrive upgrade is [FIP-0015](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0015.md) which eliminates the special-case gas treatment of `SubmitWindowedPoSt` messages that was introduced in [FIP-0009](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0009.md). Although `SubmitWindowedPoSt` messages will be relatively cheap, thanks to the introduction of optimistic acceptance of these proofs in [FIP-0010](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0010.md), storage providers should pay attention to their `MaxWindowPoStGasFee` config option: too low and PoSts may not land on chain; too high and they may cost an exorbitant amount! + +## Changelog + +### New Features + +- Implement FIP-0015 ([filecoin-project/lotus#6361](https://github.com/filecoin-project/lotus/pull/6361)) +- Integrate FIP0013 and FIP0008 ([filecoin-project/lotus#6235](https://github.com/filecoin-project/lotus/pull/6235)) + - [Configuration docs and cli examples](https://docs.filecoin.io/mine/lotus/miner-configuration/#precommitsectorsbatch) + - [cli docs](https://github.com/filecoin-project/lotus/blob/master/documentation/en/cli-lotus-miner.md#lotus-miner-sectors-batching) + - Introduce gas prices for aggregate verifications ([filecoin-project/lotus#6347](https://github.com/filecoin-project/lotus/pull/6347)) +- Introduce v5 actors ([filecoin-project/lotus#6195](https://github.com/filecoin-project/lotus/pull/6195)) +- Robustify commit batcher ([filecoin-project/lotus#6367](https://github.com/filecoin-project/lotus/pull/6367)) +- Always flush when timer goes off ([filecoin-project/lotus#6563](https://github.com/filecoin-project/lotus/pull/6563)) +- Update default fees for aggregates ([filecoin-project/lotus#6548](https://github.com/filecoin-project/lotus/pull/6548)) +- sealing: Early finalization option ([filecoin-project/lotus#6452](https://github.com/filecoin-project/lotus/pull/6452)) + - 
`./lotus-miner/config.toml/[Sealing.FinalizeEarly]`: default to false. Enable if you want to FinalizeSector before committing +- Add filplus utils to CLI ([filecoin-project/lotus#6351](https://github.com/filecoin-project/lotus/pull/6351)) + - cli doc can be found [here](https://github.com/filecoin-project/lotus/blob/master/documentation/en/cli-lotus.md#lotus-filplus) +- Add miner-side MaxDealStartDelay config ([filecoin-project/lotus#6576](https://github.com/filecoin-project/lotus/pull/6576)) + + +### Bug Fixes +- chainstore: Don't take heaviestLk with backlogged reorgCh ([filecoin-project/lotus#6526](https://github.com/filecoin-project/lotus/pull/6526)) +- Backport #6041 - storagefsm: Fix batch deal packing behavior ([filecoin-project/lotus#6519](https://github.com/filecoin-project/lotus/pull/6519)) +- backport: pick the correct partitions-per-post limit ([filecoin-project/lotus#6503](https://github.com/filecoin-project/lotus/pull/6503)) +- failed sectors should be added into res correctly ([filecoin-project/lotus#6472](https://github.com/filecoin-project/lotus/pull/6472)) +- sealing: Fix restartSectors race ([filecoin-project/lotus#6491](https://github.com/filecoin-project/lotus/pull/6491)) +- Fund miners with the aggregate fee when ProveCommitting ([filecoin-project/lotus#6428](https://github.com/filecoin-project/lotus/pull/6428)) +- Commit and Precommit batcher cannot share a getSectorDeadline method ([filecoin-project/lotus#6416](https://github.com/filecoin-project/lotus/pull/6416)) +- Fix supported proof type manipulations for v5 actors ([filecoin-project/lotus#6366](https://github.com/filecoin-project/lotus/pull/6366)) +- events: Fix handling of multiple matched events per epoch ([filecoin-project/lotus#6362](https://github.com/filecoin-project/lotus/pull/6362)) +- Fix randomness fetching around null blocks ([filecoin-project/lotus#6240](https://github.com/filecoin-project/lotus/pull/6240)) + +### Improvements +- Appimage v1.10.0 rc3 
([filecoin-project/lotus#6492](https://github.com/filecoin-project/lotus/pull/6492)) +- Expand on Drand change testing ([filecoin-project/lotus#6500](https://github.com/filecoin-project/lotus/pull/6500)) +- Backport Fix logging around mineOne ([filecoin-project/lotus#6499](https://github.com/filecoin-project/lotus/pull/6499)) +- mpool: Add more metrics ([filecoin-project/lotus#6453](https://github.com/filecoin-project/lotus/pull/6453)) +- Merge backported PRs into v1.10 release branch ([filecoin-project/lotus#6436](https://github.com/filecoin-project/lotus/pull/6436)) +- Fix tests ([filecoin-project/lotus#6371](https://github.com/filecoin-project/lotus/pull/6371)) +- Extend the default deal start epoch delay ([filecoin-project/lotus#6350](https://github.com/filecoin-project/lotus/pull/6350)) +- sealing: Wire up context to batchers ([filecoin-project/lotus#6497](https://github.com/filecoin-project/lotus/pull/6497)) +- Improve address resolution for messages ([filecoin-project/lotus#6364](https://github.com/filecoin-project/lotus/pull/6364)) + +### Dependency Updates +- Proofs v8.0.2 ([filecoin-project/lotus#6524](https://github.com/filecoin-project/lotus/pull/6524)) +- Update to fixed Bellperson ([filecoin-project/lotus#6480](https://github.com/filecoin-project/lotus/pull/6480)) +- Update to go-praamfetch with fslocks ([filecoin-project/lotus#6473](https://github.com/filecoin-project/lotus/pull/6473)) +- Update ffi with fixed multicore sdr support ([filecoin-project/lotus#6471](https://github.com/filecoin-project/lotus/pull/6471)) +- github.com/filecoin-project/go-paramfetch (v0.0.2-0.20200701152213-3e0f0afdc261 -> v0.0.2-0.20210614165157-25a6c7769498) +- github.com/filecoin-project/specs-actors/v5 (v5.0.0-20210512015452-4fe3889fff57 -> v5.0.0) +- github.com/filecoin-project/go-hamt-ipld/v3 (v3.0.1 -> v3.1.0) +- github.com/ipfs/go-log/v2 (v2.1.2-0.20200626104915-0016c0b4b3e4 -> v2.1.3) +- github.com/filecoin-project/go-amt-ipld/v3 (v3.0.0 -> v3.1.0) + +### Network 
Version v13 HyperDrive Upgrade +- Set HyperDrive upgrade epoch ([filecoin-project/lotus#6565](https://github.com/filecoin-project/lotus/pull/6565)) +- version bump to lotus v1.10.0-rc6 ([filecoin-project/lotus#6529](https://github.com/filecoin-project/lotus/pull/6529)) +- Upgrade epochs for calibration reset ([filecoin-project/lotus#6528](https://github.com/filecoin-project/lotus/pull/6528)) +- Lotus version 1.10.0-rc5 ([filecoin-project/lotus#6504](https://github.com/filecoin-project/lotus/pull/6504)) +- Merge releases into v1.10 release ([filecoin-project/lotus#6494](https://github.com/filecoin-project/lotus/pull/6494)) +- update lotus to v1.10.0-rc3 ([filecoin-project/lotus#6481](https://github.com/filecoin-project/lotus/pull/6481)) +- updated configuration comments for docs +- Lotus version 1.10.0-rc2 ([filecoin-project/lotus#6443](https://github.com/filecoin-project/lotus/pull/6443)) +- Set ntwk v13 HyperDrive Calibration upgrade epoch ([filecoin-project/lotus#6442](https://github.com/filecoin-project/lotus/pull/6442)) + + +## Contributors + +💙Thank you to all the contributors! + +| Contributor | Commits | Lines ± | Files Changed | +|--------------------|---------|-------------|---------------| +| @magik6k | 81 | +9606/-1536 | 361 | +| @arajasek | 41 | +6543/-679 | 189 | +| @ZenGround0 | 11 | +4074/-727 | 110 | +| @anorth | 10 | +2035/-1177 | 55 | +| @iand | 1 | +779/-12 | 5 | +| @frrist | 2 | +722/-6 | 6 | +| @Stebalien | 6 | +368/-24 | 15 | +| @jennijuju | 11 | +204/-111 | 19 | +| @vyzo | 6 | +155/-66 | 13 | +| @coryschwartz | 10 | +171/-27 | 14 | +| @Kubuxu | 4 | +177/-13 | 7 | +| @ribasushi | 4 | +65/-42 | 5 | +| @travisperson | 2 | +11/-11 | 4 | +| @kirk-baird | 1 | +1/-5 | 1 | +| @wangchao | 2 | +3/-2 | 2 | + + +# 1.9.0 / 2021-05-17 + +This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes. 
+ +## Highlights + +- OpenRPC Support (https://github.com/filecoin-project/lotus/pull/5843) +- Take latency into account when making interactive deals (https://github.com/filecoin-project/lotus/pull/5876) +- Update go-commp-utils for >10x faster client commp calculation (https://github.com/filecoin-project/lotus/pull/5892) +- add `lotus client cancel-retrieval` cmd to lotus CLI (https://github.com/filecoin-project/lotus/pull/5871) +- add `inspect-deal` command to `lotus client` (https://github.com/filecoin-project/lotus/pull/5833) +- Local retrieval support (https://github.com/filecoin-project/lotus/pull/5917) +- go-fil-markets v1.1.9 -> v1.2.5 + - For a detailed changelog see https://github.com/filecoin-project/go-fil-markets/blob/master/CHANGELOG.md +- rust-fil-proofs v5.4.1 -> v7.0.1 + - For a detailed changelog see https://github.com/filecoin-project/rust-fil-proofs/blob/master/CHANGELOG.md + +## Changes +- storagefsm: Apply global events even in broken states (https://github.com/filecoin-project/lotus/pull/5962) +- Default the AlwaysKeepUnsealedCopy flag to true (https://github.com/filecoin-project/lotus/pull/5743) +- splitstore: compact hotstore prior to garbage collection (https://github.com/filecoin-project/lotus/pull/5778) +- ipfs-force bootstrapper update (https://github.com/filecoin-project/lotus/pull/5799) +- better logging when unsealing fails (https://github.com/filecoin-project/lotus/pull/5851) +- perf: add cache for gas premium estimation (https://github.com/filecoin-project/lotus/pull/5709) +- backupds: Compact log on restart (https://github.com/filecoin-project/lotus/pull/5875) +- backupds: Improve truncated log handling (https://github.com/filecoin-project/lotus/pull/5891) +- State CLI improvements (State CLI improvements) +- API proxy struct codegen (https://github.com/filecoin-project/lotus/pull/5854) +- move DI stuff for paychmgr into modules (https://github.com/filecoin-project/lotus/pull/5791) +- Implement Event observer and Settings for 3rd 
party dep injection (https://github.com/filecoin-project/lotus/pull/5693) +- Export developer and network commands for consumption by derivatives of Lotus (https://github.com/filecoin-project/lotus/pull/5864) +- mock sealer: Simulate randomness sideeffects (https://github.com/filecoin-project/lotus/pull/5805) +- localstorage: Demote reservation stat error to debug (https://github.com/filecoin-project/lotus/pull/5976) +- shed command to unpack miner info dumps (https://github.com/filecoin-project/lotus/pull/5800) +- Add two utils to Lotus-shed (https://github.com/filecoin-project/lotus/pull/5867) +- add shed election estimate command (https://github.com/filecoin-project/lotus/pull/5092) +- Add --actor flag in lotus-shed sectors terminate (https://github.com/filecoin-project/lotus/pull/5819) +- Move lotus mpool clear to lotus-shed (https://github.com/filecoin-project/lotus/pull/5900) +- Centralize everything on ipfs/go-log/v2 (https://github.com/filecoin-project/lotus/pull/5974) +- expose NextID from nice market actor interface (https://github.com/filecoin-project/lotus/pull/5850) +- add available options for perm on error (https://github.com/filecoin-project/lotus/pull/5814) +- API docs clarification: Document StateSearchMsg replaced message behavior (https://github.com/filecoin-project/lotus/pull/5838) +- api: Document StateReplay replaced message behavior (https://github.com/filecoin-project/lotus/pull/5840) +- add godocs to miner objects (https://github.com/filecoin-project/lotus/pull/2184) +- Add description to the client deal CLI command (https://github.com/filecoin-project/lotus/pull/5999) +- lint: don't skip builtin (https://github.com/filecoin-project/lotus/pull/5881) +- use deal duration from actors (https://github.com/filecoin-project/lotus/pull/5270) +- remote calc winningpost proof (https://github.com/filecoin-project/lotus/pull/5884) +- packer: other network images (https://github.com/filecoin-project/lotus/pull/5930) +- Convert the chainstore lock to 
RW (https://github.com/filecoin-project/lotus/pull/5971) +- Remove CachedBlockstore (https://github.com/filecoin-project/lotus/pull/5972) +- remove messagepool CapGasFee duplicate code (https://github.com/filecoin-project/lotus/pull/5992) +- Add a mining-heartbeat INFO line at every epoch (https://github.com/filecoin-project/lotus/pull/6183) +- chore(ci): Enable build on RC tags (https://github.com/filecoin-project/lotus/pull/6245) +- Upgrade nerpa to actor v4 and bump the version to rc4 (https://github.com/filecoin-project/lotus/pull/6249) +## Fixes +- return buffers after canceling badger operation (https://github.com/filecoin-project/lotus/pull/5796) +- avoid holding a lock while calling the View callback (https://github.com/filecoin-project/lotus/pull/5792) +- storagefsm: Trigger input processing when below limits (https://github.com/filecoin-project/lotus/pull/5801) +- After importing a previously deleted key, be able to delete it again (https://github.com/filecoin-project/lotus/pull/4653) +- fix StateManager.Replay on reward actor (https://github.com/filecoin-project/lotus/pull/5804) +- make sure atomic 64bit fields are 64bit aligned (https://github.com/filecoin-project/lotus/pull/5794) +- Import secp sigs in paych tests (https://github.com/filecoin-project/lotus/pull/5879) +- fix ci build-macos (https://github.com/filecoin-project/lotus/pull/5934) +- Fix creation of remainder account when it's not a multisig (https://github.com/filecoin-project/lotus/pull/5807) +- Fix fallback chainstore (https://github.com/filecoin-project/lotus/pull/6003) +- fix 4857: show help for set-addrs (https://github.com/filecoin-project/lotus/pull/5943) +- fix health report (https://github.com/filecoin-project/lotus/pull/6011) +- fix(ci): Use recent ubuntu LTS release; Update release params ((https://github.com/filecoin-project/lotus/pull/6011)) + +# 1.8.0 / 2021-04-05 + +This is a mandatory release of Lotus that upgrades the network to version 12, which introduces various 
performance improvements to the cron processing of the power actor. The network will upgrade at height 712320, which is 2021-04-29T06:00:00Z. + +## Changes + +- v4 specs-actors integration, nv12 migration (https://github.com/filecoin-project/lotus/pull/6116) + +# 1.6.0 / 2021-04-05 + +This is a mandatory release of Lotus that upgrades the network to version 11, which implements [FIP-0014](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0014.md). The network will upgrade at height 665280, which is 2021-04-12T22:00:00Z. + +## v1 sector extension CLI + +This release also expands the `lotus-miner sectors extend` CLI, with a new option that automatically extends all extensible v1 sectors. The option can be run using `lotus-miner sectors extend --v1-sectors`. + +- The `tolerance` flag can be passed to indicate what durations aren't "worth" extending. It defaults to one week, which means that sectors whose current lifetimes are within one week of the maximum possible lifetime will not be extended. + +- The `expiration-cutoff` flag can be passed to skip sectors whose expiration is past a certain point from the current head. It defaults to infinity (no cutoff), but if, say, 28800 was specified, then only sectors expiring in the next 10 days would be extended (2880 epochs in 1 day). + +## Changes + +- Util for miners to extend all v1 sectors (https://github.com/filecoin-project/lotus/pull/5924) +- Upgrade the butterfly network (https://github.com/filecoin-project/lotus/pull/5929) +- Introduce the v11 network upgrade (https://github.com/filecoin-project/lotus/pull/5904) +- Debug mode: Make upgrade heights controllable by an envvar (https://github.com/filecoin-project/lotus/pull/5919) + +# 1.5.3 / 2021-03-24 + +This is a patch release of Lotus that introduces small fixes to the Storage FSM. 
+ +## Changes + +- storagefsm: Fix double unlock with ready WaitDeals sectors (https://github.com/filecoin-project/lotus/pull/5783) +- backupds: Allow larger values in write log (https://github.com/filecoin-project/lotus/pull/5776) +- storagefsm: Don't log the SectorRestart event (https://github.com/filecoin-project/lotus/pull/5779) + +# 1.5.2 / 2021-03-11 + +This is a hotfix release of Lotus that fixes a critical bug introduced in v1.5.1 in the miner windowPoSt logic. This upgrade only affects miner nodes. + +## Changes +- fix window post rand check (https://github.com/filecoin-project/lotus/pull/5773) +- wdpost: Always use head tipset to get randomness (https://github.com/filecoin-project/lotus/pull/5774) + +# 1.5.1 / 2021-03-10 + +This is an optional release of Lotus that introduces an important fix to the WindowPoSt computation process. The change is to wait for some confidence before drawing beacon randomness for the proof. Without this, invalid proofs might be generated as the result of a null tipset. + +## Splitstore + +This release also introduces the splitstore, a new optional blockstore that segregates the monolithic blockstore into cold and hot regions. The hot region contains objects from the last 4-5 finalities plus all reachable objects from two finalities away. All other objects are moved to the cold region using a compaction process that executes every finality, once 5 finalities have elapsed. + +The splitstore allows us to separate the two regions quite effectively, using two separate badger blockstores. The separation +means that the live working set is much smaller, which results in potentially significant performance improvements. In addition, it means that the coldstore can be moved to a separate (bigger, slower, cheaper) disk without loss of performance. 
+ +The design also allows us to use different implementations for the two blockstores; for example, an append-only blockstore could be used for coldstore and a faster memory mapped blockstore could be used for the hotstore (eg LMDB). We plan to experiment with these options in the future. + +Once the splitstore has been enabled, the existing monolithic blockstore becomes the coldstore. On the first head change notification, the splitstore will warm up the hotstore by copying all reachable objects from the current tipset into the hotstore. All new writes go into the hotstore, with the splitstore tracking the write epoch. Once 5 finalities have elapsed, and every finality thereafter, the splitstore compacts by moving cold objects into the coldstore. There is also experimental support for garbage collection, whereby unreachable objects are simply discarded. + +To enable the splitstore, add the following to config.toml: + +``` +[Chainstore] + EnableSplitstore = true +``` + +## Highlights + +Other highlights include: + +- Improved deal data handling - now multiple deals can be added to sectors in parallel +- Rewritten sector pledging - it now actually cares about max sealing sector limits +- Better handling for sectors stuck in the RecoverDealIDs state +- lotus-miner sectors extend command +- Optional configurable storage path size limit +- Config to disable owner/worker fallback from control addresses (useful when owner is a key on a hardware wallet) +- A write log for node metadata, which can be restored as a backup when the metadata leveldb becomes corrupted (e.g. 
when you run out of disk space / system crashes in some bad way) + +## Changes + +- avoid use mp.cfg directly to avoid race (https://github.com/filecoin-project/lotus/pull/5350) +- Show replacing message CID is state search-msg cli (https://github.com/filecoin-project/lotus/pull/5656) +- Fix riceing by importing the main package (https://github.com/filecoin-project/lotus/pull/5675) +- Remove sectors with all deals expired in RecoverDealIDs (https://github.com/filecoin-project/lotus/pull/5658) +- storagefsm: Rewrite input handling (https://github.com/filecoin-project/lotus/pull/5375) +- reintroduce Refactor send command for better testability (https://github.com/filecoin-project/lotus/pull/5668) +- Improve error message with importing a chain (https://github.com/filecoin-project/lotus/pull/5669) +- storagefsm: Cleanup CC sector creation (https://github.com/filecoin-project/lotus/pull/5612) +- chain list --gas-stats display capacity (https://github.com/filecoin-project/lotus/pull/5676) +- Correct some logs (https://github.com/filecoin-project/lotus/pull/5694) +- refactor blockstores (https://github.com/filecoin-project/lotus/pull/5484) +- Add idle to sync stage's String() (https://github.com/filecoin-project/lotus/pull/5702) +- packer provisioner (https://github.com/filecoin-project/lotus/pull/5604) +- add DeleteMany to Blockstore interface (https://github.com/filecoin-project/lotus/pull/5703) +- segregate chain and state blockstores (https://github.com/filecoin-project/lotus/pull/5695) +- fix(multisig): The format of the amount is not correct in msigLockApp (https://github.com/filecoin-project/lotus/pull/5718) +- Update butterfly network (https://github.com/filecoin-project/lotus/pull/5627) +- Collect worker task metrics (https://github.com/filecoin-project/lotus/pull/5648) +- Correctly format disputer log (https://github.com/filecoin-project/lotus/pull/5716) +- Log block CID in the large delay warning (https://github.com/filecoin-project/lotus/pull/5704) +- Move 
api client builders to a cliutil package (https://github.com/filecoin-project/lotus/pull/5728) +- Implement net peers --extended (https://github.com/filecoin-project/lotus/pull/5734) +- Command to extend sector expiration (https://github.com/filecoin-project/lotus/pull/5666) +- garbage collect hotstore after compaction (https://github.com/filecoin-project/lotus/pull/5744) +- tune badger gc to repeatedly gc the value log until there is no rewrite (https://github.com/filecoin-project/lotus/pull/5745) +- Add configuration option for pubsub IPColocationWhitelist subnets (https://github.com/filecoin-project/lotus/pull/5735) +- hot/cold blockstore segregation (aka. splitstore) (https://github.com/filecoin-project/lotus/pull/4992) +- Customize verifreg root key and remainder account when making genesis (https://github.com/filecoin-project/lotus/pull/5730) +- chore: update go-graphsync to 0.6.0 (https://github.com/filecoin-project/lotus/pull/5746) +- Add connmgr metadata to NetPeerInfo (https://github.com/filecoin-project/lotus/pull/5749) +- test: attempt to make the splitstore test deterministic (https://github.com/filecoin-project/lotus/pull/5750) +- Feat/api no dep build (https://github.com/filecoin-project/lotus/pull/5729) +- Fix bootstrapper profile setting (https://github.com/filecoin-project/lotus/pull/5756) +- Check liveness of sectors when processing termination batches (https://github.com/filecoin-project/lotus/pull/5759) +- Configurable storage path storage limit (https://github.com/filecoin-project/lotus/pull/5624) +- miner: Config to disable owner/worker address fallback (https://github.com/filecoin-project/lotus/pull/5620) +- Fix TestUnpadReader on Go 1.16 (https://github.com/filecoin-project/lotus/pull/5761) +- Metadata datastore log (https://github.com/filecoin-project/lotus/pull/5755) +- Remove the SR2 stats, leave just the network totals (https://github.com/filecoin-project/lotus/pull/5757) +- fix: wait a bit before starting to compute window post proofs 
(https://github.com/filecoin-project/lotus/pull/5764) +- fix: retry proof when randomness changes (https://github.com/filecoin-project/lotus/pull/5768) + + +# 1.5.0 / 2021-02-23 + +This is a mandatory release of Lotus that introduces the fifth upgrade to the Filecoin network. The network upgrade occurs at height 550321, before which time all nodes must have updated to this release (or later). At this height, [v3 specs-actors](https://github.com/filecoin-project/specs-actors/releases/tag/v3.0.0) will take effect, which in turn implements the following two FIPs: + +- [FIP-0007 h/amt-v3](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0007.md) which improves the performance of the Filecoin HAMT and AMT. +- [FIP-0010 off-chain Window PoSt Verification](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0010.md) which reduces the gas consumption of `SubmitWindowedPoSt` messages significantly by optimistically accepting Window PoSt proofs without verification, and allowing them to be disputed later by off-chain verifiers. + +Note that the integration of v3 actors was already completed in 1.4.2, this upgrade simply sets the epoch for the upgrade to occur. + +## Disputer + +FIP-0010 introduces the ability to dispute bad Window PoSts. Node operators are encouraged to run the new Lotus disputer alongside their Lotus daemons. For more information, see the announcement [here](https://github.com/filecoin-project/lotus/discussions/5617#discussioncomment-387333). + +## Changes + +- [#5341](https://github.com/filecoin-project/lotus/pull/5341) Add a `LOTUS_DISABLE_V3_ACTOR_MIGRATION` envvar + - Setting this envvar to 1 disables the v3 actor migration, should only be used in the event of a failed migration + +# 1.4.2 / 2021-02-17 + +This is a large, and highly recommended, optional release with new features and improvements for lotus miner and deal-making UX. 
The release also integrates [v3 specs-actors](https://github.com/filecoin-project/specs-actors/releases/tag/v3.0.0), which implements two FIPs: + +- [FIP-0007 h/amt-v3](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0007.md) which improves the performance of the Filecoin HAMT and AMT. +- [FIP-0010 off-chain Window PoSt Verification](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0010.md) which reduces the gas consumption of `SubmitWindowedPoSt` messages significantly by optimistically accepting Window PoSt proofs without verification, and allowing them to be disputed later by off-chain verifiers. + +Note that this release does NOT set an upgrade epoch for v3 actors to take effect. That will be done in the upcoming 1.5.0 release. + +## New Features + +- [#5341](https://github.com/filecoin-project/lotus/pull/5341) Added sector termination API and CLI + - Run `lotus-miner sectors terminate` +- [#5342](https://github.com/filecoin-project/lotus/pull/5342) Added CLI for using a multisig wallet as miner's owner address + - See how to set it up [here](https://github.com/filecoin-project/lotus/pull/5342#issue-554009129) +- [#5363](https://github.com/filecoin-project/lotus/pull/5363), [#5418](https://github.com/filecoin-project/lotus/pull/), [#5476](https://github.com/filecoin-project/lotus/pull/5476), [#5459](https://github.com/filecoin-project/lotus/pull/5459) Integrated [spec-actor v3](https://github.com/filecoin-pro5418ject/specs-actors/releases/tag/v3.0.0) + - [#5472](https://github.com/filecoin-project/lotus/pull/5472) Generate actor v3 methods for pond +- [#5379](https://github.com/filecoin-project/lotus/pull/5379) Added WindowPoSt disputer + - This is to support [FIP-0010 off-chian Window PoSt verification](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0010.md) + - See how to run a disputer [here](https://github.com/filecoin-project/lotus/pull/5379#issuecomment-776482445) +- 
[#5309](https://github.com/filecoin-project/lotus/pull/5309) Batch multiple deals in one `PublishStorageMessages` + - [#5411](https://github.com/filecoin-project/lotus/pull/5411) Handle batch `PublishStorageDeals` message in sealing recovery + - [#5505](https://github.com/filecoin-project/lotus/pull/5505) Exclude expired deals from batching in `PublishStorageDeals` messages + - Added `PublishMsgPeriod` and `MaxDealsPerPublishMsg` to miner `Dealmaking` [configuration](https://docs.filecoin.io/mine/lotus/miner-configuration/#dealmaking-section). See how they work [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#publishing-several-deals-in-one-message). + - [#5538](https://github.com/filecoin-project/lotus/pull/5538), [#5549](https://github.com/filecoin-project/lotus/pull/5549) Added a command to list pending deals and force publish messages. + - Run `lotus-miner market pending-publish` + - [#5428](https://github.com/filecoin-project/lotus/pull/5428) Moved waiting for `PublishStorageDeals` messages' receipt from markets to lotus +- [#5510](https://github.com/filecoin-project/lotus/pull/5510) Added `nerpanet` build option + - To build `nerpanet`, run `make nerpanet` +- [#5433](https://github.com/filecoin-project/lotus/pull/5433) Added `AlwaysKeepUnsealedCopy` option to the miner configuration +- [#5520](https://github.com/filecoin-project/lotus/pull/5520) Added `MsigGetPending` to get pending transactions for multisig wallets +- [#5219](https://github.com/filecoin-project/lotus/pull/5219) Added interactive mode for lotus-wallet +- [5529](https://github.com/filecoin-project/lotus/pull/5529) Added support for minder nodes in `lotus-shed rpc` util + +## Bug Fixes + +- [#5210](https://github.com/filecoin-project/lotus/pull/5210) Miner should not dial client on restart +- [#5403](https://github.com/filecoin-project/lotus/pull/5403) When estimating GasLimit only apply prior messages up to the nonce +- [#5410](https://github.com/filecoin-project/lotus/pull/510) 
Fix the calibnet build option +- [#5492](https://github.com/filecoin-project/lotus/pull/5492) Fixed `has` for ipfsbstore for non-existing blocks +- [#5361](https://github.com/filecoin-project/lotus/pull/5361) Fixed retrieval hangs when using `IpfsOnlineMode=true` +- [#5493](https://github.com/filecoin-project/lotus/pull/5493) Fixed retrieval failure when price-per-byte is zero +- [#5506](https://github.com/filecoin-project/lotus/pull/5506) Fixed contexts in the storage adpater +- [#5515](https://github.com/filecoin-project/lotus/pull/5515) Properly wire up `StateReadState` on gateway API +- [#5582](https://github.com/filecoin-project/lotus/pull/5582) Fixed error logging format strings +- [#5614](https://github.com/filecoin-project/lotus/pull/5614) Fixed websocket reconnecting handling + + +## Improvements + +- [#5389](https://github.com/filecoin-project/lotus/pull/5389) Show verified indicator for `./lotus-miner storage-deals list` +- [#5229](https://github.com/filecoin-project/lotus/pull/5220) Show power for verified deals in `./lotus-miner setocr list` +- [#5407](https://github.com/filecoin-project/lotus/pull/5407) Added explicit check of the miner address protocol +- [#5399](https://github.com/filecoin-project/lotus/pull/5399) watchdog: increase heapprof capture threshold to 90% +- [#5398](https://github.com/filecoin-project/lotus/pull/5398) storageadapter: Look at precommits on-chain since deal publish msg +- [#5470](https://github.com/filecoin-project/lotus/pull/5470) Added `--no-timing` option for `./lotus state compute-state --html` +- [#5417](https://github.com/filecoin-project/lotus/pull/5417) Storage Manager: Always unseal full sectors +- [#5393](https://github.com/filecoin-project/lotus/pull/5393) Switched to [filecoin-ffi bls api ](https://github.com/filecoin-project/filecoin-ffi/pull/159)for bls signatures +- [#5380](https://github.com/filecoin-project/lotus/pull/5210) Refactor deals API tests +- 
[#5397](https://github.com/filecoin-project/lotus/pull/5397) Fixed a flake in the sync manager edge case test +- [#5406](https://github.com/filecoin-project/lotus/pull/5406) Added a test to ensure a correct window post cannot be disputed +- [#5294](https://github.com/filecoin-project/lotus/pull/5394) Added jobs to build Lotus docker image and push it to AWS ECR +- [#5387](https://github.com/filecoin-project/lotus/pull/5387) Added network info(mainnet|calibnet) in version +- [#5497](https://github.com/filecoin-project/lotus/pull/5497) Export metric for lotus-gateaway +- [#4950](https://github.com/filecoin-project/lotus/pull/4950) Removed bench policy +- [#5047](https://github.com/filecoin-project/lotus/pull/5047) Improved the UX for `./lotus-shed bitfield enc` +- [#5282](https://github.com/filecoin-project/lotus/pull/5282) Snake a context through the chian blockstore creation +- [#5350](https://github.com/filecoin-project/lotus/pull/5350) Avoid using `mp.cfg` directrly to prevent race condition +- [#5449](https://github.com/filecoin-project/lotus/pull/5449) Documented the block-header better +- [#5404](https://github.com/filecoin-project/lotus/pull/5404) Added retrying proofs if an incorrect one is generated +- [#4545](https://github.com/filecoin-project/lotus/pull/4545) Made state tipset usage consistent in the API +- [#5540](https://github.com/filecoin-project/lotus/pull/5540) Removed unnecessary database reads in validation check +- [#5554](https://github.com/filecoin-project/lotus/pull/5554) Fixed `build lotus-soup` CI job +- [#5552](https://github.com/filecoin-project/lotus/pull/5552) Updated CircleCI to halt gracefully +- [#5555](https://github.com/filecoin-project/lotus/pull/5555) Cleanup and add docstrings of node builder +- [#5564](https://github.com/filecoin-project/lotus/pull/5564) Stopped depending on gocheck with gomod +- [#5574](https://github.com/filecoin-project/lotus/pull/5574) Updated CLI UI +- 
[#5570](https://github.com/filecoin-project/lotus/pull/5570) Added code CID to `StateReadState` return object +- [#5565](https://github.com/filecoin-project/lotus/pull/5565) Added storageadapter.PublishMsgConfig to miner in testkit for lotus-soup testplan +- [#5571](https://github.com/filecoin-project/lotus/pull/5571) Added `lotus-seed gensis car` to generate lotus block for devnets +- [#5613](https://github.com/filecoin-project/lotus/pull/5613) Check format in client commP util +- [#5507](https://github.com/filecoin-project/lotus/pull/5507) Refactored coalescing logic into its own function and take both cancellation sets into account +- [#5592](https://github.com/filecoin-project/lotus/pull/5592) Verify FFI version before building + +## Dependency Updates +- [#5296](https://github.com/filecoin-project/lotus/pull/5396) Upgraded to [raulk/go-watchdog@v1.0.1](https://github.com/raulk/go-watchdog/releases/tag/v1.0.1) +- [#5450](https://github.com/filecoin-project/lotus/pull/5450) Dependency updates +- [#5425](https://github.com/filecoin-project/lotus/pull/5425) Fixed stale imports in testplans/lotus-soup +- [#5535](https://github.com/filecoin-project/lotus/pull/5535) Updated to [go-fil-markets@v1.1.7](https://github.com/filecoin-project/go-fil-markets/releases/tag/v1.1.7) +- [#5616](https://github.com/filecoin-project/lotus/pull/5600) Updated to [filecoin-ffi@b6e0b35fb49ed0fe](https://github.com/filecoin-project/filecoin-ffi/releases/tag/b6e0b35fb49ed0fe) +- [#5599](https://github.com/filecoin-project/lotus/pull/5599) Updated to [go-bitfield@v0.2.4](https://github.com/filecoin-project/go-bitfield/releases/tag/v0.2.4) +- [#5614](https://github.com/filecoin-project/lotus/pull/5614), , [#5621](https://github.com/filecoin-project/lotus/pull/5621) Updated to [go-jsonrpc@v0.1.3](https://github.com/filecoin-project/go-jsonrpc/releases/tag/v0.1.3) +- [#5459](https://github.com/filecoin-project/lotus/pull/5459) Updated to 
[spec-actors@v3.0.1](https://github.com/filecoin-project/specs-actors/releases/tag/v3.0.1) + + +## Network Version v10 Upgrade +- [#5473](https://github.com/filecoin-project/lotus/pull/5473) Merged staging branch for v1.5.0 +- [#5603](https://github.com/filecoin-project/lotus/pull/5603) Set nerpanet's upgrade epochs up to v3 actors +- [#5471](https://github.com/filecoin-project/lotus/pull/5471), [#5456](https://github.com/filecoin-project/lotus/pull/5456) Set calibration net actor v3 migration epochs for testing +- [#5434](https://github.com/filecoin-project/lotus/pull/5434) Implemented pre-migration framework +- [#5476](https://github.com/filecoin-project/lotus/pull/5477) Tune migration + +# 1.4.1 / 2021-01-20 + +This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes. In particular, [#5341](https://github.com/filecoin-project/lotus/pull/5341) introduces the ability for Lotus miners to terminate sectors. + +## Changes + +#### Core Lotus + +- fix(sync): enforce ForkLengthThreshold for synced chain (https://github.com/filecoin-project/lotus/pull/5182) +- introduce memory watchdog; LOTUS_MAX_HEAP (https://github.com/filecoin-project/lotus/pull/5101) +- Skip bootstrapping if no peers specified (https://github.com/filecoin-project/lotus/pull/5301) +- Chainxchg write response timeout (https://github.com/filecoin-project/lotus/pull/5254) +- update NewestNetworkVersion (https://github.com/filecoin-project/lotus/pull/5277) +- fix(sync): remove checks bypass when we submit the block (https://github.com/filecoin-project/lotus/pull/4192) +- chore: export vm.ShouldBurn (https://github.com/filecoin-project/lotus/pull/5355) +- fix(sync): enforce fork len when changing head (https://github.com/filecoin-project/lotus/pull/5244) +- Use 55th percentile instead of median for gas-price (https://github.com/filecoin-project/lotus/pull/5369) +- update go-libp2p-pubsub to v0.4.1 
(https://github.com/filecoin-project/lotus/pull/5329) + +#### Sealing + +- Sector termination support (https://github.com/filecoin-project/lotus/pull/5341) +- update weight canSeal and canStore when attach (https://github.com/filecoin-project/lotus/pull/5242/files) +- sector-storage/mock: improve mocked readpiece (https://github.com/filecoin-project/lotus/pull/5208) +- Fix deadlock in runWorker in sched_worker.go (https://github.com/filecoin-project/lotus/pull/5251) +- Skip checking terminated sectors provable (https://github.com/filecoin-project/lotus/pull/5217) +- storagefsm: Fix unsealedInfoMap.lk init race (https://github.com/filecoin-project/lotus/pull/5319) +- Multicore AddPiece CommP (https://github.com/filecoin-project/lotus/pull/5320) +- storagefsm: Send correct event on ErrExpiredTicket in CommitFailed (https://github.com/filecoin-project/lotus/pull/5366) +- expose StateSearchMessage on gateway (https://github.com/filecoin-project/lotus/pull/5382) +- fix FileSize to return correct disk usage recursively (https://github.com/filecoin-project/lotus/pull/5384) + +#### Dealmaking + +- Better error message when withdrawing funds (https://github.com/filecoin-project/lotus/pull/5293) +- add verbose for list transfers (https://github.com/filecoin-project/lotus/pull/5259) +- cli - rename `client info` to `client balances` (https://github.com/filecoin-project/lotus/pull/5304) +- Better CLI for wallet market withdraw and client info (https://github.com/filecoin-project/lotus/pull/5303) + +#### UX + +- correct flag usages for replace cmd (https://github.com/filecoin-project/lotus/pull/5255) +- lotus state call will panic (https://github.com/filecoin-project/lotus/pull/5275) +- fix get sector bug (https://github.com/filecoin-project/lotus/pull/4976) +- feat: lotus wallet market add (adds funds to storage market actor) (https://github.com/filecoin-project/lotus/pull/5300) +- Fix client flag parsing in client balances cli 
(https://github.com/filecoin-project/lotus/pull/5312) +- delete slash-consensus miner (https://github.com/filecoin-project/lotus/pull/4577) +- add fund sufficient check in send (https://github.com/filecoin-project/lotus/pull/5252) +- enable parse and shorten negative FIL values (https://github.com/filecoin-project/lotus/pull/5315) +- add limit and rate for chain noise (https://github.com/filecoin-project/lotus/pull/5223) +- add bench env print (https://github.com/filecoin-project/lotus/pull/5222) +- Implement full-node restore option (https://github.com/filecoin-project/lotus/pull/5362) +- add color for token amount (https://github.com/filecoin-project/lotus/pull/5352) +- correct log in maybeUseAddress (https://github.com/filecoin-project/lotus/pull/5359) +- add slash-consensus from flag (https://github.com/filecoin-project/lotus/pull/5378) + +#### Testing + +- tvx extract: more tipset extraction goodness (https://github.com/filecoin-project/lotus/pull/5258) +- Fix race in blockstore test suite (https://github.com/filecoin-project/lotus/pull/5297) + + +#### Build & Networks + +- Remove LOTUS_DISABLE_V2_ACTOR_MIGRATION envvar (https://github.com/filecoin-project/lotus/pull/5289) +- Create a calibnet build option (https://github.com/filecoin-project/lotus/pull/5288) +- Calibnet: Set Orange epoch (https://github.com/filecoin-project/lotus/pull/5325) + +#### Management + +- Update SECURITY.md (https://github.com/filecoin-project/lotus/pull/5246) +- README: Contribute section (https://github.com/filecoin-project/lotus/pull/5330) +- README: refine Contribute section (https://github.com/filecoin-project/lotus/pull/5331) +- Add misc tooling to codecov ignore list (https://github.com/filecoin-project/lotus/pull/5347) + +# 1.4.0 / 2020-12-19 + +This is a MANDATORY hotfix release of Lotus that resolves a chain halt at height 336,459 caused by nondeterminism in specs-actors. 
The fix is to update actors to 2.3.3 in order to incorporate this fix https://github.com/filecoin-project/specs-actors/pull/1334. + +# 1.3.0 / 2020-12-16 + +This is a mandatory release of Lotus that introduces the third post-liftoff upgrade to the Filecoin network. The network upgrade occurs at height 343200, before which time all nodes must have updated to this release (or later). The change that breaks consensus is an implementation of FIP-0009(https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0009.md). + +## Changes + +- Disable gas burning for window post messages (https://github.com/filecoin-project/lotus/pull/5200) +- fix lock propose (https://github.com/filecoin-project/lotus/pull/5197) + +# 1.2.3 / 2020-12-15 + +This is an optional Lotus release that introduces many performance improvements, bugfixes, and UX improvements. + +## Changes + +- When waiting for deal commit messages, ignore unsuccessful messages (https://github.com/filecoin-project/lotus/pull/5189) +- Bigger copy buffer size for stores (https://github.com/filecoin-project/lotus/pull/5177) +- Print MinPieceSize when querying ask (https://github.com/filecoin-project/lotus/pull/5178) +- Optimize miner info & sectors list loading (https://github.com/filecoin-project/lotus/pull/5176) +- Allow miners to filter (un)verified deals (https://github.com/filecoin-project/lotus/pull/5094) +- Fix curSealing out of MaxSealingSectors limit (https://github.com/filecoin-project/lotus/pull/5166) +- Add mpool pending from / to filter (https://github.com/filecoin-project/lotus/pull/5169) +- Add metrics for delayed blocks (https://github.com/filecoin-project/lotus/pull/5171) +- Fix PushUntrusted publishing -- the message is local (https://github.com/filecoin-project/lotus/pull/5173) +- Avoid potential hang in events API when starting event listener (https://github.com/filecoin-project/lotus/pull/5159) +- Show data transfer ID in list-deals (https://github.com/filecoin-project/lotus/pull/5150) +- Fix 
events API mutex locking (https://github.com/filecoin-project/lotus/pull/5160) +- Message pool refactors (https://github.com/filecoin-project/lotus/pull/5162) +- Fix lotus-shed cid output (https://github.com/filecoin-project/lotus/pull/5072) +- Use FundManager to withdraw funds, add MarketWithdraw API (https://github.com/filecoin-project/lotus/pull/5112) +- Add keygen outfile (https://github.com/filecoin-project/lotus/pull/5118) +- Update sr2 stat aggregation (https://github.com/filecoin-project/lotus/pull/5114) +- Fix miner control address lookup (https://github.com/filecoin-project/lotus/pull/5119) +- Fix send with declared nonce 0 (https://github.com/filecoin-project/lotus/pull/5111) +- Introduce memory watchdog; LOTUS_MAX_HEAP (https://github.com/filecoin-project/lotus/pull/5101) +- Miner control address config for (pre)commits (https://github.com/filecoin-project/lotus/pull/5103) +- Delete repeated call func (https://github.com/filecoin-project/lotus/pull/5099) +- lotus-shed ledger show command (https://github.com/filecoin-project/lotus/pull/5098) +- Log a message when there aren't enough peers for sync (https://github.com/filecoin-project/lotus/pull/5105) +- Miner code cleanup (https://github.com/filecoin-project/lotus/pull/5107) + +# 1.2.2 / 2020-12-03 + +This is an optional Lotus release that introduces various improvements to the mining logic and deal-making workflow, as well as several new UX features. 
+ +## Changes + +- Set lower feecap on PoSt messages with low balance (https://github.com/filecoin-project/lotus/pull/4217) +- Add options to set BlockProfileRate and MutexProfileFraction (https://github.com/filecoin-project/lotus/pull/4140) +- Shed/post find (https://github.com/filecoin-project/lotus/pull/4355) +- tvx extract: make it work with secp messages.(https://github.com/filecoin-project/lotus/pull/4583) +- update go from 1.14 to 1.15 (https://github.com/filecoin-project/lotus/pull/4909) +- print multiple blocks from miner cid (https://github.com/filecoin-project/lotus/pull/4767) +- Connection Gater support (https://github.com/filecoin-project/lotus/pull/4849) +- just return storedask.NewStoredAsk to reduce unuseful code (https://github.com/filecoin-project/lotus/pull/4902) +- add go main version (https://github.com/filecoin-project/lotus/pull/4910) +- Use version0 when pre-sealing (https://github.com/filecoin-project/lotus/pull/4911) +- optimize code UpgradeTapeHeight and go fmt (https://github.com/filecoin-project/lotus/pull/4913) +- CLI to get network version (https://github.com/filecoin-project/lotus/pull/4914) +- Improve error for ActorsVersionPredicate (https://github.com/filecoin-project/lotus/pull/4915) +- upgrade to go-fil-markets 1.0.5 (https://github.com/filecoin-project/lotus/pull/4916) +- bug:replace with func recordFailure (https://github.com/filecoin-project/lotus/pull/4919) +- Remove unused key (https://github.com/filecoin-project/lotus/pull/4924) +- change typeV7 make len (https://github.com/filecoin-project/lotus/pull/4943) +- emit events for peer disconnections and act upon them in the blocksync tracker (https://github.com/filecoin-project/lotus/pull/4754) +- Fix lotus bench error (https://github.com/filecoin-project/lotus/pull/4305) +- Reduce badger ValueTreshold to 128 (https://github.com/filecoin-project/lotus/pull/4629) +- Downgrade duplicate nonce logs to debug (https://github.com/filecoin-project/lotus/pull/4933) +- readme update 
golang version from 1.14.7 to 1.15.5 (https://github.com/filecoin-project/lotus/pull/4974) +- add data transfer logging (https://github.com/filecoin-project/lotus/pull/4975) +- Remove all temp file generation for deals (https://github.com/filecoin-project/lotus/pull/4929) +- fix get sector bug (https://github.com/filecoin-project/lotus/pull/4976) +- fix nil pointer in StateSectorPreCommitInfo (https://github.com/filecoin-project/lotus/pull/4082) +- Add logging on data-transfer to miner (https://github.com/filecoin-project/lotus/pull/4980) +- bugfix: fixup devnet script (https://github.com/filecoin-project/lotus/pull/4956) +- modify for unsafe (https://github.com/filecoin-project/lotus/pull/4024) +- move testground/lotus-soup testplan from oni to lotus (https://github.com/filecoin-project/lotus/pull/4727) +- Setup remainder msig signers when parsing genesis template (https://github.com/filecoin-project/lotus/pull/4904) +- Update JSON RPC server to enforce a maximum request size (https://github.com/filecoin-project/lotus/pull/4923) +- New SR-specific lotus-shed cmd (https://github.com/filecoin-project/lotus/pull/4971) +- update index to sectorNumber (https://github.com/filecoin-project/lotus/pull/4987) +- storagefsm: Fix expired ticket retry loop (https://github.com/filecoin-project/lotus/pull/4876) +- add .sec scale to measurements; humanize for metric tags (https://github.com/filecoin-project/lotus/pull/4989) +- Support seal proof type switching (https://github.com/filecoin-project/lotus/pull/4873) +- fix log format (https://github.com/filecoin-project/lotus/pull/4984) +- Format workerID as string (https://github.com/filecoin-project/lotus/pull/4973) +- miner: Winning PoSt Warmup (https://github.com/filecoin-project/lotus/pull/4824) +- Default StartDealParams's fast retrieval field to true over JSON (https://github.com/filecoin-project/lotus/pull/4998) +- Fix actor not found in chain inspect-usage (https://github.com/filecoin-project/lotus/pull/5010) +- storagefsm: 
Improve new deal sector logic (https://github.com/filecoin-project/lotus/pull/5007) +- Configure simultaneous requests (https://github.com/filecoin-project/lotus/pull/4996) +- miner: log winningPoSt duration separately (https://github.com/filecoin-project/lotus/pull/5005) +- fix wallet dead lock (https://github.com/filecoin-project/lotus/pull/5002) +- Update go-jsonrpc to v0.1.2 (https://github.com/filecoin-project/lotus/pull/5015) +- markets - separate watching for pre-commit from prove-commit (https://github.com/filecoin-project/lotus/pull/4945) +- storagefsm: Add missing planners (https://github.com/filecoin-project/lotus/pull/5016) +- fix wallet delete address where address is default (https://github.com/filecoin-project/lotus/pull/5019) +- worker: More robust remote checks (https://github.com/filecoin-project/lotus/pull/5008) +- Add new booststrappers (https://github.com/filecoin-project/lotus/pull/4007) +- add a tooling to make filecoin accounting a little easier (https://github.com/filecoin-project/lotus/pull/5025) +- fix: start a new line in print miner-info to avoid ambiguous display (https://github.com/filecoin-project/lotus/pull/5029) +- Print gas limit sum in mpool stat (https://github.com/filecoin-project/lotus/pull/5035) +- Fix chainstore tipset leak (https://github.com/filecoin-project/lotus/pull/5037) +- shed rpc: Allow calling with args (https://github.com/filecoin-project/lotus/pull/5036) +- Make --gas-limit optional in mpool replace cli (https://github.com/filecoin-project/lotus/pull/5059) +- client list-asks --by-ping (https://github.com/filecoin-project/lotus/pull/5060) +- Ledger signature verification (https://github.com/filecoin-project/lotus/pull/5068) +- Fix helptext for verified-deal default in client deal (https://github.com/filecoin-project/lotus/pull/5074) +- worker: Support setting task types at runtime (https://github.com/filecoin-project/lotus/pull/5023) +- Enable Callers tracing when GasTracing is enabled 
(https://github.com/filecoin-project/lotus/pull/5080) +- Cancel transfer cancels storage deal (https://github.com/filecoin-project/lotus/pull/5032) +- Sector check command (https://github.com/filecoin-project/lotus/pull/5041) +- add commp-to-cid base64 decode (https://github.com/filecoin-project/lotus/pull/5079) +- miner info cli improvements (https://github.com/filecoin-project/lotus/pull/5083) +- miner: Add slow mode to proving check (https://github.com/filecoin-project/lotus/pull/5086) +- Error out deals that are not activated by proposed deal start epoch (https://github.com/filecoin-project/lotus/pull/5061) + +# 1.2.1 / 2020-11-20 + +This is a very small release of Lotus that fixes an issue users are experiencing when importing snapshots. There is no need to upgrade unless you experience an issue with creating a new datastore directory in the Lotus repo. + +## Changes + +- fix blockstore directory not created automatically (https://github.com/filecoin-project/lotus/pull/4922) +- WindowPoStScheduler.checkSectors() delete useless judgment (https://github.com/filecoin-project/lotus/pull/4918) + + +# 1.2.0 / 2020-11-18 + +This is a mandatory release of Lotus that introduces the second post-liftoff upgrade to the Filecoin network. The network upgrade occurs at height 265200, before which time all nodes must have updated to this release (or later). This release also bumps the required version of Go to 1.15. 
+ +The changes that break consensus are: + +- Upgrading to sepcs-actors 2.3.2 (https://github.com/filecoin-project/specs-actors/releases/tag/v2.3.2) +- Introducing proofs v5.4.0 (https://github.com/filecoin-project/rust-fil-proofs/releases/tag/storage-proofs-v5.4.0), and switching between the proof types (https://github.com/filecoin-project/lotus/pull/4873) +- Don't use terminated sectors for winning PoSt (https://github.com/filecoin-project/lotus/pull/4770) +- Various small VM-level edge-case handling (https://github.com/filecoin-project/lotus/pull/4783) +- Correction of the VM circulating supply calculation (https://github.com/filecoin-project/lotus/pull/4862) +- Retuning gas costs (https://github.com/filecoin-project/lotus/pull/4830) +- Avoid sending messages to the zero BLS address (https://github.com/filecoin-project/lotus/pull/4888) + +## Other Changes + +- delayed pubsub subscribe for messages topic (https://github.com/filecoin-project/lotus/pull/3646) +- add chain base64 decode params (https://github.com/filecoin-project/lotus/pull/4748) +- chore(dep): update bitswap to fix an initialization race that could panic (https://github.com/filecoin-project/lotus/pull/4855) +- Chore/blockstore nits (https://github.com/filecoin-project/lotus/pull/4813) +- Print Consensus Faults in miner info (https://github.com/filecoin-project/lotus/pull/4853) +- Truncate genesis file before generating (https://github.com/filecoin-project/lotus/pull/4851) +- miner: Winning PoSt Warmup (https://github.com/filecoin-project/lotus/pull/4824) +- Fix init actor address map diffing (https://github.com/filecoin-project/lotus/pull/4875) +- Bump API versions to 1.0.0 (https://github.com/filecoin-project/lotus/pull/4884) +- Fix cid recording issue (https://github.com/filecoin-project/lotus/pull/4874) +- Speed up worker key retrieval (https://github.com/filecoin-project/lotus/pull/4885) +- Add error codes to worker return (https://github.com/filecoin-project/lotus/pull/4890) +- Update go to 
1.15.5 (https://github.com/filecoin-project/lotus/pull/4896) +- Fix MaxSealingSectrosForDeals getting reset to 0 (https://github.com/filecoin-project/lotus/pull/4879) +- add sanity check for maximum block size (https://github.com/filecoin-project/lotus/pull/3171) +- Check (pre)commit receipt before other checks in failed states (https://github.com/filecoin-project/lotus/pull/4712) +- fix badger double open on daemon --import-snapshot; chainstore lifecycle (https://github.com/filecoin-project/lotus/pull/4872) +- Update to ipfs-blockstore 1.0.3 (https://github.com/filecoin-project/lotus/pull/4897) +- break loop when found warm up sector (https://github.com/filecoin-project/lotus/pull/4869) +- Tweak handling of bad beneficaries in DeleteActor (https://github.com/filecoin-project/lotus/pull/4903) +- cap maximum number of messages per block in selection (https://github.com/filecoin-project/lotus/pull/4905) +- Set Calico epoch (https://github.com/filecoin-project/lotus/pull/4889) + +# 1.1.3 / 2020-11-13 + +This is an optional release of Lotus that upgrades Lotus dependencies, and includes many performance enhancements, bugfixes, and UX improvements. 
+ +## Highlights + +- Refactored much of the miner code (https://github.com/filecoin-project/lotus/pull/3618), improving its recovery from restarts and overall sector success rate +- Updated [proofs](https://github.com/filecoin-project/rust-fil-proofs) to v5.3.0, which brings significant performance improvements +- Updated [markets](https://github.com/filecoin-project/go-fil-markets/releases/tag/v1.0.4) to v1.0.4, which reduces failures due to reorgs (https://github.com/filecoin-project/lotus/pull/4730) and uses the newly refactored fund manager (https://github.com/filecoin-project/lotus/pull/4736) + +## Changes + +#### Core Lotus + +- polish: add Equals method to MinerInfo shim (https://github.com/filecoin-project/lotus/pull/4604) +- Fix messagepool accounting (https://github.com/filecoin-project/lotus/pull/4668) +- Prep for gas balancing (https://github.com/filecoin-project/lotus/pull/4651) +- Reduce badger ValueThreshold to 128 (https://github.com/filecoin-project/lotus/pull/4629) +- Config for default max gas fee (https://github.com/filecoin-project/lotus/pull/4652) +- bootstrap: don't return early when one drand resolution fails (https://github.com/filecoin-project/lotus/pull/4626) +- polish: add ClaimsChanged and DiffClaims method to power shim (https://github.com/filecoin-project/lotus/pull/4628) +- Simplify chain event Called API (https://github.com/filecoin-project/lotus/pull/4664) +- Cache deal states for most recent old/new tipset (https://github.com/filecoin-project/lotus/pull/4623) +- Add miner available balance and power info to state miner info (https://github.com/filecoin-project/lotus/pull/4618) +- Call GetHeaviestTipSet() only once when syncing (https://github.com/filecoin-project/lotus/pull/4696) +- modify runtime gasUsed printf (https://github.com/filecoin-project/lotus/pull/4704) +- Rename builtin actor generators (https://github.com/filecoin-project/lotus/pull/4697) +- Move gas multiplier as property of pricelist 
(https://github.com/filecoin-project/lotus/pull/4728) +- polish: add msig pendingtxn diffing and comp (https://github.com/filecoin-project/lotus/pull/4719) +- Optional chain Bitswap (https://github.com/filecoin-project/lotus/pull/4717) +- rewrite sync manager (https://github.com/filecoin-project/lotus/pull/4599) +- async connect to bootstrappers (https://github.com/filecoin-project/lotus/pull/4785) +- head change coalescer (https://github.com/filecoin-project/lotus/pull/4688) +- move to native badger blockstore; leverage zero-copy View() to deserialize in-place (https://github.com/filecoin-project/lotus/pull/4681) +- badger blockstore: minor improvements (https://github.com/filecoin-project/lotus/pull/4811) +- Do not fail wallet delete because of pre-existing trashed key (https://github.com/filecoin-project/lotus/pull/4589) +- Correctly delete the default wallet address (https://github.com/filecoin-project/lotus/pull/4705) +- Reduce badger ValueTreshold to 128 (https://github.com/filecoin-project/lotus/pull/4629) +- predicates: Fast StateGetActor wrapper (https://github.com/filecoin-project/lotus/pull/4835) + +#### Mining + +- worker key should change when set sender found key not equal with the value on chain (https://github.com/filecoin-project/lotus/pull/4595) +- extern/sector-storage: fix GPU usage overwrite bug (https://github.com/filecoin-project/lotus/pull/4627) +- sectorstorage: Fix manager restart edge-case (https://github.com/filecoin-project/lotus/pull/4645) +- storagefsm: Fix GetTicket loop when the sector is already precommitted (https://github.com/filecoin-project/lotus/pull/4643) +- Debug flag to force running sealing scheduler (https://github.com/filecoin-project/lotus/pull/4662) +- Fix worker reenabling, handle multiple restarts in worker (https://github.com/filecoin-project/lotus/pull/4666) +- keep retrying the proof until we run out of sectors to skip (https://github.com/filecoin-project/lotus/pull/4633) +- worker: Commands to pause/resume task 
processing (https://github.com/filecoin-project/lotus/pull/4615) +- struct name incorrect (https://github.com/filecoin-project/lotus/pull/4699) +- optimize code replace strings with constants (https://github.com/filecoin-project/lotus/pull/4769) +- optimize pledge sector (https://github.com/filecoin-project/lotus/pull/4765) +- Track sealing processes across lotus-miner restarts (https://github.com/filecoin-project/lotus/pull/3618) +- Fix scheduler lockups after storage is freed (https://github.com/filecoin-project/lotus/pull/4778) +- storage: Track worker hostnames with work (https://github.com/filecoin-project/lotus/pull/4779) +- Expand sched-diag; Command to abort sealing calls (https://github.com/filecoin-project/lotus/pull/4804) +- miner: Winning PoSt Warmup (https://github.com/filecoin-project/lotus/pull/4824) +- docsgen: Support miner/worker (https://github.com/filecoin-project/lotus/pull/4817) +- miner: Basic storage cleanup command (https://github.com/filecoin-project/lotus/pull/4834) + +#### Markets and Data Transfer + +- Flesh out data transfer features (https://github.com/filecoin-project/lotus/pull/4572) +- Fix memory leaks in data transfer (https://github.com/filecoin-project/lotus/pull/4619) +- Handle deal id changes in OnDealSectorCommitted (https://github.com/filecoin-project/lotus/pull/4730) +- Refactor FundManager (https://github.com/filecoin-project/lotus/pull/4736) +- refactor: integrate new FundManager (https://github.com/filecoin-project/lotus/pull/4787) +- Fix race in paych manager when req context is cancelled (https://github.com/filecoin-project/lotus/pull/4803) +- fix race in paych manager add funds (https://github.com/filecoin-project/lotus/pull/4597) +- Fix panic in FundManager (https://github.com/filecoin-project/lotus/pull/4808) +- Fix: dont crash on startup if funds migration fails (https://github.com/filecoin-project/lotus/pull/4827) + +#### UX + +- Make EarlyExpiration in sectors list less scary 
(https://github.com/filecoin-project/lotus/pull/4600) +- Add commands to change the worker key (https://github.com/filecoin-project/lotus/pull/4513) +- Expose ClientDealSize via CLI (https://github.com/filecoin-project/lotus/pull/4569) +- client deal: Cache CommD when creating multiple deals (https://github.com/filecoin-project/lotus/pull/4535) +- miner sectors list: flags for events/seal time (https://github.com/filecoin-project/lotus/pull/4649) +- make IPFS online mode configurable (https://github.com/filecoin-project/lotus/pull/4650) +- Add sync status to miner info command (https://github.com/filecoin-project/lotus/pull/4669) +- Add a StateDecodeParams method (https://github.com/filecoin-project/lotus/pull/4105) +- sched: Interactive RPC Shell (https://github.com/filecoin-project/lotus/pull/4692) +- Add api for getting status given a code (https://github.com/filecoin-project/lotus/pull/4210) +- Update lotus-stats with a richer cli (https://github.com/filecoin-project/lotus/pull/4718) +- Use TSK passed to GasEstimateGasLimit (https://github.com/filecoin-project/lotus/pull/4739) +- match data type for reward state api (https://github.com/filecoin-project/lotus/pull/4745) +- Add `termination-estimate` to get an estimation for how much a termination penalty will be (https://github.com/filecoin-project/lotus/pull/4617) +- Restrict `ParseFIL` input length (https://github.com/filecoin-project/lotus/pull/4780) +- cmd sectors commitIDs len debug (https://github.com/filecoin-project/lotus/pull/4786) +- Add client deal-stats CLI (https://github.com/filecoin-project/lotus/pull/4788) +- Modify printf format (https://github.com/filecoin-project/lotus/pull/4795) +- Updated msig inspect (https://github.com/filecoin-project/lotus/pull/4533) +- Delete the duplicate output (https://github.com/filecoin-project/lotus/pull/4819) +- miner: Storage list sectors command (https://github.com/filecoin-project/lotus/pull/4831) +- drop a few logs down to debug 
(https://github.com/filecoin-project/lotus/pull/4832) + +#### Testing and Tooling + +- refactor: share code between CLI tests (https://github.com/filecoin-project/lotus/pull/4598) +- Fix flaky TestCLIDealFlow (https://github.com/filecoin-project/lotus/pull/4608) +- Fix flaky testMiningReal (https://github.com/filecoin-project/lotus/pull/4609) +- Add election run-dummy command (https://github.com/filecoin-project/lotus/pull/4498) +- Fix .gitmodules (https://github.com/filecoin-project/lotus/pull/4713) +- fix metrics wiring.(https://github.com/filecoin-project/lotus/pull/4691) +- shed: Util for creating ID CIDs (https://github.com/filecoin-project/lotus/pull/4726) +- Run kumquat upgrade on devnets (https://github.com/filecoin-project/lotus/pull/4734) +- Make pond work again (https://github.com/filecoin-project/lotus/pull/4775) +- lotus-stats: fix influx flags (https://github.com/filecoin-project/lotus/pull/4810) +- 2k sync BootstrapPeerThreshold (https://github.com/filecoin-project/lotus/pull/4797) +- test for FundManager panic to ensure it is fixed (https://github.com/filecoin-project/lotus/pull/4825) +- Stop mining at the end of tests (https://github.com/filecoin-project/lotus/pull/4826) +- Make some logs quieter (https://github.com/filecoin-project/lotus/pull/4709) + +#### Dependencies + +- update filecoin-ffi in go mod (https://github.com/filecoin-project/lotus/pull/4584) +- Update FFI (https://github.com/filecoin-project/lotus/pull/4613) +- feat: integrate new optional blst backend and verification optimizations from proofs (https://github.com/filecoin-project/lotus/pull/4630) +- Use https for blst submodule (https://github.com/filecoin-project/lotus/pull/4710) +- Update go-bitfield (https://github.com/filecoin-project/lotus/pull/4756) +- Update Yamux (https://github.com/filecoin-project/lotus/pull/4758) +- Update to latest go-bitfield (https://github.com/filecoin-project/lotus/pull/4793) +- Update to latest go-address 
(https://github.com/filecoin-project/lotus/pull/4798) +- update libp2p for stream interface changes (https://github.com/filecoin-project/lotus/pull/4814) + # 1.1.2 / 2020-10-24 This is a patch release of Lotus that builds on the fixes involving worker keys that was introduced in v1.1.1. Miners and node operators should update to this release as soon as possible in order to ensure their blocks are propagated and validated. -## Changes +## Changes - Handle worker key changes correctly in runtime (https://github.com/filecoin-project/lotus/pull/4579) @@ -247,7 +1104,7 @@ This consensus-breaking release of Lotus upgrades the actors version to v2.0.0. - Fix pond (https://github.com/filecoin-project/lotus/pull/4203) - allow manual setting of noncefix fee cap (https://github.com/filecoin-project/lotus/pull/4205) - implement command to get execution traces of any message (https://github.com/filecoin-project/lotus/pull/4200) -- conformance: minor driver refactors (https://github.com/filecoin-project/lotus/pull/4211) +- conformance: minor driver refactors (https://github.com/filecoin-project/lotus/pull/4211) - lotus-pcr: ignore all other messages (https://github.com/filecoin-project/lotus/pull/4218) - lotus-pcr: zero refund (https://github.com/filecoin-project/lotus/pull/4229) @@ -274,7 +1131,7 @@ We are grateful for every contribution! This optional release of Lotus introduces a new version of markets which switches to CBOR-map encodings, and allows datastore migrations. The release also introduces several improvements to the mining process, a few performance optimizations, and a battery of UX additions and enhancements. -## Changes +## Changes #### Dependencies @@ -345,7 +1202,7 @@ This consensus-breaking release of Lotus introduces an upgrade to the network. T This release also updates go-fil-markets to fix an incompatibility issue between v0.7.2 and earlier versions. 
-## Changes +## Changes #### Dependencies @@ -434,7 +1291,7 @@ This optional release of Lotus introduces some critical fixes to the window PoSt ## Changes -#### Some notable improvements: +#### Some notable improvements: - Correctly construct params for `SubmitWindowedPoSt` messages (https://github.com/filecoin-project/lotus/pull/3909) - Skip sectors correctly for Window PoSt (https://github.com/filecoin-project/lotus/pull/3839) @@ -470,7 +1327,7 @@ This consensus-breaking release of Lotus is designed to test a network upgrade o - Drand upgrade (https://github.com/filecoin-project/lotus/pull/3670) - Multisig API additions (https://github.com/filecoin-project/lotus/pull/3590) -#### Storage Miner +#### Storage Miner - Increase the number of times precommit2 is attempted before moving back to precommit1 (https://github.com/filecoin-project/lotus/pull/3720) @@ -513,7 +1370,7 @@ This release introduces some critical fixes to message selection and gas estimat ## Changes -#### Messagepool +#### Messagepool - Warn when optimal selection fails to pack a block and we fall back to random selection (https://github.com/filecoin-project/lotus/pull/3708) - Add basic command for printing gas performance of messages in the mpool (https://github.com/filecoin-project/lotus/pull/3701) @@ -583,7 +1440,7 @@ This release also introduces many improvements to Lotus! 
Among them are a new ve - Add additional info about gas premium (https://github.com/filecoin-project/lotus/pull/3578) - Fix GasPremium capping logic (https://github.com/filecoin-project/lotus/pull/3552) -#### Payment channels +#### Payment channels - Get available funds by address or by from/to (https://github.com/filecoin-project/lotus/pull/3547) - Create `lotus paych status` command (https://github.com/filecoin-project/lotus/pull/3523) @@ -633,7 +1490,7 @@ This patch includes a crucial fix to the message pool selection logic, strongly This patch includes a hotfix to the `GasEstimateFeeCap` method, capping the estimated fee to a reasonable level by default. -## Changes +## Changes - Added target height to sync wait (https://github.com/filecoin-project/lotus/pull/3502) - Disable codecov annotations (https://github.com/filecoin-project/lotus/pull/3514) @@ -663,7 +1520,7 @@ This patch includes some bugfixes to the sector sealing process, and updates go- # 0.5.7 / 2020-08-31 -This patch release includes some bugfixes and enhancements to the sector lifecycle and message pool logic. +This patch release includes some bugfixes and enhancements to the sector lifecycle and message pool logic. ## Changes @@ -683,7 +1540,7 @@ Hotfix release that fixes a panic in the sealing scheduler (https://github.com/f # 0.5.5 This patch release introduces a large number of improvements to the sealing process. -It also updates go-fil-markets to +It also updates go-fil-markets to [version 0.5.8](https://github.com/filecoin-project/go-fil-markets/releases/tag/v0.5.8), and go-libp2p-pubsub to [v0.3.5](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.3.5). @@ -696,16 +1553,16 @@ and go-libp2p-pubsub to [v0.3.5](https://github.com/libp2p/go-libp2p-pubsub/rele - The following improvements were introduced in https://github.com/filecoin-project/lotus/pull/3350. - - Allow `lotus-miner sectors remove` to remove a sector in any state. 
- - Create a separate state in the storage FSM dedicated to submitting the Commit message. - - Recovery for when the Deal IDs of deals in a sector get changed in a reorg. - - Auto-retry sending Precommit and Commit messages if they run out of gas - - Auto-retry sector remove tasks when they fail - - Compact worker windows, and allow their tasks to be executed in any order + - Allow `lotus-miner sectors remove` to remove a sector in any state. + - Create a separate state in the storage FSM dedicated to submitting the Commit message. + - Recovery for when the Deal IDs of deals in a sector get changed in a reorg. + - Auto-retry sending Precommit and Commit messages if they run out of gas + - Auto-retry sector remove tasks when they fail + - Compact worker windows, and allow their tasks to be executed in any order - Don't simply skip PoSt for bad sectors (https://github.com/filecoin-project/lotus/pull/3323) -#### Message Pool +#### Message Pool - Spam Protection: Track required funds for pending messages (https://github.com/filecoin-project/lotus/pull/3313) @@ -730,7 +1587,7 @@ A patch release, containing a few nice bugfixes and improvements: # 0.5.3 -Yet another hotfix release. +Yet another hotfix release. A lesson for readers, having people who have been awake for 12+ hours review your hotfix PR is not a good idea. Find someone who has enough slept recently enough to give you good code review, otherwise you'll end up quickly bumping @@ -749,9 +1606,9 @@ This is a hotfix release. # 0.5.1 / 2020-08-24 -The Space Race release! +The Space Race release! This release contains the genesis car file and bootstrap peers for the space -race network. +race network. Additionally, we included two small fixes to genesis creation: - Randomize ticket value in genesis generation @@ -769,9 +1626,9 @@ Among the highlights included in this release are: - Gas changes: We implemented EIP-1559 and introduced real gas values. 
- Deal-making: We now support "Committed Capacity" sectors, "fast-retrieval" deals, -and the packing of multiple deals into a single sector. + and the packing of multiple deals into a single sector. - Renamed features: We renamed some of the binaries, environment variables, and default -paths associated with a Lotus node. + paths associated with a Lotus node. ### Gas changes @@ -779,19 +1636,19 @@ We made some significant changes to the mechanics of gas in this release. #### Network fee -We implemented something similar to +We implemented something similar to [Ethereum's EIP-1559](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md). The `Message` structure had three changes: - The `GasPrice` field has been removed - A new `GasFeeCap` field has been added, which controls the maximum cost -the sender incurs for the message + the sender incurs for the message - A new `GasPremium` field has been added, which controls the reward a miner -earns for including the message + earns for including the message -A sender will never be charged more than `GasFeeCap * GasLimit`. +A sender will never be charged more than `GasFeeCap * GasLimit`. A miner will typically earn `GasPremium * GasLimit` as a reward. -The `Blockheader` structure has one new field, called `ParentBaseFee`. +The `Blockheader` structure has one new field, called `ParentBaseFee`. Informally speaking,the `ParentBaseFee` is increased when blocks are densely packed with messages, and decreased otherwise. 
diff --git a/Dockerfile.lotus b/Dockerfile.lotus new file mode 100644 index 00000000000..0b43ef8063e --- /dev/null +++ b/Dockerfile.lotus @@ -0,0 +1,74 @@ +FROM golang:1.16.4 AS builder-deps +MAINTAINER Lotus Development Team + +RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev + +ARG RUST_VERSION=nightly +ENV XDG_CACHE_HOME="/tmp" + +ENV RUSTUP_HOME=/usr/local/rustup \ + CARGO_HOME=/usr/local/cargo \ + PATH=/usr/local/cargo/bin:$PATH + +RUN wget "https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init"; \ + chmod +x rustup-init; \ + ./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION; \ + rm rustup-init; \ + chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \ + rustup --version; \ + cargo --version; \ + rustc --version; + + +FROM builder-deps AS builder-local +MAINTAINER Lotus Development Team + +COPY ./ /opt/filecoin +WORKDIR /opt/filecoin +RUN make clean deps + + +FROM builder-local AS builder +MAINTAINER Lotus Development Team + +WORKDIR /opt/filecoin + +ARG RUSTFLAGS="" +ARG GOFLAGS="" + +RUN make deps lotus lotus-miner lotus-worker lotus-shed lotus-chainwatch lotus-stats + + +FROM ubuntu:20.04 AS base +MAINTAINER Lotus Development Team + +# Base resources +COPY --from=builder /etc/ssl/certs /etc/ssl/certs +COPY --from=builder /lib/x86_64-linux-gnu/libdl.so.2 /lib/ +COPY --from=builder /lib/x86_64-linux-gnu/librt.so.1 /lib/ +COPY --from=builder /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/ +COPY --from=builder /lib/x86_64-linux-gnu/libutil.so.1 /lib/ +COPY --from=builder /usr/lib/x86_64-linux-gnu/libltdl.so.7 /lib/ +COPY --from=builder /usr/lib/x86_64-linux-gnu/libnuma.so.1 /lib/ +COPY --from=builder /usr/lib/x86_64-linux-gnu/libhwloc.so.5 /lib/ +COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/ + +RUN useradd -r -u 532 -U fc + + +FROM base AS lotus +MAINTAINER Lotus Development Team + +COPY --from=builder 
/opt/filecoin/lotus /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/ + +ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters +ENV LOTUS_PATH /var/lib/lotus + +RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters && chown fc /var/lib/lotus /var/tmp/filecoin-proof-parameters + +USER fc + +ENTRYPOINT ["/usr/local/bin/lotus"] + +CMD ["-help"] diff --git a/Makefile b/Makefile index 093f62ef697..a5ce8a99fbf 100644 --- a/Makefile +++ b/Makefile @@ -5,10 +5,10 @@ all: build unexport GOFLAGS -GOVERSION:=$(shell go version | cut -d' ' -f 3 | cut -d. -f 2) -ifeq ($(shell expr $(GOVERSION) \< 14), 1) -$(warning Your Golang version is go 1.$(GOVERSION)) -$(error Update Golang to version $(shell grep '^go' go.mod)) +GOVERSION:=$(shell go version | cut -d' ' -f 3 | sed 's/^go//' | awk -F. '{printf "%d%03d%03d", $$1, $$2, $$3}') +ifeq ($(shell expr $(GOVERSION) \< 1016000), 1) +$(warning Your Golang version is go$(shell expr $(GOVERSION) / 1000000).$(shell expr $(GOVERSION) % 1000000 / 1000).$(shell expr $(GOVERSION) % 1000)) +$(error Update Golang to version to at least 1.16.0) endif # git modules that need to be loaded @@ -41,8 +41,13 @@ MODULES+=$(FFI_PATH) BUILD_DEPS+=build/.filecoin-install CLEAN+=build/.filecoin-install -$(MODULES): build/.update-modules ; +ffi-version-check: + @[[ "$$(awk '/const Version/{print $$5}' extern/filecoin-ffi/version.go)" -eq 3 ]] || (echo "FFI version mismatch, update submodules"; exit 1) +BUILD_DEPS+=ffi-version-check + +.PHONY: ffi-version-check +$(MODULES): build/.update-modules ; # dummy file that marks the last time modules were updated build/.update-modules: git submodule update --init --recursive @@ -57,16 +62,30 @@ CLEAN+=build/.update-modules deps: $(BUILD_DEPS) .PHONY: deps +build-devnets: build lotus-seed lotus-shed lotus-wallet lotus-gateway +.PHONY: build-devnets + debug: GOFLAGS+=-tags=debug -debug: lotus lotus-miner lotus-worker lotus-seed +debug: build-devnets 2k: GOFLAGS+=-tags=2k 
-2k: lotus lotus-miner lotus-worker lotus-seed +2k: build-devnets + +calibnet: GOFLAGS+=-tags=calibnet +calibnet: build-devnets + +nerpanet: GOFLAGS+=-tags=nerpanet +nerpanet: build-devnets + +butterflynet: GOFLAGS+=-tags=butterflynet +butterflynet: build-devnets + +interopnet: GOFLAGS+=-tags=interopnet +interopnet: build-devnets lotus: $(BUILD_DEPS) rm -f lotus go build $(GOFLAGS) -o lotus ./cmd/lotus - go run github.com/GeertJohan/go.rice/rice append --exec lotus -i ./build .PHONY: lotus BINS+=lotus @@ -74,21 +93,18 @@ BINS+=lotus lotus-miner: $(BUILD_DEPS) rm -f lotus-miner go build $(GOFLAGS) -o lotus-miner ./cmd/lotus-storage-miner - go run github.com/GeertJohan/go.rice/rice append --exec lotus-miner -i ./build .PHONY: lotus-miner BINS+=lotus-miner lotus-worker: $(BUILD_DEPS) rm -f lotus-worker go build $(GOFLAGS) -o lotus-worker ./cmd/lotus-seal-worker - go run github.com/GeertJohan/go.rice/rice append --exec lotus-worker -i ./build .PHONY: lotus-worker BINS+=lotus-worker lotus-shed: $(BUILD_DEPS) rm -f lotus-shed go build $(GOFLAGS) -o lotus-shed ./cmd/lotus-shed - go run github.com/GeertJohan/go.rice/rice append --exec lotus-shed -i ./build .PHONY: lotus-shed BINS+=lotus-shed @@ -115,12 +131,14 @@ install-miner: install-worker: install -C ./lotus-worker /usr/local/bin/lotus-worker +install-app: + install -C ./$(APP) /usr/local/bin/$(APP) + # TOOLS lotus-seed: $(BUILD_DEPS) rm -f lotus-seed go build $(GOFLAGS) -o lotus-seed ./cmd/lotus-seed - go run github.com/GeertJohan/go.rice/rice append --exec lotus-seed -i ./build .PHONY: lotus-seed BINS+=lotus-seed @@ -154,13 +172,11 @@ lotus-townhall-front: .PHONY: lotus-townhall-front lotus-townhall-app: lotus-touch lotus-townhall-front - go run github.com/GeertJohan/go.rice/rice append --exec lotus-townhall -i ./cmd/lotus-townhall -i ./build .PHONY: lotus-townhall-app lotus-fountain: rm -f lotus-fountain go build -o lotus-fountain ./cmd/lotus-fountain - go run github.com/GeertJohan/go.rice/rice append --exec 
lotus-fountain -i ./cmd/lotus-fountain -i ./build .PHONY: lotus-fountain BINS+=lotus-fountain @@ -173,28 +189,24 @@ BINS+=lotus-chainwatch lotus-bench: rm -f lotus-bench go build -o lotus-bench ./cmd/lotus-bench - go run github.com/GeertJohan/go.rice/rice append --exec lotus-bench -i ./build .PHONY: lotus-bench BINS+=lotus-bench lotus-stats: rm -f lotus-stats - go build -o lotus-stats ./cmd/lotus-stats - go run github.com/GeertJohan/go.rice/rice append --exec lotus-stats -i ./build + go build $(GOFLAGS) -o lotus-stats ./cmd/lotus-stats .PHONY: lotus-stats BINS+=lotus-stats lotus-pcr: rm -f lotus-pcr go build $(GOFLAGS) -o lotus-pcr ./cmd/lotus-pcr - go run github.com/GeertJohan/go.rice/rice append --exec lotus-pcr -i ./build .PHONY: lotus-pcr BINS+=lotus-pcr lotus-health: rm -f lotus-health go build -o lotus-health ./cmd/lotus-health - go run github.com/GeertJohan/go.rice/rice append --exec lotus-health -i ./build .PHONY: lotus-health BINS+=lotus-health @@ -204,14 +216,33 @@ lotus-wallet: .PHONY: lotus-wallet BINS+=lotus-wallet +lotus-keygen: + rm -f lotus-keygen + go build -o lotus-keygen ./cmd/lotus-keygen +.PHONY: lotus-keygen +BINS+=lotus-keygen + testground: go build -tags testground -o /dev/null ./cmd/lotus .PHONY: testground BINS+=testground + +tvx: + rm -f tvx + go build -o tvx ./cmd/tvx +.PHONY: tvx +BINS+=tvx + install-chainwatch: lotus-chainwatch install -C ./lotus-chainwatch /usr/local/bin/lotus-chainwatch +lotus-sim: $(BUILD_DEPS) + rm -f lotus-sim + go build $(GOFLAGS) -o lotus-sim ./cmd/lotus-sim +.PHONY: lotus-sim +BINS+=lotus-sim + # SYSTEMD install-daemon-service: install-daemon @@ -272,17 +303,10 @@ clean-services: clean-all-services buildall: $(BINS) -completions: - ./scripts/make-completions.sh lotus - ./scripts/make-completions.sh lotus-miner -.PHONY: completions - install-completions: mkdir -p /usr/share/bash-completion/completions /usr/local/share/zsh/site-functions/ install -C ./scripts/bash-completion/lotus 
/usr/share/bash-completion/completions/lotus - install -C ./scripts/bash-completion/lotus-miner /usr/share/bash-completion/completions/lotus-miner install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus - install -C ./scripts/zsh-completion/lotus-miner /usr/local/share/zsh/site-functions/_lotus-miner clean: rm -rf $(CLEAN) $(BINS) @@ -294,17 +318,76 @@ dist-clean: git submodule deinit --all -f .PHONY: dist-clean -type-gen: +type-gen: api-gen go run ./gen/main.go - go generate ./... + go generate -x ./... + goimports -w api/ -method-gen: +method-gen: api-gen (cd ./lotuspond/front/src/chain && go run ./methodgen.go) -gen: type-gen method-gen - -docsgen: - go run ./api/docgen > documentation/en/api-methods.md +actors-gen: + go run ./chain/actors/agen + go fmt ./... + +api-gen: + go run ./gen/api + goimports -w api + goimports -w api +.PHONY: api-gen + +appimage: lotus + rm -rf appimage-builder-cache || true + rm AppDir/io.filecoin.lotus.desktop || true + rm AppDir/icon.svg || true + rm AppDir/AppRun || true + mkdir -p AppDir/usr/bin + cp ./lotus AppDir/usr/bin/ + appimage-builder + +docsgen: docsgen-md docsgen-openrpc + +docsgen-md-bin: api-gen actors-gen + go build $(GOFLAGS) -o docgen-md ./api/docgen/cmd +docsgen-openrpc-bin: api-gen actors-gen + go build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd + +docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker + +docsgen-md-full: docsgen-md-bin + ./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md + ./docgen-md "api/v0api/full.go" "FullNode" "v0api" "./api/v0api" > documentation/en/api-v0-methods.md +docsgen-md-storage: docsgen-md-bin + ./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md +docsgen-md-worker: docsgen-md-bin + ./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md + +docsgen-openrpc: docsgen-openrpc-full 
docsgen-openrpc-storage docsgen-openrpc-worker + +docsgen-openrpc-full: docsgen-openrpc-bin + ./docgen-openrpc "api/api_full.go" "FullNode" "api" "./api" -gzip > build/openrpc/full.json.gz +docsgen-openrpc-storage: docsgen-openrpc-bin + ./docgen-openrpc "api/api_storage.go" "StorageMiner" "api" "./api" -gzip > build/openrpc/miner.json.gz +docsgen-openrpc-worker: docsgen-openrpc-bin + ./docgen-openrpc "api/api_worker.go" "Worker" "api" "./api" -gzip > build/openrpc/worker.json.gz + +.PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin + +gen: actors-gen type-gen method-gen docsgen api-gen circleci + @echo ">>> IF YOU'VE MODIFIED THE CLI, REMEMBER TO ALSO MAKE docsgen-cli" +.PHONY: gen + +snap: lotus lotus-miner lotus-worker + snapcraft + # snapcraft upload ./lotus_*.snap + +# separate from gen because it needs binaries +docsgen-cli: lotus lotus-miner lotus-worker + python ./scripts/generate-lotus-cli.py +.PHONY: docsgen-cli print-%: @echo $*=$($*) + +circleci: + go generate -x ./.circleci \ No newline at end of file diff --git a/README.md b/README.md index fa432bf7dc8..a44c690066c 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ - +

@@ -18,30 +18,121 @@ Lotus is an implementation of the Filecoin Distributed Storage Network. For more ## Building & Documentation -For instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/). +> Note: The default `master` branch is the dev branch, please use with caution. For the latest stable version, checkout the most recent [`Latest release`](https://github.com/filecoin-project/lotus/releases). + +For complete instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/). Basic build instructions can be found further down in this readme. ## Reporting a Vulnerability Please send an email to security@filecoin.org. See our [security policy](SECURITY.md) for more details. -## Development +## Related packages -The main branches under development at the moment are: -* [`master`](https://github.com/filecoin-project/lotus): current testnet. -* [`next`](https://github.com/filecoin-project/lotus/tree/next): working branch with chain-breaking changes. -* [`ntwk-calibration`](https://github.com/filecoin-project/lotus/tree/ntwk-calibration): devnet running one of `next` commits. +These repos are independent and reusable modules, but are tightly integrated into Lotus to make up a fully featured Filecoin implementation: -### Tracker +- [go-fil-markets](https://github.com/filecoin-project/go-fil-markets) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/markets-shared-components-5daa144a7046a60001c6e253/board) +- [specs-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board) -All work is tracked via issues. 
An attempt at keeping an up-to-date view on remaining work towards Mainnet launch can be seen at the [lotus github project board](https://github.com/orgs/filecoin-project/projects/8). The issues labeled with `incentives` are there to identify the issues needed for Space Race launch. +## Contribute -### Packages +Lotus is a universally open project and welcomes contributions of all kinds: code, docs, and more. However, before making a contribution, we ask you to heed these recommendations: -The lotus Filecoin implementation unfolds into the following packages: +1. If the proposal entails a protocol change, please first submit a [Filecoin Improvement Proposal](https://github.com/filecoin-project/FIPs). +2. If the change is complex and requires prior discussion, [open an issue](https://github.com/filecoin-project/lotus/issues) or a [discussion](https://github.com/filecoin-project/lotus/discussions) to request feedback before you start working on a pull request. This is to avoid disappointment and sunk costs, in case the change is not actually needed or accepted. +3. Please refrain from submitting PRs to adapt existing code to subjective preferences. The changeset should contain functional or technical improvements/enhancements, bug fixes, new features, or some other clear material contribution. Simple stylistic changes are likely to be rejected in order to reduce code churn. -- [This repo](https://github.com/filecoin-project/lotus) -- [go-fil-markets](https://github.com/filecoin-project/go-fil-markets) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/markets-shared-components-5daa144a7046a60001c6e253/board) -- [spec-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board) +When implementing a change: + +1. Adhere to the standard Go formatting guidelines, e.g.
[Effective Go](https://golang.org/doc/effective_go.html). Run `go fmt`. +2. Stick to the idioms and patterns used in the codebase. Familiar-looking code has a higher chance of being accepted than eerie code. Pay attention to commonly used variable and parameter names, avoidance of naked returns, error handling patterns, etc. +3. Comments: follow the advice on the [Commentary](https://golang.org/doc/effective_go.html#commentary) section of Effective Go. +4. Minimize code churn. Modify only what is strictly necessary. Well-encapsulated changesets will get a quicker response from maintainers. +5. Lint your code with [`golangci-lint`](https://golangci-lint.run) (CI will reject your PR if unlinted). +6. Add tests. +7. Title the PR in a meaningful way and describe the rationale and the thought process in the PR description. +8. Write clean, thoughtful, and detailed [commit messages](https://chris.beams.io/posts/git-commit/). This is even more important than the PR description, because commit messages are stored _inside_ the Git history. One good rule is: if you are happy posting the commit message as the PR description, then it's a good commit message. + +## Basic Build Instructions +**System-specific Software Dependencies**: + +Building Lotus requires some system dependencies, usually provided by your distribution. 
+ +Ubuntu/Debian: +``` +sudo apt install mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl clang build-essential hwloc libhwloc-dev wget -y && sudo apt upgrade -y +``` + +Fedora: +``` +sudo dnf -y install gcc make git bzr jq pkgconfig mesa-libOpenCL mesa-libOpenCL-devel opencl-headers ocl-icd ocl-icd-devel clang llvm wget hwloc libhwloc-dev +``` + +For other distributions you can find the required dependencies [here.](https://docs.filecoin.io/get-started/lotus/installation/#system-specific) For instructions specific to macOS, you can find them [here.](https://docs.filecoin.io/get-started/lotus/installation/#macos) + +#### Go + +To build Lotus, you need a working installation of [Go 1.16.4 or higher](https://golang.org/dl/): + +```bash +wget -c https://golang.org/dl/go1.16.4.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local +``` + +**TIP:** +You'll need to add `/usr/local/go/bin` to your path. For most Linux distributions you can run something like: + +```shell +echo "export PATH=$PATH:/usr/local/go/bin" >> ~/.bashrc && source ~/.bashrc +``` + +See the [official Golang installation instructions](https://golang.org/doc/install) if you get stuck. + +### Build and install Lotus + +Once all the dependencies are installed, you can build and install the Lotus suite (`lotus`, `lotus-miner`, and `lotus-worker`). + +1. Clone the repository: + + ```sh + git clone https://github.com/filecoin-project/lotus.git + cd lotus/ + ``` + +Note: The default branch `master` is the dev branch where the latest new features, bug fixes and improvement are in. However, if you want to run lotus on Filecoin mainnet and want to run a production-ready lotus, get the latest release[ here](https://github.com/filecoin-project/lotus/releases). + +2. To join mainnet, checkout the [latest release](https://github.com/filecoin-project/lotus/releases). 
+ + If you are changing networks from a previous Lotus installation or there has been a network reset, read the [Switch networks guide](https://docs.filecoin.io/get-started/lotus/switch-networks/) before proceeding. + + For networks other than mainnet, look up the current branch or tag/commit for the network you want to join in the [Filecoin networks dashboard](https://network.filecoin.io), then build Lotus for your specific network below. + + ```sh + git checkout <tag_or_branch> + # For example: + git checkout <vX.X.X> # tag for a release + ``` + + Currently, the latest code on the _master_ branch corresponds to mainnet. + +3. If you are in China, see "[Lotus: tips when running in China](https://docs.filecoin.io/get-started/lotus/tips-running-in-china/)". +4. This build instruction uses the prebuilt proofs binaries. If you want to build the proof binaries from source check the [complete instructions](https://docs.filecoin.io/get-started/lotus/installation/#build-and-install-lotus). Note, if you are building the proof binaries from source, [installing rustup](https://docs.filecoin.io/get-started/lotus/installation/#rustup) is also needed. + +5. Build and install Lotus: + + ```sh + make clean all #mainnet + + # Or to join a testnet or devnet: + make clean calibnet # Calibration with min 32GiB sectors + make clean nerpanet # Nerpa with min 512MiB sectors + + sudo make install + ``` + + This will put `lotus`, `lotus-miner` and `lotus-worker` in `/usr/local/bin`. + + `lotus` will use the `$HOME/.lotus` folder by default for storage (configuration, chain data, wallets, etc). See [advanced options](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/) for information on how to customize the Lotus folder. + +6. You should now have Lotus installed. You can now [start the Lotus daemon and sync the chain](https://docs.filecoin.io/get-started/lotus/installation/#start-the-lotus-daemon-and-sync-the-chain).
## License diff --git a/SECURITY.md b/SECURITY.md index 592206bc5a9..d53c2b920b5 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,11 +2,11 @@ ## Reporting a Vulnerability -For *critical* bugs, please consult our Security Policy and Responsible Disclosure Program information at https://github.com/filecoin-project/community/blob/master/SECURITY.md +For reporting security vulnerabilities/bugs, please consult our Security Policy and Responsible Disclosure Program information at https://github.com/filecoin-project/community/blob/master/SECURITY.md. Security vulnerabilities should be reported via our [Vulnerability Reporting channels](https://github.com/filecoin-project/community/blob/master/SECURITY.md#vulnerability-reporting) and will be eligible for a [Bug Bounty](https://security.filecoin.io/bug-bounty/). Please try to provide a clear description of any bugs reported, along with how to reproduce the bug if possible. More detailed bug reports (especially those with a PoC included) will help us move forward much faster. Additionally, please avoid reporting bugs that already have open issues. Take a moment to search the issue list of the related GitHub repositories before writing up a new report. -Here are some examples of bugs we would consider 'critical': +Here are some examples of bugs we would consider to be security vulnerabilities: * If you can spend from a `multisig` wallet you do not control the keys for. * If you can cause a miner to be slashed without them actually misbehaving. @@ -16,8 +16,8 @@ Here are some examples of bugs we would consider 'critical': * If you can craft a message that causes a persistent fork in the network. * If you can cause the total amount of Filecoin in the network to no longer be 2 billion. -This is not an exhaustive list, but should provide some idea of what we consider 'critical'. +This is not an exhaustive list, but should provide some idea of what we consider to be a security vulnerability.
## Reporting a non security bug -For non-critical bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md). +For non-security bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md). diff --git a/api/README.md b/api/README.md new file mode 100644 index 00000000000..07089d7ae9f --- /dev/null +++ b/api/README.md @@ -0,0 +1,14 @@ +## Lotus API + +This package contains all lotus API definitions. Interfaces defined here are +exposed as JsonRPC 2.0 endpoints by lotus programs. + +### Versions + +| File | Alias File | Interface | Exposed by | Version | HTTP Endpoint | Status | Docs +|------------------|-------------------|----------------|--------------------|---------|---------------|------------------------------|------ +| `api_common.go` | `v0api/latest.go` | `Common` | lotus; lotus-miner | v0 | `/rpc/v0` | Latest, Stable | [Methods](../documentation/en/api-v0-methods.md) +| `api_full.go` | `v1api/latest.go` | `FullNode` | lotus | v1 | `/rpc/v1` | Latest, **Work in progress** | [Methods](../documentation/en/api-v1-unstable-methods.md) +| `api_storage.go` | `v0api/latest.go` | `StorageMiner` | lotus-miner | v0 | `/rpc/v0` | Latest, Stable | [Methods](../documentation/en/api-v0-methods-miner.md) +| `api_worker.go` | `v0api/latest.go` | `Worker` | lotus-worker | v0 | `/rpc/v0` | Latest, Stable | [Methods](../documentation/en/api-v0-methods-worker.md) +| `v0api/full.go` | | `FullNode` | lotus | v0 | `/rpc/v0` | Stable | [Methods](../documentation/en/api-v0-methods.md) diff --git a/api/api_common.go b/api/api_common.go index 5b036d1f6d9..629299db3b6 100644 --- a/api/api_common.go +++ b/api/api_common.go @@ -4,77 +4,61 @@ import ( "context" "fmt" + apitypes "github.com/filecoin-project/lotus/api/types" + "github.com/google/uuid" "github.com/filecoin-project/go-jsonrpc/auth" - metrics "github.com/libp2p/go-libp2p-core/metrics" - 
"github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - protocol "github.com/libp2p/go-libp2p-core/protocol" - - "github.com/filecoin-project/lotus/build" ) -type Common interface { +// MODIFYING THE API INTERFACE +// +// When adding / changing methods in this file: +// * Do the change here +// * Adjust implementation in `node/impl/` +// * Run `make gen` - this will: +// * Generate proxy structs +// * Generate mocks +// * Generate markdown docs +// * Generate openrpc blobs +type Common interface { // MethodGroup: Auth - AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) - AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) - - // MethodGroup: Net + AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) //perm:read + AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) //perm:admin - NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) - NetPeers(context.Context) ([]peer.AddrInfo, error) - NetConnect(context.Context, peer.AddrInfo) error - NetAddrsListen(context.Context) (peer.AddrInfo, error) - NetDisconnect(context.Context, peer.ID) error - NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) - NetPubsubScores(context.Context) ([]PubsubScore, error) - NetAutoNatStatus(context.Context) (NatInfo, error) - NetAgentVersion(ctx context.Context, p peer.ID) (string, error) + // MethodGroup: Log - // NetBandwidthStats returns statistics about the nodes total bandwidth - // usage and current rate across all peers and protocols. 
- NetBandwidthStats(ctx context.Context) (metrics.Stats, error) - - // NetBandwidthStatsByPeer returns statistics about the nodes bandwidth - // usage and current rate per peer - NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) - - // NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth - // usage and current rate per protocol - NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) + LogList(context.Context) ([]string, error) //perm:write + LogSetLevel(context.Context, string, string) error //perm:write // MethodGroup: Common - // ID returns peerID of libp2p node backing this API - ID(context.Context) (peer.ID, error) - // Version provides information about API provider - Version(context.Context) (Version, error) + Version(context.Context) (APIVersion, error) //perm:read - LogList(context.Context) ([]string, error) - LogSetLevel(context.Context, string, string) error + // Discover returns an OpenRPC document describing an RPC API. + Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) //perm:read // trigger graceful shutdown - Shutdown(context.Context) error + Shutdown(context.Context) error //perm:admin // Session returns a random UUID of api provider session - Session(context.Context) (uuid.UUID, error) + Session(context.Context) (uuid.UUID, error) //perm:read - Closing(context.Context) (<-chan struct{}, error) + Closing(context.Context) (<-chan struct{}, error) //perm:read } -// Version provides various build-time information -type Version struct { +// APIVersion provides various build-time information +type APIVersion struct { Version string // APIVersion is a binary encoded semver version of the remote implementing // this api // // See APIVersion in build/version.go - APIVersion build.Version + APIVersion Version // TODO: git commit / os / genesis cid? 
@@ -82,11 +66,6 @@ type Version struct { BlockDelay uint64 } -func (v Version) String() string { +func (v APIVersion) String() string { return fmt.Sprintf("%s+api%s", v.Version, v.APIVersion.String()) } - -type NatInfo struct { - Reachability network.Reachability - PublicAddr string -} diff --git a/api/api_full.go b/api/api_full.go index bb1eb159540..5c72c3613a8 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -2,17 +2,16 @@ package api import ( "context" + "encoding/json" "fmt" "time" - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-state-types/network" - "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" + datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-multistore" @@ -20,20 +19,46 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" + + apitypes "github.com/filecoin-project/lotus/api/types" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" - - "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/types" marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/modules/dtypes" ) +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_full.go -package=mocks . FullNode + +// ChainIO abstracts operations for accessing raw IPLD objects. 
+type ChainIO interface { + ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainHasObj(context.Context, cid.Cid) (bool, error) +} + +const LookbackNoLimit = abi.ChainEpoch(-1) + +// MODIFYING THE API INTERFACE +// +// NOTE: This is the V1 (Unstable) API - to add methods to the V0 (Stable) API +// you'll have to add those methods to interfaces in `api/v0api` +// +// When adding / changing methods in this file: +// * Do the change here +// * Adjust implementation in `node/impl/` +// * Run `make gen` - this will: +// * Generate proxy structs +// * Generate mocks +// * Generate markdown docs +// * Generate openrpc blobs + // FullNode API is a low-level interface to the Filecoin network full node type FullNode interface { Common + Net // MethodGroup: Chain // The Chain method group contains methods for interacting with the @@ -41,66 +66,81 @@ type FullNode interface { // ChainNotify returns channel with chain head updates. // First message is guaranteed to be of len == 1, and type == 'current'. - ChainNotify(context.Context) (<-chan []*HeadChange, error) + ChainNotify(context.Context) (<-chan []*HeadChange, error) //perm:read // ChainHead returns the current head of the chain. - ChainHead(context.Context) (*types.TipSet, error) + ChainHead(context.Context) (*types.TipSet, error) //perm:read // ChainGetRandomnessFromTickets is used to sample the chain for randomness. - ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) + ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read // ChainGetRandomnessFromBeacon is used to sample the beacon for randomness. 
- ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) + ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read // ChainGetBlock returns the block specified by the given CID. - ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) + ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) //perm:read // ChainGetTipSet returns the tipset specified by the given TipSetKey. - ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) + ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) //perm:read // ChainGetBlockMessages returns messages stored in the specified block. - ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error) + // + // Note: If there are multiple blocks in a tipset, it's likely that some + // messages will be duplicated. It's also possible for blocks in a tipset to have + // different messages from the same sender at the same nonce. When that happens, + // only the first message (in a block with lowest ticket) will be considered + // for execution + // + // NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK + // + // DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET + // Use ChainGetParentMessages, which will perform correct message deduplication + ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error) //perm:read // ChainGetParentReceipts returns receipts for messages in parent tipset of - // the specified block. - ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) + // the specified block. 
The receipts in the list returned is one-to-one with the + // messages returned by a call to ChainGetParentMessages with the same blockCid. + ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) //perm:read // ChainGetParentMessages returns messages stored in parent tipset of the // specified block. - ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error) + ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error) //perm:read + + // ChainGetMessagesInTipset returns message stores in current tipset + ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]Message, error) //perm:read // ChainGetTipSetByHeight looks back for a tipset at the specified epoch. // If there are no blocks at the specified epoch, a tipset at an earlier epoch // will be returned. - ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) + ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) //perm:read // ChainReadObj reads ipld nodes referenced by the specified CID from chain // blockstore and returns raw bytes. - ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainReadObj(context.Context, cid.Cid) ([]byte, error) //perm:read // ChainDeleteObj deletes node referenced by the given CID - ChainDeleteObj(context.Context, cid.Cid) error + ChainDeleteObj(context.Context, cid.Cid) error //perm:admin // ChainHasObj checks if a given CID exists in the chain blockstore. - ChainHasObj(context.Context, cid.Cid) (bool, error) + ChainHasObj(context.Context, cid.Cid) (bool, error) //perm:read // ChainStatObj returns statistics about the graph referenced by 'obj'. // If 'base' is also specified, then the returned stat will be a diff // between the two objects. 
- ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (ObjStat, error) + ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (ObjStat, error) //perm:read // ChainSetHead forcefully sets current chain head. Use with caution. - ChainSetHead(context.Context, types.TipSetKey) error + ChainSetHead(context.Context, types.TipSetKey) error //perm:admin // ChainGetGenesis returns the genesis tipset. - ChainGetGenesis(context.Context) (*types.TipSet, error) + ChainGetGenesis(context.Context) (*types.TipSet, error) //perm:read // ChainTipSetWeight computes weight for the specified tipset. - ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) - ChainGetNode(ctx context.Context, p string) (*IpldObject, error) + ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) //perm:read + ChainGetNode(ctx context.Context, p string) (*IpldObject, error) //perm:read // ChainGetMessage reads a message referenced by the specified CID from the // chain blockstore. - ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) + ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) //perm:read // ChainGetPath returns a set of revert/apply operations needed to get from // one tipset to another, for example: @@ -115,14 +155,14 @@ type FullNode interface { // tRR //``` // Would return `[revert(tBA), apply(tAB), apply(tAA)]` - ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error) + ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error) //perm:read // ChainExport returns a stream of bytes with CAR dump of chain data. // The exported chain data includes the header chain from the given tipset // back to genesis, the entire genesis state, and the most recent 'nroots' // state trees. // If oldmsgskip is set, messages from before the requested roots are also not included. 
- ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) + ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read // MethodGroup: Beacon // The Beacon method group contains methods for interacting with the random beacon (DRAND) @@ -130,74 +170,74 @@ type FullNode interface { // BeaconGetEntry returns the beacon entry for the given filecoin epoch. If // the entry has not yet been produced, the call will block until the entry // becomes available - BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) + BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) //perm:read // GasEstimateFeeCap estimates gas fee cap - GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) + GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) //perm:read // GasEstimateGasLimit estimates gas used by the message and returns it. // It fails if message fails to execute. - GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error) + GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error) //perm:read // GasEstimateGasPremium estimates what gas price should be used for a // message to have high likelihood of inclusion in `nblocksincl` epochs. 
GasEstimateGasPremium(_ context.Context, nblocksincl uint64, - sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) + sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) //perm:read // GasEstimateMessageGas estimates gas values for unset message gas fields - GasEstimateMessageGas(context.Context, *types.Message, *MessageSendSpec, types.TipSetKey) (*types.Message, error) + GasEstimateMessageGas(context.Context, *types.Message, *MessageSendSpec, types.TipSetKey) (*types.Message, error) //perm:read // MethodGroup: Sync // The Sync method group contains methods for interacting with and // observing the lotus sync service. // SyncState returns the current status of the lotus sync system. - SyncState(context.Context) (*SyncState, error) + SyncState(context.Context) (*SyncState, error) //perm:read // SyncSubmitBlock can be used to submit a newly created block to the. // network through this node - SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error + SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error //perm:write // SyncIncomingBlocks returns a channel streaming incoming, potentially not // yet synced block headers. - SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) + SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) //perm:read // SyncCheckpoint marks a blocks as checkpointed, meaning that it won't ever fork away from it. - SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error + SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error //perm:admin // SyncMarkBad marks a blocks as bad, meaning that it won't ever by synced. // Use with extreme caution. - SyncMarkBad(ctx context.Context, bcid cid.Cid) error + SyncMarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin // SyncUnmarkBad unmarks a blocks as bad, making it possible to be validated and synced again. 
- SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error + SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin // SyncUnmarkAllBad purges bad block cache, making it possible to sync to chains previously marked as bad - SyncUnmarkAllBad(ctx context.Context) error + SyncUnmarkAllBad(ctx context.Context) error //perm:admin // SyncCheckBad checks if a block was marked as bad, and if it was, returns // the reason. - SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) + SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) //perm:read // SyncValidateTipset indicates whether the provided tipset is valid or not - SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) + SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) //perm:read // MethodGroup: Mpool // The Mpool methods are for interacting with the message pool. The message pool // manages all incoming and outgoing 'messages' going over the network. // MpoolPending returns pending mempool messages. - MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) + MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) //perm:read // MpoolSelect returns a list of pending messages for inclusion in the next block - MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) + MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) //perm:read // MpoolPush pushes a signed message to mempool. - MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) + MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write // MpoolPushUntrusted pushes a signed message to mempool from untrusted sources. 
- MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error) + MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write // MpoolPushMessage atomically assigns a nonce, signs, and pushes a message // to mempool. @@ -205,34 +245,41 @@ type FullNode interface { // // When maxFee is set to 0, MpoolPushMessage will guess appropriate fee // based on current chain conditions - MpoolPushMessage(ctx context.Context, msg *types.Message, spec *MessageSendSpec) (*types.SignedMessage, error) + MpoolPushMessage(ctx context.Context, msg *types.Message, spec *MessageSendSpec) (*types.SignedMessage, error) //perm:sign // MpoolBatchPush batch pushes a signed message to mempool. - MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error) + MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write // MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources. - MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error) + MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write // MpoolBatchPushMessage batch pushes a unsigned message to mempool. 
- MpoolBatchPushMessage(context.Context, []*types.Message, *MessageSendSpec) ([]*types.SignedMessage, error) + MpoolBatchPushMessage(context.Context, []*types.Message, *MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign + + // MpoolCheckMessages performs logical checks on a batch of messages + MpoolCheckMessages(context.Context, []*MessagePrototype) ([][]MessageCheckStatus, error) //perm:read + // MpoolCheckPendingMessages performs logical checks for all pending messages from a given address + MpoolCheckPendingMessages(context.Context, address.Address) ([][]MessageCheckStatus, error) //perm:read + // MpoolCheckReplaceMessages performs logical checks on pending messages with replacement + MpoolCheckReplaceMessages(context.Context, []*types.Message) ([][]MessageCheckStatus, error) //perm:read // MpoolGetNonce gets next nonce for the specified sender. // Note that this method may not be atomic. Use MpoolPushMessage instead. - MpoolGetNonce(context.Context, address.Address) (uint64, error) - MpoolSub(context.Context) (<-chan MpoolUpdate, error) + MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read + MpoolSub(context.Context) (<-chan MpoolUpdate, error) //perm:read // MpoolClear clears pending messages from the mpool - MpoolClear(context.Context, bool) error + MpoolClear(context.Context, bool) error //perm:write // MpoolGetConfig returns (a copy of) the current mpool config - MpoolGetConfig(context.Context) (*types.MpoolConfig, error) + MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read // MpoolSetConfig sets the mpool config to (a copy of) the supplied config - MpoolSetConfig(context.Context, *types.MpoolConfig) error + MpoolSetConfig(context.Context, *types.MpoolConfig) error //perm:admin // MethodGroup: Miner - MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*MiningBaseInfo, error) - MinerCreateBlock(context.Context, *BlockTemplate) (*types.BlockMsg, error) + 
MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*MiningBaseInfo, error) //perm:read + MinerCreateBlock(context.Context, *BlockTemplate) (*types.BlockMsg, error) //perm:write // // UX ? @@ -241,32 +288,32 @@ type FullNode interface { // WalletNew creates a new address in the wallet with the given sigType. // Available key types: bls, secp256k1, secp256k1-ledger // Support for numerical types: 1 - secp256k1, 2 - BLS is deprecated - WalletNew(context.Context, types.KeyType) (address.Address, error) + WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:write // WalletHas indicates whether the given address is in the wallet. - WalletHas(context.Context, address.Address) (bool, error) + WalletHas(context.Context, address.Address) (bool, error) //perm:write // WalletList lists all the addresses in the wallet. - WalletList(context.Context) ([]address.Address, error) + WalletList(context.Context) ([]address.Address, error) //perm:write // WalletBalance returns the balance of the given address at the current head of the chain. - WalletBalance(context.Context, address.Address) (types.BigInt, error) + WalletBalance(context.Context, address.Address) (types.BigInt, error) //perm:read // WalletSign signs the given bytes using the given address. - WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) + WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) //perm:sign // WalletSignMessage signs the given message using the given address. - WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) + WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) //perm:sign // WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid. // The address does not have to be in the wallet. 
- WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) + WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read // WalletDefaultAddress returns the address marked as default in the wallet. - WalletDefaultAddress(context.Context) (address.Address, error) + WalletDefaultAddress(context.Context) (address.Address, error) //perm:write // WalletSetDefault marks the given address as as the default one. - WalletSetDefault(context.Context, address.Address) error + WalletSetDefault(context.Context, address.Address) error //perm:write // WalletExport returns the private key of an address in the wallet. - WalletExport(context.Context, address.Address) (*types.KeyInfo, error) + WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin // WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet. - WalletImport(context.Context, *types.KeyInfo) (address.Address, error) + WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin // WalletDelete deletes an address from the wallet. - WalletDelete(context.Context, address.Address) error + WalletDelete(context.Context, address.Address) error //perm:admin // WalletValidateAddress validates whether a given string can be decoded as a well-formed address - WalletValidateAddress(context.Context, string) (address.Address, error) + WalletValidateAddress(context.Context, string) (address.Address, error) //perm:read // Other @@ -275,198 +322,290 @@ type FullNode interface { // retrieval markets as a client // ClientImport imports file under the specified path into filestore. 
- ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error) + ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error) //perm:admin // ClientRemoveImport removes file import - ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error + ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin // ClientStartDeal proposes a deal with a miner. - ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) + ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:admin + // ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. + ClientStatelessDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:write // ClientGetDealInfo returns the latest information about a given deal. - ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) + ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) //perm:read // ClientListDeals returns information about the deals made by the local client. - ClientListDeals(ctx context.Context) ([]DealInfo, error) + ClientListDeals(ctx context.Context) ([]DealInfo, error) //perm:write // ClientGetDealUpdates returns the status of updated deals - ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error) + ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error) //perm:write // ClientGetDealStatus returns status given a code - ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) + ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read // ClientHasLocal indicates whether a certain CID is locally stored. - ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) + ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write // ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). 
- ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]QueryOffer, error) + ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]QueryOffer, error) //perm:read // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. - ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) + ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) //perm:read // ClientRetrieve initiates the retrieval of a file, as specified in the order. - ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error + ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error //perm:admin // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel // of status updates. - ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) + ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin + // ClientListRetrievals returns information about retrievals made by the local client + ClientListRetrievals(ctx context.Context) ([]RetrievalInfo, error) //perm:write + // ClientGetRetrievalUpdates returns status of updated retrieval deals + ClientGetRetrievalUpdates(ctx context.Context) (<-chan RetrievalInfo, error) //perm:write // ClientQueryAsk returns a signed StorageAsk from the specified miner. 
- ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) + ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read // ClientCalcCommP calculates the CommP and data size of the specified CID - ClientDealPieceCID(ctx context.Context, root cid.Cid) (DataCIDSize, error) + ClientDealPieceCID(ctx context.Context, root cid.Cid) (DataCIDSize, error) //perm:read // ClientCalcCommP calculates the CommP for a specified file - ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error) + ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error) //perm:write // ClientGenCar generates a CAR file for the specified file. - ClientGenCar(ctx context.Context, ref FileRef, outpath string) error + ClientGenCar(ctx context.Context, ref FileRef, outpath string) error //perm:write // ClientDealSize calculates real deal data size - ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error) + ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error) //perm:read // ClientListTransfers returns the status of all ongoing transfers of data - ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) - ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) + ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write + ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write // ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer - ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error + ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write // ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other 
peer - ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error + ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write // ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel // which are stuck due to insufficient funds - ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error + ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write + + // ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID + ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write // ClientUnimport removes references to the specified file from filestore //ClientUnimport(path string) // ClientListImports lists imported files and their root CIDs - ClientListImports(ctx context.Context) ([]Import, error) + ClientListImports(ctx context.Context) ([]Import, error) //perm:write //ClientListAsks() []Ask // MethodGroup: State // The State methods are used to query, inspect, and interact with chain state. - // Most methods take a TipSetKey as a parameter. The state looked up is the state at that tipset. + // Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset. // A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used. // StateCall runs the given message and returns its result without any persisted changes. - StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error) + // + // StateCall applies the message to the tipset's parent state. The + // message is not applied on-top-of the messages in the passed-in + // tipset. 
+ StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error) //perm:read // StateReplay replays a given message, assuming it was included in a block in the specified tipset. - // If no tipset key is provided, the appropriate tipset is looked up. - StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error) + // + // If a tipset key is provided, and a replacing message is found on chain, + // the method will return an error saying that the message wasn't found + // + // If no tipset key is provided, the appropriate tipset is looked up, and if + // the message was gas-repriced, the on-chain message will be replayed - in + // that case the returned InvocResult.MsgCid will not match the Cid param + // + // If the caller wants to ensure that exactly the requested message was executed, + // they MUST check that InvocResult.MsgCid is equal to the provided Cid. + // Without this check both the requested and original message may appear as + // successfully executed on-chain, which may look like a double-spend. + // + // A replacing message is a message with a different CID, any of Gas values, and + // different signature, but with all other parameters matching (source/destination, + // nonce, params, etc.) + StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error) //perm:read // StateGetActor returns the indicated actor's nonce and balance. - StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) //perm:read // StateReadState returns the indicated actor's state. 
- StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) + StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) //perm:read // StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height. - StateListMessages(ctx context.Context, match *MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) + StateListMessages(ctx context.Context, match *MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) //perm:read + // StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number. + StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) //perm:read // StateNetworkName returns the name of the network the node is synced to - StateNetworkName(context.Context) (dtypes.NetworkName, error) + StateNetworkName(context.Context) (dtypes.NetworkName, error) //perm:read // StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included. - StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) + StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read // StateMinerActiveSectors returns info about sectors that a given miner is actively proving. - StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) + StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read // StateMinerProvingDeadline calculates the deadline at some epoch for a proving period // and returns the deadline-related calculations. 
- StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) + StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) //perm:read // StateMinerPower returns the power of the indicated miner - StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error) + StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error) //perm:read // StateMinerInfo returns info about the indicated miner - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) + StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) //perm:read // StateMinerDeadlines returns all the proving deadlines for the given miner - StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error) + StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error) //perm:read // StateMinerPartitions returns all partitions in the specified deadline - StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]Partition, error) + StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]Partition, error) //perm:read // StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner - StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) + StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read // StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset - StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error) + StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error) //perm:read // StateMinerRecoveries returns a bitfield indicating 
the recovering sectors of the given miner - StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) + StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read // StateMinerInitialPledgeCollateral returns the precommit deposit for the specified miner's sector - StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) + StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read // StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector - StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) + StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read // StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent - StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) + StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read // StateMinerSectorAllocated checks if a sector is allocated - StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) + StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) //perm:read // StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector - StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) + StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) 
(miner.SectorPreCommitOnChainInfo, error) //perm:read // StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found // NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate // expiration epoch - StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) + StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) //perm:read // StateSectorExpiration returns epoch at which given sector will expire - StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) + StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) //perm:read // StateSectorPartition finds deadline/partition with the specified sector - StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) - // StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed - StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error) - // StateWaitMsg looks back in the chain for a message. If not found, it blocks until the - // message arrives on chain, and gets to the indicated confidence depth. - StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error) - // StateWaitMsgLimited looks back up to limit epochs in the chain for a message. 
+ StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) //perm:read + // StateSearchMsg looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed + // + // NOTE: If a replacing message is found on chain, this method will return + // a MsgLookup for the replacing message - the MsgLookup.Message will be a different + // CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the + // result of the execution of the replacing message. + // + // If the caller wants to ensure that exactly the requested message was executed, + // they must check that MsgLookup.Message is equal to the provided 'cid', or set the + // `allowReplaced` parameter to false. Without this check, and with `allowReplaced` + // set to true, both the requested and original message may appear as + // successfully executed on-chain, which may look like a double-spend. + // + // A replacing message is a message with a different CID, any of Gas values, and + // different signature, but with all other parameters matching (source/destination, + // nonce, params, etc.) + StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) //perm:read + // StateWaitMsg looks back up to limit epochs in the chain for a message. // If not found, it blocks until the message arrives on chain, and gets to the // indicated confidence depth. - StateWaitMsgLimited(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch) (*MsgLookup, error) + // + // NOTE: If a replacing message is found on chain, this method will return + // a MsgLookup for the replacing message - the MsgLookup.Message will be a different + // CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the + // result of the execution of the replacing message. 
+ // + // If the caller wants to ensure that exactly the requested message was executed, + // they must check that MsgLookup.Message is equal to the provided 'cid', or set the + // `allowReplaced` parameter to false. Without this check, and with `allowReplaced` + // set to true, both the requested and original message may appear as + // successfully executed on-chain, which may look like a double-spend. + // + // A replacing message is a message with a different CID, any of Gas values, and + // different signature, but with all other parameters matching (source/destination, + // nonce, params, etc.) + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) //perm:read // StateListMiners returns the addresses of every miner that has claimed power in the Power Actor - StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error) + StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read // StateListActors returns the addresses of every actor in the state - StateListActors(context.Context, types.TipSetKey) ([]address.Address, error) + StateListActors(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read // StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market - StateMarketBalance(context.Context, address.Address, types.TipSetKey) (MarketBalance, error) + StateMarketBalance(context.Context, address.Address, types.TipSetKey) (MarketBalance, error) //perm:read // StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market - StateMarketParticipants(context.Context, types.TipSetKey) (map[string]MarketBalance, error) + StateMarketParticipants(context.Context, types.TipSetKey) (map[string]MarketBalance, error) //perm:read // StateMarketDeals returns information about every deal in the Storage Market - StateMarketDeals(context.Context, 
types.TipSetKey) (map[string]MarketDeal, error) + StateMarketDeals(context.Context, types.TipSetKey) (map[string]MarketDeal, error) //perm:read // StateMarketStorageDeal returns information about the indicated deal - StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error) + StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error) //perm:read // StateLookupID retrieves the ID address of the given address - StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) + StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read // StateAccountKey returns the public key address of the given ID address - StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) + StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read // StateChangedActors returns all the actors whose states change between the two given state CIDs // TODO: Should this take tipset keys instead? - StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) - // StateGetReceipt returns the message receipt for the given message - StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) + StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) //perm:read // StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set - StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error) + StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error) //perm:read // StateCompute is a flexible command that applies the given messages on the given tipset. // The messages are run as though the VM were at the provided height. 
- StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error) + // + // When called, StateCompute will: + // - Load the provided tipset, or use the current chain head if not provided + // - Compute the tipset state of the provided tipset on top of the parent state + // - (note that this step runs before vmheight is applied to the execution) + // - Execute state upgrade if any were scheduled at the epoch, or in null + // blocks preceding the tipset + // - Call the cron actor on null blocks preceding the tipset + // - For each block in the tipset + // - Apply messages in blocks in the specified + // - Award block reward by calling the reward actor + // - Call the cron actor for the current epoch + // - If the specified vmheight is higher than the current epoch, apply any + // needed state upgrades to the state + // - Apply the specified messages to the state + // + // The vmheight parameter sets VM execution epoch, and can be used to simulate + // message execution in different network versions. If the specified vmheight + // epoch is higher than the epoch of the specified tipset, any state upgrades + // until the vmheight will be executed on the state before applying messages + // specified by the user. + // + // Note that the initial tipset state computation is not affected by the + // vmheight parameter - only the messages in the `apply` set are + // + // If the caller wants to simply compute the state, vmheight should be set to + // the epoch of the specified tipset. + // + // Messages in the `apply` parameter must have the correct nonces, and gas + // values set. + StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error) //perm:read // StateVerifierStatus returns the data cap for the given address. // Returns nil if there is no entry in the data cap table for the // address. 
- StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) + StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read // StateVerifiedClientStatus returns the data cap for the given address. // Returns nil if there is no entry in the data cap table for the // address. - StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) + StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read // StateVerifiedClientStatus returns the address of the Verified Registry's root key - StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) + StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) //perm:read // StateDealProviderCollateralBounds returns the min and max collateral a storage provider // can issue. It takes the deal size and verified status as parameters. - StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error) + StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error) //perm:read // StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset. // This is not used anywhere in the protocol itself, and is only for external consumption. - StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) + StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) //perm:read // StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset. // This is the value reported by the runtime interface to actors code. 
- StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (CirculatingSupply, error) + StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (CirculatingSupply, error) //perm:read // StateNetworkVersion returns the network version at the given tipset - StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) + StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) //perm:read // MethodGroup: Msig // The Msig methods are used to interact with multisig wallets on the // filecoin network // MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent - MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) + MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read // MsigGetVestingSchedule returns the vesting details of a given multisig. - MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (MsigVesting, error) + MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (MsigVesting, error) //perm:read // MsigGetVested returns the amount of FIL that vested in a multisig in a certain period. // It takes the following params: , , - MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) + MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) //perm:read + + //MsigGetPending returns pending transactions for the given multisig + //wallet. Once pending transactions are fully approved, they will no longer + //appear here. 
+ MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error) //perm:read + // MsigCreate creates a multisig wallet // It takes the following params: , , //, , - MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) + MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (*MessagePrototype, error) //perm:sign + // MsigPropose proposes a multisig message // It takes the following params: , , , // , , - MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) + MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign // MsigApprove approves a previously-proposed multisig message by transaction ID // It takes the following params: , - MsigApprove(context.Context, address.Address, uint64, address.Address) (cid.Cid, error) + MsigApprove(context.Context, address.Address, uint64, address.Address) (*MessagePrototype, error) //perm:sign // MsigApproveTxnHash approves a previously-proposed multisig message, specified // using both transaction ID and a hash of the parameters used in the @@ -474,72 +613,91 @@ type FullNode interface { // exactly the transaction you think you are. 
// It takes the following params: , , , , , // , , - MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) + MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign // MsigCancel cancels a previously-proposed multisig message // It takes the following params: , , , , // , , - MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) + MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign + // MsigAddPropose proposes adding a signer in the multisig // It takes the following params: , , // , - MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) + MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (*MessagePrototype, error) //perm:sign + // MsigAddApprove approves a previously proposed AddSigner message // It takes the following params: , , , // , , - MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) + MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (*MessagePrototype, error) //perm:sign + // MsigAddCancel cancels a previously proposed AddSigner message // It takes the following params: , , , // , - MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) + MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (*MessagePrototype, error) //perm:sign + // MsigSwapPropose proposes swapping 2 signers in the multisig // It takes the following 
params: , , // , - MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) + MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (*MessagePrototype, error) //perm:sign + // MsigSwapApprove approves a previously proposed SwapSigner // It takes the following params: , , , // , , - MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) + MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (*MessagePrototype, error) //perm:sign + // MsigSwapCancel cancels a previously proposed SwapSigner message // It takes the following params: , , , // , - MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) + MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (*MessagePrototype, error) //perm:sign // MsigRemoveSigner proposes the removal of a signer from the multisig. // It accepts the multisig to make the change on, the proposer address to // send the message from, the address to be removed, and a boolean // indicating whether or not the signing threshold should be lowered by one // along with the address removal. 
- MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) - - MarketEnsureAvailable(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error) - // MarketFreeBalance + MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (*MessagePrototype, error) //perm:sign + + // MarketAddBalance adds funds to the market actor + MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign + // MarketGetReserved gets the amount of funds that are currently reserved for the address + MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) //perm:sign + // MarketReserveFunds reserves funds for a deal + MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign + // MarketReleaseFunds releases funds reserved by MarketReserveFunds + MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error //perm:sign + // MarketWithdraw withdraws unlocked funds from the market actor + MarketWithdraw(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign // MethodGroup: Paych // The Paych methods are for interacting with and managing payment channels - PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error) - PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error) - PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error) - PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error) - PaychList(context.Context) ([]address.Address, error) - PaychStatus(context.Context, address.Address) (*PaychStatus, error) - PaychSettle(context.Context, address.Address) (cid.Cid, 
error) - PaychCollect(context.Context, address.Address) (cid.Cid, error) - PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) - PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []VoucherSpec) (*PaymentInfo, error) - PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error - PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) - PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*VoucherCreateResult, error) - PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) - PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) - PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) + PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error) //perm:sign + PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error) //perm:sign + PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error) //perm:sign + PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error) //perm:sign + PaychList(context.Context) ([]address.Address, error) //perm:read + PaychStatus(context.Context, address.Address) (*PaychStatus, error) //perm:read + PaychSettle(context.Context, address.Address) (cid.Cid, error) //perm:sign + PaychCollect(context.Context, address.Address) (cid.Cid, error) //perm:sign + PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) //perm:sign + PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []VoucherSpec) (*PaymentInfo, error) //perm:sign + PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error //perm:read + PaychVoucherCheckSpendable(context.Context, address.Address, 
*paych.SignedVoucher, []byte, []byte) (bool, error) //perm:read + PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*VoucherCreateResult, error) //perm:sign + PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) //perm:write + PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) //perm:write + PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) //perm:sign + + // MethodGroup: Node + // These methods are general node management and status commands + + NodeStatus(ctx context.Context, inclChainStatus bool) (NodeStatus, error) //perm:read // CreateBackup creates node backup onder the specified file name. The // method requires that the lotus daemon is running with the // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that // the path specified when calling CreateBackup is within the base path - CreateBackup(ctx context.Context, fpath string) error + CreateBackup(ctx context.Context, fpath string) error //perm:admin } type FileRef struct { @@ -574,6 +732,7 @@ type DealInfo struct { ProposalCid cid.Cid State storagemarket.StorageDealStatus Message string // more information about deal state, particularly errors + DealStages *storagemarket.DealStages Provider address.Address DataRef *storagemarket.DataRef @@ -587,6 +746,9 @@ type DealInfo struct { CreationTime time.Time Verified bool + + TransferChannelID *datatransfer.ChannelID + DataTransfer *DataTransferChannel } type MsgLookup struct { @@ -624,6 +786,7 @@ type Message struct { type ActorState struct { Balance types.BigInt + Code cid.Cid State interface{} } @@ -725,7 +888,7 @@ func (o *QueryOffer) Order(client address.Address) RetrievalOrder { Client: client, Miner: o.Miner, - MinerPeer: o.MinerPeer, + MinerPeer: &o.MinerPeer, } } @@ -744,6 +907,8 @@ type RetrievalOrder struct { Root cid.Cid Piece *cid.Cid Size uint64 + + 
LocalStore *multistore.StoreID // if specified, get data from local store // TODO: support offset Total types.BigInt UnsealPrice types.BigInt @@ -751,7 +916,7 @@ type RetrievalOrder struct { PaymentIntervalIncrease uint64 Client address.Address Miner address.Address - MinerPeer retrievalmarket.RetrievalPeer + MinerPeer *retrievalmarket.RetrievalPeer } type InvocResult struct { @@ -781,14 +946,31 @@ type StartDealParams struct { VerifiedDeal bool } +func (s *StartDealParams) UnmarshalJSON(raw []byte) (err error) { + type sdpAlias StartDealParams + + sdp := sdpAlias{ + FastRetrieval: true, + } + + if err := json.Unmarshal(raw, &sdp); err != nil { + return err + } + + *s = StartDealParams(sdp) + + return nil +} + type IpldObject struct { Cid cid.Cid Obj interface{} } type ActiveSync struct { - Base *types.TipSet - Target *types.TipSet + WorkerID uint64 + Base *types.TipSet + Target *types.TipSet Stage SyncStateStage Height abi.ChainEpoch @@ -818,6 +1000,8 @@ const ( func (v SyncStateStage) String() string { switch v { + case StageIdle: + return "idle" case StageHeaders: return "header sync" case StagePersistHeaders: @@ -858,11 +1042,12 @@ type DealCollateralBounds struct { } type CirculatingSupply struct { - FilVested abi.TokenAmount - FilMined abi.TokenAmount - FilBurnt abi.TokenAmount - FilLocked abi.TokenAmount - FilCirculating abi.TokenAmount + FilVested abi.TokenAmount + FilMined abi.TokenAmount + FilBurnt abi.TokenAmount + FilLocked abi.TokenAmount + FilCirculating abi.TokenAmount + FilReserveDisbursed abi.TokenAmount } type MiningBaseInfo struct { @@ -916,7 +1101,8 @@ const ( ) type Deadline struct { - PostSubmissions bitfield.BitField + PostSubmissions bitfield.BitField + DisputableProofCount uint64 } type Partition struct { @@ -948,3 +1134,13 @@ type MessageMatch struct { To address.Address From address.Address } + +type MsigTransaction struct { + ID int64 + To address.Address + Value abi.TokenAmount + Method abi.MethodNum + Params []byte + + Approved 
[]address.Address +} diff --git a/api/api_gateway.go b/api/api_gateway.go index 07fb5deb34e..6db1c8e45a1 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -8,13 +8,27 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/go-state-types/network" + apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" ) -type GatewayAPI interface { +// MODIFYING THE API INTERFACE +// +// NOTE: This is the V1 (Unstable) API - to add methods to the V0 (Stable) API +// you'll have to add those methods to interfaces in `api/v0api` +// +// When adding / changing methods in this file: +// * Do the change here +// * Adjust implementation in `node/impl/` +// * Run `make gen` - this will: +// * Generate proxy structs +// * Generate mocks +// * Generate markdown docs +// * Generate openrpc blobs + +type Gateway interface { ChainHasObj(context.Context, cid.Cid) (bool, error) ChainHead(ctx context.Context) (*types.TipSet, error) ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error) @@ -27,10 +41,11 @@ type GatewayAPI interface { MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) + MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error) StateGetActor(ctx context.Context, actor address.Address, ts 
types.TipSetKey) (*types.Actor, error) - StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) + StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) //perm:read StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error) @@ -38,7 +53,11 @@ type GatewayAPI interface { StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error) - StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) + StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) + StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) - StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*MsgLookup, error) + StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) + WalletBalance(context.Context, address.Address) (types.BigInt, error) + Version(context.Context) (APIVersion, error) } diff --git a/api/api_net.go b/api/api_net.go new file mode 100644 index 00000000000..4cf9ca336a3 --- /dev/null +++ b/api/api_net.go @@ -0,0 +1,66 @@ +package api + +import ( + "context" + + metrics 
"github.com/libp2p/go-libp2p-core/metrics" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" +) + +// MODIFYING THE API INTERFACE +// +// When adding / changing methods in this file: +// * Do the change here +// * Adjust implementation in `node/impl/` +// * Run `make gen` - this will: +// * Generate proxy structs +// * Generate mocks +// * Generate markdown docs +// * Generate openrpc blobs + +type Net interface { + // MethodGroup: Net + + NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) //perm:read + NetPeers(context.Context) ([]peer.AddrInfo, error) //perm:read + NetConnect(context.Context, peer.AddrInfo) error //perm:write + NetAddrsListen(context.Context) (peer.AddrInfo, error) //perm:read + NetDisconnect(context.Context, peer.ID) error //perm:write + NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) //perm:read + NetPubsubScores(context.Context) ([]PubsubScore, error) //perm:read + NetAutoNatStatus(context.Context) (NatInfo, error) //perm:read + NetAgentVersion(ctx context.Context, p peer.ID) (string, error) //perm:read + NetPeerInfo(context.Context, peer.ID) (*ExtendedPeerInfo, error) //perm:read + + // NetBandwidthStats returns statistics about the nodes total bandwidth + // usage and current rate across all peers and protocols. 
+ NetBandwidthStats(ctx context.Context) (metrics.Stats, error) //perm:read + + // NetBandwidthStatsByPeer returns statistics about the nodes bandwidth + // usage and current rate per peer + NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) //perm:read + + // NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth + // usage and current rate per protocol + NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) //perm:read + + // ConnectionGater API + NetBlockAdd(ctx context.Context, acl NetBlockList) error //perm:admin + NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin + NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read + + // ID returns peerID of libp2p node backing this API + ID(context.Context) (peer.ID, error) //perm:read +} + +type CommonNet interface { + Common + Net +} + +type NatInfo struct { + Reachability network.Reachability + PublicAddr string +} diff --git a/api/api_storage.go b/api/api_storage.go index 2176456b7df..154abcea713 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -5,118 +5,205 @@ import ( "context" "time" - datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/google/uuid" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-fil-markets/piecestore" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" 
"github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" ) +// MODIFYING THE API INTERFACE +// +// When adding / changing methods in this file: +// * Do the change here +// * Adjust implementation in `node/impl/` +// * Run `make gen` - this will: +// * Generate proxy structs +// * Generate mocks +// * Generate markdown docs +// * Generate openrpc blobs + // StorageMiner is a low-level interface to the Filecoin network storage miner node type StorageMiner interface { Common + Net - ActorAddress(context.Context) (address.Address, error) + ActorAddress(context.Context) (address.Address, error) //perm:read - ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error) + ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error) //perm:read + ActorAddressConfig(ctx context.Context) (AddressConfig, error) //perm:read - MiningBase(context.Context) (*types.TipSet, error) + MiningBase(context.Context) (*types.TipSet, error) //perm:read // Temp api for testing - PledgeSector(context.Context) error + PledgeSector(context.Context) (abi.SectorID, error) //perm:write // Get the status of a given sector by ID - SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error) + SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error) //perm:read + + // Add piece to an open sector. If no sectors with enough space are open, + // either a new sector will be created, or this call will block until more + // sectors can be created. 
+ SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d PieceDealInfo) (SectorOffset, error) //perm:admin + + SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error //perm:admin // List all staged sectors - SectorsList(context.Context) ([]abi.SectorNumber, error) + SectorsList(context.Context) ([]abi.SectorNumber, error) //perm:read - SectorsRefs(context.Context) (map[string][]SealedRef, error) + // Get summary info of sectors + SectorsSummary(ctx context.Context) (map[SectorState]int, error) //perm:read + + // List sectors in particular states + SectorsListInStates(context.Context, []SectorState) ([]abi.SectorNumber, error) //perm:read + + SectorsRefs(context.Context) (map[string][]SealedRef, error) //perm:read // SectorStartSealing can be called on sectors in Empty or WaitDeals states // to trigger sealing early - SectorStartSealing(context.Context, abi.SectorNumber) error + SectorStartSealing(context.Context, abi.SectorNumber) error //perm:write // SectorSetSealDelay sets the time that a newly-created sector // waits for more deals before it starts sealing - SectorSetSealDelay(context.Context, time.Duration) error + SectorSetSealDelay(context.Context, time.Duration) error //perm:write // SectorGetSealDelay gets the time that a newly-created sector // waits for more deals before it starts sealing - SectorGetSealDelay(context.Context) (time.Duration, error) + SectorGetSealDelay(context.Context) (time.Duration, error) //perm:read // SectorSetExpectedSealDuration sets the expected time for a sector to seal - SectorSetExpectedSealDuration(context.Context, time.Duration) error + SectorSetExpectedSealDuration(context.Context, time.Duration) error //perm:write // SectorGetExpectedSealDuration gets the expected time for a sector to seal - SectorGetExpectedSealDuration(context.Context) (time.Duration, error) - 
SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error - SectorRemove(context.Context, abi.SectorNumber) error - SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error - - StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) - StorageLocal(ctx context.Context) (map[stores.ID]string, error) - StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) + SectorGetExpectedSealDuration(context.Context) (time.Duration, error) //perm:read + SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error //perm:admin + // SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can + // be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties. + SectorRemove(context.Context, abi.SectorNumber) error //perm:admin + // SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then + // automatically removes it from storage + SectorTerminate(context.Context, abi.SectorNumber) error //perm:admin + // SectorTerminateFlush immediately sends a terminate message with sectors batched for termination. + // Returns null if message wasn't sent + SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) //perm:admin + // SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message + SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin + SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error //perm:admin + // SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit. 
+ // Returns null if message wasn't sent + SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin + // SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message + SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin + // SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit. + // Returns null if message wasn't sent + SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) //perm:admin + // SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message + SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin // WorkerConnect tells the node to connect to workers RPC - WorkerConnect(context.Context, string) error - WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) - WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) - storiface.WorkerReturn + WorkerConnect(context.Context, string) error //perm:admin retry:true + WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) //perm:admin + WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) //perm:admin + + //storiface.WorkerReturn + ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true + ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true + ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error //perm:admin retry:true + ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error //perm:admin retry:true + ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) 
error //perm:admin retry:true + ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true + ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true // SealingSchedDiag dumps internal sealing scheduler state - SealingSchedDiag(context.Context) (interface{}, error) - - stores.SectorIndex - - MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error - MarketListDeals(ctx context.Context) ([]MarketDeal, error) - MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) - MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) - MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) - MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error - MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) - MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error - MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) - MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) - MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) - // MinerRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer - 
MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error - // ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer - MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error - - DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error - DealsList(ctx context.Context) ([]MarketDeal, error) - DealsConsiderOnlineStorageDeals(context.Context) (bool, error) - DealsSetConsiderOnlineStorageDeals(context.Context, bool) error - DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error) - DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error - DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error) - DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error - DealsConsiderOfflineStorageDeals(context.Context) (bool, error) - DealsSetConsiderOfflineStorageDeals(context.Context, bool) error - DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error) - DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error - - StorageAddLocal(ctx context.Context, path string) error - - PiecesListPieces(ctx context.Context) ([]cid.Cid, error) - PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) - PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) - PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) + SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin + SealingAbort(ctx context.Context, call storiface.CallID) error //perm:admin + + //stores.SectorIndex + StorageAttach(context.Context, stores.StorageInfo, fsutil.FsStat) error //perm:admin + StorageInfo(context.Context, stores.ID) (stores.StorageInfo, error) //perm:admin + StorageReportHealth(context.Context, stores.ID, stores.HealthReport) error //perm:admin + StorageDeclareSector(ctx 
context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error //perm:admin + StorageDropSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType) error //perm:admin + StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) //perm:admin + StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) //perm:admin + StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin + StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin + StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin + + StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin + StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) //perm:admin + + MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write + MarketListDeals(ctx context.Context) ([]MarketDeal, error) //perm:read + MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) //perm:read + MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) //perm:read + MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) //perm:read + MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin + MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) //perm:read + MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error //perm:admin + MarketGetRetrievalAsk(ctx 
context.Context) (*retrievalmarket.Ask, error) //perm:read + MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write + MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write + // MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer + MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write + // MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer + MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write + MarketPendingDeals(ctx context.Context) (PendingDealInfo, error) //perm:write + MarketPublishPendingDeals(ctx context.Context) error //perm:admin + + DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error //perm:admin + DealsList(ctx context.Context) ([]MarketDeal, error) //perm:admin + DealsConsiderOnlineStorageDeals(context.Context) (bool, error) //perm:admin + DealsSetConsiderOnlineStorageDeals(context.Context, bool) error //perm:admin + DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error) //perm:admin + DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error //perm:admin + DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error) //perm:admin + DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error //perm:admin + DealsConsiderOfflineStorageDeals(context.Context) (bool, error) //perm:admin + DealsSetConsiderOfflineStorageDeals(context.Context, bool) error //perm:admin + DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error) //perm:admin + DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error //perm:admin + DealsConsiderVerifiedStorageDeals(context.Context) (bool, error) //perm:admin + DealsSetConsiderVerifiedStorageDeals(context.Context, bool) error //perm:admin + 
DealsConsiderUnverifiedStorageDeals(context.Context) (bool, error) //perm:admin + DealsSetConsiderUnverifiedStorageDeals(context.Context, bool) error //perm:admin + + StorageAddLocal(ctx context.Context, path string) error //perm:admin + + PiecesListPieces(ctx context.Context) ([]cid.Cid, error) //perm:read + PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) //perm:read + PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) //perm:read + PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) //perm:read // CreateBackup creates node backup onder the specified file name. The // method requires that the lotus-miner is running with the // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that // the path specified when calling CreateBackup is within the base path - CreateBackup(ctx context.Context, fpath string) error + CreateBackup(ctx context.Context, fpath string) error //perm:admin + + CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin + + ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) //perm:read } +var _ storiface.WorkerReturn = *new(StorageMiner) +var _ stores.SectorIndex = *new(StorageMiner) + type SealRes struct { Err string GoErr error `json:"-"` @@ -194,3 +281,54 @@ func (st *SealSeed) Equals(ost *SealSeed) bool { } type SectorState string + +type AddrUse int + +const ( + PreCommitAddr AddrUse = iota + CommitAddr + DealPublishAddr + PoStAddr + + TerminateSectorsAddr +) + +type AddressConfig struct { + PreCommitControl []address.Address + CommitControl []address.Address + TerminateControl []address.Address + DealPublishControl []address.Address + + DisableOwnerFallback bool + DisableWorkerFallback bool +} + +// PendingDealInfo has info about pending deals and when they are due to be +// published 
+type PendingDealInfo struct { + Deals []market.ClientDealProposal + PublishPeriodStart time.Time + PublishPeriod time.Duration +} + +type SectorOffset struct { + Sector abi.SectorNumber + Offset abi.PaddedPieceSize +} + +// DealInfo is a tuple of deal identity and its schedule +type PieceDealInfo struct { + PublishCid *cid.Cid + DealID abi.DealID + DealProposal *market.DealProposal + DealSchedule DealSchedule + KeepUnsealed bool +} + +// DealSchedule communicates the time interval of a storage deal. The deal must +// appear in a sealed (proven) sector no later than StartEpoch, otherwise it +// is invalid. +type DealSchedule struct { + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch +} diff --git a/api/api_test.go b/api/api_test.go index 34c47f432c5..738e1b06799 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -37,6 +37,18 @@ func TestDoesntDependOnFFI(t *testing.T) { } } +func TestDoesntDependOnBuild(t *testing.T) { + deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output() + if err != nil { + t.Fatal(err) + } + for _, pkg := range strings.Fields(string(deps)) { + if pkg == "github.com/filecoin-project/lotus/build" { + t.Fatal("api depends on build") + } + } +} + func TestReturnTypes(t *testing.T) { errType := reflect.TypeOf(new(error)).Elem() bareIface := reflect.TypeOf(new(interface{})).Elem() @@ -99,5 +111,11 @@ func TestReturnTypes(t *testing.T) { t.Run("common", tst(new(Common))) t.Run("full", tst(new(FullNode))) t.Run("miner", tst(new(StorageMiner))) - t.Run("worker", tst(new(WorkerAPI))) + t.Run("worker", tst(new(Worker))) +} + +func TestPermTags(t *testing.T) { + _ = PermissionedFullAPI(&FullNodeStruct{}) + _ = PermissionedStorMinerAPI(&StorageMinerStruct{}) + _ = PermissionedWorkerAPI(&WorkerStruct{}) } diff --git a/api/api_wallet.go b/api/api_wallet.go index 88ad8f43a7a..973aaaf6d85 100644 --- a/api/api_wallet.go +++ b/api/api_wallet.go @@ -34,14 +34,14 @@ type MsgMeta struct { Extra []byte } -type 
WalletAPI interface { - WalletNew(context.Context, types.KeyType) (address.Address, error) - WalletHas(context.Context, address.Address) (bool, error) - WalletList(context.Context) ([]address.Address, error) +type Wallet interface { + WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:admin + WalletHas(context.Context, address.Address) (bool, error) //perm:admin + WalletList(context.Context) ([]address.Address, error) //perm:admin - WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta MsgMeta) (*crypto.Signature, error) + WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta MsgMeta) (*crypto.Signature, error) //perm:admin - WalletExport(context.Context, address.Address) (*types.KeyInfo, error) - WalletImport(context.Context, *types.KeyInfo) (address.Address, error) - WalletDelete(context.Context, address.Address) error + WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin + WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin + WalletDelete(context.Context, address.Address) error //perm:admin } diff --git a/api/api_worker.go b/api/api_worker.go index 036748ec6f9..4553c30e095 100644 --- a/api/api_worker.go +++ b/api/api_worker.go @@ -4,29 +4,69 @@ import ( "context" "github.com/google/uuid" + "github.com/ipfs/go-cid" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" - - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/specs-storage/storage" ) -type WorkerAPI interface { - Version(context.Context) (build.Version, error) - // TODO: Info() (name, ...) ? 
+// MODIFYING THE API INTERFACE +// +// When adding / changing methods in this file: +// * Do the change here +// * Adjust implementation in `node/impl/` +// * Run `make gen` - this will: +// * Generate proxy structs +// * Generate mocks +// * Generate markdown docs +// * Generate openrpc blobs + +type Worker interface { + Version(context.Context) (Version, error) //perm:admin - TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) // TaskType -> Weight - Paths(context.Context) ([]stores.StoragePath, error) - Info(context.Context) (storiface.WorkerInfo, error) + // TaskType -> Weight + TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) //perm:admin + Paths(context.Context) ([]stores.StoragePath, error) //perm:admin + Info(context.Context) (storiface.WorkerInfo, error) //perm:admin - storiface.WorkerCalls + // storiface.WorkerCalls + AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) //perm:admin + SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin + SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) //perm:admin + SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) //perm:admin + SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) //perm:admin + FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin + ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) //perm:admin + MoveStorage(ctx context.Context, sector 
storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) //perm:admin + UnsealPiece(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin + Fetch(context.Context, storage.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) //perm:admin + + TaskDisable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin + TaskEnable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin // Storage / Other - Remove(ctx context.Context, sector abi.SectorID) error + Remove(ctx context.Context, sector abi.SectorID) error //perm:admin + + StorageAddLocal(ctx context.Context, path string) error //perm:admin - StorageAddLocal(ctx context.Context, path string) error + // SetEnabled marks the worker as enabled/disabled. Note that this setting + // may take a few seconds to propagate to task scheduler + SetEnabled(ctx context.Context, enabled bool) error //perm:admin - Session(context.Context) (uuid.UUID, error) + Enabled(ctx context.Context) (bool, error) //perm:admin + + // WaitQuiet blocks until there are no tasks running + WaitQuiet(ctx context.Context) error //perm:admin + + // returns a random UUID of worker session, generated randomly when worker + // process starts + ProcessSession(context.Context) (uuid.UUID, error) //perm:admin + + // Like ProcessSession, but returns an error when worker is disabled + Session(context.Context) (uuid.UUID, error) //perm:admin } + +var _ storiface.WorkerCalls = *new(Worker) diff --git a/api/apibstore/apibstore.go b/api/apibstore/apibstore.go deleted file mode 100644 index cf9f4f24c66..00000000000 --- a/api/apibstore/apibstore.go +++ /dev/null @@ -1,68 +0,0 @@ -package apibstore - -import ( - "context" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/lib/blockstore" -) - -type 
ChainIO interface { - ChainReadObj(context.Context, cid.Cid) ([]byte, error) - ChainHasObj(context.Context, cid.Cid) (bool, error) -} - -type apiBStore struct { - api ChainIO -} - -func NewAPIBlockstore(cio ChainIO) blockstore.Blockstore { - return &apiBStore{ - api: cio, - } -} - -func (a *apiBStore) DeleteBlock(cid.Cid) error { - return xerrors.New("not supported") -} - -func (a *apiBStore) Has(c cid.Cid) (bool, error) { - return a.api.ChainHasObj(context.TODO(), c) -} - -func (a *apiBStore) Get(c cid.Cid) (blocks.Block, error) { - bb, err := a.api.ChainReadObj(context.TODO(), c) - if err != nil { - return nil, err - } - return blocks.NewBlockWithCid(bb, c) -} - -func (a *apiBStore) GetSize(c cid.Cid) (int, error) { - bb, err := a.api.ChainReadObj(context.TODO(), c) - if err != nil { - return 0, err - } - return len(bb), nil -} - -func (a *apiBStore) Put(blocks.Block) error { - return xerrors.New("not supported") -} - -func (a *apiBStore) PutMany([]blocks.Block) error { - return xerrors.New("not supported") -} - -func (a *apiBStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { - return nil, xerrors.New("not supported") -} - -func (a *apiBStore) HashOnRead(enabled bool) { - return -} - -var _ blockstore.Blockstore = &apiBStore{} diff --git a/api/apistruct/permissioned.go b/api/apistruct/permissioned.go deleted file mode 100644 index 86902d31b29..00000000000 --- a/api/apistruct/permissioned.go +++ /dev/null @@ -1,44 +0,0 @@ -package apistruct - -import ( - "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/lotus/api" -) - -const ( - // When changing these, update docs/API.md too - - PermRead auth.Permission = "read" // default - PermWrite auth.Permission = "write" - PermSign auth.Permission = "sign" // Use wallet keys for signing - PermAdmin auth.Permission = "admin" // Manage permissions -) - -var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin} -var DefaultPerms = []auth.Permission{PermRead} - 
-func PermissionedStorMinerAPI(a api.StorageMiner) api.StorageMiner { - var out StorageMinerStruct - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal) - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal) - return &out -} - -func PermissionedFullAPI(a api.FullNode) api.FullNode { - var out FullNodeStruct - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal) - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal) - return &out -} - -func PermissionedWorkerAPI(a api.WorkerAPI) api.WorkerAPI { - var out WorkerStruct - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal) - return &out -} - -func PermissionedWalletAPI(a api.WalletAPI) api.WalletAPI { - var out WalletStruct - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal) - return &out -} diff --git a/api/apistruct/struct.go b/api/apistruct/struct.go deleted file mode 100644 index 3a4ae75a8f3..00000000000 --- a/api/apistruct/struct.go +++ /dev/null @@ -1,1688 +0,0 @@ -package apistruct - -import ( - "context" - "io" - "time" - - "github.com/google/uuid" - "github.com/ipfs/go-cid" - metrics "github.com/libp2p/go-libp2p-core/metrics" - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - protocol "github.com/libp2p/go-libp2p-core/protocol" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/go-multistore" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/crypto" - 
"github.com/filecoin-project/go-state-types/dline" - stnetwork "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" - "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" - "github.com/filecoin-project/lotus/extern/sector-storage/stores" - "github.com/filecoin-project/lotus/extern/sector-storage/storiface" - marketevents "github.com/filecoin-project/lotus/markets/loggers" - "github.com/filecoin-project/specs-storage/storage" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/actors/builtin/paych" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -// All permissions are listed in permissioned.go -var _ = AllPermissions - -type CommonStruct struct { - Internal struct { - AuthVerify func(ctx context.Context, token string) ([]auth.Permission, error) `perm:"read"` - AuthNew func(ctx context.Context, perms []auth.Permission) ([]byte, error) `perm:"admin"` - - NetConnectedness func(context.Context, peer.ID) (network.Connectedness, error) `perm:"read"` - NetPeers func(context.Context) ([]peer.AddrInfo, error) `perm:"read"` - NetConnect func(context.Context, peer.AddrInfo) error `perm:"write"` - NetAddrsListen func(context.Context) (peer.AddrInfo, error) `perm:"read"` - NetDisconnect func(context.Context, peer.ID) error `perm:"write"` - NetFindPeer func(context.Context, peer.ID) (peer.AddrInfo, error) `perm:"read"` - NetPubsubScores func(context.Context) ([]api.PubsubScore, error) `perm:"read"` - NetAutoNatStatus func(context.Context) (api.NatInfo, error) `perm:"read"` - NetBandwidthStats func(ctx context.Context) (metrics.Stats, error) `perm:"read"` - NetBandwidthStatsByPeer func(ctx context.Context) (map[string]metrics.Stats, error) `perm:"read"` - NetBandwidthStatsByProtocol func(ctx 
context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"` - NetAgentVersion func(ctx context.Context, p peer.ID) (string, error) `perm:"read"` - - ID func(context.Context) (peer.ID, error) `perm:"read"` - Version func(context.Context) (api.Version, error) `perm:"read"` - - LogList func(context.Context) ([]string, error) `perm:"write"` - LogSetLevel func(context.Context, string, string) error `perm:"write"` - - Shutdown func(context.Context) error `perm:"admin"` - Session func(context.Context) (uuid.UUID, error) `perm:"read"` - Closing func(context.Context) (<-chan struct{}, error) `perm:"read"` - } -} - -// FullNodeStruct implements API passing calls to user-provided function values. -type FullNodeStruct struct { - CommonStruct - - Internal struct { - ChainNotify func(context.Context) (<-chan []*api.HeadChange, error) `perm:"read"` - ChainHead func(context.Context) (*types.TipSet, error) `perm:"read"` - ChainGetRandomnessFromTickets func(context.Context, types.TipSetKey, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) (abi.Randomness, error) `perm:"read"` - ChainGetRandomnessFromBeacon func(context.Context, types.TipSetKey, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) (abi.Randomness, error) `perm:"read"` - ChainGetBlock func(context.Context, cid.Cid) (*types.BlockHeader, error) `perm:"read"` - ChainGetTipSet func(context.Context, types.TipSetKey) (*types.TipSet, error) `perm:"read"` - ChainGetBlockMessages func(context.Context, cid.Cid) (*api.BlockMessages, error) `perm:"read"` - ChainGetParentReceipts func(context.Context, cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"` - ChainGetParentMessages func(context.Context, cid.Cid) ([]api.Message, error) `perm:"read"` - ChainGetTipSetByHeight func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) `perm:"read"` - ChainReadObj func(context.Context, cid.Cid) ([]byte, error) `perm:"read"` - ChainDeleteObj func(context.Context, cid.Cid) error `perm:"admin"` - ChainHasObj 
func(context.Context, cid.Cid) (bool, error) `perm:"read"` - ChainStatObj func(context.Context, cid.Cid, cid.Cid) (api.ObjStat, error) `perm:"read"` - ChainSetHead func(context.Context, types.TipSetKey) error `perm:"admin"` - ChainGetGenesis func(context.Context) (*types.TipSet, error) `perm:"read"` - ChainTipSetWeight func(context.Context, types.TipSetKey) (types.BigInt, error) `perm:"read"` - ChainGetNode func(ctx context.Context, p string) (*api.IpldObject, error) `perm:"read"` - ChainGetMessage func(context.Context, cid.Cid) (*types.Message, error) `perm:"read"` - ChainGetPath func(context.Context, types.TipSetKey, types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"` - ChainExport func(context.Context, abi.ChainEpoch, bool, types.TipSetKey) (<-chan []byte, error) `perm:"read"` - - BeaconGetEntry func(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"` - - GasEstimateGasPremium func(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"` - GasEstimateGasLimit func(context.Context, *types.Message, types.TipSetKey) (int64, error) `perm:"read"` - GasEstimateFeeCap func(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"` - GasEstimateMessageGas func(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) `perm:"read"` - - SyncState func(context.Context) (*api.SyncState, error) `perm:"read"` - SyncSubmitBlock func(ctx context.Context, blk *types.BlockMsg) error `perm:"write"` - SyncIncomingBlocks func(ctx context.Context) (<-chan *types.BlockHeader, error) `perm:"read"` - SyncCheckpoint func(ctx context.Context, key types.TipSetKey) error `perm:"admin"` - SyncMarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"` - SyncUnmarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"` - SyncUnmarkAllBad func(ctx context.Context) error `perm:"admin"` - SyncCheckBad func(ctx 
context.Context, bcid cid.Cid) (string, error) `perm:"read"` - SyncValidateTipset func(ctx context.Context, tsk types.TipSetKey) (bool, error) `perm:"read"` - - MpoolGetConfig func(context.Context) (*types.MpoolConfig, error) `perm:"read"` - MpoolSetConfig func(context.Context, *types.MpoolConfig) error `perm:"write"` - - MpoolSelect func(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) `perm:"read"` - - MpoolPending func(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"` - MpoolClear func(context.Context, bool) error `perm:"write"` - - MpoolPush func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"` - MpoolPushUntrusted func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"` - - MpoolPushMessage func(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error) `perm:"sign"` - MpoolGetNonce func(context.Context, address.Address) (uint64, error) `perm:"read"` - MpoolSub func(context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"` - - MpoolBatchPush func(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"` - MpoolBatchPushUntrusted func(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"` - MpoolBatchPushMessage func(ctx context.Context, msgs []*types.Message, spec *api.MessageSendSpec) ([]*types.SignedMessage, error) `perm:"sign"` - - MinerGetBaseInfo func(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"` - MinerCreateBlock func(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"` - - WalletNew func(context.Context, types.KeyType) (address.Address, error) `perm:"write"` - WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"` - WalletList func(context.Context) ([]address.Address, error) `perm:"write"` - WalletBalance func(context.Context, address.Address) (types.BigInt, 
error) `perm:"read"` - WalletSign func(context.Context, address.Address, []byte) (*crypto.Signature, error) `perm:"sign"` - WalletSignMessage func(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) `perm:"sign"` - WalletVerify func(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) `perm:"read"` - WalletDefaultAddress func(context.Context) (address.Address, error) `perm:"write"` - WalletSetDefault func(context.Context, address.Address) error `perm:"admin"` - WalletExport func(context.Context, address.Address) (*types.KeyInfo, error) `perm:"admin"` - WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"` - WalletDelete func(context.Context, address.Address) error `perm:"write"` - WalletValidateAddress func(context.Context, string) (address.Address, error) `perm:"read"` - - ClientImport func(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) `perm:"admin"` - ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"` - ClientRemoveImport func(ctx context.Context, importID multistore.StoreID) error `perm:"admin"` - ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"` - ClientFindData func(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) `perm:"read"` - ClientMinerQueryOffer func(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) `perm:"read"` - ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"` - ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"` - ClientGetDealStatus func(context.Context, uint64) (string, error) `perm:"read"` - ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"` - ClientGetDealUpdates func(ctx context.Context) (<-chan api.DealInfo, error) `perm:"read"` - ClientRetrieve func(ctx context.Context, order 
api.RetrievalOrder, ref *api.FileRef) error `perm:"admin"` - ClientRetrieveWithEvents func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` - ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) `perm:"read"` - ClientDealPieceCID func(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) `perm:"read"` - ClientCalcCommP func(ctx context.Context, inpath string) (*api.CommPRet, error) `perm:"read"` - ClientGenCar func(ctx context.Context, ref api.FileRef, outpath string) error `perm:"write"` - ClientDealSize func(ctx context.Context, root cid.Cid) (api.DataSize, error) `perm:"read"` - ClientListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"` - ClientDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"` - ClientRestartDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"` - ClientCancelDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"write"` - ClientRetrieveTryRestartInsufficientFunds func(ctx context.Context, paymentChannel address.Address) error `perm:"write"` - - StateNetworkName func(context.Context) (dtypes.NetworkName, error) `perm:"read"` - StateMinerSectors func(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"` - StateMinerActiveSectors func(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"` - StateMinerProvingDeadline func(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) `perm:"read"` - StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) `perm:"read"` - StateMinerInfo func(context.Context, 
address.Address, types.TipSetKey) (miner.MinerInfo, error) `perm:"read"` - StateMinerDeadlines func(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) `perm:"read"` - StateMinerPartitions func(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) `perm:"read"` - StateMinerFaults func(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) `perm:"read"` - StateAllMinerFaults func(context.Context, abi.ChainEpoch, types.TipSetKey) ([]*api.Fault, error) `perm:"read"` - StateMinerRecoveries func(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) `perm:"read"` - StateMinerPreCommitDepositForPower func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"` - StateMinerInitialPledgeCollateral func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"` - StateMinerAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"` - StateMinerSectorAllocated func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) `perm:"read"` - StateSectorPreCommitInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"` - StateSectorGetInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"` - StateSectorExpiration func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) `perm:"read"` - StateSectorPartition func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorLocation, error) `perm:"read"` - StateCall func(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) `perm:"read"` - StateReplay func(context.Context, types.TipSetKey, cid.Cid) 
(*api.InvocResult, error) `perm:"read"` - StateGetActor func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) `perm:"read"` - StateReadState func(context.Context, address.Address, types.TipSetKey) (*api.ActorState, error) `perm:"read"` - StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) `perm:"read"` - StateWaitMsgLimited func(context.Context, cid.Cid, uint64, abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"` - StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"` - StateListMiners func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"` - StateListActors func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"` - StateMarketBalance func(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) `perm:"read"` - StateMarketParticipants func(context.Context, types.TipSetKey) (map[string]api.MarketBalance, error) `perm:"read"` - StateMarketDeals func(context.Context, types.TipSetKey) (map[string]api.MarketDeal, error) `perm:"read"` - StateMarketStorageDeal func(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) `perm:"read"` - StateLookupID func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) `perm:"read"` - StateAccountKey func(context.Context, address.Address, types.TipSetKey) (address.Address, error) `perm:"read"` - StateChangedActors func(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) `perm:"read"` - StateGetReceipt func(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) `perm:"read"` - StateMinerSectorCount func(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) `perm:"read"` - StateListMessages func(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"` - StateCompute func(context.Context, abi.ChainEpoch, 
[]*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) `perm:"read"` - StateVerifierStatus func(context.Context, address.Address, types.TipSetKey) (*abi.StoragePower, error) `perm:"read"` - StateVerifiedClientStatus func(context.Context, address.Address, types.TipSetKey) (*abi.StoragePower, error) `perm:"read"` - StateVerifiedRegistryRootKey func(ctx context.Context, tsk types.TipSetKey) (address.Address, error) `perm:"read"` - StateDealProviderCollateralBounds func(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (api.DealCollateralBounds, error) `perm:"read"` - StateCirculatingSupply func(context.Context, types.TipSetKey) (abi.TokenAmount, error) `perm:"read"` - StateVMCirculatingSupplyInternal func(context.Context, types.TipSetKey) (api.CirculatingSupply, error) `perm:"read"` - StateNetworkVersion func(context.Context, types.TipSetKey) (stnetwork.Version, error) `perm:"read"` - - MsigGetAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"` - MsigGetVestingSchedule func(context.Context, address.Address, types.TipSetKey) (api.MsigVesting, error) `perm:"read"` - MsigGetVested func(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) `perm:"read"` - MsigCreate func(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"` - MsigPropose func(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"` - MsigApprove func(context.Context, address.Address, uint64, address.Address) (cid.Cid, error) `perm:"sign"` - MsigApproveTxnHash func(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"` - MsigCancel func(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) 
(cid.Cid, error) `perm:"sign"` - MsigAddPropose func(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) `perm:"sign"` - MsigAddApprove func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) `perm:"sign"` - MsigAddCancel func(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) `perm:"sign"` - MsigSwapPropose func(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"` - MsigSwapApprove func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"` - MsigSwapCancel func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) `perm:"sign"` - MsigRemoveSigner func(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) `perm:"sign"` - - MarketEnsureAvailable func(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"` - - PaychGet func(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) `perm:"sign"` - PaychGetWaitReady func(context.Context, cid.Cid) (address.Address, error) `perm:"sign"` - PaychAvailableFunds func(context.Context, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"` - PaychAvailableFundsByFromTo func(context.Context, address.Address, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"` - PaychList func(context.Context) ([]address.Address, error) `perm:"read"` - PaychStatus func(context.Context, address.Address) (*api.PaychStatus, error) `perm:"read"` - PaychSettle func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"` - PaychCollect func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"` - 
PaychAllocateLane func(context.Context, address.Address) (uint64, error) `perm:"sign"` - PaychNewPayment func(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) `perm:"sign"` - PaychVoucherCheck func(context.Context, *paych.SignedVoucher) error `perm:"read"` - PaychVoucherCheckValid func(context.Context, address.Address, *paych.SignedVoucher) error `perm:"read"` - PaychVoucherCheckSpendable func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) `perm:"read"` - PaychVoucherAdd func(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) `perm:"write"` - PaychVoucherCreate func(context.Context, address.Address, big.Int, uint64) (*api.VoucherCreateResult, error) `perm:"sign"` - PaychVoucherList func(context.Context, address.Address) ([]*paych.SignedVoucher, error) `perm:"write"` - PaychVoucherSubmit func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) `perm:"sign"` - - CreateBackup func(ctx context.Context, fpath string) error `perm:"admin"` - } -} - -func (c *FullNodeStruct) StateMinerSectorCount(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MinerSectors, error) { - return c.Internal.StateMinerSectorCount(ctx, addr, tsk) -} - -type StorageMinerStruct struct { - CommonStruct - - Internal struct { - ActorAddress func(context.Context) (address.Address, error) `perm:"read"` - ActorSectorSize func(context.Context, address.Address) (abi.SectorSize, error) `perm:"read"` - - MiningBase func(context.Context) (*types.TipSet, error) `perm:"read"` - - MarketImportDealData func(context.Context, cid.Cid, string) error `perm:"write"` - MarketListDeals func(ctx context.Context) ([]api.MarketDeal, error) `perm:"read"` - MarketListRetrievalDeals func(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) `perm:"read"` - MarketGetDealUpdates func(ctx context.Context) (<-chan 
storagemarket.MinerDeal, error) `perm:"read"` - MarketListIncompleteDeals func(ctx context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"` - MarketSetAsk func(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error `perm:"admin"` - MarketGetAsk func(ctx context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"` - MarketSetRetrievalAsk func(ctx context.Context, rask *retrievalmarket.Ask) error `perm:"admin"` - MarketGetRetrievalAsk func(ctx context.Context) (*retrievalmarket.Ask, error) `perm:"read"` - MarketListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"` - MarketDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"` - MarketRestartDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"read"` - MarketCancelDataTransfer func(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error `perm:"read"` - - PledgeSector func(context.Context) error `perm:"write"` - - SectorsStatus func(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) `perm:"read"` - SectorsList func(context.Context) ([]abi.SectorNumber, error) `perm:"read"` - SectorsRefs func(context.Context) (map[string][]api.SealedRef, error) `perm:"read"` - SectorStartSealing func(context.Context, abi.SectorNumber) error `perm:"write"` - SectorSetSealDelay func(context.Context, time.Duration) error `perm:"write"` - SectorGetSealDelay func(context.Context) (time.Duration, error) `perm:"read"` - SectorSetExpectedSealDuration func(context.Context, time.Duration) error `perm:"write"` - SectorGetExpectedSealDuration func(context.Context) (time.Duration, error) `perm:"read"` - SectorsUpdate func(context.Context, abi.SectorNumber, api.SectorState) error 
`perm:"admin"` - SectorRemove func(context.Context, abi.SectorNumber) error `perm:"admin"` - SectorMarkForUpgrade func(ctx context.Context, id abi.SectorNumber) error `perm:"admin"` - - WorkerConnect func(context.Context, string) error `perm:"admin" retry:"true"` // TODO: worker perm - WorkerStats func(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) `perm:"admin"` - WorkerJobs func(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) `perm:"admin"` - - ReturnAddPiece func(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error `perm:"admin" retry:"true"` - ReturnSealPreCommit1 func(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error `perm:"admin" retry:"true"` - ReturnSealPreCommit2 func(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error `perm:"admin" retry:"true"` - ReturnSealCommit1 func(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error `perm:"admin" retry:"true"` - ReturnSealCommit2 func(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error `perm:"admin" retry:"true"` - ReturnFinalizeSector func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` - ReturnReleaseUnsealed func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` - ReturnMoveStorage func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` - ReturnUnsealPiece func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` - ReturnReadPiece func(ctx context.Context, callID storiface.CallID, ok bool, err string) error `perm:"admin" retry:"true"` - ReturnFetch func(ctx context.Context, callID storiface.CallID, err string) error `perm:"admin" retry:"true"` - - SealingSchedDiag func(context.Context) (interface{}, error) `perm:"admin"` - - StorageList 
func(context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"` - StorageLocal func(context.Context) (map[stores.ID]string, error) `perm:"admin"` - StorageStat func(context.Context, stores.ID) (fsutil.FsStat, error) `perm:"admin"` - StorageAttach func(context.Context, stores.StorageInfo, fsutil.FsStat) error `perm:"admin"` - StorageDeclareSector func(context.Context, stores.ID, abi.SectorID, storiface.SectorFileType, bool) error `perm:"admin"` - StorageDropSector func(context.Context, stores.ID, abi.SectorID, storiface.SectorFileType) error `perm:"admin"` - StorageFindSector func(context.Context, abi.SectorID, storiface.SectorFileType, abi.SectorSize, bool) ([]stores.SectorStorageInfo, error) `perm:"admin"` - StorageInfo func(context.Context, stores.ID) (stores.StorageInfo, error) `perm:"admin"` - StorageBestAlloc func(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, sealing storiface.PathType) ([]stores.StorageInfo, error) `perm:"admin"` - StorageReportHealth func(ctx context.Context, id stores.ID, report stores.HealthReport) error `perm:"admin"` - StorageLock func(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error `perm:"admin"` - StorageTryLock func(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) `perm:"admin"` - - DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"write"` - DealsList func(ctx context.Context) ([]api.MarketDeal, error) `perm:"read"` - DealsConsiderOnlineStorageDeals func(context.Context) (bool, error) `perm:"read"` - DealsSetConsiderOnlineStorageDeals func(context.Context, bool) error `perm:"admin"` - DealsConsiderOnlineRetrievalDeals func(context.Context) (bool, error) `perm:"read"` - DealsSetConsiderOnlineRetrievalDeals func(context.Context, bool) error `perm:"admin"` - DealsConsiderOfflineStorageDeals func(context.Context) (bool, error) 
`perm:"read"` - DealsSetConsiderOfflineStorageDeals func(context.Context, bool) error `perm:"admin"` - DealsConsiderOfflineRetrievalDeals func(context.Context) (bool, error) `perm:"read"` - DealsSetConsiderOfflineRetrievalDeals func(context.Context, bool) error `perm:"admin"` - DealsPieceCidBlocklist func(context.Context) ([]cid.Cid, error) `perm:"read"` - DealsSetPieceCidBlocklist func(context.Context, []cid.Cid) error `perm:"admin"` - - StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"` - - PiecesListPieces func(ctx context.Context) ([]cid.Cid, error) `perm:"read"` - PiecesListCidInfos func(ctx context.Context) ([]cid.Cid, error) `perm:"read"` - PiecesGetPieceInfo func(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) `perm:"read"` - PiecesGetCIDInfo func(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) `perm:"read"` - - CreateBackup func(ctx context.Context, fpath string) error `perm:"admin"` - } -} - -type WorkerStruct struct { - Internal struct { - // TODO: lower perms - - Version func(context.Context) (build.Version, error) `perm:"admin"` - - TaskTypes func(context.Context) (map[sealtasks.TaskType]struct{}, error) `perm:"admin"` - Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"` - Info func(context.Context) (storiface.WorkerInfo, error) `perm:"admin"` - - AddPiece func(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) `perm:"admin"` - SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"` - SealPreCommit2 func(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) `perm:"admin"` - SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces 
[]abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) `perm:"admin"` - SealCommit2 func(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) `perm:"admin"` - FinalizeSector func(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) `perm:"admin"` - ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) `perm:"admin"` - MoveStorage func(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"` - UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) `perm:"admin"` - ReadPiece func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"` - Fetch func(context.Context, abi.SectorID, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"` - - Remove func(ctx context.Context, sector abi.SectorID) error `perm:"admin"` - StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"` - - Session func(context.Context) (uuid.UUID, error) `perm:"admin"` - } -} - -type GatewayStruct struct { - Internal struct { - ChainGetBlockMessages func(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) - ChainGetMessage func(ctx context.Context, mc cid.Cid) (*types.Message, error) - ChainGetTipSet func(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) - ChainGetTipSetByHeight func(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) - ChainHasObj func(context.Context, cid.Cid) (bool, error) - ChainHead func(ctx context.Context) (*types.TipSet, error) - ChainNotify func(ctx context.Context) (<-chan []*api.HeadChange, error) - ChainReadObj func(context.Context, cid.Cid) ([]byte, error) - 
GasEstimateMessageGas func(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) - MpoolPush func(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) - MsigGetAvailableBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) - MsigGetVested func(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) - StateAccountKey func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) - StateDealProviderCollateralBounds func(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) - StateGetActor func(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) - StateGetReceipt func(ctx context.Context, c cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) - StateLookupID func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) - StateListMiners func(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) - StateMinerInfo func(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) - StateMinerProvingDeadline func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) - StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) - StateMarketBalance func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) - StateMarketStorageDeal func(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) - StateNetworkVersion func(ctx context.Context, tsk types.TipSetKey) (stnetwork.Version, error) - StateVerifiedClientStatus func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) - StateWaitMsg func(ctx context.Context, msg cid.Cid, confidence 
uint64) (*api.MsgLookup, error) - } -} - -type WalletStruct struct { - Internal struct { - WalletNew func(context.Context, types.KeyType) (address.Address, error) `perm:"write"` - WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"` - WalletList func(context.Context) ([]address.Address, error) `perm:"write"` - WalletSign func(context.Context, address.Address, []byte, api.MsgMeta) (*crypto.Signature, error) `perm:"sign"` - WalletExport func(context.Context, address.Address) (*types.KeyInfo, error) `perm:"admin"` - WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"` - WalletDelete func(context.Context, address.Address) error `perm:"write"` - } -} - -// CommonStruct - -func (c *CommonStruct) AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) { - return c.Internal.AuthVerify(ctx, token) -} - -func (c *CommonStruct) AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) { - return c.Internal.AuthNew(ctx, perms) -} - -func (c *CommonStruct) NetPubsubScores(ctx context.Context) ([]api.PubsubScore, error) { - return c.Internal.NetPubsubScores(ctx) -} - -func (c *CommonStruct) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) { - return c.Internal.NetConnectedness(ctx, pid) -} - -func (c *CommonStruct) NetPeers(ctx context.Context) ([]peer.AddrInfo, error) { - return c.Internal.NetPeers(ctx) -} - -func (c *CommonStruct) NetConnect(ctx context.Context, p peer.AddrInfo) error { - return c.Internal.NetConnect(ctx, p) -} - -func (c *CommonStruct) NetAddrsListen(ctx context.Context) (peer.AddrInfo, error) { - return c.Internal.NetAddrsListen(ctx) -} - -func (c *CommonStruct) NetDisconnect(ctx context.Context, p peer.ID) error { - return c.Internal.NetDisconnect(ctx, p) -} - -func (c *CommonStruct) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) { - return c.Internal.NetFindPeer(ctx, p) -} - -func (c *CommonStruct) 
NetAutoNatStatus(ctx context.Context) (api.NatInfo, error) { - return c.Internal.NetAutoNatStatus(ctx) -} - -func (c *CommonStruct) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) { - return c.Internal.NetBandwidthStats(ctx) -} - -func (c *CommonStruct) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) { - return c.Internal.NetBandwidthStatsByPeer(ctx) -} - -func (c *CommonStruct) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) { - return c.Internal.NetBandwidthStatsByProtocol(ctx) -} - -func (c *CommonStruct) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) { - return c.Internal.NetAgentVersion(ctx, p) -} - -// ID implements API.ID -func (c *CommonStruct) ID(ctx context.Context) (peer.ID, error) { - return c.Internal.ID(ctx) -} - -// Version implements API.Version -func (c *CommonStruct) Version(ctx context.Context) (api.Version, error) { - return c.Internal.Version(ctx) -} - -func (c *CommonStruct) LogList(ctx context.Context) ([]string, error) { - return c.Internal.LogList(ctx) -} - -func (c *CommonStruct) LogSetLevel(ctx context.Context, group, level string) error { - return c.Internal.LogSetLevel(ctx, group, level) -} - -func (c *CommonStruct) Shutdown(ctx context.Context) error { - return c.Internal.Shutdown(ctx) -} - -func (c *CommonStruct) Session(ctx context.Context) (uuid.UUID, error) { - return c.Internal.Session(ctx) -} - -func (c *CommonStruct) Closing(ctx context.Context) (<-chan struct{}, error) { - return c.Internal.Closing(ctx) -} - -// FullNodeStruct - -func (c *FullNodeStruct) ClientListImports(ctx context.Context) ([]api.Import, error) { - return c.Internal.ClientListImports(ctx) -} - -func (c *FullNodeStruct) ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error { - return c.Internal.ClientRemoveImport(ctx, importID) -} - -func (c *FullNodeStruct) ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) { - 
return c.Internal.ClientImport(ctx, ref) -} - -func (c *FullNodeStruct) ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) { - return c.Internal.ClientHasLocal(ctx, root) -} - -func (c *FullNodeStruct) ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) { - return c.Internal.ClientFindData(ctx, root, piece) -} - -func (c *FullNodeStruct) ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) { - return c.Internal.ClientMinerQueryOffer(ctx, miner, root, piece) -} - -func (c *FullNodeStruct) ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) { - return c.Internal.ClientStartDeal(ctx, params) -} - -func (c *FullNodeStruct) ClientGetDealInfo(ctx context.Context, deal cid.Cid) (*api.DealInfo, error) { - return c.Internal.ClientGetDealInfo(ctx, deal) -} - -func (c *FullNodeStruct) ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) { - return c.Internal.ClientGetDealStatus(ctx, statusCode) -} - -func (c *FullNodeStruct) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) { - return c.Internal.ClientListDeals(ctx) -} - -func (c *FullNodeStruct) ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) { - return c.Internal.ClientGetDealUpdates(ctx) -} - -func (c *FullNodeStruct) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error { - return c.Internal.ClientRetrieve(ctx, order, ref) -} - -func (c *FullNodeStruct) ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { - return c.Internal.ClientRetrieveWithEvents(ctx, order, ref) -} - -func (c *FullNodeStruct) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) { - return c.Internal.ClientQueryAsk(ctx, p, miner) -} - -func (c *FullNodeStruct) ClientDealPieceCID(ctx 
context.Context, root cid.Cid) (api.DataCIDSize, error) { - return c.Internal.ClientDealPieceCID(ctx, root) -} - -func (c *FullNodeStruct) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) { - return c.Internal.ClientCalcCommP(ctx, inpath) -} - -func (c *FullNodeStruct) ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error { - return c.Internal.ClientGenCar(ctx, ref, outpath) -} - -func (c *FullNodeStruct) ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, error) { - return c.Internal.ClientDealSize(ctx, root) -} - -func (c *FullNodeStruct) ClientListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) { - return c.Internal.ClientListDataTransfers(ctx) -} - -func (c *FullNodeStruct) ClientDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) { - return c.Internal.ClientDataTransferUpdates(ctx) -} - -func (c *FullNodeStruct) ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { - return c.Internal.ClientRestartDataTransfer(ctx, transferID, otherPeer, isInitiator) -} - -func (c *FullNodeStruct) ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { - return c.Internal.ClientCancelDataTransfer(ctx, transferID, otherPeer, isInitiator) -} - -func (c *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error { - return c.Internal.ClientRetrieveTryRestartInsufficientFunds(ctx, paymentChannel) -} - -func (c *FullNodeStruct) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) { - return c.Internal.GasEstimateGasPremium(ctx, nblocksincl, sender, gaslimit, tsk) -} - -func (c *FullNodeStruct) GasEstimateFeeCap(ctx context.Context, msg *types.Message, maxqueueblks int64, tsk 
types.TipSetKey) (types.BigInt, error) { - return c.Internal.GasEstimateFeeCap(ctx, msg, maxqueueblks, tsk) -} - -func (c *FullNodeStruct) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) { - return c.Internal.GasEstimateMessageGas(ctx, msg, spec, tsk) -} - -func (c *FullNodeStruct) GasEstimateGasLimit(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (int64, error) { - return c.Internal.GasEstimateGasLimit(ctx, msg, tsk) -} - -func (c *FullNodeStruct) MpoolGetConfig(ctx context.Context) (*types.MpoolConfig, error) { - return c.Internal.MpoolGetConfig(ctx) -} - -func (c *FullNodeStruct) MpoolSetConfig(ctx context.Context, cfg *types.MpoolConfig) error { - return c.Internal.MpoolSetConfig(ctx, cfg) -} - -func (c *FullNodeStruct) MpoolSelect(ctx context.Context, tsk types.TipSetKey, tq float64) ([]*types.SignedMessage, error) { - return c.Internal.MpoolSelect(ctx, tsk, tq) -} - -func (c *FullNodeStruct) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) { - return c.Internal.MpoolPending(ctx, tsk) -} - -func (c *FullNodeStruct) MpoolClear(ctx context.Context, local bool) error { - return c.Internal.MpoolClear(ctx, local) -} - -func (c *FullNodeStruct) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) { - return c.Internal.MpoolPush(ctx, smsg) -} - -func (c *FullNodeStruct) MpoolPushUntrusted(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) { - return c.Internal.MpoolPushUntrusted(ctx, smsg) -} - -func (c *FullNodeStruct) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) { - return c.Internal.MpoolPushMessage(ctx, msg, spec) -} - -func (c *FullNodeStruct) MpoolBatchPush(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) { - return c.Internal.MpoolBatchPush(ctx, smsgs) -} - -func (c *FullNodeStruct) 
MpoolBatchPushUntrusted(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) { - return c.Internal.MpoolBatchPushUntrusted(ctx, smsgs) -} - -func (c *FullNodeStruct) MpoolBatchPushMessage(ctx context.Context, msgs []*types.Message, spec *api.MessageSendSpec) ([]*types.SignedMessage, error) { - return c.Internal.MpoolBatchPushMessage(ctx, msgs, spec) -} - -func (c *FullNodeStruct) MpoolSub(ctx context.Context) (<-chan api.MpoolUpdate, error) { - return c.Internal.MpoolSub(ctx) -} - -func (c *FullNodeStruct) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) { - return c.Internal.MinerGetBaseInfo(ctx, maddr, epoch, tsk) -} - -func (c *FullNodeStruct) MinerCreateBlock(ctx context.Context, bt *api.BlockTemplate) (*types.BlockMsg, error) { - return c.Internal.MinerCreateBlock(ctx, bt) -} - -func (c *FullNodeStruct) ChainHead(ctx context.Context) (*types.TipSet, error) { - return c.Internal.ChainHead(ctx) -} - -func (c *FullNodeStruct) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { - return c.Internal.ChainGetRandomnessFromTickets(ctx, tsk, personalization, randEpoch, entropy) -} - -func (c *FullNodeStruct) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { - return c.Internal.ChainGetRandomnessFromBeacon(ctx, tsk, personalization, randEpoch, entropy) -} - -func (c *FullNodeStruct) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { - return c.Internal.ChainGetTipSetByHeight(ctx, h, tsk) -} - -func (c *FullNodeStruct) WalletNew(ctx context.Context, typ types.KeyType) (address.Address, error) { - return c.Internal.WalletNew(ctx, typ) -} - -func (c 
*FullNodeStruct) WalletHas(ctx context.Context, addr address.Address) (bool, error) { - return c.Internal.WalletHas(ctx, addr) -} - -func (c *FullNodeStruct) WalletList(ctx context.Context) ([]address.Address, error) { - return c.Internal.WalletList(ctx) -} - -func (c *FullNodeStruct) WalletBalance(ctx context.Context, a address.Address) (types.BigInt, error) { - return c.Internal.WalletBalance(ctx, a) -} - -func (c *FullNodeStruct) WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error) { - return c.Internal.WalletSign(ctx, k, msg) -} - -func (c *FullNodeStruct) WalletSignMessage(ctx context.Context, k address.Address, msg *types.Message) (*types.SignedMessage, error) { - return c.Internal.WalletSignMessage(ctx, k, msg) -} - -func (c *FullNodeStruct) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) (bool, error) { - return c.Internal.WalletVerify(ctx, k, msg, sig) -} - -func (c *FullNodeStruct) WalletDefaultAddress(ctx context.Context) (address.Address, error) { - return c.Internal.WalletDefaultAddress(ctx) -} - -func (c *FullNodeStruct) WalletSetDefault(ctx context.Context, a address.Address) error { - return c.Internal.WalletSetDefault(ctx, a) -} - -func (c *FullNodeStruct) WalletExport(ctx context.Context, a address.Address) (*types.KeyInfo, error) { - return c.Internal.WalletExport(ctx, a) -} - -func (c *FullNodeStruct) WalletImport(ctx context.Context, ki *types.KeyInfo) (address.Address, error) { - return c.Internal.WalletImport(ctx, ki) -} - -func (c *FullNodeStruct) WalletDelete(ctx context.Context, addr address.Address) error { - return c.Internal.WalletDelete(ctx, addr) -} - -func (c *FullNodeStruct) WalletValidateAddress(ctx context.Context, str string) (address.Address, error) { - return c.Internal.WalletValidateAddress(ctx, str) -} - -func (c *FullNodeStruct) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) { - return c.Internal.MpoolGetNonce(ctx, addr) -} 
- -func (c *FullNodeStruct) ChainGetBlock(ctx context.Context, b cid.Cid) (*types.BlockHeader, error) { - return c.Internal.ChainGetBlock(ctx, b) -} - -func (c *FullNodeStruct) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { - return c.Internal.ChainGetTipSet(ctx, key) -} - -func (c *FullNodeStruct) ChainGetBlockMessages(ctx context.Context, b cid.Cid) (*api.BlockMessages, error) { - return c.Internal.ChainGetBlockMessages(ctx, b) -} - -func (c *FullNodeStruct) ChainGetParentReceipts(ctx context.Context, b cid.Cid) ([]*types.MessageReceipt, error) { - return c.Internal.ChainGetParentReceipts(ctx, b) -} - -func (c *FullNodeStruct) ChainGetParentMessages(ctx context.Context, b cid.Cid) ([]api.Message, error) { - return c.Internal.ChainGetParentMessages(ctx, b) -} - -func (c *FullNodeStruct) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { - return c.Internal.ChainNotify(ctx) -} - -func (c *FullNodeStruct) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, error) { - return c.Internal.ChainReadObj(ctx, obj) -} - -func (c *FullNodeStruct) ChainDeleteObj(ctx context.Context, obj cid.Cid) error { - return c.Internal.ChainDeleteObj(ctx, obj) -} - -func (c *FullNodeStruct) ChainHasObj(ctx context.Context, o cid.Cid) (bool, error) { - return c.Internal.ChainHasObj(ctx, o) -} - -func (c *FullNodeStruct) ChainStatObj(ctx context.Context, obj, base cid.Cid) (api.ObjStat, error) { - return c.Internal.ChainStatObj(ctx, obj, base) -} - -func (c *FullNodeStruct) ChainSetHead(ctx context.Context, tsk types.TipSetKey) error { - return c.Internal.ChainSetHead(ctx, tsk) -} - -func (c *FullNodeStruct) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) { - return c.Internal.ChainGetGenesis(ctx) -} - -func (c *FullNodeStruct) ChainTipSetWeight(ctx context.Context, tsk types.TipSetKey) (types.BigInt, error) { - return c.Internal.ChainTipSetWeight(ctx, tsk) -} - -func (c *FullNodeStruct) ChainGetNode(ctx context.Context, p 
string) (*api.IpldObject, error) { - return c.Internal.ChainGetNode(ctx, p) -} - -func (c *FullNodeStruct) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { - return c.Internal.ChainGetMessage(ctx, mc) -} - -func (c *FullNodeStruct) ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) { - return c.Internal.ChainGetPath(ctx, from, to) -} - -func (c *FullNodeStruct) ChainExport(ctx context.Context, nroots abi.ChainEpoch, iom bool, tsk types.TipSetKey) (<-chan []byte, error) { - return c.Internal.ChainExport(ctx, nroots, iom, tsk) -} - -func (c *FullNodeStruct) BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) { - return c.Internal.BeaconGetEntry(ctx, epoch) -} - -func (c *FullNodeStruct) SyncState(ctx context.Context) (*api.SyncState, error) { - return c.Internal.SyncState(ctx) -} - -func (c *FullNodeStruct) SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error { - return c.Internal.SyncSubmitBlock(ctx, blk) -} - -func (c *FullNodeStruct) SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) { - return c.Internal.SyncIncomingBlocks(ctx) -} - -func (c *FullNodeStruct) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error { - return c.Internal.SyncCheckpoint(ctx, tsk) -} - -func (c *FullNodeStruct) SyncMarkBad(ctx context.Context, bcid cid.Cid) error { - return c.Internal.SyncMarkBad(ctx, bcid) -} - -func (c *FullNodeStruct) SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error { - return c.Internal.SyncUnmarkBad(ctx, bcid) -} - -func (c *FullNodeStruct) SyncUnmarkAllBad(ctx context.Context) error { - return c.Internal.SyncUnmarkAllBad(ctx) -} - -func (c *FullNodeStruct) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) { - return c.Internal.SyncCheckBad(ctx, bcid) -} - -func (c *FullNodeStruct) SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) { - return 
c.Internal.SyncValidateTipset(ctx, tsk) -} - -func (c *FullNodeStruct) StateNetworkName(ctx context.Context) (dtypes.NetworkName, error) { - return c.Internal.StateNetworkName(ctx) -} - -func (c *FullNodeStruct) StateMinerSectors(ctx context.Context, addr address.Address, sectorNos *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { - return c.Internal.StateMinerSectors(ctx, addr, sectorNos, tsk) -} - -func (c *FullNodeStruct) StateMinerActiveSectors(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { - return c.Internal.StateMinerActiveSectors(ctx, addr, tsk) -} - -func (c *FullNodeStruct) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) { - return c.Internal.StateMinerProvingDeadline(ctx, addr, tsk) -} - -func (c *FullNodeStruct) StateMinerPower(ctx context.Context, a address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { - return c.Internal.StateMinerPower(ctx, a, tsk) -} - -func (c *FullNodeStruct) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { - return c.Internal.StateMinerInfo(ctx, actor, tsk) -} - -func (c *FullNodeStruct) StateMinerDeadlines(ctx context.Context, actor address.Address, tsk types.TipSetKey) ([]api.Deadline, error) { - return c.Internal.StateMinerDeadlines(ctx, actor, tsk) -} - -func (c *FullNodeStruct) StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) { - return c.Internal.StateMinerPartitions(ctx, m, dlIdx, tsk) -} - -func (c *FullNodeStruct) StateMinerFaults(ctx context.Context, actor address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { - return c.Internal.StateMinerFaults(ctx, actor, tsk) -} - -func (c *FullNodeStruct) StateAllMinerFaults(ctx context.Context, cutoff abi.ChainEpoch, endTsk types.TipSetKey) ([]*api.Fault, error) { - return 
c.Internal.StateAllMinerFaults(ctx, cutoff, endTsk) -} - -func (c *FullNodeStruct) StateMinerRecoveries(ctx context.Context, actor address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { - return c.Internal.StateMinerRecoveries(ctx, actor, tsk) -} - -func (c *FullNodeStruct) StateMinerPreCommitDepositForPower(ctx context.Context, maddr address.Address, pci miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) { - return c.Internal.StateMinerPreCommitDepositForPower(ctx, maddr, pci, tsk) -} - -func (c *FullNodeStruct) StateMinerInitialPledgeCollateral(ctx context.Context, maddr address.Address, pci miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) { - return c.Internal.StateMinerInitialPledgeCollateral(ctx, maddr, pci, tsk) -} - -func (c *FullNodeStruct) StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (types.BigInt, error) { - return c.Internal.StateMinerAvailableBalance(ctx, maddr, tsk) -} - -func (c *FullNodeStruct) StateMinerSectorAllocated(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (bool, error) { - return c.Internal.StateMinerSectorAllocated(ctx, maddr, s, tsk) -} - -func (c *FullNodeStruct) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { - return c.Internal.StateSectorPreCommitInfo(ctx, maddr, n, tsk) -} - -func (c *FullNodeStruct) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) { - return c.Internal.StateSectorGetInfo(ctx, maddr, n, tsk) -} - -func (c *FullNodeStruct) StateSectorExpiration(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorExpiration, error) { - return c.Internal.StateSectorExpiration(ctx, maddr, n, tsk) -} - -func (c *FullNodeStruct) StateSectorPartition(ctx 
context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) { - return c.Internal.StateSectorPartition(ctx, maddr, sectorNumber, tok) -} - -func (c *FullNodeStruct) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error) { - return c.Internal.StateCall(ctx, msg, tsk) -} - -func (c *FullNodeStruct) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid.Cid) (*api.InvocResult, error) { - return c.Internal.StateReplay(ctx, tsk, mc) -} - -func (c *FullNodeStruct) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { - return c.Internal.StateGetActor(ctx, actor, tsk) -} - -func (c *FullNodeStruct) StateReadState(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.ActorState, error) { - return c.Internal.StateReadState(ctx, addr, tsk) -} - -func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid, confidence uint64) (*api.MsgLookup, error) { - return c.Internal.StateWaitMsg(ctx, msgc, confidence) -} - -func (c *FullNodeStruct) StateWaitMsgLimited(ctx context.Context, msgc cid.Cid, confidence uint64, limit abi.ChainEpoch) (*api.MsgLookup, error) { - return c.Internal.StateWaitMsgLimited(ctx, msgc, confidence, limit) -} - -func (c *FullNodeStruct) StateSearchMsg(ctx context.Context, msgc cid.Cid) (*api.MsgLookup, error) { - return c.Internal.StateSearchMsg(ctx, msgc) -} - -func (c *FullNodeStruct) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { - return c.Internal.StateListMiners(ctx, tsk) -} - -func (c *FullNodeStruct) StateListActors(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { - return c.Internal.StateListActors(ctx, tsk) -} - -func (c *FullNodeStruct) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { - return c.Internal.StateMarketBalance(ctx, addr, tsk) 
-} - -func (c *FullNodeStruct) StateMarketParticipants(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketBalance, error) { - return c.Internal.StateMarketParticipants(ctx, tsk) -} - -func (c *FullNodeStruct) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketDeal, error) { - return c.Internal.StateMarketDeals(ctx, tsk) -} - -func (c *FullNodeStruct) StateMarketStorageDeal(ctx context.Context, dealid abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { - return c.Internal.StateMarketStorageDeal(ctx, dealid, tsk) -} - -func (c *FullNodeStruct) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - return c.Internal.StateLookupID(ctx, addr, tsk) -} - -func (c *FullNodeStruct) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - return c.Internal.StateAccountKey(ctx, addr, tsk) -} - -func (c *FullNodeStruct) StateChangedActors(ctx context.Context, olnstate cid.Cid, newstate cid.Cid) (map[string]types.Actor, error) { - return c.Internal.StateChangedActors(ctx, olnstate, newstate) -} - -func (c *FullNodeStruct) StateGetReceipt(ctx context.Context, msg cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) { - return c.Internal.StateGetReceipt(ctx, msg, tsk) -} - -func (c *FullNodeStruct) StateListMessages(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) { - return c.Internal.StateListMessages(ctx, match, tsk, toht) -} - -func (c *FullNodeStruct) StateCompute(ctx context.Context, height abi.ChainEpoch, msgs []*types.Message, tsk types.TipSetKey) (*api.ComputeStateOutput, error) { - return c.Internal.StateCompute(ctx, height, msgs, tsk) -} - -func (c *FullNodeStruct) StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { - return c.Internal.StateVerifierStatus(ctx, addr, tsk) -} - -func (c 
*FullNodeStruct) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { - return c.Internal.StateVerifiedClientStatus(ctx, addr, tsk) -} - -func (c *FullNodeStruct) StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) { - return c.Internal.StateVerifiedRegistryRootKey(ctx, tsk) -} - -func (c *FullNodeStruct) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { - return c.Internal.StateDealProviderCollateralBounds(ctx, size, verified, tsk) -} - -func (c *FullNodeStruct) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) { - return c.Internal.StateCirculatingSupply(ctx, tsk) -} - -func (c *FullNodeStruct) StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) { - return c.Internal.StateVMCirculatingSupplyInternal(ctx, tsk) -} - -func (c *FullNodeStruct) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (stnetwork.Version, error) { - return c.Internal.StateNetworkVersion(ctx, tsk) -} - -func (c *FullNodeStruct) MsigGetAvailableBalance(ctx context.Context, a address.Address, tsk types.TipSetKey) (types.BigInt, error) { - return c.Internal.MsigGetAvailableBalance(ctx, a, tsk) -} - -func (c *FullNodeStruct) MsigGetVestingSchedule(ctx context.Context, a address.Address, tsk types.TipSetKey) (api.MsigVesting, error) { - return c.Internal.MsigGetVestingSchedule(ctx, a, tsk) -} - -func (c *FullNodeStruct) MsigGetVested(ctx context.Context, a address.Address, sTsk types.TipSetKey, eTsk types.TipSetKey) (types.BigInt, error) { - return c.Internal.MsigGetVested(ctx, a, sTsk, eTsk) -} - -func (c *FullNodeStruct) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) 
{ - return c.Internal.MsigCreate(ctx, req, addrs, duration, val, src, gp) -} - -func (c *FullNodeStruct) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { - return c.Internal.MsigPropose(ctx, msig, to, amt, src, method, params) -} - -func (c *FullNodeStruct) MsigApprove(ctx context.Context, msig address.Address, txID uint64, signer address.Address) (cid.Cid, error) { - return c.Internal.MsigApprove(ctx, msig, txID, signer) -} - -func (c *FullNodeStruct) MsigApproveTxnHash(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { - return c.Internal.MsigApproveTxnHash(ctx, msig, txID, proposer, to, amt, src, method, params) -} - -func (c *FullNodeStruct) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { - return c.Internal.MsigCancel(ctx, msig, txID, to, amt, src, method, params) -} - -func (c *FullNodeStruct) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) { - return c.Internal.MsigAddPropose(ctx, msig, src, newAdd, inc) -} - -func (c *FullNodeStruct) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) { - return c.Internal.MsigAddApprove(ctx, msig, src, txID, proposer, newAdd, inc) -} - -func (c *FullNodeStruct) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) { - return c.Internal.MsigAddCancel(ctx, msig, src, txID, newAdd, inc) -} - -func (c *FullNodeStruct) MsigSwapPropose(ctx context.Context, msig 
address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { - return c.Internal.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd) -} - -func (c *FullNodeStruct) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { - return c.Internal.MsigSwapApprove(ctx, msig, src, txID, proposer, oldAdd, newAdd) -} - -func (c *FullNodeStruct) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { - return c.Internal.MsigSwapCancel(ctx, msig, src, txID, oldAdd, newAdd) -} - -func (c *FullNodeStruct) MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) { - return c.Internal.MsigRemoveSigner(ctx, msig, proposer, toRemove, decrease) -} - -func (c *FullNodeStruct) MarketEnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) { - return c.Internal.MarketEnsureAvailable(ctx, addr, wallet, amt) -} - -func (c *FullNodeStruct) PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) { - return c.Internal.PaychGet(ctx, from, to, amt) -} - -func (c *FullNodeStruct) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid) (address.Address, error) { - return c.Internal.PaychGetWaitReady(ctx, sentinel) -} - -func (c *FullNodeStruct) PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) { - return c.Internal.PaychAvailableFunds(ctx, ch) -} - -func (c *FullNodeStruct) PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) { - return c.Internal.PaychAvailableFundsByFromTo(ctx, from, to) -} - -func (c *FullNodeStruct) PaychList(ctx context.Context) 
([]address.Address, error) { - return c.Internal.PaychList(ctx) -} - -func (c *FullNodeStruct) PaychStatus(ctx context.Context, pch address.Address) (*api.PaychStatus, error) { - return c.Internal.PaychStatus(ctx, pch) -} - -func (c *FullNodeStruct) PaychVoucherCheckValid(ctx context.Context, addr address.Address, sv *paych.SignedVoucher) error { - return c.Internal.PaychVoucherCheckValid(ctx, addr, sv) -} - -func (c *FullNodeStruct) PaychVoucherCheckSpendable(ctx context.Context, addr address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (bool, error) { - return c.Internal.PaychVoucherCheckSpendable(ctx, addr, sv, secret, proof) -} - -func (c *FullNodeStruct) PaychVoucherAdd(ctx context.Context, addr address.Address, sv *paych.SignedVoucher, proof []byte, minDelta types.BigInt) (types.BigInt, error) { - return c.Internal.PaychVoucherAdd(ctx, addr, sv, proof, minDelta) -} - -func (c *FullNodeStruct) PaychVoucherCreate(ctx context.Context, pch address.Address, amt types.BigInt, lane uint64) (*api.VoucherCreateResult, error) { - return c.Internal.PaychVoucherCreate(ctx, pch, amt, lane) -} - -func (c *FullNodeStruct) PaychVoucherList(ctx context.Context, pch address.Address) ([]*paych.SignedVoucher, error) { - return c.Internal.PaychVoucherList(ctx, pch) -} - -func (c *FullNodeStruct) PaychSettle(ctx context.Context, a address.Address) (cid.Cid, error) { - return c.Internal.PaychSettle(ctx, a) -} - -func (c *FullNodeStruct) PaychCollect(ctx context.Context, a address.Address) (cid.Cid, error) { - return c.Internal.PaychCollect(ctx, a) -} - -func (c *FullNodeStruct) PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) { - return c.Internal.PaychAllocateLane(ctx, ch) -} - -func (c *FullNodeStruct) PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) { - return c.Internal.PaychNewPayment(ctx, from, to, vouchers) -} - -func (c *FullNodeStruct) PaychVoucherSubmit(ctx 
context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) { - return c.Internal.PaychVoucherSubmit(ctx, ch, sv, secret, proof) -} - -func (c *FullNodeStruct) CreateBackup(ctx context.Context, fpath string) error { - return c.Internal.CreateBackup(ctx, fpath) -} - -// StorageMinerStruct - -func (c *StorageMinerStruct) ActorAddress(ctx context.Context) (address.Address, error) { - return c.Internal.ActorAddress(ctx) -} - -func (c *StorageMinerStruct) MiningBase(ctx context.Context) (*types.TipSet, error) { - return c.Internal.MiningBase(ctx) -} - -func (c *StorageMinerStruct) ActorSectorSize(ctx context.Context, addr address.Address) (abi.SectorSize, error) { - return c.Internal.ActorSectorSize(ctx, addr) -} - -func (c *StorageMinerStruct) PledgeSector(ctx context.Context) error { - return c.Internal.PledgeSector(ctx) -} - -// Get the status of a given sector by ID -func (c *StorageMinerStruct) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { - return c.Internal.SectorsStatus(ctx, sid, showOnChainInfo) -} - -// List all staged sectors -func (c *StorageMinerStruct) SectorsList(ctx context.Context) ([]abi.SectorNumber, error) { - return c.Internal.SectorsList(ctx) -} - -func (c *StorageMinerStruct) SectorsRefs(ctx context.Context) (map[string][]api.SealedRef, error) { - return c.Internal.SectorsRefs(ctx) -} - -func (c *StorageMinerStruct) SectorStartSealing(ctx context.Context, number abi.SectorNumber) error { - return c.Internal.SectorStartSealing(ctx, number) -} - -func (c *StorageMinerStruct) SectorSetSealDelay(ctx context.Context, delay time.Duration) error { - return c.Internal.SectorSetSealDelay(ctx, delay) -} - -func (c *StorageMinerStruct) SectorGetSealDelay(ctx context.Context) (time.Duration, error) { - return c.Internal.SectorGetSealDelay(ctx) -} - -func (c *StorageMinerStruct) SectorSetExpectedSealDuration(ctx context.Context, delay time.Duration) error 
{ - return c.Internal.SectorSetExpectedSealDuration(ctx, delay) -} - -func (c *StorageMinerStruct) SectorGetExpectedSealDuration(ctx context.Context) (time.Duration, error) { - return c.Internal.SectorGetExpectedSealDuration(ctx) -} - -func (c *StorageMinerStruct) SectorsUpdate(ctx context.Context, id abi.SectorNumber, state api.SectorState) error { - return c.Internal.SectorsUpdate(ctx, id, state) -} - -func (c *StorageMinerStruct) SectorRemove(ctx context.Context, number abi.SectorNumber) error { - return c.Internal.SectorRemove(ctx, number) -} - -func (c *StorageMinerStruct) SectorMarkForUpgrade(ctx context.Context, number abi.SectorNumber) error { - return c.Internal.SectorMarkForUpgrade(ctx, number) -} - -func (c *StorageMinerStruct) WorkerConnect(ctx context.Context, url string) error { - return c.Internal.WorkerConnect(ctx, url) -} - -func (c *StorageMinerStruct) WorkerStats(ctx context.Context) (map[uuid.UUID]storiface.WorkerStats, error) { - return c.Internal.WorkerStats(ctx) -} - -func (c *StorageMinerStruct) WorkerJobs(ctx context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) { - return c.Internal.WorkerJobs(ctx) -} - -func (c *StorageMinerStruct) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error { - return c.Internal.ReturnAddPiece(ctx, callID, pi, err) -} - -func (c *StorageMinerStruct) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error { - return c.Internal.ReturnSealPreCommit1(ctx, callID, p1o, err) -} - -func (c *StorageMinerStruct) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error { - return c.Internal.ReturnSealPreCommit2(ctx, callID, sealed, err) -} - -func (c *StorageMinerStruct) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error { - return c.Internal.ReturnSealCommit1(ctx, callID, out, err) -} - -func (c 
*StorageMinerStruct) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error { - return c.Internal.ReturnSealCommit2(ctx, callID, proof, err) -} - -func (c *StorageMinerStruct) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error { - return c.Internal.ReturnFinalizeSector(ctx, callID, err) -} - -func (c *StorageMinerStruct) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error { - return c.Internal.ReturnReleaseUnsealed(ctx, callID, err) -} - -func (c *StorageMinerStruct) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error { - return c.Internal.ReturnMoveStorage(ctx, callID, err) -} - -func (c *StorageMinerStruct) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error { - return c.Internal.ReturnUnsealPiece(ctx, callID, err) -} - -func (c *StorageMinerStruct) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error { - return c.Internal.ReturnReadPiece(ctx, callID, ok, err) -} - -func (c *StorageMinerStruct) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error { - return c.Internal.ReturnFetch(ctx, callID, err) -} - -func (c *StorageMinerStruct) SealingSchedDiag(ctx context.Context) (interface{}, error) { - return c.Internal.SealingSchedDiag(ctx) -} - -func (c *StorageMinerStruct) StorageAttach(ctx context.Context, si stores.StorageInfo, st fsutil.FsStat) error { - return c.Internal.StorageAttach(ctx, si, st) -} - -func (c *StorageMinerStruct) StorageDeclareSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error { - return c.Internal.StorageDeclareSector(ctx, storageId, s, ft, primary) -} - -func (c *StorageMinerStruct) StorageDropSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft storiface.SectorFileType) error { - return c.Internal.StorageDropSector(ctx, storageId, s, 
ft) -} - -func (c *StorageMinerStruct) StorageFindSector(ctx context.Context, si abi.SectorID, types storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) { - return c.Internal.StorageFindSector(ctx, si, types, ssize, allowFetch) -} - -func (c *StorageMinerStruct) StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) { - return c.Internal.StorageList(ctx) -} - -func (c *StorageMinerStruct) StorageLocal(ctx context.Context) (map[stores.ID]string, error) { - return c.Internal.StorageLocal(ctx) -} - -func (c *StorageMinerStruct) StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) { - return c.Internal.StorageStat(ctx, id) -} - -func (c *StorageMinerStruct) StorageInfo(ctx context.Context, id stores.ID) (stores.StorageInfo, error) { - return c.Internal.StorageInfo(ctx, id) -} - -func (c *StorageMinerStruct) StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pt storiface.PathType) ([]stores.StorageInfo, error) { - return c.Internal.StorageBestAlloc(ctx, allocate, ssize, pt) -} - -func (c *StorageMinerStruct) StorageReportHealth(ctx context.Context, id stores.ID, report stores.HealthReport) error { - return c.Internal.StorageReportHealth(ctx, id, report) -} - -func (c *StorageMinerStruct) StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error { - return c.Internal.StorageLock(ctx, sector, read, write) -} - -func (c *StorageMinerStruct) StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) { - return c.Internal.StorageTryLock(ctx, sector, read, write) -} - -func (c *StorageMinerStruct) MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error { - return c.Internal.MarketImportDealData(ctx, propcid, path) -} - -func (c *StorageMinerStruct) MarketListDeals(ctx context.Context) 
([]api.MarketDeal, error) { - return c.Internal.MarketListDeals(ctx) -} - -func (c *StorageMinerStruct) MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) { - return c.Internal.MarketListRetrievalDeals(ctx) -} - -func (c *StorageMinerStruct) MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) { - return c.Internal.MarketGetDealUpdates(ctx) -} - -func (c *StorageMinerStruct) MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) { - return c.Internal.MarketListIncompleteDeals(ctx) -} - -func (c *StorageMinerStruct) MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error { - return c.Internal.MarketSetAsk(ctx, price, verifiedPrice, duration, minPieceSize, maxPieceSize) -} - -func (c *StorageMinerStruct) MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) { - return c.Internal.MarketGetAsk(ctx) -} - -func (c *StorageMinerStruct) MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error { - return c.Internal.MarketSetRetrievalAsk(ctx, rask) -} - -func (c *StorageMinerStruct) MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) { - return c.Internal.MarketGetRetrievalAsk(ctx) -} - -func (c *StorageMinerStruct) MarketListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) { - return c.Internal.MarketListDataTransfers(ctx) -} - -func (c *StorageMinerStruct) MarketDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) { - return c.Internal.MarketDataTransferUpdates(ctx) -} - -func (c *StorageMinerStruct) MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { - return c.Internal.MarketRestartDataTransfer(ctx, transferID, otherPeer, isInitiator) -} - -func (c *StorageMinerStruct) 
MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error { - return c.Internal.MarketCancelDataTransfer(ctx, transferID, otherPeer, isInitiator) -} - -func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error { - return c.Internal.DealsImportData(ctx, dealPropCid, file) -} - -func (c *StorageMinerStruct) DealsList(ctx context.Context) ([]api.MarketDeal, error) { - return c.Internal.DealsList(ctx) -} - -func (c *StorageMinerStruct) DealsConsiderOnlineStorageDeals(ctx context.Context) (bool, error) { - return c.Internal.DealsConsiderOnlineStorageDeals(ctx) -} - -func (c *StorageMinerStruct) DealsSetConsiderOnlineStorageDeals(ctx context.Context, b bool) error { - return c.Internal.DealsSetConsiderOnlineStorageDeals(ctx, b) -} - -func (c *StorageMinerStruct) DealsConsiderOnlineRetrievalDeals(ctx context.Context) (bool, error) { - return c.Internal.DealsConsiderOnlineRetrievalDeals(ctx) -} - -func (c *StorageMinerStruct) DealsSetConsiderOnlineRetrievalDeals(ctx context.Context, b bool) error { - return c.Internal.DealsSetConsiderOnlineRetrievalDeals(ctx, b) -} - -func (c *StorageMinerStruct) DealsPieceCidBlocklist(ctx context.Context) ([]cid.Cid, error) { - return c.Internal.DealsPieceCidBlocklist(ctx) -} - -func (c *StorageMinerStruct) DealsSetPieceCidBlocklist(ctx context.Context, cids []cid.Cid) error { - return c.Internal.DealsSetPieceCidBlocklist(ctx, cids) -} - -func (c *StorageMinerStruct) DealsConsiderOfflineStorageDeals(ctx context.Context) (bool, error) { - return c.Internal.DealsConsiderOfflineStorageDeals(ctx) -} - -func (c *StorageMinerStruct) DealsSetConsiderOfflineStorageDeals(ctx context.Context, b bool) error { - return c.Internal.DealsSetConsiderOfflineStorageDeals(ctx, b) -} - -func (c *StorageMinerStruct) DealsConsiderOfflineRetrievalDeals(ctx context.Context) (bool, error) { - return c.Internal.DealsConsiderOfflineRetrievalDeals(ctx) 
-} - -func (c *StorageMinerStruct) DealsSetConsiderOfflineRetrievalDeals(ctx context.Context, b bool) error { - return c.Internal.DealsSetConsiderOfflineRetrievalDeals(ctx, b) -} - -func (c *StorageMinerStruct) StorageAddLocal(ctx context.Context, path string) error { - return c.Internal.StorageAddLocal(ctx, path) -} - -func (c *StorageMinerStruct) PiecesListPieces(ctx context.Context) ([]cid.Cid, error) { - return c.Internal.PiecesListPieces(ctx) -} - -func (c *StorageMinerStruct) PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) { - return c.Internal.PiecesListCidInfos(ctx) -} - -func (c *StorageMinerStruct) PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) { - return c.Internal.PiecesGetPieceInfo(ctx, pieceCid) -} - -func (c *StorageMinerStruct) PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) { - return c.Internal.PiecesGetCIDInfo(ctx, payloadCid) -} - -func (c *StorageMinerStruct) CreateBackup(ctx context.Context, fpath string) error { - return c.Internal.CreateBackup(ctx, fpath) -} - -// WorkerStruct - -func (w *WorkerStruct) Version(ctx context.Context) (build.Version, error) { - return w.Internal.Version(ctx) -} - -func (w *WorkerStruct) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) { - return w.Internal.TaskTypes(ctx) -} - -func (w *WorkerStruct) Paths(ctx context.Context) ([]stores.StoragePath, error) { - return w.Internal.Paths(ctx) -} - -func (w *WorkerStruct) Info(ctx context.Context) (storiface.WorkerInfo, error) { - return w.Internal.Info(ctx) -} - -func (w *WorkerStruct) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { - return w.Internal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData) -} - -func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces 
[]abi.PieceInfo) (storiface.CallID, error) { - return w.Internal.SealPreCommit1(ctx, sector, ticket, pieces) -} - -func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) { - return w.Internal.SealPreCommit2(ctx, sector, pc1o) -} - -func (w *WorkerStruct) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { - return w.Internal.SealCommit1(ctx, sector, ticket, seed, pieces, cids) -} - -func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) { - return w.Internal.SealCommit2(ctx, sector, c1o) -} - -func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) { - return w.Internal.FinalizeSector(ctx, sector, keepUnsealed) -} - -func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) { - return w.Internal.ReleaseUnsealed(ctx, sector, safeToFree) -} - -func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) { - return w.Internal.MoveStorage(ctx, sector, types) -} - -func (w *WorkerStruct) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, c cid.Cid) (storiface.CallID, error) { - return w.Internal.UnsealPiece(ctx, sector, offset, size, ticket, c) -} - -func (w *WorkerStruct) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { - return w.Internal.ReadPiece(ctx, sink, sector, offset, size) -} - -func (w *WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, 
fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { - return w.Internal.Fetch(ctx, id, fileType, ptype, am) -} - -func (w *WorkerStruct) Remove(ctx context.Context, sector abi.SectorID) error { - return w.Internal.Remove(ctx, sector) -} - -func (w *WorkerStruct) StorageAddLocal(ctx context.Context, path string) error { - return w.Internal.StorageAddLocal(ctx, path) -} - -func (w *WorkerStruct) Session(ctx context.Context) (uuid.UUID, error) { - return w.Internal.Session(ctx) -} - -func (g GatewayStruct) ChainGetBlockMessages(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) { - return g.Internal.ChainGetBlockMessages(ctx, c) -} - -func (g GatewayStruct) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { - return g.Internal.ChainGetMessage(ctx, mc) -} - -func (g GatewayStruct) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { - return g.Internal.ChainGetTipSet(ctx, tsk) -} - -func (g GatewayStruct) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { - return g.Internal.ChainGetTipSetByHeight(ctx, h, tsk) -} - -func (g GatewayStruct) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { - return g.Internal.ChainHasObj(ctx, c) -} - -func (g GatewayStruct) ChainHead(ctx context.Context) (*types.TipSet, error) { - return g.Internal.ChainHead(ctx) -} - -func (g GatewayStruct) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { - return g.Internal.ChainNotify(ctx) -} - -func (g GatewayStruct) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { - return g.Internal.ChainReadObj(ctx, c) -} - -func (g GatewayStruct) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) { - return g.Internal.GasEstimateMessageGas(ctx, msg, spec, tsk) -} - -func (g GatewayStruct) MpoolPush(ctx 
context.Context, sm *types.SignedMessage) (cid.Cid, error) { - return g.Internal.MpoolPush(ctx, sm) -} - -func (g GatewayStruct) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) { - return g.Internal.MsigGetAvailableBalance(ctx, addr, tsk) -} - -func (g GatewayStruct) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) { - return g.Internal.MsigGetVested(ctx, addr, start, end) -} - -func (g GatewayStruct) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - return g.Internal.StateAccountKey(ctx, addr, tsk) -} - -func (g GatewayStruct) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { - return g.Internal.StateDealProviderCollateralBounds(ctx, size, verified, tsk) -} - -func (g GatewayStruct) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) { - return g.Internal.StateGetActor(ctx, actor, ts) -} - -func (g GatewayStruct) StateGetReceipt(ctx context.Context, c cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) { - return g.Internal.StateGetReceipt(ctx, c, tsk) -} - -func (g GatewayStruct) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - return g.Internal.StateLookupID(ctx, addr, tsk) -} - -func (g GatewayStruct) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { - return g.Internal.StateListMiners(ctx, tsk) -} - -func (g GatewayStruct) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { - return g.Internal.StateMarketBalance(ctx, addr, tsk) -} - -func (g GatewayStruct) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, 
error) { - return g.Internal.StateMarketStorageDeal(ctx, dealId, tsk) -} - -func (g GatewayStruct) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { - return g.Internal.StateMinerInfo(ctx, actor, tsk) -} - -func (g GatewayStruct) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) { - return g.Internal.StateMinerProvingDeadline(ctx, addr, tsk) -} - -func (g GatewayStruct) StateMinerPower(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { - return g.Internal.StateMinerPower(ctx, addr, tsk) -} - -func (g GatewayStruct) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (stnetwork.Version, error) { - return g.Internal.StateNetworkVersion(ctx, tsk) -} - -func (g GatewayStruct) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { - return g.Internal.StateVerifiedClientStatus(ctx, addr, tsk) -} - -func (g GatewayStruct) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) { - return g.Internal.StateWaitMsg(ctx, msg, confidence) -} - -func (c *WalletStruct) WalletNew(ctx context.Context, typ types.KeyType) (address.Address, error) { - return c.Internal.WalletNew(ctx, typ) -} - -func (c *WalletStruct) WalletHas(ctx context.Context, addr address.Address) (bool, error) { - return c.Internal.WalletHas(ctx, addr) -} - -func (c *WalletStruct) WalletList(ctx context.Context) ([]address.Address, error) { - return c.Internal.WalletList(ctx) -} - -func (c *WalletStruct) WalletSign(ctx context.Context, k address.Address, msg []byte, meta api.MsgMeta) (*crypto.Signature, error) { - return c.Internal.WalletSign(ctx, k, msg, meta) -} - -func (c *WalletStruct) WalletExport(ctx context.Context, a address.Address) (*types.KeyInfo, error) { - return c.Internal.WalletExport(ctx, a) -} - -func (c *WalletStruct) 
WalletImport(ctx context.Context, ki *types.KeyInfo) (address.Address, error) { - return c.Internal.WalletImport(ctx, ki) -} - -func (c *WalletStruct) WalletDelete(ctx context.Context, addr address.Address) error { - return c.Internal.WalletDelete(ctx, addr) -} - -var _ api.Common = &CommonStruct{} -var _ api.FullNode = &FullNodeStruct{} -var _ api.StorageMiner = &StorageMinerStruct{} -var _ api.WorkerAPI = &WorkerStruct{} -var _ api.GatewayAPI = &GatewayStruct{} -var _ api.WalletAPI = &WalletStruct{} diff --git a/api/apistruct/struct_test.go b/api/apistruct/struct_test.go deleted file mode 100644 index 9f5f5836015..00000000000 --- a/api/apistruct/struct_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package apistruct - -import "testing" - -func TestPermTags(t *testing.T) { - _ = PermissionedFullAPI(&FullNodeStruct{}) - _ = PermissionedStorMinerAPI(&StorageMinerStruct{}) - _ = PermissionedWorkerAPI(&WorkerStruct{}) -} diff --git a/api/cbor_gen.go b/api/cbor_gen.go index 7ab575b287d..4434b45ede9 100644 --- a/api/cbor_gen.go +++ b/api/cbor_gen.go @@ -5,14 +5,19 @@ package api import ( "fmt" "io" + "sort" abi "github.com/filecoin-project/go-state-types/abi" + market "github.com/filecoin-project/specs-actors/actors/builtin/market" paych "github.com/filecoin-project/specs-actors/actors/builtin/paych" + cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf +var _ = cid.Undef +var _ = sort.Sort func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { if t == nil { @@ -171,7 +176,8 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error { } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) } } @@ -319,7 +325,8 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) error { } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + 
cbg.ScanForLinks(r, func(cid.Cid) {}) } } @@ -427,7 +434,8 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) error { } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) } } @@ -575,7 +583,8 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) error { } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) } } @@ -723,7 +732,386 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) error { } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{165}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return 
err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.DealProposal (market.DealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(w); err != nil { + return err + } + + // t.DealSchedule (api.DealSchedule) (struct) + if len("DealSchedule") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealSchedule\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealSchedule"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealSchedule")); err != nil { + return err + } + + if err := t.DealSchedule.MarshalCBOR(w); err != nil { + return err + } + + // t.KeepUnsealed (bool) (bool) + if len("KeepUnsealed") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil { + return err + } + return nil +} + +func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) error { + *t = PieceDealInfo{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("PieceDealInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for 
i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.DealProposal (market.DealProposal) (struct) + case "DealProposal": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.DealProposal = new(market.DealProposal) + if err := t.DealProposal.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) + } + } + + } + // t.DealSchedule (api.DealSchedule) (struct) + case "DealSchedule": + + { + + if err := t.DealSchedule.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err) + } + + } + // t.KeepUnsealed (bool) (bool) + case "KeepUnsealed": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.KeepUnsealed = false + case 21: + t.KeepUnsealed = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} 
+func (t *DealSchedule) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.StartEpoch (abi.ChainEpoch) (int64) + if len("StartEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StartEpoch\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartEpoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StartEpoch")); err != nil { + return err + } + + if t.StartEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { + return err + } + } + + // t.EndEpoch (abi.ChainEpoch) (int64) + if len("EndEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"EndEpoch\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("EndEpoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("EndEpoch")); err != nil { + return err + } + + if t.EndEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { + return err + } + } + return nil +} + +func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error { + *t = DealSchedule{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealSchedule: map struct too large (%d)", extra) + } + + var 
name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.StartEpoch (abi.ChainEpoch) (int64) + case "StartEpoch": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.StartEpoch = abi.ChainEpoch(extraI) + } + // t.EndEpoch (abi.ChainEpoch) (int64) + case "EndEpoch": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.EndEpoch = abi.ChainEpoch(extraI) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) } } diff --git a/api/checkstatuscode_string.go b/api/checkstatuscode_string.go new file mode 100644 index 00000000000..072f7798975 --- /dev/null +++ b/api/checkstatuscode_string.go @@ -0,0 +1,35 @@ +// Code generated by "stringer -type=CheckStatusCode -trimprefix=CheckStatus"; DO NOT EDIT. + +package api + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[CheckStatusMessageSerialize-1] + _ = x[CheckStatusMessageSize-2] + _ = x[CheckStatusMessageValidity-3] + _ = x[CheckStatusMessageMinGas-4] + _ = x[CheckStatusMessageMinBaseFee-5] + _ = x[CheckStatusMessageBaseFee-6] + _ = x[CheckStatusMessageBaseFeeLowerBound-7] + _ = x[CheckStatusMessageBaseFeeUpperBound-8] + _ = x[CheckStatusMessageGetStateNonce-9] + _ = x[CheckStatusMessageNonce-10] + _ = x[CheckStatusMessageGetStateBalance-11] + _ = x[CheckStatusMessageBalance-12] +} + +const _CheckStatusCode_name = "MessageSerializeMessageSizeMessageValidityMessageMinGasMessageMinBaseFeeMessageBaseFeeMessageBaseFeeLowerBoundMessageBaseFeeUpperBoundMessageGetStateNonceMessageNonceMessageGetStateBalanceMessageBalance" + +var _CheckStatusCode_index = [...]uint8{0, 16, 27, 42, 55, 72, 86, 110, 134, 154, 166, 188, 202} + +func (i CheckStatusCode) String() string { + i -= 1 + if i < 0 || i >= CheckStatusCode(len(_CheckStatusCode_index)-1) { + return "CheckStatusCode(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _CheckStatusCode_name[_CheckStatusCode_index[i]:_CheckStatusCode_index[i+1]] +} diff --git a/api/client/client.go b/api/client/client.go index 7d8a466d333..669c58f278b 100644 --- a/api/client/client.go +++ b/api/client/client.go @@ -10,72 +10,84 @@ import ( "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apistruct" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/lib/rpcenc" ) -// NewCommonRPC creates a new http jsonrpc client. -func NewCommonRPC(ctx context.Context, addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) { - var res apistruct.CommonStruct +// NewCommonRPCV0 creates a new http jsonrpc client. 
+func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) { + var res v0api.CommonNetStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - []interface{}{ - &res.Internal, - }, - requestHeader, - ) + api.GetInternalStructs(&res), requestHeader) return &res, closer, err } -// NewFullNodeRPC creates a new http jsonrpc client. -func NewFullNodeRPC(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) { - var res apistruct.FullNodeStruct +// NewFullNodeRPCV0 creates a new http jsonrpc client. +func NewFullNodeRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.FullNode, jsonrpc.ClientCloser, error) { + var res v0api.FullNodeStruct + closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - []interface{}{ - &res.CommonStruct.Internal, - &res.Internal, - }, requestHeader) + api.GetInternalStructs(&res), requestHeader) return &res, closer, err } -// NewStorageMinerRPC creates a new http jsonrpc client for miner -func NewStorageMinerRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) { - var res apistruct.StorageMinerStruct +// NewFullNodeRPCV1 creates a new http jsonrpc client. 
+func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) { + var res v1api.FullNodeStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - []interface{}{ - &res.CommonStruct.Internal, - &res.Internal, - }, - requestHeader, - opts..., - ) + api.GetInternalStructs(&res), requestHeader) return &res, closer, err } -func NewWorkerRPC(ctx context.Context, addr string, requestHeader http.Header) (api.WorkerAPI, jsonrpc.ClientCloser, error) { - u, err := url.Parse(addr) +func getPushUrl(addr string) (string, error) { + pushUrl, err := url.Parse(addr) if err != nil { - return nil, nil, err + return "", err } - switch u.Scheme { + switch pushUrl.Scheme { case "ws": - u.Scheme = "http" + pushUrl.Scheme = "http" case "wss": - u.Scheme = "https" + pushUrl.Scheme = "https" } ///rpc/v0 -> /rpc/streams/v0/push - u.Path = path.Join(u.Path, "../streams/v0/push") + pushUrl.Path = path.Join(pushUrl.Path, "../streams/v0/push") + return pushUrl.String(), nil +} - var res apistruct.WorkerStruct +// NewStorageMinerRPCV0 creates a new http jsonrpc client for miner +func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.StorageMiner, jsonrpc.ClientCloser, error) { + pushUrl, err := getPushUrl(addr) + if err != nil { + return nil, nil, err + } + + var res v0api.StorageMinerStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - []interface{}{ - &res.Internal, - }, + api.GetInternalStructs(&res), requestHeader, + append([]jsonrpc.Option{ + rpcenc.ReaderParamEncoder(pushUrl), + }, opts...)...) 
+ + return &res, closer, err +} + +func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.Worker, jsonrpc.ClientCloser, error) { + pushUrl, err := getPushUrl(addr) + if err != nil { + return nil, nil, err + } + + var res api.WorkerStruct + closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", + api.GetInternalStructs(&res), requestHeader, - rpcenc.ReaderParamEncoder(u.String()), + rpcenc.ReaderParamEncoder(pushUrl), jsonrpc.WithNoReconnect(), jsonrpc.WithTimeout(30*time.Second), ) @@ -83,13 +95,23 @@ func NewWorkerRPC(ctx context.Context, addr string, requestHeader http.Header) ( return &res, closer, err } -// NewGatewayRPC creates a new http jsonrpc client for a gateway node. -func NewGatewayRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.GatewayAPI, jsonrpc.ClientCloser, error) { - var res apistruct.GatewayStruct +// NewGatewayRPCV1 creates a new http jsonrpc client for a gateway node. +func NewGatewayRPCV1(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.Gateway, jsonrpc.ClientCloser, error) { + var res api.GatewayStruct + closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", + api.GetInternalStructs(&res), + requestHeader, + opts..., + ) + + return &res, closer, err +} + +// NewGatewayRPCV0 creates a new http jsonrpc client for a gateway node. 
+func NewGatewayRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.Gateway, jsonrpc.ClientCloser, error) { + var res v0api.GatewayStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - []interface{}{ - &res.Internal, - }, + api.GetInternalStructs(&res), requestHeader, opts..., ) @@ -97,12 +119,10 @@ func NewGatewayRPC(ctx context.Context, addr string, requestHeader http.Header, return &res, closer, err } -func NewWalletRPC(ctx context.Context, addr string, requestHeader http.Header) (api.WalletAPI, jsonrpc.ClientCloser, error) { - var res apistruct.WalletStruct +func NewWalletRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Wallet, jsonrpc.ClientCloser, error) { + var res api.WalletStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", - []interface{}{ - &res.Internal, - }, + api.GetInternalStructs(&res), requestHeader, ) diff --git a/api/docgen-openrpc/cmd/docgen_openrpc.go b/api/docgen-openrpc/cmd/docgen_openrpc.go new file mode 100644 index 00000000000..cc5e9f0cda5 --- /dev/null +++ b/api/docgen-openrpc/cmd/docgen_openrpc.go @@ -0,0 +1,74 @@ +package main + +import ( + "compress/gzip" + "encoding/json" + "io" + "log" + "os" + + "github.com/filecoin-project/lotus/api/docgen" + + docgen_openrpc "github.com/filecoin-project/lotus/api/docgen-openrpc" +) + +/* +main defines a small program that writes an OpenRPC document describing +a Lotus API to stdout. + +If the first argument is "miner", the document will describe the StorageMiner API. +If not (no, or any other args), the document will describe the Full API. + +Use: + + go run ./api/openrpc/cmd ["api/api_full.go"|"api/api_storage.go"|"api/api_worker.go"] ["FullNode"|"StorageMiner"|"Worker"] + + With gzip compression: a '-gzip' flag is made available as an optional third argument. Note that position matters. 
+ + go run ./api/openrpc/cmd ["api/api_full.go"|"api/api_storage.go"|"api/api_worker.go"] ["FullNode"|"StorageMiner"|"Worker"] -gzip + +*/ + +func main() { + Comments, GroupDocs := docgen.ParseApiASTInfo(os.Args[1], os.Args[2], os.Args[3], os.Args[4]) + + doc := docgen_openrpc.NewLotusOpenRPCDocument(Comments, GroupDocs) + + i, _, _ := docgen.GetAPIType(os.Args[2], os.Args[3]) + doc.RegisterReceiverName("Filecoin", i) + + out, err := doc.Discover() + if err != nil { + log.Fatalln(err) + } + + var jsonOut []byte + var writer io.WriteCloser + + // Use os.Args to handle a somewhat hacky flag for the gzip option. + // Could use flags package to handle this more cleanly, but that requires changes elsewhere + // the scope of which just isn't warranted by this one use case which will usually be run + // programmatically anyways. + if len(os.Args) > 5 && os.Args[5] == "-gzip" { + jsonOut, err = json.Marshal(out) + if err != nil { + log.Fatalln(err) + } + writer = gzip.NewWriter(os.Stdout) + } else { + jsonOut, err = json.MarshalIndent(out, "", " ") + if err != nil { + log.Fatalln(err) + } + writer = os.Stdout + } + + _, err = writer.Write(jsonOut) + if err != nil { + log.Fatalln(err) + } + err = writer.Close() + if err != nil { + log.Fatalln(err) + } +} diff --git a/api/docgen-openrpc/openrpc.go b/api/docgen-openrpc/openrpc.go new file mode 100644 index 00000000000..271b43ac607 --- /dev/null +++ b/api/docgen-openrpc/openrpc.go @@ -0,0 +1,161 @@ +package docgenopenrpc + +import ( + "encoding/json" + "go/ast" + "net" + "reflect" + + "github.com/alecthomas/jsonschema" + go_openrpc_reflect "github.com/etclabscore/go-openrpc-reflect" + "github.com/filecoin-project/lotus/api/docgen" + "github.com/filecoin-project/lotus/build" + "github.com/ipfs/go-cid" + meta_schema "github.com/open-rpc/meta-schema" +) + +// schemaDictEntry represents a type association passed to the jsonschema reflector. 
+type schemaDictEntry struct { + example interface{} + rawJson string +} + +const integerD = `{ + "title": "number", + "type": "number", + "description": "Number is a number" + }` + +const cidCidD = `{"title": "Content Identifier", "type": "string", "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash."}` + +func OpenRPCSchemaTypeMapper(ty reflect.Type) *jsonschema.Type { + unmarshalJSONToJSONSchemaType := func(input string) *jsonschema.Type { + var js jsonschema.Type + err := json.Unmarshal([]byte(input), &js) + if err != nil { + panic(err) + } + return &js + } + + if ty.Kind() == reflect.Ptr { + ty = ty.Elem() + } + + if ty == reflect.TypeOf((*interface{})(nil)).Elem() { + return &jsonschema.Type{Type: "object", AdditionalProperties: []byte("true")} + } + + // Second, handle other types. + // Use a slice instead of a map because it preserves order, as a logic safeguard/fallback. + dict := []schemaDictEntry{ + {cid.Cid{}, cidCidD}, + } + + for _, d := range dict { + if reflect.TypeOf(d.example) == ty { + tt := unmarshalJSONToJSONSchemaType(d.rawJson) + + return tt + } + } + + // Handle primitive types in case there are generic cases + // specific to our services. + switch ty.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // Return all integer types as the hex representation integer schemea. 
+ ret := unmarshalJSONToJSONSchemaType(integerD) + return ret + case reflect.Uintptr: + return &jsonschema.Type{Type: "number", Title: "uintptr-title"} + case reflect.Struct: + case reflect.Map: + case reflect.Slice, reflect.Array: + case reflect.Float32, reflect.Float64: + case reflect.Bool: + case reflect.String: + case reflect.Ptr, reflect.Interface: + default: + } + + return nil +} + +// NewLotusOpenRPCDocument defines application-specific documentation and configuration for its OpenRPC document. +func NewLotusOpenRPCDocument(Comments, GroupDocs map[string]string) *go_openrpc_reflect.Document { + d := &go_openrpc_reflect.Document{} + + // Register "Meta" document fields. + // These include getters for + // - Servers object + // - Info object + // - ExternalDocs object + // + // These objects represent server-specific data that cannot be + // reflected. + d.WithMeta(&go_openrpc_reflect.MetaT{ + GetServersFn: func() func(listeners []net.Listener) (*meta_schema.Servers, error) { + return func(listeners []net.Listener) (*meta_schema.Servers, error) { + return nil, nil + } + }, + GetInfoFn: func() (info *meta_schema.InfoObject) { + info = &meta_schema.InfoObject{} + title := "Lotus RPC API" + info.Title = (*meta_schema.InfoObjectProperties)(&title) + + version := build.BuildVersion + info.Version = (*meta_schema.InfoObjectVersion)(&version) + return info + }, + GetExternalDocsFn: func() (exdocs *meta_schema.ExternalDocumentationObject) { + return nil // FIXME + }, + }) + + // Use a provided Ethereum default configuration as a base. + appReflector := &go_openrpc_reflect.EthereumReflectorT{} + + // Install overrides for the json schema->type map fn used by the jsonschema reflect package. 
+ appReflector.FnSchemaTypeMap = func() func(ty reflect.Type) *jsonschema.Type { + return OpenRPCSchemaTypeMapper + } + + appReflector.FnIsMethodEligible = func(m reflect.Method) bool { + for i := 0; i < m.Func.Type().NumOut(); i++ { + if m.Func.Type().Out(i).Kind() == reflect.Chan { + return false + } + } + return go_openrpc_reflect.EthereumReflector.IsMethodEligible(m) + } + appReflector.FnGetMethodName = func(moduleName string, r reflect.Value, m reflect.Method, funcDecl *ast.FuncDecl) (string, error) { + if m.Name == "ID" { + return moduleName + "_ID", nil + } + if moduleName == "rpc" && m.Name == "Discover" { + return "rpc.discover", nil + } + + return moduleName + "." + m.Name, nil + } + + appReflector.FnGetMethodSummary = func(r reflect.Value, m reflect.Method, funcDecl *ast.FuncDecl) (string, error) { + if v, ok := Comments[m.Name]; ok { + return v, nil + } + return "", nil // noComment + } + + appReflector.FnSchemaExamples = func(ty reflect.Type) (examples *meta_schema.Examples, err error) { + v := docgen.ExampleValue("unknown", ty, ty) // This isn't ideal, but seems to work well enough. + return &meta_schema.Examples{ + meta_schema.AlwaysTrue(v), + }, nil + } + + // Finally, register the configured reflector to the document. 
+ d.WithReflector(appReflector) + return d +} diff --git a/api/docgen/cmd/docgen.go b/api/docgen/cmd/docgen.go new file mode 100644 index 00000000000..9ae2df2e707 --- /dev/null +++ b/api/docgen/cmd/docgen.go @@ -0,0 +1,121 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "sort" + "strings" + + "github.com/filecoin-project/lotus/api/docgen" +) + +func main() { + comments, groupComments := docgen.ParseApiASTInfo(os.Args[1], os.Args[2], os.Args[3], os.Args[4]) + + groups := make(map[string]*docgen.MethodGroup) + + _, t, permStruct := docgen.GetAPIType(os.Args[2], os.Args[3]) + + for i := 0; i < t.NumMethod(); i++ { + m := t.Method(i) + + groupName := docgen.MethodGroupFromName(m.Name) + + g, ok := groups[groupName] + if !ok { + g = new(docgen.MethodGroup) + g.Header = groupComments[groupName] + g.GroupName = groupName + groups[groupName] = g + } + + var args []interface{} + ft := m.Func.Type() + for j := 2; j < ft.NumIn(); j++ { + inp := ft.In(j) + args = append(args, docgen.ExampleValue(m.Name, inp, nil)) + } + + v, err := json.MarshalIndent(args, "", " ") + if err != nil { + panic(err) + } + + outv := docgen.ExampleValue(m.Name, ft.Out(0), nil) + + ov, err := json.MarshalIndent(outv, "", " ") + if err != nil { + panic(err) + } + + g.Methods = append(g.Methods, &docgen.Method{ + Name: m.Name, + Comment: comments[m.Name], + InputExample: string(v), + ResponseExample: string(ov), + }) + } + + var groupslice []*docgen.MethodGroup + for _, g := range groups { + groupslice = append(groupslice, g) + } + + sort.Slice(groupslice, func(i, j int) bool { + return groupslice[i].GroupName < groupslice[j].GroupName + }) + + fmt.Printf("# Groups\n") + + for _, g := range groupslice { + fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName) + for _, method := range g.Methods { + fmt.Printf(" * [%s](#%s)\n", method.Name, method.Name) + } + } + + for _, g := range groupslice { + g := g + fmt.Printf("## %s\n", g.GroupName) + fmt.Printf("%s\n\n", g.Header) + + 
sort.Slice(g.Methods, func(i, j int) bool { + return g.Methods[i].Name < g.Methods[j].Name + }) + + for _, m := range g.Methods { + fmt.Printf("### %s\n", m.Name) + fmt.Printf("%s\n\n", m.Comment) + + var meth reflect.StructField + var ok bool + for _, ps := range permStruct { + meth, ok = ps.FieldByName(m.Name) + if ok { + break + } + } + if !ok { + panic("no perms for method: " + m.Name) + } + + perms := meth.Tag.Get("perm") + + fmt.Printf("Perms: %s\n\n", perms) + + if strings.Count(m.InputExample, "\n") > 0 { + fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample) + } else { + fmt.Printf("Inputs: `%s`\n\n", m.InputExample) + } + + if strings.Count(m.ResponseExample, "\n") > 0 { + fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample) + } else { + fmt.Printf("Response: `%s`\n\n", m.ResponseExample) + } + } + } +} diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index dc60041211f..39980023f0a 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -1,17 +1,19 @@ -package main +package docgen import ( - "encoding/json" "fmt" "go/ast" "go/parser" "go/token" + "path/filepath" "reflect" - "sort" "strings" "time" "unicode" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/google/uuid" "github.com/ipfs/go-cid" "github.com/ipfs/go-filestore" metrics "github.com/libp2p/go-libp2p-core/metrics" @@ -21,9 +23,8 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/multiformats/go-multiaddr" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" datatransfer "github.com/filecoin-project/go-data-transfer" + filestore2 "github.com/filecoin-project/go-fil-markets/filestore" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-multistore" @@ -33,9 +34,14 @@ import ( "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/lotus/api" - 
"github.com/filecoin-project/lotus/api/apistruct" + apitypes "github.com/filecoin-project/lotus/api/types" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/filecoin-project/lotus/node/modules/dtypes" ) @@ -82,8 +88,10 @@ func init() { addExample(pid) addExample(&pid) + multistoreIDExample := multistore.StoreID(50) + addExample(bitfield.NewFromSet([]uint64{5})) - addExample(abi.RegisteredSealProof_StackedDrg32GiBV1) + addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1) addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1) addExample(abi.ChainEpoch(10101)) addExample(crypto.SigTypeBLS) @@ -106,28 +114,31 @@ func init() { addExample(network.Connected) addExample(dtypes.NetworkName("lotus")) addExample(api.SyncStateStage(1)) - addExample(build.FullAPIVersion) + addExample(api.FullAPIVersion1) addExample(api.PCHInbound) addExample(time.Minute) addExample(datatransfer.TransferID(3)) addExample(datatransfer.Ongoing) - addExample(multistore.StoreID(50)) + addExample(multistoreIDExample) + addExample(&multistoreIDExample) addExample(retrievalmarket.ClientEventDealAccepted) addExample(retrievalmarket.DealStatusNew) addExample(network.ReachabilityPublic) addExample(build.NewestNetworkVersion) + addExample(map[string]int{"name": 42}) + addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()}) addExample(&types.ExecutionTrace{ - Msg: exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message), - MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt), + Msg: ExampleValue("init", reflect.TypeOf(&types.Message{}), nil).(*types.Message), + 
MsgRct: ExampleValue("init", reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt), }) addExample(map[string]types.Actor{ - "t01236": exampleValue(reflect.TypeOf(types.Actor{}), nil).(types.Actor), + "t01236": ExampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor), }) addExample(map[string]api.MarketDeal{ - "t026363": exampleValue(reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal), + "t026363": ExampleValue("init", reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal), }) addExample(map[string]api.MarketBalance{ - "t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance), + "t026363": ExampleValue("init", reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance), }) addExample(map[string]*pubsub.TopicScoreSnapshot{ "/blocks": { @@ -162,9 +173,139 @@ func init() { // because reflect.TypeOf(maddr) returns the concrete type... ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr + // miner specific + addExample(filestore2.Path(".lotusminer/fstmp123")) + si := multistore.StoreID(12) + addExample(&si) + addExample(retrievalmarket.DealID(5)) + addExample(abi.ActorID(1000)) + addExample(map[string][]api.SealedRef{ + "98000": { + api.SealedRef{ + SectorID: 100, + Offset: 10 << 20, + Size: 1 << 20, + }, + }, + }) + addExample(api.SectorState(sealing.Proving)) + addExample(stores.ID("76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8")) + addExample(storiface.FTUnsealed) + addExample(storiface.PathSealing) + addExample(map[stores.ID][]stores.Decl{ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": { + { + SectorID: abi.SectorID{Miner: 1000, Number: 100}, + SectorFileType: storiface.FTSealed, + }, + }, + }) + addExample(map[stores.ID]string{ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path", + }) + addExample(map[uuid.UUID][]storiface.WorkerJob{ + uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): { + { + ID: storiface.CallID{ + Sector: abi.SectorID{Miner: 1000, Number: 100}, + ID: 
uuid.MustParse("76081ba0-61bd-45a5-bc08-af05f1c26e5d"), + }, + Sector: abi.SectorID{Miner: 1000, Number: 100}, + Task: sealtasks.TTPreCommit2, + RunWait: 0, + Start: time.Unix(1605172927, 0).UTC(), + Hostname: "host", + }, + }, + }) + addExample(map[uuid.UUID]storiface.WorkerStats{ + uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): { + Info: storiface.WorkerInfo{ + Hostname: "host", + Resources: storiface.WorkerResources{ + MemPhysical: 256 << 30, + MemSwap: 120 << 30, + MemReserved: 2 << 30, + CPUs: 64, + GPUs: []string{"aGPU 1337"}, + }, + }, + Enabled: true, + MemUsedMin: 0, + MemUsedMax: 0, + GpuUsed: false, + CpuUse: 0, + }, + }) + addExample(storiface.ErrorCode(0)) + addExample(map[abi.SectorNumber]string{ + 123: "can't acquire read lock", + }) + addExample(map[api.SectorState]int{ + api.SectorState(sealing.Proving): 120, + }) + addExample([]abi.SectorNumber{123, 124}) + + // worker specific + addExample(storiface.AcquireMove) + addExample(storiface.UnpaddedByteIndex(abi.PaddedPieceSize(1 << 20).Unpadded())) + addExample(map[sealtasks.TaskType]struct{}{ + sealtasks.TTPreCommit2: {}, + }) + addExample(sealtasks.TTCommit2) + addExample(apitypes.OpenRPCDocument{ + "openrpc": "1.2.6", + "info": map[string]interface{}{ + "title": "Lotus RPC API", + "version": "1.2.1/generated=2020-11-22T08:22:42-06:00", + }, + "methods": []interface{}{}}, + ) + + addExample(api.CheckStatusCode(0)) + addExample(map[string]interface{}{"abc": 123}) } -func exampleValue(t, parent reflect.Type) interface{} { +func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) { + + switch pkg { + case "api": // latest + switch name { + case "FullNode": + i = &api.FullNodeStruct{} + t = reflect.TypeOf(new(struct{ api.FullNode })).Elem() + permStruct = append(permStruct, reflect.TypeOf(api.FullNodeStruct{}.Internal)) + permStruct = append(permStruct, reflect.TypeOf(api.CommonStruct{}.Internal)) + permStruct = append(permStruct, 
reflect.TypeOf(api.NetStruct{}.Internal)) + case "StorageMiner": + i = &api.StorageMinerStruct{} + t = reflect.TypeOf(new(struct{ api.StorageMiner })).Elem() + permStruct = append(permStruct, reflect.TypeOf(api.StorageMinerStruct{}.Internal)) + permStruct = append(permStruct, reflect.TypeOf(api.CommonStruct{}.Internal)) + permStruct = append(permStruct, reflect.TypeOf(api.NetStruct{}.Internal)) + case "Worker": + i = &api.WorkerStruct{} + t = reflect.TypeOf(new(struct{ api.Worker })).Elem() + permStruct = append(permStruct, reflect.TypeOf(api.WorkerStruct{}.Internal)) + default: + panic("unknown type") + } + case "v0api": + switch name { + case "FullNode": + i = v0api.FullNodeStruct{} + t = reflect.TypeOf(new(struct{ v0api.FullNode })).Elem() + permStruct = append(permStruct, reflect.TypeOf(v0api.FullNodeStruct{}.Internal)) + permStruct = append(permStruct, reflect.TypeOf(v0api.CommonStruct{}.Internal)) + permStruct = append(permStruct, reflect.TypeOf(v0api.NetStruct{}.Internal)) + default: + panic("unknown type") + } + } + return +} + +func ExampleValue(method string, t, parent reflect.Type) interface{} { v, ok := ExampleValues[t] if ok { return v @@ -173,25 +314,25 @@ func exampleValue(t, parent reflect.Type) interface{} { switch t.Kind() { case reflect.Slice: out := reflect.New(t).Elem() - reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem(), t))) + reflect.Append(out, reflect.ValueOf(ExampleValue(method, t.Elem(), t))) return out.Interface() case reflect.Chan: - return exampleValue(t.Elem(), nil) + return ExampleValue(method, t.Elem(), nil) case reflect.Struct: - es := exampleStruct(t, parent) + es := exampleStruct(method, t, parent) v := reflect.ValueOf(es).Elem().Interface() ExampleValues[t] = v return v case reflect.Array: out := reflect.New(t).Elem() for i := 0; i < t.Len(); i++ { - out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem(), t))) + out.Index(i).Set(reflect.ValueOf(ExampleValue(method, t.Elem(), t))) } return out.Interface() case 
reflect.Ptr: if t.Elem().Kind() == reflect.Struct { - es := exampleStruct(t.Elem(), t) + es := exampleStruct(method, t.Elem(), t) //ExampleValues[t] = es return es } @@ -199,10 +340,10 @@ func exampleValue(t, parent reflect.Type) interface{} { return struct{}{} } - panic(fmt.Sprintf("No example value for type: %s", t)) + panic(fmt.Sprintf("No example value for type: %s (method '%s')", t, method)) } -func exampleStruct(t, parent reflect.Type) interface{} { +func exampleStruct(method string, t, parent reflect.Type) interface{} { ns := reflect.New(t) for i := 0; i < t.NumField(); i++ { f := t.Field(i) @@ -210,7 +351,7 @@ func exampleStruct(t, parent reflect.Type) interface{} { continue } if strings.Title(f.Name) == f.Name { - ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type, t))) + ns.Elem().Field(i).Set(reflect.ValueOf(ExampleValue(method, f.Type, t))) } } @@ -218,6 +359,7 @@ func exampleStruct(t, parent reflect.Type) interface{} { } type Visitor struct { + Root string Methods map[string]ast.Node } @@ -227,7 +369,7 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor { return v } - if st.Name.Name != "FullNode" { + if st.Name.Name != v.Root { return nil } @@ -241,32 +383,43 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor { return v } -const noComment = "There are not yet any comments for this method." +const NoComment = "There are not yet any comments for this method." 
-func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint +func ParseApiASTInfo(apiFile, iface, pkg, dir string) (comments map[string]string, groupDocs map[string]string) { //nolint:golint fset := token.NewFileSet() - pkgs, err := parser.ParseDir(fset, "./api", nil, parser.AllErrors|parser.ParseComments) + apiDir, err := filepath.Abs(dir) + if err != nil { + fmt.Println("./api filepath absolute error: ", err) + return + } + apiFile, err = filepath.Abs(apiFile) + if err != nil { + fmt.Println("filepath absolute error: ", err, "file:", apiFile) + return + } + pkgs, err := parser.ParseDir(fset, apiDir, nil, parser.AllErrors|parser.ParseComments) if err != nil { fmt.Println("parse error: ", err) + return } - ap := pkgs["api"] + ap := pkgs[pkg] - f := ap.Files["api/api_full.go"] + f := ap.Files[apiFile] cmap := ast.NewCommentMap(fset, f, f.Comments) - v := &Visitor{make(map[string]ast.Node)} - ast.Walk(v, pkgs["api"]) + v := &Visitor{iface, make(map[string]ast.Node)} + ast.Walk(v, ap) - groupDocs := make(map[string]string) - out := make(map[string]string) + comments = make(map[string]string) + groupDocs = make(map[string]string) for mn, node := range v.Methods { - cs := cmap.Filter(node).Comments() - if len(cs) == 0 { - out[mn] = noComment + filteredComments := cmap.Filter(node).Comments() + if len(filteredComments) == 0 { + comments[mn] = NoComment } else { - for _, c := range cs { + for _, c := range filteredComments { if strings.HasPrefix(c.Text(), "MethodGroup:") { parts := strings.Split(c.Text(), "\n") groupName := strings.TrimSpace(parts[0][12:]) @@ -277,15 +430,19 @@ func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint } } - last := cs[len(cs)-1].Text() + l := len(filteredComments) - 1 + if len(filteredComments) > 1 { + l = len(filteredComments) - 2 + } + last := filteredComments[l].Text() if !strings.HasPrefix(last, "MethodGroup:") { - out[mn] = last + comments[mn] = last } else { - out[mn] = noComment + 
comments[mn] = NoComment } } } - return out, groupDocs + return comments, groupDocs } type MethodGroup struct { @@ -301,7 +458,7 @@ type Method struct { ResponseExample string } -func methodGroupFromName(mn string) string { +func MethodGroupFromName(mn string) string { i := strings.IndexFunc(mn[1:], func(r rune) bool { return unicode.IsUpper(r) }) @@ -310,112 +467,3 @@ func methodGroupFromName(mn string) string { } return mn[:i+1] } - -func main() { - - comments, groupComments := parseApiASTInfo() - - groups := make(map[string]*MethodGroup) - - var api struct{ api.FullNode } - t := reflect.TypeOf(api) - for i := 0; i < t.NumMethod(); i++ { - m := t.Method(i) - - groupName := methodGroupFromName(m.Name) - - g, ok := groups[groupName] - if !ok { - g = new(MethodGroup) - g.Header = groupComments[groupName] - g.GroupName = groupName - groups[groupName] = g - } - - var args []interface{} - ft := m.Func.Type() - for j := 2; j < ft.NumIn(); j++ { - inp := ft.In(j) - args = append(args, exampleValue(inp, nil)) - } - - v, err := json.MarshalIndent(args, "", " ") - if err != nil { - panic(err) - } - - outv := exampleValue(ft.Out(0), nil) - - ov, err := json.MarshalIndent(outv, "", " ") - if err != nil { - panic(err) - } - - g.Methods = append(g.Methods, &Method{ - Name: m.Name, - Comment: comments[m.Name], - InputExample: string(v), - ResponseExample: string(ov), - }) - } - - var groupslice []*MethodGroup - for _, g := range groups { - groupslice = append(groupslice, g) - } - - sort.Slice(groupslice, func(i, j int) bool { - return groupslice[i].GroupName < groupslice[j].GroupName - }) - - fmt.Printf("# Groups\n") - - for _, g := range groupslice { - fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName) - for _, method := range g.Methods { - fmt.Printf(" * [%s](#%s)\n", method.Name, method.Name) - } - } - - permStruct := reflect.TypeOf(apistruct.FullNodeStruct{}.Internal) - commonPermStruct := reflect.TypeOf(apistruct.CommonStruct{}.Internal) - - for _, g := range groupslice 
{ - g := g - fmt.Printf("## %s\n", g.GroupName) - fmt.Printf("%s\n\n", g.Header) - - sort.Slice(g.Methods, func(i, j int) bool { - return g.Methods[i].Name < g.Methods[j].Name - }) - - for _, m := range g.Methods { - fmt.Printf("### %s\n", m.Name) - fmt.Printf("%s\n\n", m.Comment) - - meth, ok := permStruct.FieldByName(m.Name) - if !ok { - meth, ok = commonPermStruct.FieldByName(m.Name) - if !ok { - panic("no perms for method: " + m.Name) - } - } - - perms := meth.Tag.Get("perm") - - fmt.Printf("Perms: %s\n\n", perms) - - if strings.Count(m.InputExample, "\n") > 0 { - fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample) - } else { - fmt.Printf("Inputs: `%s`\n\n", m.InputExample) - } - - if strings.Count(m.ResponseExample, "\n") > 0 { - fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample) - } else { - fmt.Printf("Response: `%s`\n\n", m.ResponseExample) - } - } - } -} diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go new file mode 100644 index 00000000000..69f315be948 --- /dev/null +++ b/api/mocks/mock_full.go @@ -0,0 +1,3094 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/lotus/api (interfaces: FullNode) + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + reflect "reflect" + + address "github.com/filecoin-project/go-address" + bitfield "github.com/filecoin-project/go-bitfield" + datatransfer "github.com/filecoin-project/go-data-transfer" + retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" + storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" + auth "github.com/filecoin-project/go-jsonrpc/auth" + multistore "github.com/filecoin-project/go-multistore" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + crypto "github.com/filecoin-project/go-state-types/crypto" + dline "github.com/filecoin-project/go-state-types/dline" + network "github.com/filecoin-project/go-state-types/network" + api "github.com/filecoin-project/lotus/api" + apitypes "github.com/filecoin-project/lotus/api/types" + miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + types "github.com/filecoin-project/lotus/chain/types" + marketevents "github.com/filecoin-project/lotus/markets/loggers" + dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + paych "github.com/filecoin-project/specs-actors/actors/builtin/paych" + gomock "github.com/golang/mock/gomock" + uuid "github.com/google/uuid" + cid "github.com/ipfs/go-cid" + metrics "github.com/libp2p/go-libp2p-core/metrics" + network0 "github.com/libp2p/go-libp2p-core/network" + peer "github.com/libp2p/go-libp2p-core/peer" + protocol "github.com/libp2p/go-libp2p-core/protocol" +) + +// MockFullNode is a mock of FullNode interface. +type MockFullNode struct { + ctrl *gomock.Controller + recorder *MockFullNodeMockRecorder +} + +// MockFullNodeMockRecorder is the mock recorder for MockFullNode. +type MockFullNodeMockRecorder struct { + mock *MockFullNode +} + +// NewMockFullNode creates a new mock instance. 
+func NewMockFullNode(ctrl *gomock.Controller) *MockFullNode { + mock := &MockFullNode{ctrl: ctrl} + mock.recorder = &MockFullNodeMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFullNode) EXPECT() *MockFullNodeMockRecorder { + return m.recorder +} + +// AuthNew mocks base method. +func (m *MockFullNode) AuthNew(arg0 context.Context, arg1 []auth.Permission) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AuthNew", arg0, arg1) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AuthNew indicates an expected call of AuthNew. +func (mr *MockFullNodeMockRecorder) AuthNew(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthNew", reflect.TypeOf((*MockFullNode)(nil).AuthNew), arg0, arg1) +} + +// AuthVerify mocks base method. +func (m *MockFullNode) AuthVerify(arg0 context.Context, arg1 string) ([]auth.Permission, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AuthVerify", arg0, arg1) + ret0, _ := ret[0].([]auth.Permission) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AuthVerify indicates an expected call of AuthVerify. +func (mr *MockFullNodeMockRecorder) AuthVerify(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthVerify", reflect.TypeOf((*MockFullNode)(nil).AuthVerify), arg0, arg1) +} + +// BeaconGetEntry mocks base method. +func (m *MockFullNode) BeaconGetEntry(arg0 context.Context, arg1 abi.ChainEpoch) (*types.BeaconEntry, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeaconGetEntry", arg0, arg1) + ret0, _ := ret[0].(*types.BeaconEntry) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeaconGetEntry indicates an expected call of BeaconGetEntry. 
+func (mr *MockFullNodeMockRecorder) BeaconGetEntry(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeaconGetEntry", reflect.TypeOf((*MockFullNode)(nil).BeaconGetEntry), arg0, arg1) +} + +// ChainDeleteObj mocks base method. +func (m *MockFullNode) ChainDeleteObj(arg0 context.Context, arg1 cid.Cid) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainDeleteObj", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainDeleteObj indicates an expected call of ChainDeleteObj. +func (mr *MockFullNodeMockRecorder) ChainDeleteObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainDeleteObj", reflect.TypeOf((*MockFullNode)(nil).ChainDeleteObj), arg0, arg1) +} + +// ChainExport mocks base method. +func (m *MockFullNode) ChainExport(arg0 context.Context, arg1 abi.ChainEpoch, arg2 bool, arg3 types.TipSetKey) (<-chan []byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainExport", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(<-chan []byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainExport indicates an expected call of ChainExport. +func (mr *MockFullNodeMockRecorder) ChainExport(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExport", reflect.TypeOf((*MockFullNode)(nil).ChainExport), arg0, arg1, arg2, arg3) +} + +// ChainGetBlock mocks base method. +func (m *MockFullNode) ChainGetBlock(arg0 context.Context, arg1 cid.Cid) (*types.BlockHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetBlock", arg0, arg1) + ret0, _ := ret[0].(*types.BlockHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetBlock indicates an expected call of ChainGetBlock. 
+func (mr *MockFullNodeMockRecorder) ChainGetBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlock", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlock), arg0, arg1) +} + +// ChainGetBlockMessages mocks base method. +func (m *MockFullNode) ChainGetBlockMessages(arg0 context.Context, arg1 cid.Cid) (*api.BlockMessages, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetBlockMessages", arg0, arg1) + ret0, _ := ret[0].(*api.BlockMessages) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetBlockMessages indicates an expected call of ChainGetBlockMessages. +func (mr *MockFullNodeMockRecorder) ChainGetBlockMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlockMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlockMessages), arg0, arg1) +} + +// ChainGetGenesis mocks base method. +func (m *MockFullNode) ChainGetGenesis(arg0 context.Context) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetGenesis", arg0) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetGenesis indicates an expected call of ChainGetGenesis. +func (mr *MockFullNodeMockRecorder) ChainGetGenesis(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetGenesis", reflect.TypeOf((*MockFullNode)(nil).ChainGetGenesis), arg0) +} + +// ChainGetMessage mocks base method. +func (m *MockFullNode) ChainGetMessage(arg0 context.Context, arg1 cid.Cid) (*types.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetMessage", arg0, arg1) + ret0, _ := ret[0].(*types.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetMessage indicates an expected call of ChainGetMessage. 
+func (mr *MockFullNodeMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1) +} + +// ChainGetMessagesInTipset mocks base method. +func (m *MockFullNode) ChainGetMessagesInTipset(arg0 context.Context, arg1 types.TipSetKey) ([]api.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetMessagesInTipset", arg0, arg1) + ret0, _ := ret[0].([]api.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetMessagesInTipset indicates an expected call of ChainGetMessagesInTipset. +func (mr *MockFullNodeMockRecorder) ChainGetMessagesInTipset(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessagesInTipset", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessagesInTipset), arg0, arg1) +} + +// ChainGetNode mocks base method. +func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.IpldObject, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetNode", arg0, arg1) + ret0, _ := ret[0].(*api.IpldObject) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetNode indicates an expected call of ChainGetNode. +func (mr *MockFullNodeMockRecorder) ChainGetNode(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetNode", reflect.TypeOf((*MockFullNode)(nil).ChainGetNode), arg0, arg1) +} + +// ChainGetParentMessages mocks base method. +func (m *MockFullNode) ChainGetParentMessages(arg0 context.Context, arg1 cid.Cid) ([]api.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetParentMessages", arg0, arg1) + ret0, _ := ret[0].([]api.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetParentMessages indicates an expected call of ChainGetParentMessages. 
+func (mr *MockFullNodeMockRecorder) ChainGetParentMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentMessages), arg0, arg1) +} + +// ChainGetParentReceipts mocks base method. +func (m *MockFullNode) ChainGetParentReceipts(arg0 context.Context, arg1 cid.Cid) ([]*types.MessageReceipt, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetParentReceipts", arg0, arg1) + ret0, _ := ret[0].([]*types.MessageReceipt) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetParentReceipts indicates an expected call of ChainGetParentReceipts. +func (mr *MockFullNodeMockRecorder) ChainGetParentReceipts(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentReceipts", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentReceipts), arg0, arg1) +} + +// ChainGetPath mocks base method. +func (m *MockFullNode) ChainGetPath(arg0 context.Context, arg1, arg2 types.TipSetKey) ([]*api.HeadChange, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetPath", arg0, arg1, arg2) + ret0, _ := ret[0].([]*api.HeadChange) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetPath indicates an expected call of ChainGetPath. +func (mr *MockFullNodeMockRecorder) ChainGetPath(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetPath", reflect.TypeOf((*MockFullNode)(nil).ChainGetPath), arg0, arg1, arg2) +} + +// ChainGetRandomnessFromBeacon mocks base method. 
+func (m *MockFullNode) ChainGetRandomnessFromBeacon(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetRandomnessFromBeacon", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(abi.Randomness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetRandomnessFromBeacon indicates an expected call of ChainGetRandomnessFromBeacon. +func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromBeacon(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromBeacon", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromBeacon), arg0, arg1, arg2, arg3, arg4) +} + +// ChainGetRandomnessFromTickets mocks base method. +func (m *MockFullNode) ChainGetRandomnessFromTickets(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetRandomnessFromTickets", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(abi.Randomness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetRandomnessFromTickets indicates an expected call of ChainGetRandomnessFromTickets. +func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromTickets(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromTickets", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromTickets), arg0, arg1, arg2, arg3, arg4) +} + +// ChainGetTipSet mocks base method. 
+func (m *MockFullNode) ChainGetTipSet(arg0 context.Context, arg1 types.TipSetKey) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetTipSet", arg0, arg1) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetTipSet indicates an expected call of ChainGetTipSet. +func (mr *MockFullNodeMockRecorder) ChainGetTipSet(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSet", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSet), arg0, arg1) +} + +// ChainGetTipSetByHeight mocks base method. +func (m *MockFullNode) ChainGetTipSetByHeight(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetTipSetByHeight", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetTipSetByHeight indicates an expected call of ChainGetTipSetByHeight. +func (mr *MockFullNodeMockRecorder) ChainGetTipSetByHeight(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSetByHeight", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSetByHeight), arg0, arg1, arg2) +} + +// ChainHasObj mocks base method. +func (m *MockFullNode) ChainHasObj(arg0 context.Context, arg1 cid.Cid) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHasObj", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainHasObj indicates an expected call of ChainHasObj. +func (mr *MockFullNodeMockRecorder) ChainHasObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHasObj", reflect.TypeOf((*MockFullNode)(nil).ChainHasObj), arg0, arg1) +} + +// ChainHead mocks base method. 
+func (m *MockFullNode) ChainHead(arg0 context.Context) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHead", arg0) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainHead indicates an expected call of ChainHead. +func (mr *MockFullNodeMockRecorder) ChainHead(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockFullNode)(nil).ChainHead), arg0) +} + +// ChainNotify mocks base method. +func (m *MockFullNode) ChainNotify(arg0 context.Context) (<-chan []*api.HeadChange, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainNotify", arg0) + ret0, _ := ret[0].(<-chan []*api.HeadChange) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainNotify indicates an expected call of ChainNotify. +func (mr *MockFullNodeMockRecorder) ChainNotify(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainNotify", reflect.TypeOf((*MockFullNode)(nil).ChainNotify), arg0) +} + +// ChainReadObj mocks base method. +func (m *MockFullNode) ChainReadObj(arg0 context.Context, arg1 cid.Cid) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainReadObj", arg0, arg1) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainReadObj indicates an expected call of ChainReadObj. +func (mr *MockFullNodeMockRecorder) ChainReadObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainReadObj", reflect.TypeOf((*MockFullNode)(nil).ChainReadObj), arg0, arg1) +} + +// ChainSetHead mocks base method. 
+func (m *MockFullNode) ChainSetHead(arg0 context.Context, arg1 types.TipSetKey) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainSetHead", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainSetHead indicates an expected call of ChainSetHead. +func (mr *MockFullNodeMockRecorder) ChainSetHead(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainSetHead", reflect.TypeOf((*MockFullNode)(nil).ChainSetHead), arg0, arg1) +} + +// ChainStatObj mocks base method. +func (m *MockFullNode) ChainStatObj(arg0 context.Context, arg1, arg2 cid.Cid) (api.ObjStat, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainStatObj", arg0, arg1, arg2) + ret0, _ := ret[0].(api.ObjStat) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainStatObj indicates an expected call of ChainStatObj. +func (mr *MockFullNodeMockRecorder) ChainStatObj(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainStatObj", reflect.TypeOf((*MockFullNode)(nil).ChainStatObj), arg0, arg1, arg2) +} + +// ChainTipSetWeight mocks base method. +func (m *MockFullNode) ChainTipSetWeight(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainTipSetWeight", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainTipSetWeight indicates an expected call of ChainTipSetWeight. +func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1) +} + +// ClientCalcCommP mocks base method. 
+func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api.CommPRet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientCalcCommP", arg0, arg1) + ret0, _ := ret[0].(*api.CommPRet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientCalcCommP indicates an expected call of ClientCalcCommP. +func (mr *MockFullNodeMockRecorder) ClientCalcCommP(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCalcCommP", reflect.TypeOf((*MockFullNode)(nil).ClientCalcCommP), arg0, arg1) +} + +// ClientCancelDataTransfer mocks base method. +func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientCancelDataTransfer", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer. +func (mr *MockFullNodeMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3) +} + +// ClientCancelRetrievalDeal mocks base method. +func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retrievalmarket.DealID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientCancelRetrievalDeal", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal. 
+func (mr *MockFullNodeMockRecorder) ClientCancelRetrievalDeal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelRetrievalDeal", reflect.TypeOf((*MockFullNode)(nil).ClientCancelRetrievalDeal), arg0, arg1) +} + +// ClientDataTransferUpdates mocks base method. +func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan api.DataTransferChannel, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientDataTransferUpdates", arg0) + ret0, _ := ret[0].(<-chan api.DataTransferChannel) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates. +func (mr *MockFullNodeMockRecorder) ClientDataTransferUpdates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDataTransferUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientDataTransferUpdates), arg0) +} + +// ClientDealPieceCID mocks base method. +func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (api.DataCIDSize, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientDealPieceCID", arg0, arg1) + ret0, _ := ret[0].(api.DataCIDSize) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientDealPieceCID indicates an expected call of ClientDealPieceCID. +func (mr *MockFullNodeMockRecorder) ClientDealPieceCID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealPieceCID", reflect.TypeOf((*MockFullNode)(nil).ClientDealPieceCID), arg0, arg1) +} + +// ClientDealSize mocks base method. 
+func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.DataSize, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientDealSize", arg0, arg1) + ret0, _ := ret[0].(api.DataSize) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientDealSize indicates an expected call of ClientDealSize. +func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1) +} + +// ClientFindData mocks base method. +func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientFindData", arg0, arg1, arg2) + ret0, _ := ret[0].([]api.QueryOffer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientFindData indicates an expected call of ClientFindData. +func (mr *MockFullNodeMockRecorder) ClientFindData(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientFindData", reflect.TypeOf((*MockFullNode)(nil).ClientFindData), arg0, arg1, arg2) +} + +// ClientGenCar mocks base method. +func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGenCar", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientGenCar indicates an expected call of ClientGenCar. +func (mr *MockFullNodeMockRecorder) ClientGenCar(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGenCar", reflect.TypeOf((*MockFullNode)(nil).ClientGenCar), arg0, arg1, arg2) +} + +// ClientGetDealInfo mocks base method. 
+func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*api.DealInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGetDealInfo", arg0, arg1) + ret0, _ := ret[0].(*api.DealInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientGetDealInfo indicates an expected call of ClientGetDealInfo. +func (mr *MockFullNodeMockRecorder) ClientGetDealInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealInfo", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealInfo), arg0, arg1) +} + +// ClientGetDealStatus mocks base method. +func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGetDealStatus", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientGetDealStatus indicates an expected call of ClientGetDealStatus. +func (mr *MockFullNodeMockRecorder) ClientGetDealStatus(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealStatus", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealStatus), arg0, arg1) +} + +// ClientGetDealUpdates mocks base method. +func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.DealInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGetDealUpdates", arg0) + ret0, _ := ret[0].(<-chan api.DealInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates. +func (mr *MockFullNodeMockRecorder) ClientGetDealUpdates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealUpdates), arg0) +} + +// ClientGetRetrievalUpdates mocks base method. 
+func (m *MockFullNode) ClientGetRetrievalUpdates(arg0 context.Context) (<-chan api.RetrievalInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGetRetrievalUpdates", arg0) + ret0, _ := ret[0].(<-chan api.RetrievalInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientGetRetrievalUpdates indicates an expected call of ClientGetRetrievalUpdates. +func (mr *MockFullNodeMockRecorder) ClientGetRetrievalUpdates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetRetrievalUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetRetrievalUpdates), arg0) +} + +// ClientHasLocal mocks base method. +func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientHasLocal", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientHasLocal indicates an expected call of ClientHasLocal. +func (mr *MockFullNodeMockRecorder) ClientHasLocal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientHasLocal", reflect.TypeOf((*MockFullNode)(nil).ClientHasLocal), arg0, arg1) +} + +// ClientImport mocks base method. +func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*api.ImportRes, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientImport", arg0, arg1) + ret0, _ := ret[0].(*api.ImportRes) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientImport indicates an expected call of ClientImport. +func (mr *MockFullNodeMockRecorder) ClientImport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientImport", reflect.TypeOf((*MockFullNode)(nil).ClientImport), arg0, arg1) +} + +// ClientListDataTransfers mocks base method. 
+func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.DataTransferChannel, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientListDataTransfers", arg0) + ret0, _ := ret[0].([]api.DataTransferChannel) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientListDataTransfers indicates an expected call of ClientListDataTransfers. +func (mr *MockFullNodeMockRecorder) ClientListDataTransfers(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDataTransfers", reflect.TypeOf((*MockFullNode)(nil).ClientListDataTransfers), arg0) +} + +// ClientListDeals mocks base method. +func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientListDeals", arg0) + ret0, _ := ret[0].([]api.DealInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientListDeals indicates an expected call of ClientListDeals. +func (mr *MockFullNodeMockRecorder) ClientListDeals(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDeals", reflect.TypeOf((*MockFullNode)(nil).ClientListDeals), arg0) +} + +// ClientListImports mocks base method. +func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientListImports", arg0) + ret0, _ := ret[0].([]api.Import) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientListImports indicates an expected call of ClientListImports. +func (mr *MockFullNodeMockRecorder) ClientListImports(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListImports", reflect.TypeOf((*MockFullNode)(nil).ClientListImports), arg0) +} + +// ClientListRetrievals mocks base method. 
+func (m *MockFullNode) ClientListRetrievals(arg0 context.Context) ([]api.RetrievalInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientListRetrievals", arg0) + ret0, _ := ret[0].([]api.RetrievalInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientListRetrievals indicates an expected call of ClientListRetrievals. +func (mr *MockFullNodeMockRecorder) ClientListRetrievals(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListRetrievals", reflect.TypeOf((*MockFullNode)(nil).ClientListRetrievals), arg0) +} + +// ClientMinerQueryOffer mocks base method. +func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 *cid.Cid) (api.QueryOffer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientMinerQueryOffer", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(api.QueryOffer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer. +func (mr *MockFullNodeMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientMinerQueryOffer", reflect.TypeOf((*MockFullNode)(nil).ClientMinerQueryOffer), arg0, arg1, arg2, arg3) +} + +// ClientQueryAsk mocks base method. +func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*storagemarket.StorageAsk, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2) + ret0, _ := ret[0].(*storagemarket.StorageAsk) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientQueryAsk indicates an expected call of ClientQueryAsk. 
+func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientQueryAsk", reflect.TypeOf((*MockFullNode)(nil).ClientQueryAsk), arg0, arg1, arg2) +} + +// ClientRemoveImport mocks base method. +func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 multistore.StoreID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientRemoveImport indicates an expected call of ClientRemoveImport. +func (mr *MockFullNodeMockRecorder) ClientRemoveImport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRemoveImport", reflect.TypeOf((*MockFullNode)(nil).ClientRemoveImport), arg0, arg1) +} + +// ClientRestartDataTransfer mocks base method. +func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRestartDataTransfer", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer. +func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRestartDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientRestartDataTransfer), arg0, arg1, arg2, arg3) +} + +// ClientRetrieve mocks base method. +func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientRetrieve indicates an expected call of ClientRetrieve. 
+func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2) +} + +// ClientRetrieveTryRestartInsufficientFunds mocks base method. +func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRetrieveTryRestartInsufficientFunds", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds. +func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1) +} + +// ClientRetrieveWithEvents mocks base method. +func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2) + ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents. +func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2) +} + +// ClientStartDeal mocks base method. 
+func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientStartDeal", arg0, arg1) + ret0, _ := ret[0].(*cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientStartDeal indicates an expected call of ClientStartDeal. +func (mr *MockFullNodeMockRecorder) ClientStartDeal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStartDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStartDeal), arg0, arg1) +} + +// ClientStatelessDeal mocks base method. +func (m *MockFullNode) ClientStatelessDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientStatelessDeal", arg0, arg1) + ret0, _ := ret[0].(*cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientStatelessDeal indicates an expected call of ClientStatelessDeal. +func (mr *MockFullNodeMockRecorder) ClientStatelessDeal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStatelessDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStatelessDeal), arg0, arg1) +} + +// Closing mocks base method. +func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Closing", arg0) + ret0, _ := ret[0].(<-chan struct{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Closing indicates an expected call of Closing. +func (mr *MockFullNodeMockRecorder) Closing(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Closing", reflect.TypeOf((*MockFullNode)(nil).Closing), arg0) +} + +// CreateBackup mocks base method. 
+func (m *MockFullNode) CreateBackup(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateBackup", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateBackup indicates an expected call of CreateBackup. +func (mr *MockFullNodeMockRecorder) CreateBackup(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBackup", reflect.TypeOf((*MockFullNode)(nil).CreateBackup), arg0, arg1) +} + +// Discover mocks base method. +func (m *MockFullNode) Discover(arg0 context.Context) (apitypes.OpenRPCDocument, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Discover", arg0) + ret0, _ := ret[0].(apitypes.OpenRPCDocument) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Discover indicates an expected call of Discover. +func (mr *MockFullNodeMockRecorder) Discover(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discover", reflect.TypeOf((*MockFullNode)(nil).Discover), arg0) +} + +// GasEstimateFeeCap mocks base method. +func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *types.Message, arg2 int64, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateFeeCap", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateFeeCap indicates an expected call of GasEstimateFeeCap. +func (mr *MockFullNodeMockRecorder) GasEstimateFeeCap(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateFeeCap", reflect.TypeOf((*MockFullNode)(nil).GasEstimateFeeCap), arg0, arg1, arg2, arg3) +} + +// GasEstimateGasLimit mocks base method. 
+func (m *MockFullNode) GasEstimateGasLimit(arg0 context.Context, arg1 *types.Message, arg2 types.TipSetKey) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateGasLimit", arg0, arg1, arg2) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateGasLimit indicates an expected call of GasEstimateGasLimit. +func (mr *MockFullNodeMockRecorder) GasEstimateGasLimit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasLimit", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasLimit), arg0, arg1, arg2) +} + +// GasEstimateGasPremium mocks base method. +func (m *MockFullNode) GasEstimateGasPremium(arg0 context.Context, arg1 uint64, arg2 address.Address, arg3 int64, arg4 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateGasPremium", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateGasPremium indicates an expected call of GasEstimateGasPremium. +func (mr *MockFullNodeMockRecorder) GasEstimateGasPremium(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasPremium", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasPremium), arg0, arg1, arg2, arg3, arg4) +} + +// GasEstimateMessageGas mocks base method. +func (m *MockFullNode) GasEstimateMessageGas(arg0 context.Context, arg1 *types.Message, arg2 *api.MessageSendSpec, arg3 types.TipSetKey) (*types.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GasEstimateMessageGas", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GasEstimateMessageGas indicates an expected call of GasEstimateMessageGas. 
+func (mr *MockFullNodeMockRecorder) GasEstimateMessageGas(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3) +} + +// ID mocks base method. +func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ID", arg0) + ret0, _ := ret[0].(peer.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ID indicates an expected call of ID. +func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockFullNode)(nil).ID), arg0) +} + +// LogList mocks base method. +func (m *MockFullNode) LogList(arg0 context.Context) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LogList", arg0) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LogList indicates an expected call of LogList. +func (mr *MockFullNodeMockRecorder) LogList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogList", reflect.TypeOf((*MockFullNode)(nil).LogList), arg0) +} + +// LogSetLevel mocks base method. +func (m *MockFullNode) LogSetLevel(arg0 context.Context, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LogSetLevel", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// LogSetLevel indicates an expected call of LogSetLevel. +func (mr *MockFullNodeMockRecorder) LogSetLevel(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogSetLevel", reflect.TypeOf((*MockFullNode)(nil).LogSetLevel), arg0, arg1, arg2) +} + +// MarketAddBalance mocks base method. 
+func (m *MockFullNode) MarketAddBalance(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketAddBalance", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketAddBalance indicates an expected call of MarketAddBalance. +func (mr *MockFullNodeMockRecorder) MarketAddBalance(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketAddBalance", reflect.TypeOf((*MockFullNode)(nil).MarketAddBalance), arg0, arg1, arg2, arg3) +} + +// MarketGetReserved mocks base method. +func (m *MockFullNode) MarketGetReserved(arg0 context.Context, arg1 address.Address) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketGetReserved", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketGetReserved indicates an expected call of MarketGetReserved. +func (mr *MockFullNodeMockRecorder) MarketGetReserved(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketGetReserved", reflect.TypeOf((*MockFullNode)(nil).MarketGetReserved), arg0, arg1) +} + +// MarketReleaseFunds mocks base method. +func (m *MockFullNode) MarketReleaseFunds(arg0 context.Context, arg1 address.Address, arg2 big.Int) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketReleaseFunds", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarketReleaseFunds indicates an expected call of MarketReleaseFunds. +func (mr *MockFullNodeMockRecorder) MarketReleaseFunds(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReleaseFunds", reflect.TypeOf((*MockFullNode)(nil).MarketReleaseFunds), arg0, arg1, arg2) +} + +// MarketReserveFunds mocks base method. 
+func (m *MockFullNode) MarketReserveFunds(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketReserveFunds", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketReserveFunds indicates an expected call of MarketReserveFunds. +func (mr *MockFullNodeMockRecorder) MarketReserveFunds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReserveFunds", reflect.TypeOf((*MockFullNode)(nil).MarketReserveFunds), arg0, arg1, arg2, arg3) +} + +// MarketWithdraw mocks base method. +func (m *MockFullNode) MarketWithdraw(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketWithdraw", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketWithdraw indicates an expected call of MarketWithdraw. +func (mr *MockFullNodeMockRecorder) MarketWithdraw(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketWithdraw", reflect.TypeOf((*MockFullNode)(nil).MarketWithdraw), arg0, arg1, arg2, arg3) +} + +// MinerCreateBlock mocks base method. +func (m *MockFullNode) MinerCreateBlock(arg0 context.Context, arg1 *api.BlockTemplate) (*types.BlockMsg, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MinerCreateBlock", arg0, arg1) + ret0, _ := ret[0].(*types.BlockMsg) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MinerCreateBlock indicates an expected call of MinerCreateBlock. 
+func (mr *MockFullNodeMockRecorder) MinerCreateBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerCreateBlock", reflect.TypeOf((*MockFullNode)(nil).MinerCreateBlock), arg0, arg1) +} + +// MinerGetBaseInfo mocks base method. +func (m *MockFullNode) MinerGetBaseInfo(arg0 context.Context, arg1 address.Address, arg2 abi.ChainEpoch, arg3 types.TipSetKey) (*api.MiningBaseInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MinerGetBaseInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*api.MiningBaseInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MinerGetBaseInfo indicates an expected call of MinerGetBaseInfo. +func (mr *MockFullNodeMockRecorder) MinerGetBaseInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerGetBaseInfo", reflect.TypeOf((*MockFullNode)(nil).MinerGetBaseInfo), arg0, arg1, arg2, arg3) +} + +// MpoolBatchPush mocks base method. +func (m *MockFullNode) MpoolBatchPush(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPush", arg0, arg1) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPush indicates an expected call of MpoolBatchPush. +func (mr *MockFullNodeMockRecorder) MpoolBatchPush(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPush", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPush), arg0, arg1) +} + +// MpoolBatchPushMessage mocks base method. 
+func (m *MockFullNode) MpoolBatchPushMessage(arg0 context.Context, arg1 []*types.Message, arg2 *api.MessageSendSpec) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPushMessage", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPushMessage indicates an expected call of MpoolBatchPushMessage. +func (mr *MockFullNodeMockRecorder) MpoolBatchPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushMessage), arg0, arg1, arg2) +} + +// MpoolBatchPushUntrusted mocks base method. +func (m *MockFullNode) MpoolBatchPushUntrusted(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPushUntrusted", arg0, arg1) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPushUntrusted indicates an expected call of MpoolBatchPushUntrusted. +func (mr *MockFullNodeMockRecorder) MpoolBatchPushUntrusted(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushUntrusted), arg0, arg1) +} + +// MpoolCheckMessages mocks base method. +func (m *MockFullNode) MpoolCheckMessages(arg0 context.Context, arg1 []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolCheckMessages", arg0, arg1) + ret0, _ := ret[0].([][]api.MessageCheckStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolCheckMessages indicates an expected call of MpoolCheckMessages. 
+func (mr *MockFullNodeMockRecorder) MpoolCheckMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckMessages", reflect.TypeOf((*MockFullNode)(nil).MpoolCheckMessages), arg0, arg1) +} + +// MpoolCheckPendingMessages mocks base method. +func (m *MockFullNode) MpoolCheckPendingMessages(arg0 context.Context, arg1 address.Address) ([][]api.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolCheckPendingMessages", arg0, arg1) + ret0, _ := ret[0].([][]api.MessageCheckStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolCheckPendingMessages indicates an expected call of MpoolCheckPendingMessages. +func (mr *MockFullNodeMockRecorder) MpoolCheckPendingMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckPendingMessages", reflect.TypeOf((*MockFullNode)(nil).MpoolCheckPendingMessages), arg0, arg1) +} + +// MpoolCheckReplaceMessages mocks base method. +func (m *MockFullNode) MpoolCheckReplaceMessages(arg0 context.Context, arg1 []*types.Message) ([][]api.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolCheckReplaceMessages", arg0, arg1) + ret0, _ := ret[0].([][]api.MessageCheckStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolCheckReplaceMessages indicates an expected call of MpoolCheckReplaceMessages. +func (mr *MockFullNodeMockRecorder) MpoolCheckReplaceMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckReplaceMessages", reflect.TypeOf((*MockFullNode)(nil).MpoolCheckReplaceMessages), arg0, arg1) +} + +// MpoolClear mocks base method. 
+func (m *MockFullNode) MpoolClear(arg0 context.Context, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolClear", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolClear indicates an expected call of MpoolClear. +func (mr *MockFullNodeMockRecorder) MpoolClear(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolClear", reflect.TypeOf((*MockFullNode)(nil).MpoolClear), arg0, arg1) +} + +// MpoolGetConfig mocks base method. +func (m *MockFullNode) MpoolGetConfig(arg0 context.Context) (*types.MpoolConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolGetConfig", arg0) + ret0, _ := ret[0].(*types.MpoolConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolGetConfig indicates an expected call of MpoolGetConfig. +func (mr *MockFullNodeMockRecorder) MpoolGetConfig(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolGetConfig), arg0) +} + +// MpoolGetNonce mocks base method. +func (m *MockFullNode) MpoolGetNonce(arg0 context.Context, arg1 address.Address) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolGetNonce", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolGetNonce indicates an expected call of MpoolGetNonce. +func (mr *MockFullNodeMockRecorder) MpoolGetNonce(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetNonce", reflect.TypeOf((*MockFullNode)(nil).MpoolGetNonce), arg0, arg1) +} + +// MpoolPending mocks base method. 
+func (m *MockFullNode) MpoolPending(arg0 context.Context, arg1 types.TipSetKey) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPending", arg0, arg1) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPending indicates an expected call of MpoolPending. +func (mr *MockFullNodeMockRecorder) MpoolPending(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPending", reflect.TypeOf((*MockFullNode)(nil).MpoolPending), arg0, arg1) +} + +// MpoolPush mocks base method. +func (m *MockFullNode) MpoolPush(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPush", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPush indicates an expected call of MpoolPush. +func (mr *MockFullNodeMockRecorder) MpoolPush(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPush", reflect.TypeOf((*MockFullNode)(nil).MpoolPush), arg0, arg1) +} + +// MpoolPushMessage mocks base method. +func (m *MockFullNode) MpoolPushMessage(arg0 context.Context, arg1 *types.Message, arg2 *api.MessageSendSpec) (*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPushMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPushMessage indicates an expected call of MpoolPushMessage. +func (mr *MockFullNodeMockRecorder) MpoolPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolPushMessage), arg0, arg1, arg2) +} + +// MpoolPushUntrusted mocks base method. 
+func (m *MockFullNode) MpoolPushUntrusted(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPushUntrusted", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPushUntrusted indicates an expected call of MpoolPushUntrusted. +func (mr *MockFullNodeMockRecorder) MpoolPushUntrusted(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolPushUntrusted), arg0, arg1) +} + +// MpoolSelect mocks base method. +func (m *MockFullNode) MpoolSelect(arg0 context.Context, arg1 types.TipSetKey, arg2 float64) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSelect", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolSelect indicates an expected call of MpoolSelect. +func (mr *MockFullNodeMockRecorder) MpoolSelect(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSelect", reflect.TypeOf((*MockFullNode)(nil).MpoolSelect), arg0, arg1, arg2) +} + +// MpoolSetConfig mocks base method. +func (m *MockFullNode) MpoolSetConfig(arg0 context.Context, arg1 *types.MpoolConfig) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSetConfig", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolSetConfig indicates an expected call of MpoolSetConfig. +func (mr *MockFullNodeMockRecorder) MpoolSetConfig(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolSetConfig), arg0, arg1) +} + +// MpoolSub mocks base method. 
+func (m *MockFullNode) MpoolSub(arg0 context.Context) (<-chan api.MpoolUpdate, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSub", arg0) + ret0, _ := ret[0].(<-chan api.MpoolUpdate) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolSub indicates an expected call of MpoolSub. +func (mr *MockFullNodeMockRecorder) MpoolSub(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSub", reflect.TypeOf((*MockFullNode)(nil).MpoolSub), arg0) +} + +// MsigAddApprove mocks base method. +func (m *MockFullNode) MsigAddApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address, arg6 bool) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddApprove indicates an expected call of MsigAddApprove. +func (mr *MockFullNodeMockRecorder) MsigAddApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddApprove", reflect.TypeOf((*MockFullNode)(nil).MsigAddApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigAddCancel mocks base method. +func (m *MockFullNode) MsigAddCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4 address.Address, arg5 bool) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddCancel", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddCancel indicates an expected call of MsigAddCancel. 
+func (mr *MockFullNodeMockRecorder) MsigAddCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddCancel", reflect.TypeOf((*MockFullNode)(nil).MsigAddCancel), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// MsigAddPropose mocks base method. +func (m *MockFullNode) MsigAddPropose(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddPropose", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddPropose indicates an expected call of MsigAddPropose. +func (mr *MockFullNodeMockRecorder) MsigAddPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddPropose", reflect.TypeOf((*MockFullNode)(nil).MsigAddPropose), arg0, arg1, arg2, arg3, arg4) +} + +// MsigApprove mocks base method. +func (m *MockFullNode) MsigApprove(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigApprove", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigApprove indicates an expected call of MsigApprove. +func (mr *MockFullNodeMockRecorder) MsigApprove(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApprove", reflect.TypeOf((*MockFullNode)(nil).MsigApprove), arg0, arg1, arg2, arg3) +} + +// MsigApproveTxnHash mocks base method. 
+func (m *MockFullNode) MsigApproveTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3, arg4 address.Address, arg5 big.Int, arg6 address.Address, arg7 uint64, arg8 []byte) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigApproveTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigApproveTxnHash indicates an expected call of MsigApproveTxnHash. +func (mr *MockFullNodeMockRecorder) MsigApproveTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApproveTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigApproveTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) +} + +// MsigCancel mocks base method. +func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCancel indicates an expected call of MsigCancel. +func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) +} + +// MsigCreate mocks base method. 
+func (m *MockFullNode) MsigCreate(arg0 context.Context, arg1 uint64, arg2 []address.Address, arg3 abi.ChainEpoch, arg4 big.Int, arg5 address.Address, arg6 big.Int) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCreate", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCreate indicates an expected call of MsigCreate. +func (mr *MockFullNodeMockRecorder) MsigCreate(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCreate", reflect.TypeOf((*MockFullNode)(nil).MsigCreate), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigGetAvailableBalance mocks base method. +func (m *MockFullNode) MsigGetAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigGetAvailableBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigGetAvailableBalance indicates an expected call of MsigGetAvailableBalance. +func (mr *MockFullNodeMockRecorder) MsigGetAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).MsigGetAvailableBalance), arg0, arg1, arg2) +} + +// MsigGetPending mocks base method. +func (m *MockFullNode) MsigGetPending(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*api.MsigTransaction, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigGetPending", arg0, arg1, arg2) + ret0, _ := ret[0].([]*api.MsigTransaction) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigGetPending indicates an expected call of MsigGetPending. 
+func (mr *MockFullNodeMockRecorder) MsigGetPending(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetPending", reflect.TypeOf((*MockFullNode)(nil).MsigGetPending), arg0, arg1, arg2) +} + +// MsigGetVested mocks base method. +func (m *MockFullNode) MsigGetVested(arg0 context.Context, arg1 address.Address, arg2, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigGetVested", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigGetVested indicates an expected call of MsigGetVested. +func (mr *MockFullNodeMockRecorder) MsigGetVested(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVested", reflect.TypeOf((*MockFullNode)(nil).MsigGetVested), arg0, arg1, arg2, arg3) +} + +// MsigGetVestingSchedule mocks base method. +func (m *MockFullNode) MsigGetVestingSchedule(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MsigVesting, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigGetVestingSchedule", arg0, arg1, arg2) + ret0, _ := ret[0].(api.MsigVesting) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigGetVestingSchedule indicates an expected call of MsigGetVestingSchedule. +func (mr *MockFullNodeMockRecorder) MsigGetVestingSchedule(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVestingSchedule", reflect.TypeOf((*MockFullNode)(nil).MsigGetVestingSchedule), arg0, arg1, arg2) +} + +// MsigPropose mocks base method. 
+func (m *MockFullNode) MsigPropose(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int, arg4 address.Address, arg5 uint64, arg6 []byte) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigPropose", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigPropose indicates an expected call of MsigPropose. +func (mr *MockFullNodeMockRecorder) MsigPropose(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigPropose", reflect.TypeOf((*MockFullNode)(nil).MsigPropose), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigRemoveSigner mocks base method. +func (m *MockFullNode) MsigRemoveSigner(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigRemoveSigner", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigRemoveSigner indicates an expected call of MsigRemoveSigner. +func (mr *MockFullNodeMockRecorder) MsigRemoveSigner(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigRemoveSigner", reflect.TypeOf((*MockFullNode)(nil).MsigRemoveSigner), arg0, arg1, arg2, arg3, arg4) +} + +// MsigSwapApprove mocks base method. +func (m *MockFullNode) MsigSwapApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5, arg6 address.Address) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapApprove indicates an expected call of MsigSwapApprove. 
+func (mr *MockFullNodeMockRecorder) MsigSwapApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapApprove", reflect.TypeOf((*MockFullNode)(nil).MsigSwapApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigSwapCancel mocks base method. +func (m *MockFullNode) MsigSwapCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapCancel", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapCancel indicates an expected call of MsigSwapCancel. +func (mr *MockFullNodeMockRecorder) MsigSwapCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapCancel", reflect.TypeOf((*MockFullNode)(nil).MsigSwapCancel), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// MsigSwapPropose mocks base method. +func (m *MockFullNode) MsigSwapPropose(arg0 context.Context, arg1, arg2, arg3, arg4 address.Address) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapPropose", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapPropose indicates an expected call of MsigSwapPropose. +func (mr *MockFullNodeMockRecorder) MsigSwapPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapPropose", reflect.TypeOf((*MockFullNode)(nil).MsigSwapPropose), arg0, arg1, arg2, arg3, arg4) +} + +// NetAddrsListen mocks base method. 
+func (m *MockFullNode) NetAddrsListen(arg0 context.Context) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAddrsListen", arg0) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAddrsListen indicates an expected call of NetAddrsListen. +func (mr *MockFullNodeMockRecorder) NetAddrsListen(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAddrsListen", reflect.TypeOf((*MockFullNode)(nil).NetAddrsListen), arg0) +} + +// NetAgentVersion mocks base method. +func (m *MockFullNode) NetAgentVersion(arg0 context.Context, arg1 peer.ID) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAgentVersion", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAgentVersion indicates an expected call of NetAgentVersion. +func (mr *MockFullNodeMockRecorder) NetAgentVersion(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAgentVersion", reflect.TypeOf((*MockFullNode)(nil).NetAgentVersion), arg0, arg1) +} + +// NetAutoNatStatus mocks base method. +func (m *MockFullNode) NetAutoNatStatus(arg0 context.Context) (api.NatInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAutoNatStatus", arg0) + ret0, _ := ret[0].(api.NatInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAutoNatStatus indicates an expected call of NetAutoNatStatus. +func (mr *MockFullNodeMockRecorder) NetAutoNatStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAutoNatStatus", reflect.TypeOf((*MockFullNode)(nil).NetAutoNatStatus), arg0) +} + +// NetBandwidthStats mocks base method. 
+func (m *MockFullNode) NetBandwidthStats(arg0 context.Context) (metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStats", arg0) + ret0, _ := ret[0].(metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStats indicates an expected call of NetBandwidthStats. +func (mr *MockFullNodeMockRecorder) NetBandwidthStats(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStats", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStats), arg0) +} + +// NetBandwidthStatsByPeer mocks base method. +func (m *MockFullNode) NetBandwidthStatsByPeer(arg0 context.Context) (map[string]metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStatsByPeer", arg0) + ret0, _ := ret[0].(map[string]metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStatsByPeer indicates an expected call of NetBandwidthStatsByPeer. +func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByPeer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByPeer", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByPeer), arg0) +} + +// NetBandwidthStatsByProtocol mocks base method. +func (m *MockFullNode) NetBandwidthStatsByProtocol(arg0 context.Context) (map[protocol.ID]metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStatsByProtocol", arg0) + ret0, _ := ret[0].(map[protocol.ID]metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStatsByProtocol indicates an expected call of NetBandwidthStatsByProtocol. 
+func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByProtocol(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByProtocol", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByProtocol), arg0) +} + +// NetBlockAdd mocks base method. +func (m *MockFullNode) NetBlockAdd(arg0 context.Context, arg1 api.NetBlockList) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBlockAdd", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetBlockAdd indicates an expected call of NetBlockAdd. +func (mr *MockFullNodeMockRecorder) NetBlockAdd(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockAdd", reflect.TypeOf((*MockFullNode)(nil).NetBlockAdd), arg0, arg1) +} + +// NetBlockList mocks base method. +func (m *MockFullNode) NetBlockList(arg0 context.Context) (api.NetBlockList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBlockList", arg0) + ret0, _ := ret[0].(api.NetBlockList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBlockList indicates an expected call of NetBlockList. +func (mr *MockFullNodeMockRecorder) NetBlockList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockList", reflect.TypeOf((*MockFullNode)(nil).NetBlockList), arg0) +} + +// NetBlockRemove mocks base method. +func (m *MockFullNode) NetBlockRemove(arg0 context.Context, arg1 api.NetBlockList) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBlockRemove", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetBlockRemove indicates an expected call of NetBlockRemove. 
+func (mr *MockFullNodeMockRecorder) NetBlockRemove(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockRemove", reflect.TypeOf((*MockFullNode)(nil).NetBlockRemove), arg0, arg1) +} + +// NetConnect mocks base method. +func (m *MockFullNode) NetConnect(arg0 context.Context, arg1 peer.AddrInfo) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetConnect", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetConnect indicates an expected call of NetConnect. +func (mr *MockFullNodeMockRecorder) NetConnect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnect", reflect.TypeOf((*MockFullNode)(nil).NetConnect), arg0, arg1) +} + +// NetConnectedness mocks base method. +func (m *MockFullNode) NetConnectedness(arg0 context.Context, arg1 peer.ID) (network0.Connectedness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetConnectedness", arg0, arg1) + ret0, _ := ret[0].(network0.Connectedness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetConnectedness indicates an expected call of NetConnectedness. +func (mr *MockFullNodeMockRecorder) NetConnectedness(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnectedness", reflect.TypeOf((*MockFullNode)(nil).NetConnectedness), arg0, arg1) +} + +// NetDisconnect mocks base method. +func (m *MockFullNode) NetDisconnect(arg0 context.Context, arg1 peer.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetDisconnect", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetDisconnect indicates an expected call of NetDisconnect. 
+func (mr *MockFullNodeMockRecorder) NetDisconnect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetDisconnect", reflect.TypeOf((*MockFullNode)(nil).NetDisconnect), arg0, arg1) +} + +// NetFindPeer mocks base method. +func (m *MockFullNode) NetFindPeer(arg0 context.Context, arg1 peer.ID) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetFindPeer", arg0, arg1) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetFindPeer indicates an expected call of NetFindPeer. +func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1) +} + +// NetPeerInfo mocks base method. +func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPeerInfo", arg0, arg1) + ret0, _ := ret[0].(*api.ExtendedPeerInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPeerInfo indicates an expected call of NetPeerInfo. +func (mr *MockFullNodeMockRecorder) NetPeerInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeerInfo", reflect.TypeOf((*MockFullNode)(nil).NetPeerInfo), arg0, arg1) +} + +// NetPeers mocks base method. +func (m *MockFullNode) NetPeers(arg0 context.Context) ([]peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPeers", arg0) + ret0, _ := ret[0].([]peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPeers indicates an expected call of NetPeers. 
+func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0) +} + +// NetPubsubScores mocks base method. +func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPubsubScores", arg0) + ret0, _ := ret[0].([]api.PubsubScore) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPubsubScores indicates an expected call of NetPubsubScores. +func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0) +} + +// NodeStatus mocks base method. +func (m *MockFullNode) NodeStatus(arg0 context.Context, arg1 bool) (api.NodeStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NodeStatus", arg0, arg1) + ret0, _ := ret[0].(api.NodeStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeStatus indicates an expected call of NodeStatus. +func (mr *MockFullNodeMockRecorder) NodeStatus(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStatus", reflect.TypeOf((*MockFullNode)(nil).NodeStatus), arg0, arg1) +} + +// PaychAllocateLane mocks base method. +func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Address) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAllocateLane", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAllocateLane indicates an expected call of PaychAllocateLane. 
+func (mr *MockFullNodeMockRecorder) PaychAllocateLane(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAllocateLane", reflect.TypeOf((*MockFullNode)(nil).PaychAllocateLane), arg0, arg1) +} + +// PaychAvailableFunds mocks base method. +func (m *MockFullNode) PaychAvailableFunds(arg0 context.Context, arg1 address.Address) (*api.ChannelAvailableFunds, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAvailableFunds", arg0, arg1) + ret0, _ := ret[0].(*api.ChannelAvailableFunds) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAvailableFunds indicates an expected call of PaychAvailableFunds. +func (mr *MockFullNodeMockRecorder) PaychAvailableFunds(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFunds", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFunds), arg0, arg1) +} + +// PaychAvailableFundsByFromTo mocks base method. +func (m *MockFullNode) PaychAvailableFundsByFromTo(arg0 context.Context, arg1, arg2 address.Address) (*api.ChannelAvailableFunds, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAvailableFundsByFromTo", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.ChannelAvailableFunds) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAvailableFundsByFromTo indicates an expected call of PaychAvailableFundsByFromTo. +func (mr *MockFullNodeMockRecorder) PaychAvailableFundsByFromTo(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFundsByFromTo", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFundsByFromTo), arg0, arg1, arg2) +} + +// PaychCollect mocks base method. 
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (m *MockFullNode) PaychCollect(arg0 context.Context, arg1 address.Address) (cid.Cid, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PaychCollect", arg0, arg1)
	ret0, _ := ret[0].(cid.Cid)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PaychCollect indicates an expected call of PaychCollect.
func (mr *MockFullNodeMockRecorder) PaychCollect(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychCollect", reflect.TypeOf((*MockFullNode)(nil).PaychCollect), arg0, arg1)
}

// PaychGet mocks base method.
func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (*api.ChannelInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PaychGet", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(*api.ChannelInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PaychGet indicates an expected call of PaychGet.
func (mr *MockFullNodeMockRecorder) PaychGet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGet", reflect.TypeOf((*MockFullNode)(nil).PaychGet), arg0, arg1, arg2, arg3)
}

// PaychGetWaitReady mocks base method.
func (m *MockFullNode) PaychGetWaitReady(arg0 context.Context, arg1 cid.Cid) (address.Address, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PaychGetWaitReady", arg0, arg1)
	ret0, _ := ret[0].(address.Address)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PaychGetWaitReady indicates an expected call of PaychGetWaitReady.
func (mr *MockFullNodeMockRecorder) PaychGetWaitReady(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGetWaitReady", reflect.TypeOf((*MockFullNode)(nil).PaychGetWaitReady), arg0, arg1)
}

// PaychList mocks base method.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (m *MockFullNode) PaychList(arg0 context.Context) ([]address.Address, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PaychList", arg0)
	ret0, _ := ret[0].([]address.Address)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PaychList indicates an expected call of PaychList.
func (mr *MockFullNodeMockRecorder) PaychList(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychList", reflect.TypeOf((*MockFullNode)(nil).PaychList), arg0)
}

// PaychNewPayment mocks base method.
func (m *MockFullNode) PaychNewPayment(arg0 context.Context, arg1, arg2 address.Address, arg3 []api.VoucherSpec) (*api.PaymentInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PaychNewPayment", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(*api.PaymentInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PaychNewPayment indicates an expected call of PaychNewPayment.
func (mr *MockFullNodeMockRecorder) PaychNewPayment(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychNewPayment", reflect.TypeOf((*MockFullNode)(nil).PaychNewPayment), arg0, arg1, arg2, arg3)
}

// PaychSettle mocks base method.
func (m *MockFullNode) PaychSettle(arg0 context.Context, arg1 address.Address) (cid.Cid, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PaychSettle", arg0, arg1)
	ret0, _ := ret[0].(cid.Cid)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PaychSettle indicates an expected call of PaychSettle.
func (mr *MockFullNodeMockRecorder) PaychSettle(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychSettle", reflect.TypeOf((*MockFullNode)(nil).PaychSettle), arg0, arg1)
}

// PaychStatus mocks base method.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (m *MockFullNode) PaychStatus(arg0 context.Context, arg1 address.Address) (*api.PaychStatus, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PaychStatus", arg0, arg1)
	ret0, _ := ret[0].(*api.PaychStatus)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PaychStatus indicates an expected call of PaychStatus.
func (mr *MockFullNodeMockRecorder) PaychStatus(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychStatus", reflect.TypeOf((*MockFullNode)(nil).PaychStatus), arg0, arg1)
}

// PaychVoucherAdd mocks base method.
func (m *MockFullNode) PaychVoucherAdd(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3 []byte, arg4 big.Int) (big.Int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PaychVoucherAdd", arg0, arg1, arg2, arg3, arg4)
	ret0, _ := ret[0].(big.Int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PaychVoucherAdd indicates an expected call of PaychVoucherAdd.
func (mr *MockFullNodeMockRecorder) PaychVoucherAdd(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherAdd", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherAdd), arg0, arg1, arg2, arg3, arg4)
}

// PaychVoucherCheckSpendable mocks base method.
func (m *MockFullNode) PaychVoucherCheckSpendable(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (bool, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PaychVoucherCheckSpendable", arg0, arg1, arg2, arg3, arg4)
	ret0, _ := ret[0].(bool)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PaychVoucherCheckSpendable indicates an expected call of PaychVoucherCheckSpendable.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (mr *MockFullNodeMockRecorder) PaychVoucherCheckSpendable(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckSpendable", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckSpendable), arg0, arg1, arg2, arg3, arg4)
}

// PaychVoucherCheckValid mocks base method.
func (m *MockFullNode) PaychVoucherCheckValid(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PaychVoucherCheckValid", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// PaychVoucherCheckValid indicates an expected call of PaychVoucherCheckValid.
func (mr *MockFullNodeMockRecorder) PaychVoucherCheckValid(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckValid", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckValid), arg0, arg1, arg2)
}

// PaychVoucherCreate mocks base method.
func (m *MockFullNode) PaychVoucherCreate(arg0 context.Context, arg1 address.Address, arg2 big.Int, arg3 uint64) (*api.VoucherCreateResult, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PaychVoucherCreate", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(*api.VoucherCreateResult)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PaychVoucherCreate indicates an expected call of PaychVoucherCreate.
func (mr *MockFullNodeMockRecorder) PaychVoucherCreate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCreate", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCreate), arg0, arg1, arg2, arg3)
}

// PaychVoucherList mocks base method.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (m *MockFullNode) PaychVoucherList(arg0 context.Context, arg1 address.Address) ([]*paych.SignedVoucher, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PaychVoucherList", arg0, arg1)
	ret0, _ := ret[0].([]*paych.SignedVoucher)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PaychVoucherList indicates an expected call of PaychVoucherList.
func (mr *MockFullNodeMockRecorder) PaychVoucherList(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherList", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherList), arg0, arg1)
}

// PaychVoucherSubmit mocks base method.
func (m *MockFullNode) PaychVoucherSubmit(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (cid.Cid, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PaychVoucherSubmit", arg0, arg1, arg2, arg3, arg4)
	ret0, _ := ret[0].(cid.Cid)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PaychVoucherSubmit indicates an expected call of PaychVoucherSubmit.
func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4)
}

// Session mocks base method.
func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Session", arg0)
	ret0, _ := ret[0].(uuid.UUID)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Session indicates an expected call of Session.
func (mr *MockFullNodeMockRecorder) Session(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Session", reflect.TypeOf((*MockFullNode)(nil).Session), arg0)
}

// Shutdown mocks base method.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (m *MockFullNode) Shutdown(arg0 context.Context) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Shutdown", arg0)
	ret0, _ := ret[0].(error)
	return ret0
}

// Shutdown indicates an expected call of Shutdown.
func (mr *MockFullNodeMockRecorder) Shutdown(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockFullNode)(nil).Shutdown), arg0)
}

// StateAccountKey mocks base method.
func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateAccountKey", arg0, arg1, arg2)
	ret0, _ := ret[0].(address.Address)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateAccountKey indicates an expected call of StateAccountKey.
func (mr *MockFullNodeMockRecorder) StateAccountKey(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", reflect.TypeOf((*MockFullNode)(nil).StateAccountKey), arg0, arg1, arg2)
}

// StateAllMinerFaults mocks base method.
func (m *MockFullNode) StateAllMinerFaults(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) ([]*api.Fault, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateAllMinerFaults", arg0, arg1, arg2)
	ret0, _ := ret[0].([]*api.Fault)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateAllMinerFaults indicates an expected call of StateAllMinerFaults.
func (mr *MockFullNodeMockRecorder) StateAllMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAllMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateAllMinerFaults), arg0, arg1, arg2)
}

// StateCall mocks base method.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (m *MockFullNode) StateCall(arg0 context.Context, arg1 *types.Message, arg2 types.TipSetKey) (*api.InvocResult, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateCall", arg0, arg1, arg2)
	ret0, _ := ret[0].(*api.InvocResult)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateCall indicates an expected call of StateCall.
func (mr *MockFullNodeMockRecorder) StateCall(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCall", reflect.TypeOf((*MockFullNode)(nil).StateCall), arg0, arg1, arg2)
}

// StateChangedActors mocks base method.
func (m *MockFullNode) StateChangedActors(arg0 context.Context, arg1, arg2 cid.Cid) (map[string]types.Actor, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateChangedActors", arg0, arg1, arg2)
	ret0, _ := ret[0].(map[string]types.Actor)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateChangedActors indicates an expected call of StateChangedActors.
func (mr *MockFullNodeMockRecorder) StateChangedActors(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateChangedActors", reflect.TypeOf((*MockFullNode)(nil).StateChangedActors), arg0, arg1, arg2)
}

// StateCirculatingSupply mocks base method.
func (m *MockFullNode) StateCirculatingSupply(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateCirculatingSupply", arg0, arg1)
	ret0, _ := ret[0].(big.Int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateCirculatingSupply indicates an expected call of StateCirculatingSupply.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (mr *MockFullNodeMockRecorder) StateCirculatingSupply(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCirculatingSupply", reflect.TypeOf((*MockFullNode)(nil).StateCirculatingSupply), arg0, arg1)
}

// StateCompute mocks base method.
func (m *MockFullNode) StateCompute(arg0 context.Context, arg1 abi.ChainEpoch, arg2 []*types.Message, arg3 types.TipSetKey) (*api.ComputeStateOutput, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateCompute", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(*api.ComputeStateOutput)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateCompute indicates an expected call of StateCompute.
func (mr *MockFullNodeMockRecorder) StateCompute(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCompute", reflect.TypeOf((*MockFullNode)(nil).StateCompute), arg0, arg1, arg2, arg3)
}

// StateDealProviderCollateralBounds mocks base method.
func (m *MockFullNode) StateDealProviderCollateralBounds(arg0 context.Context, arg1 abi.PaddedPieceSize, arg2 bool, arg3 types.TipSetKey) (api.DealCollateralBounds, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateDealProviderCollateralBounds", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(api.DealCollateralBounds)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateDealProviderCollateralBounds indicates an expected call of StateDealProviderCollateralBounds.
func (mr *MockFullNodeMockRecorder) StateDealProviderCollateralBounds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDealProviderCollateralBounds", reflect.TypeOf((*MockFullNode)(nil).StateDealProviderCollateralBounds), arg0, arg1, arg2, arg3)
}

// StateDecodeParams mocks base method.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (m *MockFullNode) StateDecodeParams(arg0 context.Context, arg1 address.Address, arg2 abi.MethodNum, arg3 []byte, arg4 types.TipSetKey) (interface{}, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateDecodeParams", arg0, arg1, arg2, arg3, arg4)
	ret0, _ := ret[0].(interface{})
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateDecodeParams indicates an expected call of StateDecodeParams.
func (mr *MockFullNodeMockRecorder) StateDecodeParams(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDecodeParams", reflect.TypeOf((*MockFullNode)(nil).StateDecodeParams), arg0, arg1, arg2, arg3, arg4)
}

// StateGetActor mocks base method.
func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.Actor, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateGetActor", arg0, arg1, arg2)
	ret0, _ := ret[0].(*types.Actor)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateGetActor indicates an expected call of StateGetActor.
func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2)
}

// StateListActors mocks base method.
func (m *MockFullNode) StateListActors(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateListActors", arg0, arg1)
	ret0, _ := ret[0].([]address.Address)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateListActors indicates an expected call of StateListActors.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (mr *MockFullNodeMockRecorder) StateListActors(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListActors", reflect.TypeOf((*MockFullNode)(nil).StateListActors), arg0, arg1)
}

// StateListMessages mocks base method.
func (m *MockFullNode) StateListMessages(arg0 context.Context, arg1 *api.MessageMatch, arg2 types.TipSetKey, arg3 abi.ChainEpoch) ([]cid.Cid, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateListMessages", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].([]cid.Cid)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateListMessages indicates an expected call of StateListMessages.
func (mr *MockFullNodeMockRecorder) StateListMessages(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMessages", reflect.TypeOf((*MockFullNode)(nil).StateListMessages), arg0, arg1, arg2, arg3)
}

// StateListMiners mocks base method.
func (m *MockFullNode) StateListMiners(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateListMiners", arg0, arg1)
	ret0, _ := ret[0].([]address.Address)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateListMiners indicates an expected call of StateListMiners.
func (mr *MockFullNodeMockRecorder) StateListMiners(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMiners", reflect.TypeOf((*MockFullNode)(nil).StateListMiners), arg0, arg1)
}

// StateLookupID mocks base method.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (m *MockFullNode) StateLookupID(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateLookupID", arg0, arg1, arg2)
	ret0, _ := ret[0].(address.Address)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateLookupID indicates an expected call of StateLookupID.
func (mr *MockFullNodeMockRecorder) StateLookupID(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateLookupID", reflect.TypeOf((*MockFullNode)(nil).StateLookupID), arg0, arg1, arg2)
}

// StateMarketBalance mocks base method.
func (m *MockFullNode) StateMarketBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MarketBalance, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMarketBalance", arg0, arg1, arg2)
	ret0, _ := ret[0].(api.MarketBalance)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMarketBalance indicates an expected call of StateMarketBalance.
func (mr *MockFullNodeMockRecorder) StateMarketBalance(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketBalance", reflect.TypeOf((*MockFullNode)(nil).StateMarketBalance), arg0, arg1, arg2)
}

// StateMarketDeals mocks base method.
func (m *MockFullNode) StateMarketDeals(arg0 context.Context, arg1 types.TipSetKey) (map[string]api.MarketDeal, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMarketDeals", arg0, arg1)
	ret0, _ := ret[0].(map[string]api.MarketDeal)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMarketDeals indicates an expected call of StateMarketDeals.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (mr *MockFullNodeMockRecorder) StateMarketDeals(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketDeals", reflect.TypeOf((*MockFullNode)(nil).StateMarketDeals), arg0, arg1)
}

// StateMarketParticipants mocks base method.
func (m *MockFullNode) StateMarketParticipants(arg0 context.Context, arg1 types.TipSetKey) (map[string]api.MarketBalance, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMarketParticipants", arg0, arg1)
	ret0, _ := ret[0].(map[string]api.MarketBalance)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMarketParticipants indicates an expected call of StateMarketParticipants.
func (mr *MockFullNodeMockRecorder) StateMarketParticipants(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketParticipants", reflect.TypeOf((*MockFullNode)(nil).StateMarketParticipants), arg0, arg1)
}

// StateMarketStorageDeal mocks base method.
func (m *MockFullNode) StateMarketStorageDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*api.MarketDeal, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMarketStorageDeal", arg0, arg1, arg2)
	ret0, _ := ret[0].(*api.MarketDeal)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMarketStorageDeal indicates an expected call of StateMarketStorageDeal.
func (mr *MockFullNodeMockRecorder) StateMarketStorageDeal(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketStorageDeal", reflect.TypeOf((*MockFullNode)(nil).StateMarketStorageDeal), arg0, arg1, arg2)
}

// StateMinerActiveSectors mocks base method.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (m *MockFullNode) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerActiveSectors", arg0, arg1, arg2)
	ret0, _ := ret[0].([]*miner.SectorOnChainInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors.
func (mr *MockFullNodeMockRecorder) StateMinerActiveSectors(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerActiveSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerActiveSectors), arg0, arg1, arg2)
}

// StateMinerAvailableBalance mocks base method.
func (m *MockFullNode) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2)
	ret0, _ := ret[0].(big.Int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance.
func (mr *MockFullNodeMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).StateMinerAvailableBalance), arg0, arg1, arg2)
}

// StateMinerDeadlines mocks base method.
func (m *MockFullNode) StateMinerDeadlines(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]api.Deadline, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerDeadlines", arg0, arg1, arg2)
	ret0, _ := ret[0].([]api.Deadline)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerDeadlines indicates an expected call of StateMinerDeadlines.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (mr *MockFullNodeMockRecorder) StateMinerDeadlines(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerDeadlines", reflect.TypeOf((*MockFullNode)(nil).StateMinerDeadlines), arg0, arg1, arg2)
}

// StateMinerFaults mocks base method.
func (m *MockFullNode) StateMinerFaults(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerFaults", arg0, arg1, arg2)
	ret0, _ := ret[0].(bitfield.BitField)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerFaults indicates an expected call of StateMinerFaults.
func (mr *MockFullNodeMockRecorder) StateMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateMinerFaults), arg0, arg1, arg2)
}

// StateMinerInfo mocks base method.
func (m *MockFullNode) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (miner.MinerInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2)
	ret0, _ := ret[0].(miner.MinerInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerInfo indicates an expected call of StateMinerInfo.
func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockFullNode)(nil).StateMinerInfo), arg0, arg1, arg2)
}

// StateMinerInitialPledgeCollateral mocks base method.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(big.Int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral.
func (mr *MockFullNodeMockRecorder) StateMinerInitialPledgeCollateral(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInitialPledgeCollateral", reflect.TypeOf((*MockFullNode)(nil).StateMinerInitialPledgeCollateral), arg0, arg1, arg2, arg3)
}

// StateMinerPartitions mocks base method.
func (m *MockFullNode) StateMinerPartitions(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 types.TipSetKey) ([]api.Partition, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerPartitions", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].([]api.Partition)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerPartitions indicates an expected call of StateMinerPartitions.
func (mr *MockFullNodeMockRecorder) StateMinerPartitions(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPartitions", reflect.TypeOf((*MockFullNode)(nil).StateMinerPartitions), arg0, arg1, arg2, arg3)
}

// StateMinerPower mocks base method.
func (m *MockFullNode) StateMinerPower(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*api.MinerPower, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerPower", arg0, arg1, arg2)
	ret0, _ := ret[0].(*api.MinerPower)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerPower indicates an expected call of StateMinerPower.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPower), arg0, arg1, arg2)
}

// StateMinerPreCommitDepositForPower mocks base method.
func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(big.Int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerPreCommitDepositForPower indicates an expected call of StateMinerPreCommitDepositForPower.
func (mr *MockFullNodeMockRecorder) StateMinerPreCommitDepositForPower(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPreCommitDepositForPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPreCommitDepositForPower), arg0, arg1, arg2, arg3)
}

// StateMinerProvingDeadline mocks base method.
func (m *MockFullNode) StateMinerProvingDeadline(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*dline.Info, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerProvingDeadline", arg0, arg1, arg2)
	ret0, _ := ret[0].(*dline.Info)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerProvingDeadline indicates an expected call of StateMinerProvingDeadline.
func (mr *MockFullNodeMockRecorder) StateMinerProvingDeadline(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerProvingDeadline", reflect.TypeOf((*MockFullNode)(nil).StateMinerProvingDeadline), arg0, arg1, arg2)
}

// StateMinerRecoveries mocks base method.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (m *MockFullNode) StateMinerRecoveries(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerRecoveries", arg0, arg1, arg2)
	ret0, _ := ret[0].(bitfield.BitField)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerRecoveries indicates an expected call of StateMinerRecoveries.
func (mr *MockFullNodeMockRecorder) StateMinerRecoveries(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerRecoveries", reflect.TypeOf((*MockFullNode)(nil).StateMinerRecoveries), arg0, arg1, arg2)
}

// StateMinerSectorAllocated mocks base method.
func (m *MockFullNode) StateMinerSectorAllocated(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (bool, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerSectorAllocated", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(bool)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerSectorAllocated indicates an expected call of StateMinerSectorAllocated.
func (mr *MockFullNodeMockRecorder) StateMinerSectorAllocated(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorAllocated", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorAllocated), arg0, arg1, arg2, arg3)
}

// StateMinerSectorCount mocks base method.
func (m *MockFullNode) StateMinerSectorCount(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MinerSectors, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerSectorCount", arg0, arg1, arg2)
	ret0, _ := ret[0].(api.MinerSectors)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerSectorCount indicates an expected call of StateMinerSectorCount.
// NOTE(review): gomock-generated boilerplate (MockGen pattern) — regenerate from the source interface rather than hand-editing.
func (mr *MockFullNodeMockRecorder) StateMinerSectorCount(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorCount", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorCount), arg0, arg1, arg2)
}

// StateMinerSectors mocks base method.
func (m *MockFullNode) StateMinerSectors(arg0 context.Context, arg1 address.Address, arg2 *bitfield.BitField, arg3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerSectors", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].([]*miner.SectorOnChainInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateMinerSectors indicates an expected call of StateMinerSectors.
func (mr *MockFullNodeMockRecorder) StateMinerSectors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectors), arg0, arg1, arg2, arg3)
}

// StateNetworkName mocks base method.
func (m *MockFullNode) StateNetworkName(arg0 context.Context) (dtypes.NetworkName, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateNetworkName", arg0)
	ret0, _ := ret[0].(dtypes.NetworkName)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StateNetworkName indicates an expected call of StateNetworkName.
func (mr *MockFullNodeMockRecorder) StateNetworkName(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkName", reflect.TypeOf((*MockFullNode)(nil).StateNetworkName), arg0)
}

// StateNetworkVersion mocks base method.
+func (m *MockFullNode) StateNetworkVersion(arg0 context.Context, arg1 types.TipSetKey) (network.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateNetworkVersion", arg0, arg1) + ret0, _ := ret[0].(network.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateNetworkVersion indicates an expected call of StateNetworkVersion. +func (mr *MockFullNodeMockRecorder) StateNetworkVersion(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkVersion", reflect.TypeOf((*MockFullNode)(nil).StateNetworkVersion), arg0, arg1) +} + +// StateReadState mocks base method. +func (m *MockFullNode) StateReadState(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*api.ActorState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateReadState", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.ActorState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateReadState indicates an expected call of StateReadState. +func (mr *MockFullNodeMockRecorder) StateReadState(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReadState", reflect.TypeOf((*MockFullNode)(nil).StateReadState), arg0, arg1, arg2) +} + +// StateReplay mocks base method. +func (m *MockFullNode) StateReplay(arg0 context.Context, arg1 types.TipSetKey, arg2 cid.Cid) (*api.InvocResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateReplay", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.InvocResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateReplay indicates an expected call of StateReplay. 
+func (mr *MockFullNodeMockRecorder) StateReplay(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReplay", reflect.TypeOf((*MockFullNode)(nil).StateReplay), arg0, arg1, arg2) +} + +// StateSearchMsg mocks base method. +func (m *MockFullNode) StateSearchMsg(arg0 context.Context, arg1 types.TipSetKey, arg2 cid.Cid, arg3 abi.ChainEpoch, arg4 bool) (*api.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSearchMsg", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*api.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSearchMsg indicates an expected call of StateSearchMsg. +func (mr *MockFullNodeMockRecorder) StateSearchMsg(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsg", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsg), arg0, arg1, arg2, arg3, arg4) +} + +// StateSectorExpiration mocks base method. +func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorExpiration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorExpiration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorExpiration indicates an expected call of StateSectorExpiration. +func (mr *MockFullNodeMockRecorder) StateSectorExpiration(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorExpiration", reflect.TypeOf((*MockFullNode)(nil).StateSectorExpiration), arg0, arg1, arg2, arg3) +} + +// StateSectorGetInfo mocks base method. 
+func (m *MockFullNode) StateSectorGetInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorGetInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorGetInfo indicates an expected call of StateSectorGetInfo. +func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorGetInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorGetInfo), arg0, arg1, arg2, arg3) +} + +// StateSectorPartition mocks base method. +func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorLocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorLocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorPartition indicates an expected call of StateSectorPartition. +func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPartition", reflect.TypeOf((*MockFullNode)(nil).StateSectorPartition), arg0, arg1, arg2, arg3) +} + +// StateSectorPreCommitInfo mocks base method. 
+func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(miner.SectorPreCommitOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo. +func (mr *MockFullNodeMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3) +} + +// StateVMCirculatingSupplyInternal mocks base method. +func (m *MockFullNode) StateVMCirculatingSupplyInternal(arg0 context.Context, arg1 types.TipSetKey) (api.CirculatingSupply, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVMCirculatingSupplyInternal", arg0, arg1) + ret0, _ := ret[0].(api.CirculatingSupply) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVMCirculatingSupplyInternal indicates an expected call of StateVMCirculatingSupplyInternal. +func (mr *MockFullNodeMockRecorder) StateVMCirculatingSupplyInternal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVMCirculatingSupplyInternal", reflect.TypeOf((*MockFullNode)(nil).StateVMCirculatingSupplyInternal), arg0, arg1) +} + +// StateVerifiedClientStatus mocks base method. 
+func (m *MockFullNode) StateVerifiedClientStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifiedClientStatus", arg0, arg1, arg2) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifiedClientStatus indicates an expected call of StateVerifiedClientStatus. +func (mr *MockFullNodeMockRecorder) StateVerifiedClientStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedClientStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedClientStatus), arg0, arg1, arg2) +} + +// StateVerifiedRegistryRootKey mocks base method. +func (m *MockFullNode) StateVerifiedRegistryRootKey(arg0 context.Context, arg1 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifiedRegistryRootKey", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifiedRegistryRootKey indicates an expected call of StateVerifiedRegistryRootKey. +func (mr *MockFullNodeMockRecorder) StateVerifiedRegistryRootKey(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedRegistryRootKey", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedRegistryRootKey), arg0, arg1) +} + +// StateVerifierStatus mocks base method. +func (m *MockFullNode) StateVerifierStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifierStatus", arg0, arg1, arg2) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifierStatus indicates an expected call of StateVerifierStatus. 
+func (mr *MockFullNodeMockRecorder) StateVerifierStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifierStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifierStatus), arg0, arg1, arg2) +} + +// StateWaitMsg mocks base method. +func (m *MockFullNode) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch, arg4 bool) (*api.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateWaitMsg", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*api.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateWaitMsg indicates an expected call of StateWaitMsg. +func (mr *MockFullNodeMockRecorder) StateWaitMsg(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2, arg3, arg4) +} + +// SyncCheckBad mocks base method. +func (m *MockFullNode) SyncCheckBad(arg0 context.Context, arg1 cid.Cid) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncCheckBad", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncCheckBad indicates an expected call of SyncCheckBad. +func (mr *MockFullNodeMockRecorder) SyncCheckBad(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCheckBad", reflect.TypeOf((*MockFullNode)(nil).SyncCheckBad), arg0, arg1) +} + +// SyncCheckpoint mocks base method. +func (m *MockFullNode) SyncCheckpoint(arg0 context.Context, arg1 types.TipSetKey) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncCheckpoint", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncCheckpoint indicates an expected call of SyncCheckpoint. 
+func (mr *MockFullNodeMockRecorder) SyncCheckpoint(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCheckpoint", reflect.TypeOf((*MockFullNode)(nil).SyncCheckpoint), arg0, arg1) +} + +// SyncIncomingBlocks mocks base method. +func (m *MockFullNode) SyncIncomingBlocks(arg0 context.Context) (<-chan *types.BlockHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncIncomingBlocks", arg0) + ret0, _ := ret[0].(<-chan *types.BlockHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncIncomingBlocks indicates an expected call of SyncIncomingBlocks. +func (mr *MockFullNodeMockRecorder) SyncIncomingBlocks(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncIncomingBlocks", reflect.TypeOf((*MockFullNode)(nil).SyncIncomingBlocks), arg0) +} + +// SyncMarkBad mocks base method. +func (m *MockFullNode) SyncMarkBad(arg0 context.Context, arg1 cid.Cid) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncMarkBad", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncMarkBad indicates an expected call of SyncMarkBad. +func (mr *MockFullNodeMockRecorder) SyncMarkBad(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncMarkBad", reflect.TypeOf((*MockFullNode)(nil).SyncMarkBad), arg0, arg1) +} + +// SyncState mocks base method. +func (m *MockFullNode) SyncState(arg0 context.Context) (*api.SyncState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncState", arg0) + ret0, _ := ret[0].(*api.SyncState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncState indicates an expected call of SyncState. 
+func (mr *MockFullNodeMockRecorder) SyncState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncState", reflect.TypeOf((*MockFullNode)(nil).SyncState), arg0) +} + +// SyncSubmitBlock mocks base method. +func (m *MockFullNode) SyncSubmitBlock(arg0 context.Context, arg1 *types.BlockMsg) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncSubmitBlock", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncSubmitBlock indicates an expected call of SyncSubmitBlock. +func (mr *MockFullNodeMockRecorder) SyncSubmitBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncSubmitBlock", reflect.TypeOf((*MockFullNode)(nil).SyncSubmitBlock), arg0, arg1) +} + +// SyncUnmarkAllBad mocks base method. +func (m *MockFullNode) SyncUnmarkAllBad(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncUnmarkAllBad", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncUnmarkAllBad indicates an expected call of SyncUnmarkAllBad. +func (mr *MockFullNodeMockRecorder) SyncUnmarkAllBad(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUnmarkAllBad", reflect.TypeOf((*MockFullNode)(nil).SyncUnmarkAllBad), arg0) +} + +// SyncUnmarkBad mocks base method. +func (m *MockFullNode) SyncUnmarkBad(arg0 context.Context, arg1 cid.Cid) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncUnmarkBad", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncUnmarkBad indicates an expected call of SyncUnmarkBad. +func (mr *MockFullNodeMockRecorder) SyncUnmarkBad(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUnmarkBad", reflect.TypeOf((*MockFullNode)(nil).SyncUnmarkBad), arg0, arg1) +} + +// SyncValidateTipset mocks base method. 
+func (m *MockFullNode) SyncValidateTipset(arg0 context.Context, arg1 types.TipSetKey) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncValidateTipset", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncValidateTipset indicates an expected call of SyncValidateTipset. +func (mr *MockFullNodeMockRecorder) SyncValidateTipset(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncValidateTipset", reflect.TypeOf((*MockFullNode)(nil).SyncValidateTipset), arg0, arg1) +} + +// Version mocks base method. +func (m *MockFullNode) Version(arg0 context.Context) (api.APIVersion, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0) + ret0, _ := ret[0].(api.APIVersion) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. +func (mr *MockFullNodeMockRecorder) Version(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockFullNode)(nil).Version), arg0) +} + +// WalletBalance mocks base method. +func (m *MockFullNode) WalletBalance(arg0 context.Context, arg1 address.Address) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletBalance", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletBalance indicates an expected call of WalletBalance. +func (mr *MockFullNodeMockRecorder) WalletBalance(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletBalance", reflect.TypeOf((*MockFullNode)(nil).WalletBalance), arg0, arg1) +} + +// WalletDefaultAddress mocks base method. 
+func (m *MockFullNode) WalletDefaultAddress(arg0 context.Context) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletDefaultAddress", arg0) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletDefaultAddress indicates an expected call of WalletDefaultAddress. +func (mr *MockFullNodeMockRecorder) WalletDefaultAddress(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDefaultAddress", reflect.TypeOf((*MockFullNode)(nil).WalletDefaultAddress), arg0) +} + +// WalletDelete mocks base method. +func (m *MockFullNode) WalletDelete(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletDelete", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WalletDelete indicates an expected call of WalletDelete. +func (mr *MockFullNodeMockRecorder) WalletDelete(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDelete", reflect.TypeOf((*MockFullNode)(nil).WalletDelete), arg0, arg1) +} + +// WalletExport mocks base method. +func (m *MockFullNode) WalletExport(arg0 context.Context, arg1 address.Address) (*types.KeyInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletExport", arg0, arg1) + ret0, _ := ret[0].(*types.KeyInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletExport indicates an expected call of WalletExport. +func (mr *MockFullNodeMockRecorder) WalletExport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletExport", reflect.TypeOf((*MockFullNode)(nil).WalletExport), arg0, arg1) +} + +// WalletHas mocks base method. 
+func (m *MockFullNode) WalletHas(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletHas", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletHas indicates an expected call of WalletHas. +func (mr *MockFullNodeMockRecorder) WalletHas(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletHas", reflect.TypeOf((*MockFullNode)(nil).WalletHas), arg0, arg1) +} + +// WalletImport mocks base method. +func (m *MockFullNode) WalletImport(arg0 context.Context, arg1 *types.KeyInfo) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletImport", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletImport indicates an expected call of WalletImport. +func (mr *MockFullNodeMockRecorder) WalletImport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletImport", reflect.TypeOf((*MockFullNode)(nil).WalletImport), arg0, arg1) +} + +// WalletList mocks base method. +func (m *MockFullNode) WalletList(arg0 context.Context) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletList", arg0) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletList indicates an expected call of WalletList. +func (mr *MockFullNodeMockRecorder) WalletList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletList", reflect.TypeOf((*MockFullNode)(nil).WalletList), arg0) +} + +// WalletNew mocks base method. 
+func (m *MockFullNode) WalletNew(arg0 context.Context, arg1 types.KeyType) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletNew", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletNew indicates an expected call of WalletNew. +func (mr *MockFullNodeMockRecorder) WalletNew(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletNew", reflect.TypeOf((*MockFullNode)(nil).WalletNew), arg0, arg1) +} + +// WalletSetDefault mocks base method. +func (m *MockFullNode) WalletSetDefault(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSetDefault", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WalletSetDefault indicates an expected call of WalletSetDefault. +func (mr *MockFullNodeMockRecorder) WalletSetDefault(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSetDefault", reflect.TypeOf((*MockFullNode)(nil).WalletSetDefault), arg0, arg1) +} + +// WalletSign mocks base method. +func (m *MockFullNode) WalletSign(arg0 context.Context, arg1 address.Address, arg2 []byte) (*crypto.Signature, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSign", arg0, arg1, arg2) + ret0, _ := ret[0].(*crypto.Signature) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletSign indicates an expected call of WalletSign. +func (mr *MockFullNodeMockRecorder) WalletSign(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSign", reflect.TypeOf((*MockFullNode)(nil).WalletSign), arg0, arg1, arg2) +} + +// WalletSignMessage mocks base method. 
+func (m *MockFullNode) WalletSignMessage(arg0 context.Context, arg1 address.Address, arg2 *types.Message) (*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSignMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletSignMessage indicates an expected call of WalletSignMessage. +func (mr *MockFullNodeMockRecorder) WalletSignMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSignMessage", reflect.TypeOf((*MockFullNode)(nil).WalletSignMessage), arg0, arg1, arg2) +} + +// WalletValidateAddress mocks base method. +func (m *MockFullNode) WalletValidateAddress(arg0 context.Context, arg1 string) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletValidateAddress", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletValidateAddress indicates an expected call of WalletValidateAddress. +func (mr *MockFullNodeMockRecorder) WalletValidateAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletValidateAddress", reflect.TypeOf((*MockFullNode)(nil).WalletValidateAddress), arg0, arg1) +} + +// WalletVerify mocks base method. +func (m *MockFullNode) WalletVerify(arg0 context.Context, arg1 address.Address, arg2 []byte, arg3 *crypto.Signature) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletVerify", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletVerify indicates an expected call of WalletVerify. 
+func (mr *MockFullNodeMockRecorder) WalletVerify(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletVerify", reflect.TypeOf((*MockFullNode)(nil).WalletVerify), arg0, arg1, arg2, arg3) +} diff --git a/api/permissioned.go b/api/permissioned.go new file mode 100644 index 00000000000..72d2239ee3c --- /dev/null +++ b/api/permissioned.go @@ -0,0 +1,48 @@ +package api + +import ( + "github.com/filecoin-project/go-jsonrpc/auth" +) + +const ( + // When changing these, update docs/API.md too + + PermRead auth.Permission = "read" // default + PermWrite auth.Permission = "write" + PermSign auth.Permission = "sign" // Use wallet keys for signing + PermAdmin auth.Permission = "admin" // Manage permissions +) + +var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin} +var DefaultPerms = []auth.Permission{PermRead} + +func permissionedProxies(in, out interface{}) { + outs := GetInternalStructs(out) + for _, o := range outs { + auth.PermissionedProxy(AllPermissions, DefaultPerms, in, o) + } +} + +func PermissionedStorMinerAPI(a StorageMiner) StorageMiner { + var out StorageMinerStruct + permissionedProxies(a, &out) + return &out +} + +func PermissionedFullAPI(a FullNode) FullNode { + var out FullNodeStruct + permissionedProxies(a, &out) + return &out +} + +func PermissionedWorkerAPI(a Worker) Worker { + var out WorkerStruct + permissionedProxies(a, &out) + return &out +} + +func PermissionedWalletAPI(a Wallet) Wallet { + var out WalletStruct + permissionedProxies(a, &out) + return &out +} diff --git a/api/proxy_gen.go b/api/proxy_gen.go new file mode 100644 index 00000000000..fb645eb4800 --- /dev/null +++ b/api/proxy_gen.go @@ -0,0 +1,3763 @@ +// Code generated by github.com/filecoin-project/lotus/gen/api. DO NOT EDIT. 
+ +package api + +import ( + "context" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/filecoin-project/go-multistore" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + apitypes "github.com/filecoin-project/lotus/api/types" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/paych" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + marketevents "github.com/filecoin-project/lotus/markets/loggers" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/specs-storage/storage" + "github.com/google/uuid" + "github.com/ipfs/go-cid" + metrics "github.com/libp2p/go-libp2p-core/metrics" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" + xerrors "golang.org/x/xerrors" +) + +type ChainIOStruct struct { + Internal struct { + ChainHasObj func(p0 context.Context, p1 cid.Cid) (bool, error) `` + + ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) `` + } +} + +type ChainIOStub struct { +} + +type CommonStruct struct { + 
Internal struct { + AuthNew func(p0 context.Context, p1 []auth.Permission) ([]byte, error) `perm:"admin"` + + AuthVerify func(p0 context.Context, p1 string) ([]auth.Permission, error) `perm:"read"` + + Closing func(p0 context.Context) (<-chan struct{}, error) `perm:"read"` + + Discover func(p0 context.Context) (apitypes.OpenRPCDocument, error) `perm:"read"` + + LogList func(p0 context.Context) ([]string, error) `perm:"write"` + + LogSetLevel func(p0 context.Context, p1 string, p2 string) error `perm:"write"` + + Session func(p0 context.Context) (uuid.UUID, error) `perm:"read"` + + Shutdown func(p0 context.Context) error `perm:"admin"` + + Version func(p0 context.Context) (APIVersion, error) `perm:"read"` + } +} + +type CommonStub struct { +} + +type CommonNetStruct struct { + CommonStruct + + NetStruct + + Internal struct { + } +} + +type CommonNetStub struct { + CommonStub + + NetStub +} + +type FullNodeStruct struct { + CommonStruct + + NetStruct + + Internal struct { + BeaconGetEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"` + + ChainDeleteObj func(p0 context.Context, p1 cid.Cid) error `perm:"admin"` + + ChainExport func(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) `perm:"read"` + + ChainGetBlock func(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) `perm:"read"` + + ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) `perm:"read"` + + ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) `perm:"read"` + + ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `perm:"read"` + + ChainGetMessagesInTipset func(p0 context.Context, p1 types.TipSetKey) ([]Message, error) `perm:"read"` + + ChainGetNode func(p0 context.Context, p1 string) (*IpldObject, error) `perm:"read"` + + ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]Message, error) `perm:"read"` + + ChainGetParentReceipts func(p0 
context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"` + + ChainGetPath func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) `perm:"read"` + + ChainGetRandomnessFromBeacon func(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) `perm:"read"` + + ChainGetRandomnessFromTickets func(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) `perm:"read"` + + ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) `perm:"read"` + + ChainGetTipSetByHeight func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) `perm:"read"` + + ChainHasObj func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"read"` + + ChainHead func(p0 context.Context) (*types.TipSet, error) `perm:"read"` + + ChainNotify func(p0 context.Context) (<-chan []*HeadChange, error) `perm:"read"` + + ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) `perm:"read"` + + ChainSetHead func(p0 context.Context, p1 types.TipSetKey) error `perm:"admin"` + + ChainStatObj func(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (ObjStat, error) `perm:"read"` + + ChainTipSetWeight func(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + ClientCalcCommP func(p0 context.Context, p1 string) (*CommPRet, error) `perm:"write"` + + ClientCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` + + ClientCancelRetrievalDeal func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"write"` + + ClientDataTransferUpdates func(p0 context.Context) (<-chan DataTransferChannel, error) `perm:"write"` + + ClientDealPieceCID func(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) `perm:"read"` + + ClientDealSize func(p0 context.Context, p1 cid.Cid) (DataSize, error) `perm:"read"` + + 
ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) `perm:"read"` + + ClientGenCar func(p0 context.Context, p1 FileRef, p2 string) error `perm:"write"` + + ClientGetDealInfo func(p0 context.Context, p1 cid.Cid) (*DealInfo, error) `perm:"read"` + + ClientGetDealStatus func(p0 context.Context, p1 uint64) (string, error) `perm:"read"` + + ClientGetDealUpdates func(p0 context.Context) (<-chan DealInfo, error) `perm:"write"` + + ClientGetRetrievalUpdates func(p0 context.Context) (<-chan RetrievalInfo, error) `perm:"write"` + + ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"` + + ClientImport func(p0 context.Context, p1 FileRef) (*ImportRes, error) `perm:"admin"` + + ClientListDataTransfers func(p0 context.Context) ([]DataTransferChannel, error) `perm:"write"` + + ClientListDeals func(p0 context.Context) ([]DealInfo, error) `perm:"write"` + + ClientListImports func(p0 context.Context) ([]Import, error) `perm:"write"` + + ClientListRetrievals func(p0 context.Context) ([]RetrievalInfo, error) `perm:"write"` + + ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) `perm:"read"` + + ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"` + + ClientRemoveImport func(p0 context.Context, p1 multistore.StoreID) error `perm:"admin"` + + ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` + + ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error `perm:"admin"` + + ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"` + + ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` + + ClientStartDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) 
`perm:"admin"` + + ClientStatelessDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"write"` + + CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` + + GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + GasEstimateGasLimit func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) `perm:"read"` + + GasEstimateGasPremium func(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `perm:"read"` + + MarketAddBalance func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"` + + MarketGetReserved func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"sign"` + + MarketReleaseFunds func(p0 context.Context, p1 address.Address, p2 types.BigInt) error `perm:"sign"` + + MarketReserveFunds func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"` + + MarketWithdraw func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"` + + MinerCreateBlock func(p0 context.Context, p1 *BlockTemplate) (*types.BlockMsg, error) `perm:"write"` + + MinerGetBaseInfo func(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) `perm:"read"` + + MpoolBatchPush func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"` + + MpoolBatchPushMessage func(p0 context.Context, p1 []*types.Message, p2 *MessageSendSpec) ([]*types.SignedMessage, error) `perm:"sign"` + + MpoolBatchPushUntrusted func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"` + + MpoolCheckMessages func(p0 
context.Context, p1 []*MessagePrototype) ([][]MessageCheckStatus, error) `perm:"read"` + + MpoolCheckPendingMessages func(p0 context.Context, p1 address.Address) ([][]MessageCheckStatus, error) `perm:"read"` + + MpoolCheckReplaceMessages func(p0 context.Context, p1 []*types.Message) ([][]MessageCheckStatus, error) `perm:"read"` + + MpoolClear func(p0 context.Context, p1 bool) error `perm:"write"` + + MpoolGetConfig func(p0 context.Context) (*types.MpoolConfig, error) `perm:"read"` + + MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"read"` + + MpoolPending func(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"` + + MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `perm:"write"` + + MpoolPushMessage func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec) (*types.SignedMessage, error) `perm:"sign"` + + MpoolPushUntrusted func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `perm:"write"` + + MpoolSelect func(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) `perm:"read"` + + MpoolSetConfig func(p0 context.Context, p1 *types.MpoolConfig) error `perm:"admin"` + + MpoolSub func(p0 context.Context) (<-chan MpoolUpdate, error) `perm:"read"` + + MsigAddApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*MessagePrototype, error) `perm:"sign"` + + MsigAddCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (*MessagePrototype, error) `perm:"sign"` + + MsigAddPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) `perm:"sign"` + + MsigApprove func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) `perm:"sign"` + + MsigApproveTxnHash func(p0 context.Context, p1 
address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) `perm:"sign"` + + MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) `perm:"sign"` + + MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) `perm:"sign"` + + MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + MsigGetPending func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) `perm:"read"` + + MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + MsigGetVestingSchedule func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) `perm:"read"` + + MsigPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (*MessagePrototype, error) `perm:"sign"` + + MsigRemoveSigner func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) `perm:"sign"` + + MsigSwapApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*MessagePrototype, error) `perm:"sign"` + + MsigSwapCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*MessagePrototype, error) `perm:"sign"` + + MsigSwapPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*MessagePrototype, error) `perm:"sign"` + + NodeStatus func(p0 context.Context, p1 bool) 
(NodeStatus, error) `perm:"read"` + + PaychAllocateLane func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"sign"` + + PaychAvailableFunds func(p0 context.Context, p1 address.Address) (*ChannelAvailableFunds, error) `perm:"sign"` + + PaychAvailableFundsByFromTo func(p0 context.Context, p1 address.Address, p2 address.Address) (*ChannelAvailableFunds, error) `perm:"sign"` + + PaychCollect func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"` + + PaychGet func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) `perm:"sign"` + + PaychGetWaitReady func(p0 context.Context, p1 cid.Cid) (address.Address, error) `perm:"sign"` + + PaychList func(p0 context.Context) ([]address.Address, error) `perm:"read"` + + PaychNewPayment func(p0 context.Context, p1 address.Address, p2 address.Address, p3 []VoucherSpec) (*PaymentInfo, error) `perm:"sign"` + + PaychSettle func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"` + + PaychStatus func(p0 context.Context, p1 address.Address) (*PaychStatus, error) `perm:"read"` + + PaychVoucherAdd func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) `perm:"write"` + + PaychVoucherCheckSpendable func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) `perm:"read"` + + PaychVoucherCheckValid func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error `perm:"read"` + + PaychVoucherCreate func(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*VoucherCreateResult, error) `perm:"sign"` + + PaychVoucherList func(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) `perm:"write"` + + PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"` + + StateAccountKey func(p0 context.Context, p1 
address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"` + + StateAllMinerFaults func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*Fault, error) `perm:"read"` + + StateCall func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*InvocResult, error) `perm:"read"` + + StateChangedActors func(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) `perm:"read"` + + StateCirculatingSupply func(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) `perm:"read"` + + StateCompute func(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*ComputeStateOutput, error) `perm:"read"` + + StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) `perm:"read"` + + StateDecodeParams func(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) `perm:"read"` + + StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"` + + StateListActors func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `perm:"read"` + + StateListMessages func(p0 context.Context, p1 *MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"` + + StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `perm:"read"` + + StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"` + + StateMarketBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) `perm:"read"` + + StateMarketDeals func(p0 context.Context, p1 types.TipSetKey) (map[string]MarketDeal, error) `perm:"read"` + + StateMarketParticipants func(p0 context.Context, p1 types.TipSetKey) (map[string]MarketBalance, error) `perm:"read"` + + StateMarketStorageDeal func(p0 context.Context, p1 
abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) `perm:"read"` + + StateMinerActiveSectors func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"` + + StateMinerAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + StateMinerDeadlines func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]Deadline, error) `perm:"read"` + + StateMinerFaults func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) `perm:"read"` + + StateMinerInfo func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) `perm:"read"` + + StateMinerInitialPledgeCollateral func(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + StateMinerPartitions func(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]Partition, error) `perm:"read"` + + StateMinerPower func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) `perm:"read"` + + StateMinerPreCommitDepositForPower func(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) `perm:"read"` + + StateMinerRecoveries func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) `perm:"read"` + + StateMinerSectorAllocated func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) `perm:"read"` + + StateMinerSectorCount func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerSectors, error) `perm:"read"` + + StateMinerSectors func(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) 
`perm:"read"` + + StateNetworkName func(p0 context.Context) (dtypes.NetworkName, error) `perm:"read"` + + StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) `perm:"read"` + + StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) `perm:"read"` + + StateReplay func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*InvocResult, error) `perm:"read"` + + StateSearchMsg func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `perm:"read"` + + StateSectorExpiration func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) `perm:"read"` + + StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"` + + StateSectorPartition func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) `perm:"read"` + + StateSectorPreCommitInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"` + + StateVMCirculatingSupplyInternal func(p0 context.Context, p1 types.TipSetKey) (CirculatingSupply, error) `perm:"read"` + + StateVerifiedClientStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) `perm:"read"` + + StateVerifiedRegistryRootKey func(p0 context.Context, p1 types.TipSetKey) (address.Address, error) `perm:"read"` + + StateVerifierStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) `perm:"read"` + + StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `perm:"read"` + + SyncCheckBad func(p0 context.Context, p1 cid.Cid) (string, error) `perm:"read"` + + SyncCheckpoint func(p0 context.Context, p1 
types.TipSetKey) error `perm:"admin"` + + SyncIncomingBlocks func(p0 context.Context) (<-chan *types.BlockHeader, error) `perm:"read"` + + SyncMarkBad func(p0 context.Context, p1 cid.Cid) error `perm:"admin"` + + SyncState func(p0 context.Context) (*SyncState, error) `perm:"read"` + + SyncSubmitBlock func(p0 context.Context, p1 *types.BlockMsg) error `perm:"write"` + + SyncUnmarkAllBad func(p0 context.Context) error `perm:"admin"` + + SyncUnmarkBad func(p0 context.Context, p1 cid.Cid) error `perm:"admin"` + + SyncValidateTipset func(p0 context.Context, p1 types.TipSetKey) (bool, error) `perm:"read"` + + WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"read"` + + WalletDefaultAddress func(p0 context.Context) (address.Address, error) `perm:"write"` + + WalletDelete func(p0 context.Context, p1 address.Address) error `perm:"admin"` + + WalletExport func(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) `perm:"admin"` + + WalletHas func(p0 context.Context, p1 address.Address) (bool, error) `perm:"write"` + + WalletImport func(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) `perm:"admin"` + + WalletList func(p0 context.Context) ([]address.Address, error) `perm:"write"` + + WalletNew func(p0 context.Context, p1 types.KeyType) (address.Address, error) `perm:"write"` + + WalletSetDefault func(p0 context.Context, p1 address.Address) error `perm:"write"` + + WalletSign func(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) `perm:"sign"` + + WalletSignMessage func(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) `perm:"sign"` + + WalletValidateAddress func(p0 context.Context, p1 string) (address.Address, error) `perm:"read"` + + WalletVerify func(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) `perm:"read"` + } +} + +type FullNodeStub struct { + CommonStub + + NetStub +} + +type GatewayStruct struct { + 
Internal struct { + ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) `` + + ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `` + + ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) `` + + ChainGetTipSetByHeight func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) `` + + ChainHasObj func(p0 context.Context, p1 cid.Cid) (bool, error) `` + + ChainHead func(p0 context.Context) (*types.TipSet, error) `` + + ChainNotify func(p0 context.Context) (<-chan []*HeadChange, error) `` + + ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) `` + + GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `` + + MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `` + + MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `` + + MsigGetPending func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) `` + + MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) `` + + StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `` + + StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) `` + + StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `` + + StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `` + + StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `` + + StateMarketBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) `` + + StateMarketStorageDeal func(p0 
context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) `` + + StateMinerInfo func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) `` + + StateMinerPower func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) `` + + StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) `` + + StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) `` + + StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) `perm:"read"` + + StateSearchMsg func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `` + + StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) `` + + StateVerifiedClientStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) `` + + StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `` + + Version func(p0 context.Context) (APIVersion, error) `` + + WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) `` + } +} + +type GatewayStub struct { +} + +type NetStruct struct { + Internal struct { + ID func(p0 context.Context) (peer.ID, error) `perm:"read"` + + NetAddrsListen func(p0 context.Context) (peer.AddrInfo, error) `perm:"read"` + + NetAgentVersion func(p0 context.Context, p1 peer.ID) (string, error) `perm:"read"` + + NetAutoNatStatus func(p0 context.Context) (NatInfo, error) `perm:"read"` + + NetBandwidthStats func(p0 context.Context) (metrics.Stats, error) `perm:"read"` + + NetBandwidthStatsByPeer func(p0 context.Context) (map[string]metrics.Stats, error) `perm:"read"` + + NetBandwidthStatsByProtocol func(p0 context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"` + + 
NetBlockAdd func(p0 context.Context, p1 NetBlockList) error `perm:"admin"` + + NetBlockList func(p0 context.Context) (NetBlockList, error) `perm:"read"` + + NetBlockRemove func(p0 context.Context, p1 NetBlockList) error `perm:"admin"` + + NetConnect func(p0 context.Context, p1 peer.AddrInfo) error `perm:"write"` + + NetConnectedness func(p0 context.Context, p1 peer.ID) (network.Connectedness, error) `perm:"read"` + + NetDisconnect func(p0 context.Context, p1 peer.ID) error `perm:"write"` + + NetFindPeer func(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) `perm:"read"` + + NetPeerInfo func(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) `perm:"read"` + + NetPeers func(p0 context.Context) ([]peer.AddrInfo, error) `perm:"read"` + + NetPubsubScores func(p0 context.Context) ([]PubsubScore, error) `perm:"read"` + } +} + +type NetStub struct { +} + +type SignableStruct struct { + Internal struct { + Sign func(p0 context.Context, p1 SignFunc) error `` + } +} + +type SignableStub struct { +} + +type StorageMinerStruct struct { + CommonStruct + + NetStruct + + Internal struct { + ActorAddress func(p0 context.Context) (address.Address, error) `perm:"read"` + + ActorAddressConfig func(p0 context.Context) (AddressConfig, error) `perm:"read"` + + ActorSectorSize func(p0 context.Context, p1 address.Address) (abi.SectorSize, error) `perm:"read"` + + CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"` + + ComputeProof func(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) `perm:"read"` + + CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` + + DealsConsiderOfflineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"` + + DealsConsiderOfflineStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"` + + DealsConsiderOnlineRetrievalDeals func(p0 context.Context) (bool, error) 
`perm:"admin"` + + DealsConsiderOnlineStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"` + + DealsConsiderUnverifiedStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"` + + DealsConsiderVerifiedStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"` + + DealsImportData func(p0 context.Context, p1 cid.Cid, p2 string) error `perm:"admin"` + + DealsList func(p0 context.Context) ([]MarketDeal, error) `perm:"admin"` + + DealsPieceCidBlocklist func(p0 context.Context) ([]cid.Cid, error) `perm:"admin"` + + DealsSetConsiderOfflineRetrievalDeals func(p0 context.Context, p1 bool) error `perm:"admin"` + + DealsSetConsiderOfflineStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"` + + DealsSetConsiderOnlineRetrievalDeals func(p0 context.Context, p1 bool) error `perm:"admin"` + + DealsSetConsiderOnlineStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"` + + DealsSetConsiderUnverifiedStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"` + + DealsSetConsiderVerifiedStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"` + + DealsSetPieceCidBlocklist func(p0 context.Context, p1 []cid.Cid) error `perm:"admin"` + + MarketCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` + + MarketDataTransferUpdates func(p0 context.Context) (<-chan DataTransferChannel, error) `perm:"write"` + + MarketGetAsk func(p0 context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"` + + MarketGetDealUpdates func(p0 context.Context) (<-chan storagemarket.MinerDeal, error) `perm:"read"` + + MarketGetRetrievalAsk func(p0 context.Context) (*retrievalmarket.Ask, error) `perm:"read"` + + MarketImportDealData func(p0 context.Context, p1 cid.Cid, p2 string) error `perm:"write"` + + MarketListDataTransfers func(p0 context.Context) ([]DataTransferChannel, error) `perm:"write"` + + MarketListDeals func(p0 context.Context) ([]MarketDeal, error) 
`perm:"read"` + + MarketListIncompleteDeals func(p0 context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"` + + MarketListRetrievalDeals func(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) `perm:"read"` + + MarketPendingDeals func(p0 context.Context) (PendingDealInfo, error) `perm:"write"` + + MarketPublishPendingDeals func(p0 context.Context) error `perm:"admin"` + + MarketRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` + + MarketSetAsk func(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error `perm:"admin"` + + MarketSetRetrievalAsk func(p0 context.Context, p1 *retrievalmarket.Ask) error `perm:"admin"` + + MiningBase func(p0 context.Context) (*types.TipSet, error) `perm:"read"` + + PiecesGetCIDInfo func(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) `perm:"read"` + + PiecesGetPieceInfo func(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) `perm:"read"` + + PiecesListCidInfos func(p0 context.Context) ([]cid.Cid, error) `perm:"read"` + + PiecesListPieces func(p0 context.Context) ([]cid.Cid, error) `perm:"read"` + + PledgeSector func(p0 context.Context) (abi.SectorID, error) `perm:"write"` + + ReturnAddPiece func(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error `perm:"admin"` + + ReturnFetch func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + + ReturnFinalizeSector func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + + ReturnMoveStorage func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + + ReturnReadPiece func(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error `perm:"admin"` + + ReturnReleaseUnsealed func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error 
`perm:"admin"` + + ReturnSealCommit1 func(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error `perm:"admin"` + + ReturnSealCommit2 func(p0 context.Context, p1 storiface.CallID, p2 storage.Proof, p3 *storiface.CallError) error `perm:"admin"` + + ReturnSealPreCommit1 func(p0 context.Context, p1 storiface.CallID, p2 storage.PreCommit1Out, p3 *storiface.CallError) error `perm:"admin"` + + ReturnSealPreCommit2 func(p0 context.Context, p1 storiface.CallID, p2 storage.SectorCids, p3 *storiface.CallError) error `perm:"admin"` + + ReturnUnsealPiece func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + + SealingAbort func(p0 context.Context, p1 storiface.CallID) error `perm:"admin"` + + SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"` + + SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"` + + SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"` + + SectorCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"` + + SectorGetExpectedSealDuration func(p0 context.Context) (time.Duration, error) `perm:"read"` + + SectorGetSealDelay func(p0 context.Context) (time.Duration, error) `perm:"read"` + + SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` + + SectorPreCommitFlush func(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) `perm:"admin"` + + SectorPreCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"` + + SectorRemove func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` + + SectorSetExpectedSealDuration func(p0 context.Context, p1 time.Duration) error `perm:"write"` + + SectorSetSealDelay func(p0 context.Context, p1 time.Duration) error `perm:"write"` + + SectorStartSealing func(p0 context.Context, p1 abi.SectorNumber) 
error `perm:"write"` + + SectorTerminate func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` + + SectorTerminateFlush func(p0 context.Context) (*cid.Cid, error) `perm:"admin"` + + SectorTerminatePending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"` + + SectorsList func(p0 context.Context) ([]abi.SectorNumber, error) `perm:"read"` + + SectorsListInStates func(p0 context.Context, p1 []SectorState) ([]abi.SectorNumber, error) `perm:"read"` + + SectorsRefs func(p0 context.Context) (map[string][]SealedRef, error) `perm:"read"` + + SectorsStatus func(p0 context.Context, p1 abi.SectorNumber, p2 bool) (SectorInfo, error) `perm:"read"` + + SectorsSummary func(p0 context.Context) (map[SectorState]int, error) `perm:"read"` + + SectorsUnsealPiece func(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error `perm:"admin"` + + SectorsUpdate func(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error `perm:"admin"` + + StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"` + + StorageAttach func(p0 context.Context, p1 stores.StorageInfo, p2 fsutil.FsStat) error `perm:"admin"` + + StorageBestAlloc func(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]stores.StorageInfo, error) `perm:"admin"` + + StorageDeclareSector func(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error `perm:"admin"` + + StorageDropSector func(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error `perm:"admin"` + + StorageFindSector func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) `perm:"admin"` + + StorageInfo func(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) `perm:"admin"` + + StorageList func(p0 context.Context) (map[stores.ID][]stores.Decl, 
error) `perm:"admin"` + + StorageLocal func(p0 context.Context) (map[stores.ID]string, error) `perm:"admin"` + + StorageLock func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) error `perm:"admin"` + + StorageReportHealth func(p0 context.Context, p1 stores.ID, p2 stores.HealthReport) error `perm:"admin"` + + StorageStat func(p0 context.Context, p1 stores.ID) (fsutil.FsStat, error) `perm:"admin"` + + StorageTryLock func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) (bool, error) `perm:"admin"` + + WorkerConnect func(p0 context.Context, p1 string) error `perm:"admin"` + + WorkerJobs func(p0 context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) `perm:"admin"` + + WorkerStats func(p0 context.Context) (map[uuid.UUID]storiface.WorkerStats, error) `perm:"admin"` + } +} + +type StorageMinerStub struct { + CommonStub + + NetStub +} + +type WalletStruct struct { + Internal struct { + WalletDelete func(p0 context.Context, p1 address.Address) error `perm:"admin"` + + WalletExport func(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) `perm:"admin"` + + WalletHas func(p0 context.Context, p1 address.Address) (bool, error) `perm:"admin"` + + WalletImport func(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) `perm:"admin"` + + WalletList func(p0 context.Context) ([]address.Address, error) `perm:"admin"` + + WalletNew func(p0 context.Context, p1 types.KeyType) (address.Address, error) `perm:"admin"` + + WalletSign func(p0 context.Context, p1 address.Address, p2 []byte, p3 MsgMeta) (*crypto.Signature, error) `perm:"admin"` + } +} + +type WalletStub struct { +} + +type WorkerStruct struct { + Internal struct { + AddPiece func(p0 context.Context, p1 storage.SectorRef, p2 []abi.UnpaddedPieceSize, p3 abi.UnpaddedPieceSize, p4 storage.Data) (storiface.CallID, error) `perm:"admin"` + + Enabled func(p0 context.Context) (bool, error) `perm:"admin"` + + Fetch 
func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"` + + FinalizeSector func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"` + + Info func(p0 context.Context) (storiface.WorkerInfo, error) `perm:"admin"` + + MoveStorage func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"` + + Paths func(p0 context.Context) ([]stores.StoragePath, error) `perm:"admin"` + + ProcessSession func(p0 context.Context) (uuid.UUID, error) `perm:"admin"` + + ReleaseUnsealed func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"` + + Remove func(p0 context.Context, p1 abi.SectorID) error `perm:"admin"` + + SealCommit1 func(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) `perm:"admin"` + + SealCommit2 func(p0 context.Context, p1 storage.SectorRef, p2 storage.Commit1Out) (storiface.CallID, error) `perm:"admin"` + + SealPreCommit1 func(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"` + + SealPreCommit2 func(p0 context.Context, p1 storage.SectorRef, p2 storage.PreCommit1Out) (storiface.CallID, error) `perm:"admin"` + + Session func(p0 context.Context) (uuid.UUID, error) `perm:"admin"` + + SetEnabled func(p0 context.Context, p1 bool) error `perm:"admin"` + + StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"` + + TaskDisable func(p0 context.Context, p1 sealtasks.TaskType) error `perm:"admin"` + + TaskEnable func(p0 context.Context, p1 sealtasks.TaskType) error `perm:"admin"` + + TaskTypes func(p0 context.Context) (map[sealtasks.TaskType]struct{}, error) `perm:"admin"` + + UnsealPiece func(p0 
context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 cid.Cid) (storiface.CallID, error) `perm:"admin"` + + Version func(p0 context.Context) (Version, error) `perm:"admin"` + + WaitQuiet func(p0 context.Context) error `perm:"admin"` + } +} + +type WorkerStub struct { +} + +func (s *ChainIOStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + return s.Internal.ChainHasObj(p0, p1) +} + +func (s *ChainIOStub) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *ChainIOStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + return s.Internal.ChainReadObj(p0, p1) +} + +func (s *ChainIOStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + return *new([]byte), xerrors.New("method not supported") +} + +func (s *CommonStruct) AuthNew(p0 context.Context, p1 []auth.Permission) ([]byte, error) { + return s.Internal.AuthNew(p0, p1) +} + +func (s *CommonStub) AuthNew(p0 context.Context, p1 []auth.Permission) ([]byte, error) { + return *new([]byte), xerrors.New("method not supported") +} + +func (s *CommonStruct) AuthVerify(p0 context.Context, p1 string) ([]auth.Permission, error) { + return s.Internal.AuthVerify(p0, p1) +} + +func (s *CommonStub) AuthVerify(p0 context.Context, p1 string) ([]auth.Permission, error) { + return *new([]auth.Permission), xerrors.New("method not supported") +} + +func (s *CommonStruct) Closing(p0 context.Context) (<-chan struct{}, error) { + return s.Internal.Closing(p0) +} + +func (s *CommonStub) Closing(p0 context.Context) (<-chan struct{}, error) { + return nil, xerrors.New("method not supported") +} + +func (s *CommonStruct) Discover(p0 context.Context) (apitypes.OpenRPCDocument, error) { + return s.Internal.Discover(p0) +} + +func (s *CommonStub) Discover(p0 context.Context) (apitypes.OpenRPCDocument, error) { + return *new(apitypes.OpenRPCDocument), 
xerrors.New("method not supported") +} + +func (s *CommonStruct) LogList(p0 context.Context) ([]string, error) { + return s.Internal.LogList(p0) +} + +func (s *CommonStub) LogList(p0 context.Context) ([]string, error) { + return *new([]string), xerrors.New("method not supported") +} + +func (s *CommonStruct) LogSetLevel(p0 context.Context, p1 string, p2 string) error { + return s.Internal.LogSetLevel(p0, p1, p2) +} + +func (s *CommonStub) LogSetLevel(p0 context.Context, p1 string, p2 string) error { + return xerrors.New("method not supported") +} + +func (s *CommonStruct) Session(p0 context.Context) (uuid.UUID, error) { + return s.Internal.Session(p0) +} + +func (s *CommonStub) Session(p0 context.Context) (uuid.UUID, error) { + return *new(uuid.UUID), xerrors.New("method not supported") +} + +func (s *CommonStruct) Shutdown(p0 context.Context) error { + return s.Internal.Shutdown(p0) +} + +func (s *CommonStub) Shutdown(p0 context.Context) error { + return xerrors.New("method not supported") +} + +func (s *CommonStruct) Version(p0 context.Context) (APIVersion, error) { + return s.Internal.Version(p0) +} + +func (s *CommonStub) Version(p0 context.Context) (APIVersion, error) { + return *new(APIVersion), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) BeaconGetEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) { + return s.Internal.BeaconGetEntry(p0, p1) +} + +func (s *FullNodeStub) BeaconGetEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainDeleteObj(p0 context.Context, p1 cid.Cid) error { + return s.Internal.ChainDeleteObj(p0, p1) +} + +func (s *FullNodeStub) ChainDeleteObj(p0 context.Context, p1 cid.Cid) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) { + return 
s.Internal.ChainExport(p0, p1, p2, p3) +} + +func (s *FullNodeStub) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) { + return s.Internal.ChainGetBlock(p0, p1) +} + +func (s *FullNodeStub) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) { + return s.Internal.ChainGetBlockMessages(p0, p1) +} + +func (s *FullNodeStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { + return s.Internal.ChainGetGenesis(p0) +} + +func (s *FullNodeStub) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + return s.Internal.ChainGetMessage(p0, p1) +} + +func (s *FullNodeStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]Message, error) { + return s.Internal.ChainGetMessagesInTipset(p0, p1) +} + +func (s *FullNodeStub) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]Message, error) { + return *new([]Message), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetNode(p0 context.Context, p1 string) (*IpldObject, error) { + return s.Internal.ChainGetNode(p0, p1) +} + +func (s *FullNodeStub) ChainGetNode(p0 context.Context, p1 string) (*IpldObject, 
error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) { + return s.Internal.ChainGetParentMessages(p0, p1) +} + +func (s *FullNodeStub) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) { + return *new([]Message), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { + return s.Internal.ChainGetParentReceipts(p0, p1) +} + +func (s *FullNodeStub) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { + return *new([]*types.MessageReceipt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) { + return s.Internal.ChainGetPath(p0, p1, p2) +} + +func (s *FullNodeStub) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) { + return *new([]*HeadChange), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetRandomnessFromBeacon(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { + return s.Internal.ChainGetRandomnessFromBeacon(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) ChainGetRandomnessFromBeacon(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { + return *new(abi.Randomness), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { + return s.Internal.ChainGetRandomnessFromTickets(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 
crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { + return *new(abi.Randomness), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { + return s.Internal.ChainGetTipSet(p0, p1) +} + +func (s *FullNodeStub) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + return s.Internal.ChainGetTipSetByHeight(p0, p1, p2) +} + +func (s *FullNodeStub) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + return s.Internal.ChainHasObj(p0, p1) +} + +func (s *FullNodeStub) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainHead(p0 context.Context) (*types.TipSet, error) { + return s.Internal.ChainHead(p0) +} + +func (s *FullNodeStub) ChainHead(p0 context.Context) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainNotify(p0 context.Context) (<-chan []*HeadChange, error) { + return s.Internal.ChainNotify(p0) +} + +func (s *FullNodeStub) ChainNotify(p0 context.Context) (<-chan []*HeadChange, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + return s.Internal.ChainReadObj(p0, p1) +} + +func (s *FullNodeStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + return *new([]byte), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainSetHead(p0 context.Context, p1 
types.TipSetKey) error { + return s.Internal.ChainSetHead(p0, p1) +} + +func (s *FullNodeStub) ChainSetHead(p0 context.Context, p1 types.TipSetKey) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainStatObj(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (ObjStat, error) { + return s.Internal.ChainStatObj(p0, p1, p2) +} + +func (s *FullNodeStub) ChainStatObj(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (ObjStat, error) { + return *new(ObjStat), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) { + return s.Internal.ChainTipSetWeight(p0, p1) +} + +func (s *FullNodeStub) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientCalcCommP(p0 context.Context, p1 string) (*CommPRet, error) { + return s.Internal.ClientCalcCommP(p0, p1) +} + +func (s *FullNodeStub) ClientCalcCommP(p0 context.Context, p1 string) (*CommPRet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return s.Internal.ClientCancelDataTransfer(p0, p1, p2, p3) +} + +func (s *FullNodeStub) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error { + return s.Internal.ClientCancelRetrievalDeal(p0, p1) +} + +func (s *FullNodeStub) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { + return s.Internal.ClientDataTransferUpdates(p0) 
+} + +func (s *FullNodeStub) ClientDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) { + return s.Internal.ClientDealPieceCID(p0, p1) +} + +func (s *FullNodeStub) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (DataCIDSize, error) { + return *new(DataCIDSize), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize, error) { + return s.Internal.ClientDealSize(p0, p1) +} + +func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize, error) { + return *new(DataSize), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) { + return s.Internal.ClientFindData(p0, p1, p2) +} + +func (s *FullNodeStub) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) { + return *new([]QueryOffer), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientGenCar(p0 context.Context, p1 FileRef, p2 string) error { + return s.Internal.ClientGenCar(p0, p1, p2) +} + +func (s *FullNodeStub) ClientGenCar(p0 context.Context, p1 FileRef, p2 string) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*DealInfo, error) { + return s.Internal.ClientGetDealInfo(p0, p1) +} + +func (s *FullNodeStub) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*DealInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) { + return s.Internal.ClientGetDealStatus(p0, p1) +} + +func (s *FullNodeStub) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) { + return "", xerrors.New("method not supported") +} + 
+func (s *FullNodeStruct) ClientGetDealUpdates(p0 context.Context) (<-chan DealInfo, error) { + return s.Internal.ClientGetDealUpdates(p0) +} + +func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan DealInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan RetrievalInfo, error) { + return s.Internal.ClientGetRetrievalUpdates(p0) +} + +func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan RetrievalInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { + return s.Internal.ClientHasLocal(p0, p1) +} + +func (s *FullNodeStub) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientImport(p0 context.Context, p1 FileRef) (*ImportRes, error) { + return s.Internal.ClientImport(p0, p1) +} + +func (s *FullNodeStub) ClientImport(p0 context.Context, p1 FileRef) (*ImportRes, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) { + return s.Internal.ClientListDataTransfers(p0) +} + +func (s *FullNodeStub) ClientListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) { + return *new([]DataTransferChannel), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientListDeals(p0 context.Context) ([]DealInfo, error) { + return s.Internal.ClientListDeals(p0) +} + +func (s *FullNodeStub) ClientListDeals(p0 context.Context) ([]DealInfo, error) { + return *new([]DealInfo), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientListImports(p0 context.Context) ([]Import, error) { + return s.Internal.ClientListImports(p0) +} + +func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]Import, error) 
{ + return *new([]Import), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) { + return s.Internal.ClientListRetrievals(p0) +} + +func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) { + return *new([]RetrievalInfo), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) { + return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3) +} + +func (s *FullNodeStub) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) { + return *new(QueryOffer), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) { + return s.Internal.ClientQueryAsk(p0, p1, p2) +} + +func (s *FullNodeStub) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error { + return s.Internal.ClientRemoveImport(p0, p1) +} + +func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return s.Internal.ClientRestartDataTransfer(p0, p1, p2, p3) +} + +func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error { + return s.Internal.ClientRetrieve(p0, p1, p2) +} + +func (s *FullNodeStub) 
ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { + return s.Internal.ClientRetrieveTryRestartInsufficientFunds(p0, p1) +} + +func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) { + return s.Internal.ClientRetrieveWithEvents(p0, p1, p2) +} + +func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { + return s.Internal.ClientStartDeal(p0, p1) +} + +func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { + return s.Internal.ClientStatelessDeal(p0, p1) +} + +func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error { + return s.Internal.CreateBackup(p0, p1) +} + +func (s *FullNodeStub) CreateBackup(p0 context.Context, p1 string) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) GasEstimateFeeCap(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) { + return s.Internal.GasEstimateFeeCap(p0, p1, p2, p3) +} + +func (s *FullNodeStub) 
GasEstimateFeeCap(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) GasEstimateGasLimit(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) { + return s.Internal.GasEstimateGasLimit(p0, p1, p2) +} + +func (s *FullNodeStub) GasEstimateGasLimit(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) { + return 0, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) { + return s.Internal.GasEstimateGasPremium(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { + return s.Internal.GasEstimateMessageGas(p0, p1, p2, p3) +} + +func (s *FullNodeStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return s.Internal.MarketAddBalance(p0, p1, p2, p3) +} + +func (s *FullNodeStub) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MarketGetReserved(p0 context.Context, p1 address.Address) (types.BigInt, error) { + return s.Internal.MarketGetReserved(p0, p1) +} + +func (s *FullNodeStub) 
MarketGetReserved(p0 context.Context, p1 address.Address) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MarketReleaseFunds(p0 context.Context, p1 address.Address, p2 types.BigInt) error { + return s.Internal.MarketReleaseFunds(p0, p1, p2) +} + +func (s *FullNodeStub) MarketReleaseFunds(p0 context.Context, p1 address.Address, p2 types.BigInt) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MarketReserveFunds(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return s.Internal.MarketReserveFunds(p0, p1, p2, p3) +} + +func (s *FullNodeStub) MarketReserveFunds(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MarketWithdraw(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return s.Internal.MarketWithdraw(p0, p1, p2, p3) +} + +func (s *FullNodeStub) MarketWithdraw(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MinerCreateBlock(p0 context.Context, p1 *BlockTemplate) (*types.BlockMsg, error) { + return s.Internal.MinerCreateBlock(p0, p1) +} + +func (s *FullNodeStub) MinerCreateBlock(p0 context.Context, p1 *BlockTemplate) (*types.BlockMsg, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) { + return s.Internal.MinerGetBaseInfo(p0, p1, p2, p3) +} + +func (s *FullNodeStub) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) { + return nil, xerrors.New("method not supported") 
+} + +func (s *FullNodeStruct) MpoolBatchPush(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + return s.Internal.MpoolBatchPush(p0, p1) +} + +func (s *FullNodeStub) MpoolBatchPush(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + return *new([]cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolBatchPushMessage(p0 context.Context, p1 []*types.Message, p2 *MessageSendSpec) ([]*types.SignedMessage, error) { + return s.Internal.MpoolBatchPushMessage(p0, p1, p2) +} + +func (s *FullNodeStub) MpoolBatchPushMessage(p0 context.Context, p1 []*types.Message, p2 *MessageSendSpec) ([]*types.SignedMessage, error) { + return *new([]*types.SignedMessage), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + return s.Internal.MpoolBatchPushUntrusted(p0, p1) +} + +func (s *FullNodeStub) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + return *new([]cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolCheckMessages(p0 context.Context, p1 []*MessagePrototype) ([][]MessageCheckStatus, error) { + return s.Internal.MpoolCheckMessages(p0, p1) +} + +func (s *FullNodeStub) MpoolCheckMessages(p0 context.Context, p1 []*MessagePrototype) ([][]MessageCheckStatus, error) { + return *new([][]MessageCheckStatus), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolCheckPendingMessages(p0 context.Context, p1 address.Address) ([][]MessageCheckStatus, error) { + return s.Internal.MpoolCheckPendingMessages(p0, p1) +} + +func (s *FullNodeStub) MpoolCheckPendingMessages(p0 context.Context, p1 address.Address) ([][]MessageCheckStatus, error) { + return *new([][]MessageCheckStatus), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolCheckReplaceMessages(p0 context.Context, p1 []*types.Message) 
([][]MessageCheckStatus, error) { + return s.Internal.MpoolCheckReplaceMessages(p0, p1) +} + +func (s *FullNodeStub) MpoolCheckReplaceMessages(p0 context.Context, p1 []*types.Message) ([][]MessageCheckStatus, error) { + return *new([][]MessageCheckStatus), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolClear(p0 context.Context, p1 bool) error { + return s.Internal.MpoolClear(p0, p1) +} + +func (s *FullNodeStub) MpoolClear(p0 context.Context, p1 bool) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolGetConfig(p0 context.Context) (*types.MpoolConfig, error) { + return s.Internal.MpoolGetConfig(p0) +} + +func (s *FullNodeStub) MpoolGetConfig(p0 context.Context) (*types.MpoolConfig, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) { + return s.Internal.MpoolGetNonce(p0, p1) +} + +func (s *FullNodeStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) { + return 0, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) { + return s.Internal.MpoolPending(p0, p1) +} + +func (s *FullNodeStub) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) { + return *new([]*types.SignedMessage), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return s.Internal.MpoolPush(p0, p1) +} + +func (s *FullNodeStub) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolPushMessage(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec) (*types.SignedMessage, error) { + return s.Internal.MpoolPushMessage(p0, p1, p2) +} + +func (s *FullNodeStub) MpoolPushMessage(p0 
context.Context, p1 *types.Message, p2 *MessageSendSpec) (*types.SignedMessage, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolPushUntrusted(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return s.Internal.MpoolPushUntrusted(p0, p1) +} + +func (s *FullNodeStub) MpoolPushUntrusted(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolSelect(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) { + return s.Internal.MpoolSelect(p0, p1, p2) +} + +func (s *FullNodeStub) MpoolSelect(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) { + return *new([]*types.SignedMessage), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolSetConfig(p0 context.Context, p1 *types.MpoolConfig) error { + return s.Internal.MpoolSetConfig(p0, p1) +} + +func (s *FullNodeStub) MpoolSetConfig(p0 context.Context, p1 *types.MpoolConfig) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolSub(p0 context.Context) (<-chan MpoolUpdate, error) { + return s.Internal.MpoolSub(p0) +} + +func (s *FullNodeStub) MpoolSub(p0 context.Context) (<-chan MpoolUpdate, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*MessagePrototype, error) { + return s.Internal.MsigAddApprove(p0, p1, p2, p3, p4, p5, p6) +} + +func (s *FullNodeStub) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 
uint64, p4 address.Address, p5 bool) (*MessagePrototype, error) { + return s.Internal.MsigAddCancel(p0, p1, p2, p3, p4, p5) +} + +func (s *FullNodeStub) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) { + return s.Internal.MsigAddPropose(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) { + return s.Internal.MsigApprove(p0, p1, p2, p3) +} + +func (s *FullNodeStub) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) { + return s.Internal.MsigApproveTxnHash(p0, p1, p2, p3, p4, p5, p6, p7, p8) +} + +func (s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { + return s.Internal.MsigCancel(p0, p1, p2, p3, p4, p5, p6, 
p7) +} + +func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) { + return s.Internal.MsigCreate(p0, p1, p2, p3, p4, p5, p6) +} + +func (s *FullNodeStub) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + return s.Internal.MsigGetAvailableBalance(p0, p1, p2) +} + +func (s *FullNodeStub) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) { + return s.Internal.MsigGetPending(p0, p1, p2) +} + +func (s *FullNodeStub) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) { + return *new([]*MsigTransaction), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { + return s.Internal.MsigGetVested(p0, p1, p2, p3) +} + +func (s *FullNodeStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) 
MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) { + return s.Internal.MsigGetVestingSchedule(p0, p1, p2) +} + +func (s *FullNodeStub) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) { + return *new(MsigVesting), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (*MessagePrototype, error) { + return s.Internal.MsigPropose(p0, p1, p2, p3, p4, p5, p6) +} + +func (s *FullNodeStub) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) { + return s.Internal.MsigRemoveSigner(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*MessagePrototype, error) { + return s.Internal.MsigSwapApprove(p0, p1, p2, p3, p4, p5, p6) +} + +func (s *FullNodeStub) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*MessagePrototype, error) { 
+ return s.Internal.MsigSwapCancel(p0, p1, p2, p3, p4, p5) +} + +func (s *FullNodeStub) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*MessagePrototype, error) { + return s.Internal.MsigSwapPropose(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) NodeStatus(p0 context.Context, p1 bool) (NodeStatus, error) { + return s.Internal.NodeStatus(p0, p1) +} + +func (s *FullNodeStub) NodeStatus(p0 context.Context, p1 bool) (NodeStatus, error) { + return *new(NodeStatus), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) { + return s.Internal.PaychAllocateLane(p0, p1) +} + +func (s *FullNodeStub) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) { + return 0, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychAvailableFunds(p0 context.Context, p1 address.Address) (*ChannelAvailableFunds, error) { + return s.Internal.PaychAvailableFunds(p0, p1) +} + +func (s *FullNodeStub) PaychAvailableFunds(p0 context.Context, p1 address.Address) (*ChannelAvailableFunds, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychAvailableFundsByFromTo(p0 context.Context, p1 address.Address, p2 address.Address) (*ChannelAvailableFunds, error) { + return s.Internal.PaychAvailableFundsByFromTo(p0, p1, p2) +} + +func (s *FullNodeStub) PaychAvailableFundsByFromTo(p0 context.Context, p1 address.Address, p2 
address.Address) (*ChannelAvailableFunds, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychCollect(p0 context.Context, p1 address.Address) (cid.Cid, error) { + return s.Internal.PaychCollect(p0, p1) +} + +func (s *FullNodeStub) PaychCollect(p0 context.Context, p1 address.Address) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) { + return s.Internal.PaychGet(p0, p1, p2, p3) +} + +func (s *FullNodeStub) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychGetWaitReady(p0 context.Context, p1 cid.Cid) (address.Address, error) { + return s.Internal.PaychGetWaitReady(p0, p1) +} + +func (s *FullNodeStub) PaychGetWaitReady(p0 context.Context, p1 cid.Cid) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychList(p0 context.Context) ([]address.Address, error) { + return s.Internal.PaychList(p0) +} + +func (s *FullNodeStub) PaychList(p0 context.Context) ([]address.Address, error) { + return *new([]address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychNewPayment(p0 context.Context, p1 address.Address, p2 address.Address, p3 []VoucherSpec) (*PaymentInfo, error) { + return s.Internal.PaychNewPayment(p0, p1, p2, p3) +} + +func (s *FullNodeStub) PaychNewPayment(p0 context.Context, p1 address.Address, p2 address.Address, p3 []VoucherSpec) (*PaymentInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychSettle(p0 context.Context, p1 address.Address) (cid.Cid, error) { + return s.Internal.PaychSettle(p0, p1) +} + +func (s *FullNodeStub) PaychSettle(p0 context.Context, p1 
address.Address) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychStatus(p0 context.Context, p1 address.Address) (*PaychStatus, error) { + return s.Internal.PaychStatus(p0, p1) +} + +func (s *FullNodeStub) PaychStatus(p0 context.Context, p1 address.Address) (*PaychStatus, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychVoucherAdd(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) { + return s.Internal.PaychVoucherAdd(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) PaychVoucherAdd(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychVoucherCheckSpendable(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) { + return s.Internal.PaychVoucherCheckSpendable(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) PaychVoucherCheckSpendable(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychVoucherCheckValid(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error { + return s.Internal.PaychVoucherCheckValid(p0, p1, p2) +} + +func (s *FullNodeStub) PaychVoucherCheckValid(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychVoucherCreate(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*VoucherCreateResult, error) { + return s.Internal.PaychVoucherCreate(p0, p1, p2, p3) +} + +func (s *FullNodeStub) PaychVoucherCreate(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*VoucherCreateResult, error) { + return 
nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) { + return s.Internal.PaychVoucherList(p0, p1) +} + +func (s *FullNodeStub) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) { + return *new([]*paych.SignedVoucher), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychVoucherSubmit(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) { + return s.Internal.PaychVoucherSubmit(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) PaychVoucherSubmit(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return s.Internal.StateAccountKey(p0, p1, p2) +} + +func (s *FullNodeStub) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateAllMinerFaults(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*Fault, error) { + return s.Internal.StateAllMinerFaults(p0, p1, p2) +} + +func (s *FullNodeStub) StateAllMinerFaults(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*Fault, error) { + return *new([]*Fault), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*InvocResult, error) { + return s.Internal.StateCall(p0, p1, p2) +} + +func (s *FullNodeStub) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*InvocResult, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateChangedActors(p0 context.Context, p1 
cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) { + return s.Internal.StateChangedActors(p0, p1, p2) +} + +func (s *FullNodeStub) StateChangedActors(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) { + return *new(map[string]types.Actor), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) { + return s.Internal.StateCirculatingSupply(p0, p1) +} + +func (s *FullNodeStub) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) { + return *new(abi.TokenAmount), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateCompute(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*ComputeStateOutput, error) { + return s.Internal.StateCompute(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateCompute(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*ComputeStateOutput, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) { + return s.Internal.StateDealProviderCollateralBounds(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) { + return *new(DealCollateralBounds), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) { + return s.Internal.StateDecodeParams(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) { + return nil, xerrors.New("method not supported") +} + +func (s 
*FullNodeStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { + return s.Internal.StateGetActor(p0, p1, p2) +} + +func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateListActors(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return s.Internal.StateListActors(p0, p1) +} + +func (s *FullNodeStub) StateListActors(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return *new([]address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateListMessages(p0 context.Context, p1 *MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) { + return s.Internal.StateListMessages(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateListMessages(p0 context.Context, p1 *MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) { + return *new([]cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return s.Internal.StateListMiners(p0, p1) +} + +func (s *FullNodeStub) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return *new([]address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return s.Internal.StateLookupID(p0, p1, p2) +} + +func (s *FullNodeStub) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) { + return s.Internal.StateMarketBalance(p0, p1, p2) +} 
+ +func (s *FullNodeStub) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) { + return *new(MarketBalance), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMarketDeals(p0 context.Context, p1 types.TipSetKey) (map[string]MarketDeal, error) { + return s.Internal.StateMarketDeals(p0, p1) +} + +func (s *FullNodeStub) StateMarketDeals(p0 context.Context, p1 types.TipSetKey) (map[string]MarketDeal, error) { + return *new(map[string]MarketDeal), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMarketParticipants(p0 context.Context, p1 types.TipSetKey) (map[string]MarketBalance, error) { + return s.Internal.StateMarketParticipants(p0, p1) +} + +func (s *FullNodeStub) StateMarketParticipants(p0 context.Context, p1 types.TipSetKey) (map[string]MarketBalance, error) { + return *new(map[string]MarketBalance), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) { + return s.Internal.StateMarketStorageDeal(p0, p1, p2) +} + +func (s *FullNodeStub) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerActiveSectors(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + return s.Internal.StateMinerActiveSectors(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerActiveSectors(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + return *new([]*miner.SectorOnChainInfo), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + return s.Internal.StateMinerAvailableBalance(p0, p1, p2) +} + +func (s *FullNodeStub) 
StateMinerAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]Deadline, error) { + return s.Internal.StateMinerDeadlines(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]Deadline, error) { + return *new([]Deadline), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerFaults(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + return s.Internal.StateMinerFaults(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerFaults(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + return *new(bitfield.BitField), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { + return s.Internal.StateMinerInfo(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { + return *new(miner.MinerInfo), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerInitialPledgeCollateral(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { + return s.Internal.StateMinerInitialPledgeCollateral(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateMinerInitialPledgeCollateral(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerPartitions(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]Partition, error) { + return 
s.Internal.StateMinerPartitions(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateMinerPartitions(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]Partition, error) { + return *new([]Partition), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) { + return s.Internal.StateMinerPower(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerPreCommitDepositForPower(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { + return s.Internal.StateMinerPreCommitDepositForPower(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateMinerPreCommitDepositForPower(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { + return s.Internal.StateMinerProvingDeadline(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerRecoveries(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + return s.Internal.StateMinerRecoveries(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerRecoveries(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + return *new(bitfield.BitField), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerSectorAllocated(p0 context.Context, p1 address.Address, p2 
abi.SectorNumber, p3 types.TipSetKey) (bool, error) { + return s.Internal.StateMinerSectorAllocated(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateMinerSectorAllocated(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerSectors, error) { + return s.Internal.StateMinerSectorCount(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerSectors, error) { + return *new(MinerSectors), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerSectors(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + return s.Internal.StateMinerSectors(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateMinerSectors(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + return *new([]*miner.SectorOnChainInfo), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) { + return s.Internal.StateNetworkName(p0) +} + +func (s *FullNodeStub) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) { + return *new(dtypes.NetworkName), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) { + return s.Internal.StateNetworkVersion(p0, p1) +} + +func (s *FullNodeStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) { + return *new(apitypes.NetworkVersion), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) { + return 
s.Internal.StateReadState(p0, p1, p2) +} + +func (s *FullNodeStub) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*InvocResult, error) { + return s.Internal.StateReplay(p0, p1, p2) +} + +func (s *FullNodeStub) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*InvocResult, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { + return s.Internal.StateSearchMsg(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) { + return s.Internal.StateSectorExpiration(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + return s.Internal.StateSectorGetInfo(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) { + return 
s.Internal.StateSectorPartition(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateSectorPreCommitInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { + return s.Internal.StateSectorPreCommitInfo(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateSectorPreCommitInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { + return *new(miner.SectorPreCommitOnChainInfo), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateVMCirculatingSupplyInternal(p0 context.Context, p1 types.TipSetKey) (CirculatingSupply, error) { + return s.Internal.StateVMCirculatingSupplyInternal(p0, p1) +} + +func (s *FullNodeStub) StateVMCirculatingSupplyInternal(p0 context.Context, p1 types.TipSetKey) (CirculatingSupply, error) { + return *new(CirculatingSupply), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return s.Internal.StateVerifiedClientStatus(p0, p1, p2) +} + +func (s *FullNodeStub) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateVerifiedRegistryRootKey(p0 context.Context, p1 types.TipSetKey) (address.Address, error) { + return s.Internal.StateVerifiedRegistryRootKey(p0, p1) +} + +func (s *FullNodeStub) StateVerifiedRegistryRootKey(p0 context.Context, p1 types.TipSetKey) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateVerifierStatus(p0 
context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return s.Internal.StateVerifierStatus(p0, p1, p2) +} + +func (s *FullNodeStub) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { + return s.Internal.StateWaitMsg(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) { + return s.Internal.SyncCheckBad(p0, p1) +} + +func (s *FullNodeStub) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) { + return "", xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncCheckpoint(p0 context.Context, p1 types.TipSetKey) error { + return s.Internal.SyncCheckpoint(p0, p1) +} + +func (s *FullNodeStub) SyncCheckpoint(p0 context.Context, p1 types.TipSetKey) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncIncomingBlocks(p0 context.Context) (<-chan *types.BlockHeader, error) { + return s.Internal.SyncIncomingBlocks(p0) +} + +func (s *FullNodeStub) SyncIncomingBlocks(p0 context.Context) (<-chan *types.BlockHeader, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncMarkBad(p0 context.Context, p1 cid.Cid) error { + return s.Internal.SyncMarkBad(p0, p1) +} + +func (s *FullNodeStub) SyncMarkBad(p0 context.Context, p1 cid.Cid) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncState(p0 context.Context) (*SyncState, error) { + return s.Internal.SyncState(p0) +} + +func (s *FullNodeStub) SyncState(p0 context.Context) (*SyncState, 
error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncSubmitBlock(p0 context.Context, p1 *types.BlockMsg) error { + return s.Internal.SyncSubmitBlock(p0, p1) +} + +func (s *FullNodeStub) SyncSubmitBlock(p0 context.Context, p1 *types.BlockMsg) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncUnmarkAllBad(p0 context.Context) error { + return s.Internal.SyncUnmarkAllBad(p0) +} + +func (s *FullNodeStub) SyncUnmarkAllBad(p0 context.Context) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncUnmarkBad(p0 context.Context, p1 cid.Cid) error { + return s.Internal.SyncUnmarkBad(p0, p1) +} + +func (s *FullNodeStub) SyncUnmarkBad(p0 context.Context, p1 cid.Cid) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncValidateTipset(p0 context.Context, p1 types.TipSetKey) (bool, error) { + return s.Internal.SyncValidateTipset(p0, p1) +} + +func (s *FullNodeStub) SyncValidateTipset(p0 context.Context, p1 types.TipSetKey) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { + return s.Internal.WalletBalance(p0, p1) +} + +func (s *FullNodeStub) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletDefaultAddress(p0 context.Context) (address.Address, error) { + return s.Internal.WalletDefaultAddress(p0) +} + +func (s *FullNodeStub) WalletDefaultAddress(p0 context.Context) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletDelete(p0 context.Context, p1 address.Address) error { + return s.Internal.WalletDelete(p0, p1) +} + +func (s *FullNodeStub) WalletDelete(p0 context.Context, p1 address.Address) error { + return 
xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) { + return s.Internal.WalletExport(p0, p1) +} + +func (s *FullNodeStub) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { + return s.Internal.WalletHas(p0, p1) +} + +func (s *FullNodeStub) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { + return s.Internal.WalletImport(p0, p1) +} + +func (s *FullNodeStub) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletList(p0 context.Context) ([]address.Address, error) { + return s.Internal.WalletList(p0) +} + +func (s *FullNodeStub) WalletList(p0 context.Context) ([]address.Address, error) { + return *new([]address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) { + return s.Internal.WalletNew(p0, p1) +} + +func (s *FullNodeStub) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletSetDefault(p0 context.Context, p1 address.Address) error { + return s.Internal.WalletSetDefault(p0, p1) +} + +func (s *FullNodeStub) WalletSetDefault(p0 context.Context, p1 address.Address) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletSign(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) { + return s.Internal.WalletSign(p0, p1, p2) 
+} + +func (s *FullNodeStub) WalletSign(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletSignMessage(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) { + return s.Internal.WalletSignMessage(p0, p1, p2) +} + +func (s *FullNodeStub) WalletSignMessage(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletValidateAddress(p0 context.Context, p1 string) (address.Address, error) { + return s.Internal.WalletValidateAddress(p0, p1) +} + +func (s *FullNodeStub) WalletValidateAddress(p0 context.Context, p1 string) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletVerify(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) { + return s.Internal.WalletVerify(p0, p1, p2, p3) +} + +func (s *FullNodeStub) WalletVerify(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) { + return s.Internal.ChainGetBlockMessages(p0, p1) +} + +func (s *GatewayStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + return s.Internal.ChainGetMessage(p0, p1) +} + +func (s *GatewayStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { + return 
s.Internal.ChainGetTipSet(p0, p1) +} + +func (s *GatewayStub) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + return s.Internal.ChainGetTipSetByHeight(p0, p1, p2) +} + +func (s *GatewayStub) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + return s.Internal.ChainHasObj(p0, p1) +} + +func (s *GatewayStub) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainHead(p0 context.Context) (*types.TipSet, error) { + return s.Internal.ChainHead(p0) +} + +func (s *GatewayStub) ChainHead(p0 context.Context) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainNotify(p0 context.Context) (<-chan []*HeadChange, error) { + return s.Internal.ChainNotify(p0) +} + +func (s *GatewayStub) ChainNotify(p0 context.Context) (<-chan []*HeadChange, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + return s.Internal.ChainReadObj(p0, p1) +} + +func (s *GatewayStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + return *new([]byte), xerrors.New("method not supported") +} + +func (s *GatewayStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { + return s.Internal.GasEstimateMessageGas(p0, p1, p2, p3) +} + +func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) 
(*types.Message, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return s.Internal.MpoolPush(p0, p1) +} + +func (s *GatewayStub) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *GatewayStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + return s.Internal.MsigGetAvailableBalance(p0, p1, p2) +} + +func (s *GatewayStub) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *GatewayStruct) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) { + return s.Internal.MsigGetPending(p0, p1, p2) +} + +func (s *GatewayStub) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*MsigTransaction, error) { + return *new([]*MsigTransaction), xerrors.New("method not supported") +} + +func (s *GatewayStruct) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { + return s.Internal.MsigGetVested(p0, p1, p2, p3) +} + +func (s *GatewayStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return s.Internal.StateAccountKey(p0, p1, p2) +} + +func (s *GatewayStub) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *GatewayStruct) 
StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) { + return s.Internal.StateDealProviderCollateralBounds(p0, p1, p2, p3) +} + +func (s *GatewayStub) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) { + return *new(DealCollateralBounds), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { + return s.Internal.StateGetActor(p0, p1, p2) +} + +func (s *GatewayStub) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return s.Internal.StateListMiners(p0, p1) +} + +func (s *GatewayStub) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return *new([]address.Address), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return s.Internal.StateLookupID(p0, p1, p2) +} + +func (s *GatewayStub) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) { + return s.Internal.StateMarketBalance(p0, p1, p2) +} + +func (s *GatewayStub) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) { + return *new(MarketBalance), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 
types.TipSetKey) (*MarketDeal, error) { + return s.Internal.StateMarketStorageDeal(p0, p1, p2) +} + +func (s *GatewayStub) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { + return s.Internal.StateMinerInfo(p0, p1, p2) +} + +func (s *GatewayStub) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { + return *new(miner.MinerInfo), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) { + return s.Internal.StateMinerPower(p0, p1, p2) +} + +func (s *GatewayStub) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { + return s.Internal.StateMinerProvingDeadline(p0, p1, p2) +} + +func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) { + return s.Internal.StateNetworkVersion(p0, p1) +} + +func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) { + return *new(apitypes.NetworkVersion), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) { + return s.Internal.StateReadState(p0, p1, p2) +} + +func (s *GatewayStub) StateReadState(p0 context.Context, 
p1 address.Address, p2 types.TipSetKey) (*ActorState, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { + return s.Internal.StateSearchMsg(p0, p1, p2, p3, p4) +} + +func (s *GatewayStub) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + return s.Internal.StateSectorGetInfo(p0, p1, p2, p3) +} + +func (s *GatewayStub) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return s.Internal.StateVerifiedClientStatus(p0, p1, p2) +} + +func (s *GatewayStub) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { + return s.Internal.StateWaitMsg(p0, p1, p2, p3, p4) +} + +func (s *GatewayStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) Version(p0 context.Context) (APIVersion, error) { + return s.Internal.Version(p0) +} + +func (s *GatewayStub) Version(p0 context.Context) (APIVersion, error) { + return *new(APIVersion), xerrors.New("method not supported") +} + +func (s *GatewayStruct) 
WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { + return s.Internal.WalletBalance(p0, p1) +} + +func (s *GatewayStub) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *NetStruct) ID(p0 context.Context) (peer.ID, error) { + return s.Internal.ID(p0) +} + +func (s *NetStub) ID(p0 context.Context) (peer.ID, error) { + return *new(peer.ID), xerrors.New("method not supported") +} + +func (s *NetStruct) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) { + return s.Internal.NetAddrsListen(p0) +} + +func (s *NetStub) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) { + return *new(peer.AddrInfo), xerrors.New("method not supported") +} + +func (s *NetStruct) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) { + return s.Internal.NetAgentVersion(p0, p1) +} + +func (s *NetStub) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) { + return "", xerrors.New("method not supported") +} + +func (s *NetStruct) NetAutoNatStatus(p0 context.Context) (NatInfo, error) { + return s.Internal.NetAutoNatStatus(p0) +} + +func (s *NetStub) NetAutoNatStatus(p0 context.Context) (NatInfo, error) { + return *new(NatInfo), xerrors.New("method not supported") +} + +func (s *NetStruct) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) { + return s.Internal.NetBandwidthStats(p0) +} + +func (s *NetStub) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) { + return *new(metrics.Stats), xerrors.New("method not supported") +} + +func (s *NetStruct) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) { + return s.Internal.NetBandwidthStatsByPeer(p0) +} + +func (s *NetStub) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) { + return *new(map[string]metrics.Stats), xerrors.New("method not supported") +} + +func (s *NetStruct) 
NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) { + return s.Internal.NetBandwidthStatsByProtocol(p0) +} + +func (s *NetStub) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) { + return *new(map[protocol.ID]metrics.Stats), xerrors.New("method not supported") +} + +func (s *NetStruct) NetBlockAdd(p0 context.Context, p1 NetBlockList) error { + return s.Internal.NetBlockAdd(p0, p1) +} + +func (s *NetStub) NetBlockAdd(p0 context.Context, p1 NetBlockList) error { + return xerrors.New("method not supported") +} + +func (s *NetStruct) NetBlockList(p0 context.Context) (NetBlockList, error) { + return s.Internal.NetBlockList(p0) +} + +func (s *NetStub) NetBlockList(p0 context.Context) (NetBlockList, error) { + return *new(NetBlockList), xerrors.New("method not supported") +} + +func (s *NetStruct) NetBlockRemove(p0 context.Context, p1 NetBlockList) error { + return s.Internal.NetBlockRemove(p0, p1) +} + +func (s *NetStub) NetBlockRemove(p0 context.Context, p1 NetBlockList) error { + return xerrors.New("method not supported") +} + +func (s *NetStruct) NetConnect(p0 context.Context, p1 peer.AddrInfo) error { + return s.Internal.NetConnect(p0, p1) +} + +func (s *NetStub) NetConnect(p0 context.Context, p1 peer.AddrInfo) error { + return xerrors.New("method not supported") +} + +func (s *NetStruct) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) { + return s.Internal.NetConnectedness(p0, p1) +} + +func (s *NetStub) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) { + return *new(network.Connectedness), xerrors.New("method not supported") +} + +func (s *NetStruct) NetDisconnect(p0 context.Context, p1 peer.ID) error { + return s.Internal.NetDisconnect(p0, p1) +} + +func (s *NetStub) NetDisconnect(p0 context.Context, p1 peer.ID) error { + return xerrors.New("method not supported") +} + +func (s *NetStruct) NetFindPeer(p0 context.Context, p1 
peer.ID) (peer.AddrInfo, error) { + return s.Internal.NetFindPeer(p0, p1) +} + +func (s *NetStub) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) { + return *new(peer.AddrInfo), xerrors.New("method not supported") +} + +func (s *NetStruct) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) { + return s.Internal.NetPeerInfo(p0, p1) +} + +func (s *NetStub) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *NetStruct) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) { + return s.Internal.NetPeers(p0) +} + +func (s *NetStub) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) { + return *new([]peer.AddrInfo), xerrors.New("method not supported") +} + +func (s *NetStruct) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) { + return s.Internal.NetPubsubScores(p0) +} + +func (s *NetStub) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) { + return *new([]PubsubScore), xerrors.New("method not supported") +} + +func (s *SignableStruct) Sign(p0 context.Context, p1 SignFunc) error { + return s.Internal.Sign(p0, p1) +} + +func (s *SignableStub) Sign(p0 context.Context, p1 SignFunc) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ActorAddress(p0 context.Context) (address.Address, error) { + return s.Internal.ActorAddress(p0) +} + +func (s *StorageMinerStub) ActorAddress(p0 context.Context) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ActorAddressConfig(p0 context.Context) (AddressConfig, error) { + return s.Internal.ActorAddressConfig(p0) +} + +func (s *StorageMinerStub) ActorAddressConfig(p0 context.Context) (AddressConfig, error) { + return *new(AddressConfig), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ActorSectorSize(p0 context.Context, p1 address.Address) (abi.SectorSize, 
error) { + return s.Internal.ActorSectorSize(p0, p1) +} + +func (s *StorageMinerStub) ActorSectorSize(p0 context.Context, p1 address.Address) (abi.SectorSize, error) { + return *new(abi.SectorSize), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) { + return s.Internal.CheckProvable(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) { + return *new(map[abi.SectorNumber]string), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) { + return s.Internal.ComputeProof(p0, p1, p2) +} + +func (s *StorageMinerStub) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) { + return *new([]builtin.PoStProof), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) CreateBackup(p0 context.Context, p1 string) error { + return s.Internal.CreateBackup(p0, p1) +} + +func (s *StorageMinerStub) CreateBackup(p0 context.Context, p1 string) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) { + return s.Internal.DealsConsiderOfflineRetrievalDeals(p0) +} + +func (s *StorageMinerStub) DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsConsiderOfflineStorageDeals(p0 context.Context) (bool, error) { + return s.Internal.DealsConsiderOfflineStorageDeals(p0) +} + +func (s *StorageMinerStub) DealsConsiderOfflineStorageDeals(p0 context.Context) (bool, error) { + return false, xerrors.New("method not 
supported") +} + +func (s *StorageMinerStruct) DealsConsiderOnlineRetrievalDeals(p0 context.Context) (bool, error) { + return s.Internal.DealsConsiderOnlineRetrievalDeals(p0) +} + +func (s *StorageMinerStub) DealsConsiderOnlineRetrievalDeals(p0 context.Context) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsConsiderOnlineStorageDeals(p0 context.Context) (bool, error) { + return s.Internal.DealsConsiderOnlineStorageDeals(p0) +} + +func (s *StorageMinerStub) DealsConsiderOnlineStorageDeals(p0 context.Context) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsConsiderUnverifiedStorageDeals(p0 context.Context) (bool, error) { + return s.Internal.DealsConsiderUnverifiedStorageDeals(p0) +} + +func (s *StorageMinerStub) DealsConsiderUnverifiedStorageDeals(p0 context.Context) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsConsiderVerifiedStorageDeals(p0 context.Context) (bool, error) { + return s.Internal.DealsConsiderVerifiedStorageDeals(p0) +} + +func (s *StorageMinerStub) DealsConsiderVerifiedStorageDeals(p0 context.Context) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsImportData(p0 context.Context, p1 cid.Cid, p2 string) error { + return s.Internal.DealsImportData(p0, p1, p2) +} + +func (s *StorageMinerStub) DealsImportData(p0 context.Context, p1 cid.Cid, p2 string) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsList(p0 context.Context) ([]MarketDeal, error) { + return s.Internal.DealsList(p0) +} + +func (s *StorageMinerStub) DealsList(p0 context.Context) ([]MarketDeal, error) { + return *new([]MarketDeal), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsPieceCidBlocklist(p0 context.Context) ([]cid.Cid, error) { + return s.Internal.DealsPieceCidBlocklist(p0) 
+} + +func (s *StorageMinerStub) DealsPieceCidBlocklist(p0 context.Context) ([]cid.Cid, error) { + return *new([]cid.Cid), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsSetConsiderOfflineRetrievalDeals(p0 context.Context, p1 bool) error { + return s.Internal.DealsSetConsiderOfflineRetrievalDeals(p0, p1) +} + +func (s *StorageMinerStub) DealsSetConsiderOfflineRetrievalDeals(p0 context.Context, p1 bool) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsSetConsiderOfflineStorageDeals(p0 context.Context, p1 bool) error { + return s.Internal.DealsSetConsiderOfflineStorageDeals(p0, p1) +} + +func (s *StorageMinerStub) DealsSetConsiderOfflineStorageDeals(p0 context.Context, p1 bool) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsSetConsiderOnlineRetrievalDeals(p0 context.Context, p1 bool) error { + return s.Internal.DealsSetConsiderOnlineRetrievalDeals(p0, p1) +} + +func (s *StorageMinerStub) DealsSetConsiderOnlineRetrievalDeals(p0 context.Context, p1 bool) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsSetConsiderOnlineStorageDeals(p0 context.Context, p1 bool) error { + return s.Internal.DealsSetConsiderOnlineStorageDeals(p0, p1) +} + +func (s *StorageMinerStub) DealsSetConsiderOnlineStorageDeals(p0 context.Context, p1 bool) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsSetConsiderUnverifiedStorageDeals(p0 context.Context, p1 bool) error { + return s.Internal.DealsSetConsiderUnverifiedStorageDeals(p0, p1) +} + +func (s *StorageMinerStub) DealsSetConsiderUnverifiedStorageDeals(p0 context.Context, p1 bool) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsSetConsiderVerifiedStorageDeals(p0 context.Context, p1 bool) error { + return s.Internal.DealsSetConsiderVerifiedStorageDeals(p0, p1) +} + +func (s *StorageMinerStub) 
DealsSetConsiderVerifiedStorageDeals(p0 context.Context, p1 bool) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) DealsSetPieceCidBlocklist(p0 context.Context, p1 []cid.Cid) error { + return s.Internal.DealsSetPieceCidBlocklist(p0, p1) +} + +func (s *StorageMinerStub) DealsSetPieceCidBlocklist(p0 context.Context, p1 []cid.Cid) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return s.Internal.MarketCancelDataTransfer(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { + return s.Internal.MarketDataTransferUpdates(p0) +} + +func (s *StorageMinerStub) MarketDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { + return nil, xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketGetAsk(p0 context.Context) (*storagemarket.SignedStorageAsk, error) { + return s.Internal.MarketGetAsk(p0) +} + +func (s *StorageMinerStub) MarketGetAsk(p0 context.Context) (*storagemarket.SignedStorageAsk, error) { + return nil, xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketGetDealUpdates(p0 context.Context) (<-chan storagemarket.MinerDeal, error) { + return s.Internal.MarketGetDealUpdates(p0) +} + +func (s *StorageMinerStub) MarketGetDealUpdates(p0 context.Context) (<-chan storagemarket.MinerDeal, error) { + return nil, xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketGetRetrievalAsk(p0 context.Context) (*retrievalmarket.Ask, error) { + return s.Internal.MarketGetRetrievalAsk(p0) +} + +func (s *StorageMinerStub) MarketGetRetrievalAsk(p0 
context.Context) (*retrievalmarket.Ask, error) { + return nil, xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketImportDealData(p0 context.Context, p1 cid.Cid, p2 string) error { + return s.Internal.MarketImportDealData(p0, p1, p2) +} + +func (s *StorageMinerStub) MarketImportDealData(p0 context.Context, p1 cid.Cid, p2 string) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) { + return s.Internal.MarketListDataTransfers(p0) +} + +func (s *StorageMinerStub) MarketListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) { + return *new([]DataTransferChannel), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketListDeals(p0 context.Context) ([]MarketDeal, error) { + return s.Internal.MarketListDeals(p0) +} + +func (s *StorageMinerStub) MarketListDeals(p0 context.Context) ([]MarketDeal, error) { + return *new([]MarketDeal), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketListIncompleteDeals(p0 context.Context) ([]storagemarket.MinerDeal, error) { + return s.Internal.MarketListIncompleteDeals(p0) +} + +func (s *StorageMinerStub) MarketListIncompleteDeals(p0 context.Context) ([]storagemarket.MinerDeal, error) { + return *new([]storagemarket.MinerDeal), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketListRetrievalDeals(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) { + return s.Internal.MarketListRetrievalDeals(p0) +} + +func (s *StorageMinerStub) MarketListRetrievalDeals(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) { + return *new([]retrievalmarket.ProviderDealState), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketPendingDeals(p0 context.Context) (PendingDealInfo, error) { + return s.Internal.MarketPendingDeals(p0) +} + +func (s *StorageMinerStub) MarketPendingDeals(p0 
context.Context) (PendingDealInfo, error) { + return *new(PendingDealInfo), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketPublishPendingDeals(p0 context.Context) error { + return s.Internal.MarketPublishPendingDeals(p0) +} + +func (s *StorageMinerStub) MarketPublishPendingDeals(p0 context.Context) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return s.Internal.MarketRestartDataTransfer(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) MarketRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error { + return s.Internal.MarketSetAsk(p0, p1, p2, p3, p4, p5) +} + +func (s *StorageMinerStub) MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MarketSetRetrievalAsk(p0 context.Context, p1 *retrievalmarket.Ask) error { + return s.Internal.MarketSetRetrievalAsk(p0, p1) +} + +func (s *StorageMinerStub) MarketSetRetrievalAsk(p0 context.Context, p1 *retrievalmarket.Ask) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) MiningBase(p0 context.Context) (*types.TipSet, error) { + return s.Internal.MiningBase(p0) +} + +func (s *StorageMinerStub) MiningBase(p0 context.Context) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) PiecesGetCIDInfo(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) { + return s.Internal.PiecesGetCIDInfo(p0, p1) +} + +func (s *StorageMinerStub) 
PiecesGetCIDInfo(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) PiecesGetPieceInfo(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) { + return s.Internal.PiecesGetPieceInfo(p0, p1) +} + +func (s *StorageMinerStub) PiecesGetPieceInfo(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) PiecesListCidInfos(p0 context.Context) ([]cid.Cid, error) { + return s.Internal.PiecesListCidInfos(p0) +} + +func (s *StorageMinerStub) PiecesListCidInfos(p0 context.Context) ([]cid.Cid, error) { + return *new([]cid.Cid), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) PiecesListPieces(p0 context.Context) ([]cid.Cid, error) { + return s.Internal.PiecesListPieces(p0) +} + +func (s *StorageMinerStub) PiecesListPieces(p0 context.Context) ([]cid.Cid, error) { + return *new([]cid.Cid), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) PledgeSector(p0 context.Context) (abi.SectorID, error) { + return s.Internal.PledgeSector(p0) +} + +func (s *StorageMinerStub) PledgeSector(p0 context.Context) (abi.SectorID, error) { + return *new(abi.SectorID), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ReturnAddPiece(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error { + return s.Internal.ReturnAddPiece(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnAddPiece(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ReturnFetch(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + return s.Internal.ReturnFetch(p0, p1, p2) +} + +func (s *StorageMinerStub) ReturnFetch(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + return 
xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ReturnFinalizeSector(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + return s.Internal.ReturnFinalizeSector(p0, p1, p2) +} + +func (s *StorageMinerStub) ReturnFinalizeSector(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ReturnMoveStorage(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + return s.Internal.ReturnMoveStorage(p0, p1, p2) +} + +func (s *StorageMinerStub) ReturnMoveStorage(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ReturnReadPiece(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error { + return s.Internal.ReturnReadPiece(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnReadPiece(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ReturnReleaseUnsealed(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + return s.Internal.ReturnReleaseUnsealed(p0, p1, p2) +} + +func (s *StorageMinerStub) ReturnReleaseUnsealed(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ReturnSealCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error { + return s.Internal.ReturnSealCommit1(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnSealCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ReturnSealCommit2(p0 context.Context, p1 storiface.CallID, p2 storage.Proof, p3 *storiface.CallError) 
error { + return s.Internal.ReturnSealCommit2(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnSealCommit2(p0 context.Context, p1 storiface.CallID, p2 storage.Proof, p3 *storiface.CallError) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ReturnSealPreCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.PreCommit1Out, p3 *storiface.CallError) error { + return s.Internal.ReturnSealPreCommit1(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnSealPreCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.PreCommit1Out, p3 *storiface.CallError) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ReturnSealPreCommit2(p0 context.Context, p1 storiface.CallID, p2 storage.SectorCids, p3 *storiface.CallError) error { + return s.Internal.ReturnSealPreCommit2(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnSealPreCommit2(p0 context.Context, p1 storiface.CallID, p2 storage.SectorCids, p3 *storiface.CallError) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) ReturnUnsealPiece(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + return s.Internal.ReturnUnsealPiece(p0, p1, p2) +} + +func (s *StorageMinerStub) ReturnUnsealPiece(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SealingAbort(p0 context.Context, p1 storiface.CallID) error { + return s.Internal.SealingAbort(p0, p1) +} + +func (s *StorageMinerStub) SealingAbort(p0 context.Context, p1 storiface.CallID) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SealingSchedDiag(p0 context.Context, p1 bool) (interface{}, error) { + return s.Internal.SealingSchedDiag(p0, p1) +} + +func (s *StorageMinerStub) SealingSchedDiag(p0 context.Context, p1 bool) (interface{}, error) { + return nil, xerrors.New("method not supported") +} + 
+func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) { + return s.Internal.SectorAddPieceToAny(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) { + return *new(SectorOffset), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorCommitFlush(p0 context.Context) ([]sealiface.CommitBatchRes, error) { + return s.Internal.SectorCommitFlush(p0) +} + +func (s *StorageMinerStub) SectorCommitFlush(p0 context.Context) ([]sealiface.CommitBatchRes, error) { + return *new([]sealiface.CommitBatchRes), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorCommitPending(p0 context.Context) ([]abi.SectorID, error) { + return s.Internal.SectorCommitPending(p0) +} + +func (s *StorageMinerStub) SectorCommitPending(p0 context.Context) ([]abi.SectorID, error) { + return *new([]abi.SectorID), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorGetExpectedSealDuration(p0 context.Context) (time.Duration, error) { + return s.Internal.SectorGetExpectedSealDuration(p0) +} + +func (s *StorageMinerStub) SectorGetExpectedSealDuration(p0 context.Context) (time.Duration, error) { + return *new(time.Duration), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorGetSealDelay(p0 context.Context) (time.Duration, error) { + return s.Internal.SectorGetSealDelay(p0) +} + +func (s *StorageMinerStub) SectorGetSealDelay(p0 context.Context) (time.Duration, error) { + return *new(time.Duration), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error { + return s.Internal.SectorMarkForUpgrade(p0, p1) +} + +func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error { + return 
xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorPreCommitFlush(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) { + return s.Internal.SectorPreCommitFlush(p0) +} + +func (s *StorageMinerStub) SectorPreCommitFlush(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) { + return *new([]sealiface.PreCommitBatchRes), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorPreCommitPending(p0 context.Context) ([]abi.SectorID, error) { + return s.Internal.SectorPreCommitPending(p0) +} + +func (s *StorageMinerStub) SectorPreCommitPending(p0 context.Context) ([]abi.SectorID, error) { + return *new([]abi.SectorID), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorRemove(p0 context.Context, p1 abi.SectorNumber) error { + return s.Internal.SectorRemove(p0, p1) +} + +func (s *StorageMinerStub) SectorRemove(p0 context.Context, p1 abi.SectorNumber) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorSetExpectedSealDuration(p0 context.Context, p1 time.Duration) error { + return s.Internal.SectorSetExpectedSealDuration(p0, p1) +} + +func (s *StorageMinerStub) SectorSetExpectedSealDuration(p0 context.Context, p1 time.Duration) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorSetSealDelay(p0 context.Context, p1 time.Duration) error { + return s.Internal.SectorSetSealDelay(p0, p1) +} + +func (s *StorageMinerStub) SectorSetSealDelay(p0 context.Context, p1 time.Duration) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorStartSealing(p0 context.Context, p1 abi.SectorNumber) error { + return s.Internal.SectorStartSealing(p0, p1) +} + +func (s *StorageMinerStub) SectorStartSealing(p0 context.Context, p1 abi.SectorNumber) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorTerminate(p0 context.Context, p1 abi.SectorNumber) error 
{ + return s.Internal.SectorTerminate(p0, p1) +} + +func (s *StorageMinerStub) SectorTerminate(p0 context.Context, p1 abi.SectorNumber) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorTerminateFlush(p0 context.Context) (*cid.Cid, error) { + return s.Internal.SectorTerminateFlush(p0) +} + +func (s *StorageMinerStub) SectorTerminateFlush(p0 context.Context) (*cid.Cid, error) { + return nil, xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorTerminatePending(p0 context.Context) ([]abi.SectorID, error) { + return s.Internal.SectorTerminatePending(p0) +} + +func (s *StorageMinerStub) SectorTerminatePending(p0 context.Context) ([]abi.SectorID, error) { + return *new([]abi.SectorID), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorsList(p0 context.Context) ([]abi.SectorNumber, error) { + return s.Internal.SectorsList(p0) +} + +func (s *StorageMinerStub) SectorsList(p0 context.Context) ([]abi.SectorNumber, error) { + return *new([]abi.SectorNumber), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorsListInStates(p0 context.Context, p1 []SectorState) ([]abi.SectorNumber, error) { + return s.Internal.SectorsListInStates(p0, p1) +} + +func (s *StorageMinerStub) SectorsListInStates(p0 context.Context, p1 []SectorState) ([]abi.SectorNumber, error) { + return *new([]abi.SectorNumber), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorsRefs(p0 context.Context) (map[string][]SealedRef, error) { + return s.Internal.SectorsRefs(p0) +} + +func (s *StorageMinerStub) SectorsRefs(p0 context.Context) (map[string][]SealedRef, error) { + return *new(map[string][]SealedRef), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorsStatus(p0 context.Context, p1 abi.SectorNumber, p2 bool) (SectorInfo, error) { + return s.Internal.SectorsStatus(p0, p1, p2) +} + +func (s *StorageMinerStub) SectorsStatus(p0 
context.Context, p1 abi.SectorNumber, p2 bool) (SectorInfo, error) { + return *new(SectorInfo), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorsSummary(p0 context.Context) (map[SectorState]int, error) { + return s.Internal.SectorsSummary(p0) +} + +func (s *StorageMinerStub) SectorsSummary(p0 context.Context) (map[SectorState]int, error) { + return *new(map[SectorState]int), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorsUnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error { + return s.Internal.SectorsUnsealPiece(p0, p1, p2, p3, p4, p5) +} + +func (s *StorageMinerStub) SectorsUnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) SectorsUpdate(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error { + return s.Internal.SectorsUpdate(p0, p1, p2) +} + +func (s *StorageMinerStub) SectorsUpdate(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) StorageAddLocal(p0 context.Context, p1 string) error { + return s.Internal.StorageAddLocal(p0, p1) +} + +func (s *StorageMinerStub) StorageAddLocal(p0 context.Context, p1 string) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) StorageAttach(p0 context.Context, p1 stores.StorageInfo, p2 fsutil.FsStat) error { + return s.Internal.StorageAttach(p0, p1, p2) +} + +func (s *StorageMinerStub) StorageAttach(p0 context.Context, p1 stores.StorageInfo, p2 fsutil.FsStat) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) 
([]stores.StorageInfo, error) { + return s.Internal.StorageBestAlloc(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]stores.StorageInfo, error) { + return *new([]stores.StorageInfo), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) StorageDeclareSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error { + return s.Internal.StorageDeclareSector(p0, p1, p2, p3, p4) +} + +func (s *StorageMinerStub) StorageDeclareSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) StorageDropSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error { + return s.Internal.StorageDropSector(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) StorageDropSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) { + return s.Internal.StorageFindSector(p0, p1, p2, p3, p4) +} + +func (s *StorageMinerStub) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) { + return *new([]stores.SectorStorageInfo), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) StorageInfo(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) { + return s.Internal.StorageInfo(p0, p1) +} + +func (s *StorageMinerStub) StorageInfo(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) { + return *new(stores.StorageInfo), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) 
StorageList(p0 context.Context) (map[stores.ID][]stores.Decl, error) { + return s.Internal.StorageList(p0) +} + +func (s *StorageMinerStub) StorageList(p0 context.Context) (map[stores.ID][]stores.Decl, error) { + return *new(map[stores.ID][]stores.Decl), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) StorageLocal(p0 context.Context) (map[stores.ID]string, error) { + return s.Internal.StorageLocal(p0) +} + +func (s *StorageMinerStub) StorageLocal(p0 context.Context) (map[stores.ID]string, error) { + return *new(map[stores.ID]string), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) StorageLock(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) error { + return s.Internal.StorageLock(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) StorageLock(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) StorageReportHealth(p0 context.Context, p1 stores.ID, p2 stores.HealthReport) error { + return s.Internal.StorageReportHealth(p0, p1, p2) +} + +func (s *StorageMinerStub) StorageReportHealth(p0 context.Context, p1 stores.ID, p2 stores.HealthReport) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) StorageStat(p0 context.Context, p1 stores.ID) (fsutil.FsStat, error) { + return s.Internal.StorageStat(p0, p1) +} + +func (s *StorageMinerStub) StorageStat(p0 context.Context, p1 stores.ID) (fsutil.FsStat, error) { + return *new(fsutil.FsStat), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) StorageTryLock(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) (bool, error) { + return s.Internal.StorageTryLock(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) StorageTryLock(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) 
(bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) WorkerConnect(p0 context.Context, p1 string) error { + return s.Internal.WorkerConnect(p0, p1) +} + +func (s *StorageMinerStub) WorkerConnect(p0 context.Context, p1 string) error { + return xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) WorkerJobs(p0 context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) { + return s.Internal.WorkerJobs(p0) +} + +func (s *StorageMinerStub) WorkerJobs(p0 context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) { + return *new(map[uuid.UUID][]storiface.WorkerJob), xerrors.New("method not supported") +} + +func (s *StorageMinerStruct) WorkerStats(p0 context.Context) (map[uuid.UUID]storiface.WorkerStats, error) { + return s.Internal.WorkerStats(p0) +} + +func (s *StorageMinerStub) WorkerStats(p0 context.Context) (map[uuid.UUID]storiface.WorkerStats, error) { + return *new(map[uuid.UUID]storiface.WorkerStats), xerrors.New("method not supported") +} + +func (s *WalletStruct) WalletDelete(p0 context.Context, p1 address.Address) error { + return s.Internal.WalletDelete(p0, p1) +} + +func (s *WalletStub) WalletDelete(p0 context.Context, p1 address.Address) error { + return xerrors.New("method not supported") +} + +func (s *WalletStruct) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) { + return s.Internal.WalletExport(p0, p1) +} + +func (s *WalletStub) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *WalletStruct) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { + return s.Internal.WalletHas(p0, p1) +} + +func (s *WalletStub) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *WalletStruct) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { + return 
s.Internal.WalletImport(p0, p1) +} + +func (s *WalletStub) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *WalletStruct) WalletList(p0 context.Context) ([]address.Address, error) { + return s.Internal.WalletList(p0) +} + +func (s *WalletStub) WalletList(p0 context.Context) ([]address.Address, error) { + return *new([]address.Address), xerrors.New("method not supported") +} + +func (s *WalletStruct) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) { + return s.Internal.WalletNew(p0, p1) +} + +func (s *WalletStub) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *WalletStruct) WalletSign(p0 context.Context, p1 address.Address, p2 []byte, p3 MsgMeta) (*crypto.Signature, error) { + return s.Internal.WalletSign(p0, p1, p2, p3) +} + +func (s *WalletStub) WalletSign(p0 context.Context, p1 address.Address, p2 []byte, p3 MsgMeta) (*crypto.Signature, error) { + return nil, xerrors.New("method not supported") +} + +func (s *WorkerStruct) AddPiece(p0 context.Context, p1 storage.SectorRef, p2 []abi.UnpaddedPieceSize, p3 abi.UnpaddedPieceSize, p4 storage.Data) (storiface.CallID, error) { + return s.Internal.AddPiece(p0, p1, p2, p3, p4) +} + +func (s *WorkerStub) AddPiece(p0 context.Context, p1 storage.SectorRef, p2 []abi.UnpaddedPieceSize, p3 abi.UnpaddedPieceSize, p4 storage.Data) (storiface.CallID, error) { + return *new(storiface.CallID), xerrors.New("method not supported") +} + +func (s *WorkerStruct) Enabled(p0 context.Context) (bool, error) { + return s.Internal.Enabled(p0) +} + +func (s *WorkerStub) Enabled(p0 context.Context) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *WorkerStruct) Fetch(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 
storiface.AcquireMode) (storiface.CallID, error) { + return s.Internal.Fetch(p0, p1, p2, p3, p4) +} + +func (s *WorkerStub) Fetch(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) { + return *new(storiface.CallID), xerrors.New("method not supported") +} + +func (s *WorkerStruct) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { + return s.Internal.FinalizeSector(p0, p1, p2) +} + +func (s *WorkerStub) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { + return *new(storiface.CallID), xerrors.New("method not supported") +} + +func (s *WorkerStruct) Info(p0 context.Context) (storiface.WorkerInfo, error) { + return s.Internal.Info(p0) +} + +func (s *WorkerStub) Info(p0 context.Context) (storiface.WorkerInfo, error) { + return *new(storiface.WorkerInfo), xerrors.New("method not supported") +} + +func (s *WorkerStruct) MoveStorage(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) { + return s.Internal.MoveStorage(p0, p1, p2) +} + +func (s *WorkerStub) MoveStorage(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) { + return *new(storiface.CallID), xerrors.New("method not supported") +} + +func (s *WorkerStruct) Paths(p0 context.Context) ([]stores.StoragePath, error) { + return s.Internal.Paths(p0) +} + +func (s *WorkerStub) Paths(p0 context.Context) ([]stores.StoragePath, error) { + return *new([]stores.StoragePath), xerrors.New("method not supported") +} + +func (s *WorkerStruct) ProcessSession(p0 context.Context) (uuid.UUID, error) { + return s.Internal.ProcessSession(p0) +} + +func (s *WorkerStub) ProcessSession(p0 context.Context) (uuid.UUID, error) { + return *new(uuid.UUID), xerrors.New("method not supported") +} + +func (s *WorkerStruct) ReleaseUnsealed(p0 
context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { + return s.Internal.ReleaseUnsealed(p0, p1, p2) +} + +func (s *WorkerStub) ReleaseUnsealed(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { + return *new(storiface.CallID), xerrors.New("method not supported") +} + +func (s *WorkerStruct) Remove(p0 context.Context, p1 abi.SectorID) error { + return s.Internal.Remove(p0, p1) +} + +func (s *WorkerStub) Remove(p0 context.Context, p1 abi.SectorID) error { + return xerrors.New("method not supported") +} + +func (s *WorkerStruct) SealCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) { + return s.Internal.SealCommit1(p0, p1, p2, p3, p4, p5) +} + +func (s *WorkerStub) SealCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) { + return *new(storiface.CallID), xerrors.New("method not supported") +} + +func (s *WorkerStruct) SealCommit2(p0 context.Context, p1 storage.SectorRef, p2 storage.Commit1Out) (storiface.CallID, error) { + return s.Internal.SealCommit2(p0, p1, p2) +} + +func (s *WorkerStub) SealCommit2(p0 context.Context, p1 storage.SectorRef, p2 storage.Commit1Out) (storiface.CallID, error) { + return *new(storiface.CallID), xerrors.New("method not supported") +} + +func (s *WorkerStruct) SealPreCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 []abi.PieceInfo) (storiface.CallID, error) { + return s.Internal.SealPreCommit1(p0, p1, p2, p3) +} + +func (s *WorkerStub) SealPreCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 []abi.PieceInfo) (storiface.CallID, error) { + return *new(storiface.CallID), xerrors.New("method not supported") +} + +func (s *WorkerStruct) SealPreCommit2(p0 context.Context, p1 
storage.SectorRef, p2 storage.PreCommit1Out) (storiface.CallID, error) { + return s.Internal.SealPreCommit2(p0, p1, p2) +} + +func (s *WorkerStub) SealPreCommit2(p0 context.Context, p1 storage.SectorRef, p2 storage.PreCommit1Out) (storiface.CallID, error) { + return *new(storiface.CallID), xerrors.New("method not supported") +} + +func (s *WorkerStruct) Session(p0 context.Context) (uuid.UUID, error) { + return s.Internal.Session(p0) +} + +func (s *WorkerStub) Session(p0 context.Context) (uuid.UUID, error) { + return *new(uuid.UUID), xerrors.New("method not supported") +} + +func (s *WorkerStruct) SetEnabled(p0 context.Context, p1 bool) error { + return s.Internal.SetEnabled(p0, p1) +} + +func (s *WorkerStub) SetEnabled(p0 context.Context, p1 bool) error { + return xerrors.New("method not supported") +} + +func (s *WorkerStruct) StorageAddLocal(p0 context.Context, p1 string) error { + return s.Internal.StorageAddLocal(p0, p1) +} + +func (s *WorkerStub) StorageAddLocal(p0 context.Context, p1 string) error { + return xerrors.New("method not supported") +} + +func (s *WorkerStruct) TaskDisable(p0 context.Context, p1 sealtasks.TaskType) error { + return s.Internal.TaskDisable(p0, p1) +} + +func (s *WorkerStub) TaskDisable(p0 context.Context, p1 sealtasks.TaskType) error { + return xerrors.New("method not supported") +} + +func (s *WorkerStruct) TaskEnable(p0 context.Context, p1 sealtasks.TaskType) error { + return s.Internal.TaskEnable(p0, p1) +} + +func (s *WorkerStub) TaskEnable(p0 context.Context, p1 sealtasks.TaskType) error { + return xerrors.New("method not supported") +} + +func (s *WorkerStruct) TaskTypes(p0 context.Context) (map[sealtasks.TaskType]struct{}, error) { + return s.Internal.TaskTypes(p0) +} + +func (s *WorkerStub) TaskTypes(p0 context.Context) (map[sealtasks.TaskType]struct{}, error) { + return *new(map[sealtasks.TaskType]struct{}), xerrors.New("method not supported") +} + +func (s *WorkerStruct) UnsealPiece(p0 context.Context, p1 storage.SectorRef, 
p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 cid.Cid) (storiface.CallID, error) { + return s.Internal.UnsealPiece(p0, p1, p2, p3, p4, p5) +} + +func (s *WorkerStub) UnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 cid.Cid) (storiface.CallID, error) { + return *new(storiface.CallID), xerrors.New("method not supported") +} + +func (s *WorkerStruct) Version(p0 context.Context) (Version, error) { + return s.Internal.Version(p0) +} + +func (s *WorkerStub) Version(p0 context.Context) (Version, error) { + return *new(Version), xerrors.New("method not supported") +} + +func (s *WorkerStruct) WaitQuiet(p0 context.Context) error { + return s.Internal.WaitQuiet(p0) +} + +func (s *WorkerStub) WaitQuiet(p0 context.Context) error { + return xerrors.New("method not supported") +} + +var _ ChainIO = new(ChainIOStruct) +var _ Common = new(CommonStruct) +var _ CommonNet = new(CommonNetStruct) +var _ FullNode = new(FullNodeStruct) +var _ Gateway = new(GatewayStruct) +var _ Net = new(NetStruct) +var _ Signable = new(SignableStruct) +var _ StorageMiner = new(StorageMinerStruct) +var _ Wallet = new(WalletStruct) +var _ Worker = new(WorkerStruct) diff --git a/api/proxy_util.go b/api/proxy_util.go new file mode 100644 index 00000000000..ba94a9e5dce --- /dev/null +++ b/api/proxy_util.go @@ -0,0 +1,30 @@ +package api + +import "reflect" + +var _internalField = "Internal" + +// GetInternalStructs extracts all pointers to 'Internal' sub-structs from the provided pointer to a proxy struct +func GetInternalStructs(in interface{}) []interface{} { + return getInternalStructs(reflect.ValueOf(in).Elem()) +} + +func getInternalStructs(rv reflect.Value) []interface{} { + var out []interface{} + + internal := rv.FieldByName(_internalField) + ii := internal.Addr().Interface() + out = append(out, ii) + + for i := 0; i < rv.NumField(); i++ { + if rv.Type().Field(i).Name == 
_internalField { + continue + } + + sub := getInternalStructs(rv.Field(i)) + + out = append(out, sub...) + } + + return out +} diff --git a/api/proxy_util_test.go b/api/proxy_util_test.go new file mode 100644 index 00000000000..3cbc466b6a4 --- /dev/null +++ b/api/proxy_util_test.go @@ -0,0 +1,62 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type StrA struct { + StrB + + Internal struct { + A int + } +} + +type StrB struct { + Internal struct { + B int + } +} + +type StrC struct { + Internal struct { + Internal struct { + C int + } + } +} + +func TestGetInternalStructs(t *testing.T) { + var proxy StrA + + sts := GetInternalStructs(&proxy) + require.Len(t, sts, 2) + + sa := sts[0].(*struct{ A int }) + sa.A = 3 + sb := sts[1].(*struct{ B int }) + sb.B = 4 + + require.Equal(t, 3, proxy.Internal.A) + require.Equal(t, 4, proxy.StrB.Internal.B) +} + +func TestNestedInternalStructs(t *testing.T) { + var proxy StrC + + // check that only the top-level internal struct gets picked up + + sts := GetInternalStructs(&proxy) + require.Len(t, sts, 1) + + sa := sts[0].(*struct { + Internal struct { + C int + } + }) + sa.Internal.C = 5 + + require.Equal(t, 5, proxy.Internal.Internal.C) +} diff --git a/api/test/blockminer.go b/api/test/blockminer.go deleted file mode 100644 index 6b28a579416..00000000000 --- a/api/test/blockminer.go +++ /dev/null @@ -1,56 +0,0 @@ -package test - -import ( - "context" - "fmt" - "sync/atomic" - "testing" - "time" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/miner" -) - -type BlockMiner struct { - ctx context.Context - t *testing.T - miner TestStorageNode - blocktime time.Duration - mine int64 - nulls int64 - done chan struct{} -} - -func NewBlockMiner(ctx context.Context, t *testing.T, miner TestStorageNode, blocktime time.Duration) *BlockMiner { - return &BlockMiner{ - ctx: ctx, - t: t, - miner: miner, - blocktime: blocktime, - mine: int64(1), - done: make(chan 
struct{}), - } -} - -func (bm *BlockMiner) MineBlocks() { - time.Sleep(time.Second) - go func() { - defer close(bm.done) - for atomic.LoadInt64(&bm.mine) == 1 { - time.Sleep(bm.blocktime) - nulls := atomic.SwapInt64(&bm.nulls, 0) - if err := bm.miner.MineOne(bm.ctx, miner.MineReq{ - InjectNulls: abi.ChainEpoch(nulls), - Done: func(bool, abi.ChainEpoch, error) {}, - }); err != nil { - bm.t.Error(err) - } - } - }() -} - -func (bm *BlockMiner) Stop() { - atomic.AddInt64(&bm.mine, -1) - fmt.Println("shutting down mining") - <-bm.done -} diff --git a/api/test/ccupgrade.go b/api/test/ccupgrade.go deleted file mode 100644 index 75f72d86157..00000000000 --- a/api/test/ccupgrade.go +++ /dev/null @@ -1,127 +0,0 @@ -package test - -import ( - "context" - "fmt" - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/impl" -) - -func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) { - for _, height := range []abi.ChainEpoch{ - 1, // before - 162, // while sealing - 520, // after upgrade deal - 5000, // after - } { - height := height // make linters happy by copying - t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) { - testCCUpgrade(t, b, blocktime, height) - }) - } -} - -func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) { - ctx := context.Background() - n, sn := b(t, []FullNodeOpts{FullNodeWithUpgradeAt(upgradeHeight)}, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) == 1 { - time.Sleep(blocktime) - if err 
:= sn[0].MineOne(ctx, MineNext); err != nil { - t.Error(err) - } - } - }() - - maddr, err := miner.ActorAddress(ctx) - if err != nil { - t.Fatal(err) - } - - CC := abi.SectorNumber(GenesisPreseals + 1) - Upgraded := CC + 1 - - pledgeSectors(t, ctx, miner, 1, 0, nil) - - sl, err := miner.SectorsList(ctx) - if err != nil { - t.Fatal(err) - } - if len(sl) != 1 { - t.Fatal("expected 1 sector") - } - - if sl[0] != CC { - t.Fatal("bad") - } - - { - si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK) - require.NoError(t, err) - require.Less(t, 50000, int(si.Expiration)) - } - - if err := miner.SectorMarkForUpgrade(ctx, sl[0]); err != nil { - t.Fatal(err) - } - - MakeDeal(t, ctx, 6, client, miner, false, false) - - // Validate upgrade - - { - exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK) - require.NoError(t, err) - require.NotNil(t, exp) - require.Greater(t, 50000, int(exp.OnTime)) - } - { - exp, err := client.StateSectorExpiration(ctx, maddr, Upgraded, types.EmptyTSK) - require.NoError(t, err) - require.Less(t, 50000, int(exp.OnTime)) - } - - dlInfo, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - // Sector should expire. - for { - // Wait for the sector to expire. - status, err := miner.SectorsStatus(ctx, CC, true) - require.NoError(t, err) - if status.OnTime == 0 && status.Early == 0 { - break - } - t.Log("waiting for sector to expire") - // wait one deadline per loop. 
- time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blocktime) - } - - fmt.Println("shutting down mining") - atomic.AddInt64(&mine, -1) - <-done -} diff --git a/api/test/deals.go b/api/test/deals.go deleted file mode 100644 index b81099d9015..00000000000 --- a/api/test/deals.go +++ /dev/null @@ -1,458 +0,0 @@ -package test - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/ipfs/go-cid" - files "github.com/ipfs/go-ipfs-files" - "github.com/ipld/go-car" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - sealing "github.com/filecoin-project/lotus/extern/storage-sealing" - dag "github.com/ipfs/go-merkledag" - dstest "github.com/ipfs/go-merkledag/test" - unixfile "github.com/ipfs/go-unixfs/file" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/impl" - ipld "github.com/ipfs/go-ipld-format" -) - -func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool) { - - ctx := context.Background() - n, sn := b(t, OneFull, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) == 1 { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, MineNext); err != nil { - t.Error(err) - } - } - }() - - MakeDeal(t, ctx, 6, client, miner, carExport, fastRet) - - atomic.AddInt64(&mine, -1) - fmt.Println("shutting down mining") - <-done -} - -func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) { - - ctx := context.Background() - n, 
sn := b(t, OneFull, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - - go func() { - defer close(done) - for atomic.LoadInt64(&mine) == 1 { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, MineNext); err != nil { - t.Error(err) - } - } - }() - - MakeDeal(t, ctx, 6, client, miner, false, false) - MakeDeal(t, ctx, 7, client, miner, false, false) - - atomic.AddInt64(&mine, -1) - fmt.Println("shutting down mining") - <-done -} - -func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool) { - res, data, err := CreateClientFile(ctx, client, rseed) - if err != nil { - t.Fatal(err) - } - - fcid := res.Root - fmt.Println("FILE CID: ", fcid) - - deal := startDeal(t, ctx, miner, client, fcid, fastRet) - - // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this - time.Sleep(time.Second) - waitDealSealed(t, ctx, miner, client, deal, false) - - // Retrieval - info, err := client.ClientGetDealInfo(ctx, *deal) - require.NoError(t, err) - - testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data) -} - -func CreateClientFile(ctx context.Context, client api.FullNode, rseed int) (*api.ImportRes, []byte, error) { - data := make([]byte, 1600) - rand.New(rand.NewSource(int64(rseed))).Read(data) - - dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-") - if err != nil { - return nil, nil, err - } - - path := filepath.Join(dir, "sourcefile.dat") - err = ioutil.WriteFile(path, data, 0644) - if err != nil { - return nil, nil, err - } - - res, err := client.ClientImport(ctx, api.FileRef{Path: path}) - if err != nil { - return nil, nil, err - } - return res, data, nil -} - -func 
TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) { - - ctx := context.Background() - n, sn := b(t, OneFull, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) == 1 { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, MineNext); err != nil { - t.Error(err) - } - } - }() - - data := make([]byte, 1600) - rand.New(rand.NewSource(int64(8))).Read(data) - - r := bytes.NewReader(data) - fcid, err := client.ClientImportLocal(ctx, r) - if err != nil { - t.Fatal(err) - } - - fmt.Println("FILE CID: ", fcid) - - deal := startDeal(t, ctx, miner, client, fcid, true) - - waitDealPublished(t, ctx, miner, deal) - fmt.Println("deal published, retrieving") - // Retrieval - info, err := client.ClientGetDealInfo(ctx, *deal) - require.NoError(t, err) - - testRetrieval(t, ctx, client, fcid, &info.PieceCID, false, data) - atomic.AddInt64(&mine, -1) - fmt.Println("shutting down mining") - <-done -} - -func TestSenondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) { - - ctx := context.Background() - n, sn := b(t, OneFull, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - - go func() { - defer close(done) - for atomic.LoadInt64(&mine) == 1 { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, MineNext); err != nil { - t.Error(err) - } - } - }() - - { - data1 := make([]byte, 800) - rand.New(rand.NewSource(int64(3))).Read(data1) - r := bytes.NewReader(data1) 
- - fcid1, err := client.ClientImportLocal(ctx, r) - if err != nil { - t.Fatal(err) - } - - data2 := make([]byte, 800) - rand.New(rand.NewSource(int64(9))).Read(data2) - r2 := bytes.NewReader(data2) - - fcid2, err := client.ClientImportLocal(ctx, r2) - if err != nil { - t.Fatal(err) - } - - deal1 := startDeal(t, ctx, miner, client, fcid1, true) - - // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this - time.Sleep(time.Second) - waitDealSealed(t, ctx, miner, client, deal1, true) - - deal2 := startDeal(t, ctx, miner, client, fcid2, true) - - time.Sleep(time.Second) - waitDealSealed(t, ctx, miner, client, deal2, false) - - // Retrieval - info, err := client.ClientGetDealInfo(ctx, *deal2) - require.NoError(t, err) - - rf, _ := miner.SectorsRefs(ctx) - fmt.Printf("refs: %+v\n", rf) - - testRetrieval(t, ctx, client, fcid2, &info.PieceCID, false, data2) - } - - atomic.AddInt64(&mine, -1) - fmt.Println("shutting down mining") - <-done -} - -func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool) *cid.Cid { - maddr, err := miner.ActorAddress(ctx) - if err != nil { - t.Fatal(err) - } - - addr, err := client.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - deal, err := client.ClientStartDeal(ctx, &api.StartDealParams{ - Data: &storagemarket.DataRef{ - TransferType: storagemarket.TTGraphsync, - Root: fcid, - }, - Wallet: addr, - Miner: maddr, - EpochPrice: types.NewInt(1000000), - MinBlocksDuration: uint64(build.MinDealDuration), - FastRetrieval: fastRet, - }) - if err != nil { - t.Fatalf("%+v", err) - } - return deal -} - -func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, deal *cid.Cid, noseal bool) { -loop: - for { - di, err := client.ClientGetDealInfo(ctx, *deal) - if err != nil { - t.Fatal(err) - } - switch di.State { - case storagemarket.StorageDealSealing: - if noseal { - return 
- } - startSealingWaiting(t, ctx, miner) - case storagemarket.StorageDealProposalRejected: - t.Fatal("deal rejected") - case storagemarket.StorageDealFailing: - t.Fatal("deal failed") - case storagemarket.StorageDealError: - t.Fatal("deal errored", di.Message) - case storagemarket.StorageDealActive: - fmt.Println("COMPLETE", di) - break loop - } - fmt.Println("Deal state: ", storagemarket.DealStates[di.State]) - time.Sleep(time.Second / 2) - } -} - -func waitDealPublished(t *testing.T, ctx context.Context, miner TestStorageNode, deal *cid.Cid) { - subCtx, cancel := context.WithCancel(ctx) - defer cancel() - updates, err := miner.MarketGetDealUpdates(subCtx) - if err != nil { - t.Fatal(err) - } - for { - select { - case <-ctx.Done(): - t.Fatal("context timeout") - case di := <-updates: - if deal.Equals(di.ProposalCid) { - switch di.State { - case storagemarket.StorageDealProposalRejected: - t.Fatal("deal rejected") - case storagemarket.StorageDealFailing: - t.Fatal("deal failed") - case storagemarket.StorageDealError: - t.Fatal("deal errored", di.Message) - case storagemarket.StorageDealFinalizing, storagemarket.StorageDealSealing, storagemarket.StorageDealActive: - fmt.Println("COMPLETE", di) - return - } - fmt.Println("Deal state: ", storagemarket.DealStates[di.State]) - } - } - } -} - -func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNode) { - snums, err := miner.SectorsList(ctx) - require.NoError(t, err) - - for _, snum := range snums { - si, err := miner.SectorsStatus(ctx, snum, false) - require.NoError(t, err) - - t.Logf("Sector state: %s", si.State) - if si.State == api.SectorState(sealing.WaitDeals) { - require.NoError(t, miner.SectorStartSealing(ctx, snum)) - } - } -} - -func testRetrieval(t *testing.T, ctx context.Context, client api.FullNode, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) { - offers, err := client.ClientFindData(ctx, fcid, piece) - if err != nil { - t.Fatal(err) - } - - if len(offers) < 1 { - 
t.Fatal("no offers") - } - - rpath, err := ioutil.TempDir("", "lotus-retrieve-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(rpath) //nolint:errcheck - - caddr, err := client.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - - ref := &api.FileRef{ - Path: filepath.Join(rpath, "ret"), - IsCAR: carExport, - } - updates, err := client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref) - if err != nil { - t.Fatal(err) - } - for update := range updates { - if update.Err != "" { - t.Fatalf("retrieval failed: %s", update.Err) - } - } - - rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret")) - if err != nil { - t.Fatal(err) - } - - if carExport { - rdata = extractCarData(t, ctx, rdata, rpath) - } - - if !bytes.Equal(rdata, data) { - t.Fatal("wrong data retrieved") - } -} - -func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath string) []byte { - bserv := dstest.Bserv() - ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata)) - if err != nil { - t.Fatal(err) - } - b, err := bserv.GetBlock(ctx, ch.Roots[0]) - if err != nil { - t.Fatal(err) - } - nd, err := ipld.Decode(b) - if err != nil { - t.Fatal(err) - } - dserv := dag.NewDAGService(bserv) - fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd) - if err != nil { - t.Fatal(err) - } - outPath := filepath.Join(rpath, "retLoadedCAR") - if err := files.WriteTo(fil, outPath); err != nil { - t.Fatal(err) - } - rdata, err = ioutil.ReadFile(outPath) - if err != nil { - t.Fatal(err) - } - return rdata -} diff --git a/api/test/mining.go b/api/test/mining.go deleted file mode 100644 index 11953b95d70..00000000000 --- a/api/test/mining.go +++ /dev/null @@ -1,201 +0,0 @@ -package test - -import ( - "bytes" - "context" - "fmt" - "math/rand" - "sync/atomic" - "testing" - "time" - - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/lotus/build" - 
"github.com/filecoin-project/lotus/miner" - "github.com/filecoin-project/lotus/node/impl" -) - -//nolint:deadcode,varcheck -var log = logging.Logger("apitest") - -func (ts *testSuite) testMining(t *testing.T) { - ctx := context.Background() - apis, sn := ts.makeNodes(t, OneFull, OneMiner) - api := apis[0] - - newHeads, err := api.ChainNotify(ctx) - require.NoError(t, err) - initHead := (<-newHeads)[0] - baseHeight := initHead.Val.Height() - - h1, err := api.ChainHead(ctx) - require.NoError(t, err) - require.Equal(t, int64(h1.Height()), int64(baseHeight)) - - MineUntilBlock(ctx, t, apis[0], sn[0], nil) - require.NoError(t, err) - - <-newHeads - - h2, err := api.ChainHead(ctx) - require.NoError(t, err) - require.Greater(t, int64(h2.Height()), int64(h1.Height())) -} - -func (ts *testSuite) testMiningReal(t *testing.T) { - build.InsecurePoStValidation = false - defer func() { - build.InsecurePoStValidation = true - }() - - ctx := context.Background() - apis, sn := ts.makeNodes(t, OneFull, OneMiner) - api := apis[0] - - newHeads, err := api.ChainNotify(ctx) - require.NoError(t, err) - at := (<-newHeads)[0].Val.Height() - - h1, err := api.ChainHead(ctx) - require.NoError(t, err) - require.Equal(t, int64(at), int64(h1.Height())) - - MineUntilBlock(ctx, t, apis[0], sn[0], nil) - require.NoError(t, err) - - <-newHeads - - h2, err := api.ChainHead(ctx) - require.NoError(t, err) - require.Greater(t, int64(h2.Height()), int64(h1.Height())) - - MineUntilBlock(ctx, t, apis[0], sn[0], nil) - require.NoError(t, err) - - <-newHeads - - h3, err := api.ChainHead(ctx) - require.NoError(t, err) - require.Greater(t, int64(h3.Height()), int64(h2.Height())) -} - -func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) { - // test making a deal with a fresh miner, and see if it starts to mine - - ctx := context.Background() - n, sn := b(t, OneFull, []StorageMiner{ - {Full: 0, Preseal: PresealGenesis}, - {Full: 0, Preseal: 0}, // TODO: Add support for miners 
on non-first full node - }) - client := n[0].FullNode.(*impl.FullNodeAPI) - provider := sn[1] - genesisMiner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := provider.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - - if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second) - - data := make([]byte, 600) - rand.New(rand.NewSource(5)).Read(data) - - r := bytes.NewReader(data) - fcid, err := client.ClientImportLocal(ctx, r) - if err != nil { - t.Fatal(err) - } - - fmt.Println("FILE CID: ", fcid) - - var mine int32 = 1 - done := make(chan struct{}) - minedTwo := make(chan struct{}) - - m2addr, err := sn[1].ActorAddress(context.TODO()) - if err != nil { - t.Fatal(err) - } - - go func() { - defer close(done) - - complChan := minedTwo - for atomic.LoadInt32(&mine) != 0 { - wait := make(chan int) - mdone := func(mined bool, _ abi.ChainEpoch, err error) { - n := 0 - if mined { - n = 1 - } - wait <- n - } - - if err := sn[0].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil { - t.Error(err) - } - - if err := sn[1].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil { - t.Error(err) - } - - expect := <-wait - expect += <-wait - - time.Sleep(blocktime) - if expect == 0 { - // null block - continue - } - - var nodeOneMined bool - for _, node := range sn { - mb, err := node.MiningBase(ctx) - if err != nil { - t.Error(err) - return - } - - for _, b := range mb.Blocks() { - if b.Miner == m2addr { - nodeOneMined = true - break - } - } - - } - - if nodeOneMined && complChan != nil { - close(complChan) - complChan = nil - } - - } - }() - - deal := startDeal(t, ctx, provider, client, fcid, false) - - // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this - time.Sleep(time.Second) - - waitDealSealed(t, ctx, provider, client, deal, false) - - <-minedTwo - - atomic.StoreInt32(&mine, 0) - 
fmt.Println("shutting down mining") - <-done -} diff --git a/api/test/tape.go b/api/test/tape.go deleted file mode 100644 index 466bdd829a5..00000000000 --- a/api/test/tape.go +++ /dev/null @@ -1,114 +0,0 @@ -package test - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/stmgr" - sealing "github.com/filecoin-project/lotus/extern/storage-sealing" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/impl" - "github.com/stretchr/testify/require" -) - -func TestTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration) { - t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) }) - t.Run("after", func(t *testing.T) { testTapeFix(t, b, blocktime, true) }) -} -func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - upgradeSchedule := stmgr.UpgradeSchedule{{ - Network: build.ActorUpgradeNetworkVersion, - Height: 1, - Migration: stmgr.UpgradeActorsV2, - }} - if after { - upgradeSchedule = append(upgradeSchedule, stmgr.Upgrade{ - Network: network.Version5, - Height: 2, - }) - } - - n, sn := b(t, []FullNodeOpts{{Opts: func(_ []TestNode) node.Option { - return node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule) - }}}, OneMiner) - - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - build.Clock.Sleep(time.Second) - - done := make(chan struct{}) - go func() { - defer close(done) - for ctx.Err() == nil { - build.Clock.Sleep(blocktime) - if err := sn[0].MineOne(ctx, MineNext); err != nil { - if ctx.Err() != 
nil { - // context was canceled, ignore the error. - return - } - t.Error(err) - } - } - }() - defer func() { - cancel() - <-done - }() - - err = miner.PledgeSector(ctx) - require.NoError(t, err) - - // Wait till done. - var sectorNo abi.SectorNumber - for { - s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM - require.NoError(t, err) - fmt.Printf("Sectors: %d\n", len(s)) - if len(s) == 1 { - sectorNo = s[0] - break - } - - build.Clock.Sleep(100 * time.Millisecond) - } - - fmt.Printf("All sectors is fsm\n") - - // If before, we expect the precommit to fail - successState := api.SectorState(sealing.CommitFailed) - failureState := api.SectorState(sealing.Proving) - if after { - // otherwise, it should succeed. - successState, failureState = failureState, successState - } - - for { - st, err := miner.SectorsStatus(ctx, sectorNo, false) - require.NoError(t, err) - if st.State == successState { - break - } - require.NotEqual(t, failureState, st.State) - build.Clock.Sleep(100 * time.Millisecond) - fmt.Println("WaitSeal") - } - -} diff --git a/api/test/test.go b/api/test/test.go deleted file mode 100644 index bae3d520ef8..00000000000 --- a/api/test/test.go +++ /dev/null @@ -1,243 +0,0 @@ -package test - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/types" - - logging "github.com/ipfs/go-log/v2" - "github.com/multiformats/go-multiaddr" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/miner" - "github.com/filecoin-project/lotus/node" -) - -func init() { - logging.SetAllLoggers(logging.LevelInfo) - err := 
os.Setenv("BELLMAN_NO_GPU", "1") - if err != nil { - panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err)) - } - build.InsecurePoStValidation = true -} - -type TestNode struct { - api.FullNode - // ListenAddr is the address on which an API server is listening, if an - // API server is created for this Node - ListenAddr multiaddr.Multiaddr -} - -type TestStorageNode struct { - api.StorageMiner - // ListenAddr is the address on which an API server is listening, if an - // API server is created for this Node - ListenAddr multiaddr.Multiaddr - - MineOne func(context.Context, miner.MineReq) error -} - -var PresealGenesis = -1 - -const GenesisPreseals = 2 - -// Options for setting up a mock storage miner -type StorageMiner struct { - Full int - Preseal int -} - -type OptionGenerator func([]TestNode) node.Option - -// Options for setting up a mock full node -type FullNodeOpts struct { - Lite bool // run node in "lite" mode - Opts OptionGenerator // generate dependency injection options -} - -// APIBuilder is a function which is invoked in test suite to provide -// test nodes and networks -// -// fullOpts array defines options for each full node -// storage array defines storage nodes, numbers in the array specify full node -// index the storage node 'belongs' to -type APIBuilder func(t *testing.T, full []FullNodeOpts, storage []StorageMiner) ([]TestNode, []TestStorageNode) -type testSuite struct { - makeNodes APIBuilder -} - -// TestApis is the entry point to API test suite -func TestApis(t *testing.T, b APIBuilder) { - ts := testSuite{ - makeNodes: b, - } - - t.Run("version", ts.testVersion) - t.Run("id", ts.testID) - t.Run("testConnectTwo", ts.testConnectTwo) - t.Run("testMining", ts.testMining) - t.Run("testMiningReal", ts.testMiningReal) - t.Run("testSearchMsg", ts.testSearchMsg) -} - -func DefaultFullOpts(nFull int) []FullNodeOpts { - full := make([]FullNodeOpts, nFull) - for i := range full { - full[i] = FullNodeOpts{ - Opts: func(nodes 
[]TestNode) node.Option { - return node.Options() - }, - } - } - return full -} - -var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}} -var OneFull = DefaultFullOpts(1) -var TwoFull = DefaultFullOpts(2) - -var FullNodeWithUpgradeAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts { - return FullNodeOpts{ - Opts: func(nodes []TestNode) node.Option { - return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ - // Skip directly to tape height so precommits work. - Network: network.Version5, - Height: upgradeHeight, - Migration: stmgr.UpgradeActorsV2, - }}) - }, - } -} - -var MineNext = miner.MineReq{ - InjectNulls: 0, - Done: func(bool, abi.ChainEpoch, error) {}, -} - -func (ts *testSuite) testVersion(t *testing.T) { - build.RunningNodeType = build.NodeFull - - ctx := context.Background() - apis, _ := ts.makeNodes(t, OneFull, OneMiner) - api := apis[0] - - v, err := api.Version(ctx) - if err != nil { - t.Fatal(err) - } - require.Equal(t, v.Version, build.BuildVersion) -} - -func (ts *testSuite) testSearchMsg(t *testing.T) { - apis, miners := ts.makeNodes(t, OneFull, OneMiner) - - api := apis[0] - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - senderAddr, err := api.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - - msg := &types.Message{ - From: senderAddr, - To: senderAddr, - Value: big.Zero(), - } - bm := NewBlockMiner(ctx, t, miners[0], 100*time.Millisecond) - bm.MineBlocks() - defer bm.Stop() - - sm, err := api.MpoolPushMessage(ctx, msg, nil) - if err != nil { - t.Fatal(err) - } - res, err := api.StateWaitMsg(ctx, sm.Cid(), 1) - if err != nil { - t.Fatal(err) - } - if res.Receipt.ExitCode != 0 { - t.Fatal("did not successfully send message") - } - - searchRes, err := api.StateSearchMsg(ctx, sm.Cid()) - if err != nil { - t.Fatal(err) - } - - if searchRes.TipSet != res.TipSet { - t.Fatalf("search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet) - } - -} - -func (ts 
*testSuite) testID(t *testing.T) { - ctx := context.Background() - apis, _ := ts.makeNodes(t, OneFull, OneMiner) - api := apis[0] - - id, err := api.ID(ctx) - if err != nil { - t.Fatal(err) - } - assert.Regexp(t, "^12", id.Pretty()) -} - -func (ts *testSuite) testConnectTwo(t *testing.T) { - ctx := context.Background() - apis, _ := ts.makeNodes(t, TwoFull, OneMiner) - - p, err := apis[0].NetPeers(ctx) - if err != nil { - t.Fatal(err) - } - if len(p) != 0 { - t.Error("Node 0 has a peer") - } - - p, err = apis[1].NetPeers(ctx) - if err != nil { - t.Fatal(err) - } - if len(p) != 0 { - t.Error("Node 1 has a peer") - } - - addrs, err := apis[1].NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := apis[0].NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } - - p, err = apis[0].NetPeers(ctx) - if err != nil { - t.Fatal(err) - } - if len(p) != 1 { - t.Error("Node 0 doesn't have 1 peer") - } - - p, err = apis[1].NetPeers(ctx) - if err != nil { - t.Fatal(err) - } - if len(p) != 1 { - t.Error("Node 0 doesn't have 1 peer") - } -} diff --git a/api/test/util.go b/api/test/util.go deleted file mode 100644 index 8695e2e2efb..00000000000 --- a/api/test/util.go +++ /dev/null @@ -1,86 +0,0 @@ -package test - -import ( - "context" - "testing" - "time" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/miner" -) - -func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.Address, amount abi.TokenAmount) { - senderAddr, err := sender.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - - msg := &types.Message{ - From: senderAddr, - To: addr, - Value: amount, - } - - sm, err := sender.MpoolPushMessage(ctx, msg, nil) - if err != nil { - t.Fatal(err) - } - res, err := sender.StateWaitMsg(ctx, sm.Cid(), 1) - if err != nil { - t.Fatal(err) - } - if res.Receipt.ExitCode != 0 { - t.Fatal("did not 
successfully send money") - } -} - -func MineUntilBlock(ctx context.Context, t *testing.T, fn TestNode, sn TestStorageNode, cb func(abi.ChainEpoch)) { - for i := 0; i < 1000; i++ { - var success bool - var err error - var epoch abi.ChainEpoch - wait := make(chan struct{}) - mineErr := sn.MineOne(ctx, miner.MineReq{ - Done: func(win bool, ep abi.ChainEpoch, e error) { - success = win - err = e - epoch = ep - wait <- struct{}{} - }, - }) - if mineErr != nil { - t.Fatal(mineErr) - } - <-wait - if err != nil { - t.Fatal(err) - } - if success { - // Wait until it shows up on the given full nodes ChainHead - nloops := 50 - for i := 0; i < nloops; i++ { - ts, err := fn.ChainHead(ctx) - if err != nil { - t.Fatal(err) - } - if ts.Height() == epoch { - break - } - if i == nloops-1 { - t.Fatal("block never managed to sync to node") - } - time.Sleep(time.Millisecond * 10) - } - - if cb != nil { - cb(epoch) - } - return - } - t.Log("did not mine block, trying again", i) - } - t.Fatal("failed to mine 1000 times in a row...") -} diff --git a/api/test/window_post.go b/api/test/window_post.go deleted file mode 100644 index 55fc4ad7044..00000000000 --- a/api/test/window_post.go +++ /dev/null @@ -1,336 +0,0 @@ -package test - -import ( - "context" - "fmt" - "sync/atomic" - - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/extern/sector-storage/mock" - sealing "github.com/filecoin-project/lotus/extern/storage-sealing" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" - bminer "github.com/filecoin-project/lotus/miner" - "github.com/filecoin-project/lotus/node/impl" -) - -func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, sn 
:= b(t, OneFull, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - build.Clock.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) != 0 { - build.Clock.Sleep(blocktime) - if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { - - }}); err != nil { - t.Error(err) - } - } - }() - - pledgeSectors(t, ctx, miner, nSectors, 0, nil) - - atomic.StoreInt64(&mine, 0) - <-done -} - -func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) { - for i := 0; i < n; i++ { - err := miner.PledgeSector(ctx) - require.NoError(t, err) - if i%3 == 0 && blockNotif != nil { - <-blockNotif - } - } - - for { - s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM - require.NoError(t, err) - fmt.Printf("Sectors: %d\n", len(s)) - if len(s) >= n+existing { - break - } - - build.Clock.Sleep(100 * time.Millisecond) - } - - fmt.Printf("All sectors is fsm\n") - - s, err := miner.SectorsList(ctx) - require.NoError(t, err) - - toCheck := map[abi.SectorNumber]struct{}{} - for _, number := range s { - toCheck[number] = struct{}{} - } - - for len(toCheck) > 0 { - for n := range toCheck { - st, err := miner.SectorsStatus(ctx, n, false) - require.NoError(t, err) - if st.State == api.SectorState(sealing.Proving) { - delete(toCheck, n) - } - if strings.Contains(string(st.State), "Fail") { - t.Fatal("sector in a failed state", st.State) - } - } - - build.Clock.Sleep(100 * time.Millisecond) - fmt.Printf("WaitSeal: %d\n", len(s)) - } -} - -func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { - for _, height := range []abi.ChainEpoch{ - 1, // before - 162, // while 
sealing - 5000, // while proving - } { - height := height // copy to satisfy lints - t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) { - testWindowPostUpgrade(t, b, blocktime, nSectors, height) - }) - } - -} -func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int, - upgradeHeight abi.ChainEpoch) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, sn := b(t, []FullNodeOpts{FullNodeWithUpgradeAt(upgradeHeight)}, OneMiner) - - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - build.Clock.Sleep(time.Second) - - done := make(chan struct{}) - go func() { - defer close(done) - for ctx.Err() == nil { - build.Clock.Sleep(blocktime) - if err := sn[0].MineOne(ctx, MineNext); err != nil { - if ctx.Err() != nil { - // context was canceled, ignore the error. 
- return - } - t.Error(err) - } - } - }() - defer func() { - cancel() - <-done - }() - - pledgeSectors(t, ctx, miner, nSectors, 0, nil) - - maddr, err := miner.ActorAddress(ctx) - require.NoError(t, err) - - di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - mid, err := address.IDFromAddress(maddr) - require.NoError(t, err) - - fmt.Printf("Running one proving period\n") - fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2) - - for { - head, err := client.ChainHead(ctx) - require.NoError(t, err) - - if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 { - fmt.Printf("Now head.Height = %d\n", head.Height()) - break - } - build.Clock.Sleep(blocktime) - } - - p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - ssz, err := miner.ActorSectorSize(ctx, maddr) - require.NoError(t, err) - - require.Equal(t, p.MinerPower, p.TotalPower) - require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+GenesisPreseals))) - - fmt.Printf("Drop some sectors\n") - - // Drop 2 sectors from deadline 2 partition 0 (full partition / deadline) - { - parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK) - require.NoError(t, err) - require.Greater(t, len(parts), 0) - - secs := parts[0].AllSectors - n, err := secs.Count() - require.NoError(t, err) - require.Equal(t, uint64(2), n) - - // Drop the partition - err = secs.ForEach(func(sid uint64) error { - return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(abi.SectorID{ - Miner: abi.ActorID(mid), - Number: abi.SectorNumber(sid), - }, true) - }) - require.NoError(t, err) - } - - var s abi.SectorID - - // Drop 1 sectors from deadline 3 partition 0 - { - parts, err := client.StateMinerPartitions(ctx, maddr, 3, types.EmptyTSK) - require.NoError(t, err) - require.Greater(t, len(parts), 0) - - secs := parts[0].AllSectors - n, err := secs.Count() 
- require.NoError(t, err) - require.Equal(t, uint64(2), n) - - // Drop the sector - sn, err := secs.First() - require.NoError(t, err) - - all, err := secs.All(2) - require.NoError(t, err) - fmt.Println("the sectors", all) - - s = abi.SectorID{ - Miner: abi.ActorID(mid), - Number: abi.SectorNumber(sn), - } - - err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true) - require.NoError(t, err) - } - - di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - fmt.Printf("Go through another PP, wait for sectors to become faulty\n") - fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2) - - for { - head, err := client.ChainHead(ctx) - require.NoError(t, err) - - if head.Height() > di.PeriodStart+(di.WPoStProvingPeriod)+2 { - fmt.Printf("Now head.Height = %d\n", head.Height()) - break - } - - build.Clock.Sleep(blocktime) - } - - p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - require.Equal(t, p.MinerPower, p.TotalPower) - - sectors := p.MinerPower.RawBytePower.Uint64() / uint64(ssz) - require.Equal(t, nSectors+GenesisPreseals-3, int(sectors)) // -3 just removed sectors - - fmt.Printf("Recover one sector\n") - - err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false) - require.NoError(t, err) - - di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2) - - for { - head, err := client.ChainHead(ctx) - require.NoError(t, err) - - if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 { - fmt.Printf("Now head.Height = %d\n", head.Height()) - break - } - - build.Clock.Sleep(blocktime) - } - - p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - require.Equal(t, p.MinerPower, p.TotalPower) - - sectors = 
p.MinerPower.RawBytePower.Uint64() / uint64(ssz) - require.Equal(t, nSectors+GenesisPreseals-2, int(sectors)) // -2 not recovered sectors - - // pledge a sector after recovery - - pledgeSectors(t, ctx, miner, 1, nSectors, nil) - - { - // Wait until proven. - di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2 - fmt.Printf("End for head.Height > %d\n", waitUntil) - - for { - head, err := client.ChainHead(ctx) - require.NoError(t, err) - - if head.Height() > waitUntil { - fmt.Printf("Now head.Height = %d\n", head.Height()) - break - } - } - } - - p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - require.Equal(t, p.MinerPower, p.TotalPower) - - sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz) - require.Equal(t, nSectors+GenesisPreseals-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged -} diff --git a/api/types.go b/api/types.go index a69aa28d99e..9d887b0a117 100644 --- a/api/types.go +++ b/api/types.go @@ -3,10 +3,13 @@ package api import ( "encoding/json" "fmt" + "time" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/lotus/chain/types" datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/build" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" @@ -51,19 +54,6 @@ type MessageSendSpec struct { MaxFee abi.TokenAmount } -var DefaultMessageSendSpec = MessageSendSpec{ - // MaxFee of 0.1FIL - MaxFee: abi.NewTokenAmount(int64(build.FilecoinPrecision) / 10), -} - -func (ms *MessageSendSpec) Get() MessageSendSpec { - if ms == nil { - return DefaultMessageSendSpec - } - - return *ms -} - type DataTransferChannel struct { TransferID datatransfer.TransferID Status datatransfer.Status @@ -74,6 +64,7 @@ type DataTransferChannel struct { Message string OtherPeer 
peer.ID Transferred uint64 + Stages *datatransfer.ChannelStages } // NewDataTransferChannel constructs an API DataTransferChannel type from full channel state snapshot and a host id @@ -107,3 +98,100 @@ func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelSta } return channel } + +type NetBlockList struct { + Peers []peer.ID + IPAddrs []string + IPSubnets []string +} + +type ExtendedPeerInfo struct { + ID peer.ID + Agent string + Addrs []string + Protocols []string + ConnMgrMeta *ConnMgrInfo +} + +type ConnMgrInfo struct { + FirstSeen time.Time + Value int + Tags map[string]int + Conns map[string]time.Time +} + +type NodeStatus struct { + SyncStatus NodeSyncStatus + PeerStatus NodePeerStatus + ChainStatus NodeChainStatus +} + +type NodeSyncStatus struct { + Epoch uint64 + Behind uint64 +} + +type NodePeerStatus struct { + PeersToPublishMsgs int + PeersToPublishBlocks int +} + +type NodeChainStatus struct { + BlocksPerTipsetLast100 float64 + BlocksPerTipsetLastFinality float64 +} + +type CheckStatusCode int + +//go:generate go run golang.org/x/tools/cmd/stringer -type=CheckStatusCode -trimprefix=CheckStatus +const ( + _ CheckStatusCode = iota + // Message Checks + CheckStatusMessageSerialize + CheckStatusMessageSize + CheckStatusMessageValidity + CheckStatusMessageMinGas + CheckStatusMessageMinBaseFee + CheckStatusMessageBaseFee + CheckStatusMessageBaseFeeLowerBound + CheckStatusMessageBaseFeeUpperBound + CheckStatusMessageGetStateNonce + CheckStatusMessageNonce + CheckStatusMessageGetStateBalance + CheckStatusMessageBalance +) + +type CheckStatus struct { + Code CheckStatusCode + OK bool + Err string + Hint map[string]interface{} +} + +type MessageCheckStatus struct { + Cid cid.Cid + CheckStatus +} + +type MessagePrototype struct { + Message types.Message + ValidNonce bool +} + +type RetrievalInfo struct { + PayloadCID cid.Cid + ID retrievalmarket.DealID + PieceCID *cid.Cid + PricePerByte abi.TokenAmount + UnsealPrice abi.TokenAmount + + 
Status retrievalmarket.DealStatus + Message string // more information about deal state, particularly errors + Provider peer.ID + BytesReceived uint64 + BytesPaidFor uint64 + TotalPaid abi.TokenAmount + + TransferChannelID *datatransfer.ChannelID + DataTransfer *DataTransferChannel +} diff --git a/api/types/actors.go b/api/types/actors.go new file mode 100644 index 00000000000..d55ef3e107a --- /dev/null +++ b/api/types/actors.go @@ -0,0 +1,5 @@ +package apitypes + +import "github.com/filecoin-project/go-state-types/network" + +type NetworkVersion = network.Version diff --git a/api/types/openrpc.go b/api/types/openrpc.go new file mode 100644 index 00000000000..7d65cbde63c --- /dev/null +++ b/api/types/openrpc.go @@ -0,0 +1,3 @@ +package apitypes + +type OpenRPCDocument map[string]interface{} diff --git a/api/v0api/full.go b/api/v0api/full.go new file mode 100644 index 00000000000..b152c6cbb84 --- /dev/null +++ b/api/v0api/full.go @@ -0,0 +1,711 @@ +package v0api + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-multistore" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + + "github.com/filecoin-project/lotus/api" + apitypes "github.com/filecoin-project/lotus/api/types" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/paych" + "github.com/filecoin-project/lotus/chain/types" + marketevents "github.com/filecoin-project/lotus/markets/loggers" + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +//go:generate go run 
github.com/golang/mock/mockgen -destination=v0mocks/mock_full.go -package=v0mocks . FullNode + +// MODIFYING THE API INTERFACE +// +// NOTE: This is the V0 (Stable) API - when adding methods to this interface, +// you'll need to make sure they are also present on the V1 (Unstable) API +// +// This API is implemented in `v1_wrapper.go` as a compatibility layer backed +// by the V1 api +// +// When adding / changing methods in this file: +// * Do the change here +// * Adjust implementation in `node/impl/` +// * Run `make gen` - this will: +// * Generate proxy structs +// * Generate mocks +// * Generate markdown docs +// * Generate openrpc blobs + +// FullNode API is a low-level interface to the Filecoin network full node +type FullNode interface { + Common + Net + + // MethodGroup: Chain + // The Chain method group contains methods for interacting with the + // blockchain, but that do not require any form of state computation. + + // ChainNotify returns channel with chain head updates. + // First message is guaranteed to be of len == 1, and type == 'current'. + ChainNotify(context.Context) (<-chan []*api.HeadChange, error) //perm:read + + // ChainHead returns the current head of the chain. + ChainHead(context.Context) (*types.TipSet, error) //perm:read + + // ChainGetRandomnessFromTickets is used to sample the chain for randomness. + ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read + + // ChainGetRandomnessFromBeacon is used to sample the beacon for randomness. + ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read + + // ChainGetBlock returns the block specified by the given CID. 
+ ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) //perm:read + // ChainGetTipSet returns the tipset specified by the given TipSetKey. + ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) //perm:read + + // ChainGetBlockMessages returns messages stored in the specified block. + // + // Note: If there are multiple blocks in a tipset, it's likely that some + // messages will be duplicated. It's also possible for blocks in a tipset to have + // different messages from the same sender at the same nonce. When that happens, + // only the first message (in a block with lowest ticket) will be considered + // for execution + // + // NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK + // + // DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET + // Use ChainGetParentMessages, which will perform correct message deduplication + ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*api.BlockMessages, error) //perm:read + + // ChainGetParentReceipts returns receipts for messages in parent tipset of + // the specified block. The receipts in the list returned is one-to-one with the + // messages returned by a call to ChainGetParentMessages with the same blockCid. + ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) //perm:read + + // ChainGetParentMessages returns messages stored in parent tipset of the + // specified block. + ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]api.Message, error) //perm:read + + // ChainGetMessagesInTipset returns message stores in current tipset + ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]api.Message, error) //perm:read + + // ChainGetTipSetByHeight looks back for a tipset at the specified epoch. + // If there are no blocks at the specified epoch, a tipset at an earlier epoch + // will be returned. 
+ ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) //perm:read + + // ChainReadObj reads ipld nodes referenced by the specified CID from chain + // blockstore and returns raw bytes. + ChainReadObj(context.Context, cid.Cid) ([]byte, error) //perm:read + + // ChainDeleteObj deletes node referenced by the given CID + ChainDeleteObj(context.Context, cid.Cid) error //perm:admin + + // ChainHasObj checks if a given CID exists in the chain blockstore. + ChainHasObj(context.Context, cid.Cid) (bool, error) //perm:read + + // ChainStatObj returns statistics about the graph referenced by 'obj'. + // If 'base' is also specified, then the returned stat will be a diff + // between the two objects. + ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (api.ObjStat, error) //perm:read + + // ChainSetHead forcefully sets current chain head. Use with caution. + ChainSetHead(context.Context, types.TipSetKey) error //perm:admin + + // ChainGetGenesis returns the genesis tipset. + ChainGetGenesis(context.Context) (*types.TipSet, error) //perm:read + + // ChainTipSetWeight computes weight for the specified tipset. + ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) //perm:read + ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) //perm:read + + // ChainGetMessage reads a message referenced by the specified CID from the + // chain blockstore. + ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) //perm:read + + // ChainGetPath returns a set of revert/apply operations needed to get from + // one tipset to another, for example: + //``` + // to + // ^ + // from tAA + // ^ ^ + // tBA tAB + // ^---*--^ + // ^ + // tRR + //``` + // Would return `[revert(tBA), apply(tAB), apply(tAA)]` + ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) //perm:read + + // ChainExport returns a stream of bytes with CAR dump of chain data. 
+ // The exported chain data includes the header chain from the given tipset + // back to genesis, the entire genesis state, and the most recent 'nroots' + // state trees. + // If oldmsgskip is set, messages from before the requested roots are also not included. + ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read + + // MethodGroup: Beacon + // The Beacon method group contains methods for interacting with the random beacon (DRAND) + + // BeaconGetEntry returns the beacon entry for the given filecoin epoch. If + // the entry has not yet been produced, the call will block until the entry + // becomes available + BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) //perm:read + + // GasEstimateFeeCap estimates gas fee cap + GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) //perm:read + + // GasEstimateGasLimit estimates gas used by the message and returns it. + // It fails if message fails to execute. + GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error) //perm:read + + // GasEstimateGasPremium estimates what gas price should be used for a + // message to have high likelihood of inclusion in `nblocksincl` epochs. + + GasEstimateGasPremium(_ context.Context, nblocksincl uint64, + sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) //perm:read + + // GasEstimateMessageGas estimates gas values for unset message gas fields + GasEstimateMessageGas(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) //perm:read + + // MethodGroup: Sync + // The Sync method group contains methods for interacting with and + // observing the lotus sync service. + + // SyncState returns the current status of the lotus sync system. 
+ SyncState(context.Context) (*api.SyncState, error) //perm:read + + // SyncSubmitBlock can be used to submit a newly created block to the. + // network through this node + SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error //perm:write + + // SyncIncomingBlocks returns a channel streaming incoming, potentially not + // yet synced block headers. + SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) //perm:read + + // SyncCheckpoint marks a blocks as checkpointed, meaning that it won't ever fork away from it. + SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error //perm:admin + + // SyncMarkBad marks a blocks as bad, meaning that it won't ever by synced. + // Use with extreme caution. + SyncMarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin + + // SyncUnmarkBad unmarks a blocks as bad, making it possible to be validated and synced again. + SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin + + // SyncUnmarkAllBad purges bad block cache, making it possible to sync to chains previously marked as bad + SyncUnmarkAllBad(ctx context.Context) error //perm:admin + + // SyncCheckBad checks if a block was marked as bad, and if it was, returns + // the reason. + SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) //perm:read + + // SyncValidateTipset indicates whether the provided tipset is valid or not + SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) //perm:read + + // MethodGroup: Mpool + // The Mpool methods are for interacting with the message pool. The message pool + // manages all incoming and outgoing 'messages' going over the network. + + // MpoolPending returns pending mempool messages. 
+ MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) //perm:read + + // MpoolSelect returns a list of pending messages for inclusion in the next block + MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) //perm:read + + // MpoolPush pushes a signed message to mempool. + MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write + + // MpoolPushUntrusted pushes a signed message to mempool from untrusted sources. + MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write + + // MpoolPushMessage atomically assigns a nonce, signs, and pushes a message + // to mempool. + // maxFee is only used when GasFeeCap/GasPremium fields aren't specified + // + // When maxFee is set to 0, MpoolPushMessage will guess appropriate fee + // based on current chain conditions + MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) //perm:sign + + // MpoolBatchPush batch pushes a signed message to mempool. + MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write + + // MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources. + MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write + + // MpoolBatchPushMessage batch pushes a unsigned message to mempool. + MpoolBatchPushMessage(context.Context, []*types.Message, *api.MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign + + // MpoolGetNonce gets next nonce for the specified sender. + // Note that this method may not be atomic. Use MpoolPushMessage instead. 
+ MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read + MpoolSub(context.Context) (<-chan api.MpoolUpdate, error) //perm:read + + // MpoolClear clears pending messages from the mpool + MpoolClear(context.Context, bool) error //perm:write + + // MpoolGetConfig returns (a copy of) the current mpool config + MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read + // MpoolSetConfig sets the mpool config to (a copy of) the supplied config + MpoolSetConfig(context.Context, *types.MpoolConfig) error //perm:admin + + // MethodGroup: Miner + + MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) //perm:read + MinerCreateBlock(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) //perm:write + + // // UX ? + + // MethodGroup: Wallet + + // WalletNew creates a new address in the wallet with the given sigType. + // Available key types: bls, secp256k1, secp256k1-ledger + // Support for numerical types: 1 - secp256k1, 2 - BLS is deprecated + WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:write + // WalletHas indicates whether the given address is in the wallet. + WalletHas(context.Context, address.Address) (bool, error) //perm:write + // WalletList lists all the addresses in the wallet. + WalletList(context.Context) ([]address.Address, error) //perm:write + // WalletBalance returns the balance of the given address at the current head of the chain. + WalletBalance(context.Context, address.Address) (types.BigInt, error) //perm:read + // WalletSign signs the given bytes using the given address. + WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) //perm:sign + // WalletSignMessage signs the given message using the given address. 
+ WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) //perm:sign + // WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid. + // The address does not have to be in the wallet. + WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read + // WalletDefaultAddress returns the address marked as default in the wallet. + WalletDefaultAddress(context.Context) (address.Address, error) //perm:write + // WalletSetDefault marks the given address as as the default one. + WalletSetDefault(context.Context, address.Address) error //perm:write + // WalletExport returns the private key of an address in the wallet. + WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin + // WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet. + WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin + // WalletDelete deletes an address from the wallet. + WalletDelete(context.Context, address.Address) error //perm:admin + // WalletValidateAddress validates whether a given string can be decoded as a well-formed address + WalletValidateAddress(context.Context, string) (address.Address, error) //perm:read + + // Other + + // MethodGroup: Client + // The Client methods all have to do with interacting with the storage and + // retrieval markets as a client + + // ClientImport imports file under the specified path into filestore. + ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) //perm:admin + // ClientRemoveImport removes file import + ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin + // ClientStartDeal proposes a deal with a miner. 
+ ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:admin + // ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. + ClientStatelessDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:write + // ClientGetDealInfo returns the latest information about a given deal. + ClientGetDealInfo(context.Context, cid.Cid) (*api.DealInfo, error) //perm:read + // ClientListDeals returns information about the deals made by the local client. + ClientListDeals(ctx context.Context) ([]api.DealInfo, error) //perm:write + // ClientGetDealUpdates returns the status of updated deals + ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) //perm:write + // ClientGetDealStatus returns status given a code + ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read + // ClientHasLocal indicates whether a certain CID is locally stored. + ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write + // ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). + ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) //perm:read + // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. + ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) //perm:read + // ClientRetrieve initiates the retrieval of a file, as specified in the order. + ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error //perm:admin + // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel + // of status updates. 
+ ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin + // ClientQueryAsk returns a signed StorageAsk from the specified miner. + // ClientListRetrievals returns information about retrievals made by the local client + ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) //perm:write + // ClientGetRetrievalUpdates returns status of updated retrieval deals + ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) //perm:write + ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read + // ClientCalcCommP calculates the CommP and data size of the specified CID + ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) //perm:read + // ClientCalcCommP calculates the CommP for a specified file + ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) //perm:write + // ClientGenCar generates a CAR file for the specified file. 
+ ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error //perm:write + // ClientDealSize calculates real deal data size + ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, error) //perm:read + // ClientListTransfers returns the status of all ongoing transfers of data + ClientListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) //perm:write + ClientDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) //perm:write + // ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer + ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write + // ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer + ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write + // ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel + // which are stuck due to insufficient funds + ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write + + // ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID + ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write + + // ClientUnimport removes references to the specified file from filestore + //ClientUnimport(path string) + + // ClientListImports lists imported files and their root CIDs + ClientListImports(ctx context.Context) ([]api.Import, error) //perm:write + + //ClientListAsks() []Ask + + // MethodGroup: State + // The State methods are used to query, inspect, and interact with chain state. + // Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset. 
+ // A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used. + + // StateCall runs the given message and returns its result without any persisted changes. + // + // StateCall applies the message to the tipset's parent state. The + // message is not applied on-top-of the messages in the passed-in + // tipset. + StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) //perm:read + // StateReplay replays a given message, assuming it was included in a block in the specified tipset. + // + // If a tipset key is provided, and a replacing message is found on chain, + // the method will return an error saying that the message wasn't found + // + // If no tipset key is provided, the appropriate tipset is looked up, and if + // the message was gas-repriced, the on-chain message will be replayed - in + // that case the returned InvocResult.MsgCid will not match the Cid param + // + // If the caller wants to ensure that exactly the requested message was executed, + // they MUST check that InvocResult.MsgCid is equal to the provided Cid. + // Without this check both the requested and original message may appear as + // successfully executed on-chain, which may look like a double-spend. + // + // A replacing message is a message with a different CID, any of Gas values, and + // different signature, but with all other parameters matching (source/destination, + // nonce, params, etc.) + StateReplay(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) //perm:read + // StateGetActor returns the indicated actor's nonce and balance. + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) //perm:read + // StateReadState returns the indicated actor's state. 
+ StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) //perm:read + // StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height. + StateListMessages(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) //perm:read + // StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number. + StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) //perm:read + + // StateNetworkName returns the name of the network the node is synced to + StateNetworkName(context.Context) (dtypes.NetworkName, error) //perm:read + // StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included. + StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read + // StateMinerActiveSectors returns info about sectors that a given miner is actively proving. + StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read + // StateMinerProvingDeadline calculates the deadline at some epoch for a proving period + // and returns the deadline-related calculations. 
+ StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) //perm:read + // StateMinerPower returns the power of the indicated miner + StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) //perm:read + // StateMinerInfo returns info about the indicated miner + StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) //perm:read + // StateMinerDeadlines returns all the proving deadlines for the given miner + StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) //perm:read + // StateMinerPartitions returns all partitions in the specified deadline + StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) //perm:read + // StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner + StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read + // StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset + StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*api.Fault, error) //perm:read + // StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner + StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read + // StateMinerInitialPledgeCollateral returns the precommit deposit for the specified miner's sector + StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read + // StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector + StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read + // 
StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent + StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read + // StateMinerSectorAllocated checks if a sector is allocated + StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) //perm:read + // StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector + StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) //perm:read + // StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found + // NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate + // expiration epoch + StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) //perm:read + // StateSectorExpiration returns epoch at which given sector will expire + StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) //perm:read + // StateSectorPartition finds deadline/partition with the specified sector + StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) //perm:read + // StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed + // + // NOTE: If a replacing message is found on chain, this method will return + // a MsgLookup for the replacing message - the MsgLookup.Message will be a different + // CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the + // result of the execution of the replacing message. 
+ // + // If the caller wants to ensure that exactly the requested message was executed, + // they MUST check that MsgLookup.Message is equal to the provided 'cid'. + // Without this check both the requested and original message may appear as + // successfully executed on-chain, which may look like a double-spend. + // + // A replacing message is a message with a different CID, any of Gas values, and + // different signature, but with all other parameters matching (source/destination, + // nonce, params, etc.) + StateSearchMsg(context.Context, cid.Cid) (*api.MsgLookup, error) //perm:read + // StateSearchMsgLimited looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed + // + // NOTE: If a replacing message is found on chain, this method will return + // a MsgLookup for the replacing message - the MsgLookup.Message will be a different + // CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the + // result of the execution of the replacing message. + // + // If the caller wants to ensure that exactly the requested message was executed, + // they MUST check that MsgLookup.Message is equal to the provided 'cid'. + // Without this check both the requested and original message may appear as + // successfully executed on-chain, which may look like a double-spend. + // + // A replacing message is a message with a different CID, any of Gas values, and + // different signature, but with all other parameters matching (source/destination, + // nonce, params, etc.) + StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*api.MsgLookup, error) //perm:read + // StateWaitMsg looks back in the chain for a message. If not found, it blocks until the + // message arrives on chain, and gets to the indicated confidence depth. 
+ // + // NOTE: If a replacing message is found on chain, this method will return + // a MsgLookup for the replacing message - the MsgLookup.Message will be a different + // CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the + // result of the execution of the replacing message. + // + // If the caller wants to ensure that exactly the requested message was executed, + // they MUST check that MsgLookup.Message is equal to the provided 'cid'. + // Without this check both the requested and original message may appear as + // successfully executed on-chain, which may look like a double-spend. + // + // A replacing message is a message with a different CID, any of Gas values, and + // different signature, but with all other parameters matching (source/destination, + // nonce, params, etc.) + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) //perm:read + // StateWaitMsgLimited looks back up to limit epochs in the chain for a message. + // If not found, it blocks until the message arrives on chain, and gets to the + // indicated confidence depth. + // + // NOTE: If a replacing message is found on chain, this method will return + // a MsgLookup for the replacing message - the MsgLookup.Message will be a different + // CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the + // result of the execution of the replacing message. + // + // If the caller wants to ensure that exactly the requested message was executed, + // they MUST check that MsgLookup.Message is equal to the provided 'cid'. + // Without this check both the requested and original message may appear as + // successfully executed on-chain, which may look like a double-spend. + // + // A replacing message is a message with a different CID, any of Gas values, and + // different signature, but with all other parameters matching (source/destination, + // nonce, params, etc.) 
+ StateWaitMsgLimited(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch) (*api.MsgLookup, error) //perm:read + // StateListMiners returns the addresses of every miner that has claimed power in the Power Actor + StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read + // StateListActors returns the addresses of every actor in the state + StateListActors(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read + // StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market + StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) //perm:read + // StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market + StateMarketParticipants(context.Context, types.TipSetKey) (map[string]api.MarketBalance, error) //perm:read + // StateMarketDeals returns information about every deal in the Storage Market + StateMarketDeals(context.Context, types.TipSetKey) (map[string]api.MarketDeal, error) //perm:read + // StateMarketStorageDeal returns information about the indicated deal + StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) //perm:read + // StateLookupID retrieves the ID address of the given address + StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read + // StateAccountKey returns the public key address of the given ID address + StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read + // StateChangedActors returns all the actors whose states change between the two given state CIDs + // TODO: Should this take tipset keys instead? 
+ StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) //perm:read + // StateGetReceipt returns the message receipt for the given message or for a + // matching gas-repriced replacing message + // + // NOTE: If the requested message was replaced, this method will return the receipt + // for the replacing message - if the caller needs the receipt for exactly the + // requested message, use StateSearchMsg().Receipt, and check that MsgLookup.Message + // is matching the requested CID + // + // DEPRECATED: Use StateSearchMsg, this method won't be supported in v1 API + StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) //perm:read + // StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set + StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) //perm:read + // StateCompute is a flexible command that applies the given messages on the given tipset. + // The messages are run as though the VM were at the provided height. 
+ // + // When called, StateCompute will: + // - Load the provided tipset, or use the current chain head if not provided + // - Compute the tipset state of the provided tipset on top of the parent state + // - (note that this step runs before vmheight is applied to the execution) + // - Execute state upgrade if any were scheduled at the epoch, or in null + // blocks preceding the tipset + // - Call the cron actor on null blocks preceding the tipset + // - For each block in the tipset + // - Apply messages in blocks in the specified + // - Award block reward by calling the reward actor + // - Call the cron actor for the current epoch + // - If the specified vmheight is higher than the current epoch, apply any + // needed state upgrades to the state + // - Apply the specified messages to the state + // + // The vmheight parameter sets VM execution epoch, and can be used to simulate + // message execution in different network versions. If the specified vmheight + // epoch is higher than the epoch of the specified tipset, any state upgrades + // until the vmheight will be executed on the state before applying messages + // specified by the user. + // + // Note that the initial tipset state computation is not affected by the + // vmheight parameter - only the messages in the `apply` set are + // + // If the caller wants to simply compute the state, vmheight should be set to + // the epoch of the specified tipset. + // + // Messages in the `apply` parameter must have the correct nonces, and gas + // values set. + StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) //perm:read + // StateVerifierStatus returns the data cap for the given address. + // Returns nil if there is no entry in the data cap table for the + // address. 
+ StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read + // StateVerifiedClientStatus returns the data cap for the given address. + // Returns nil if there is no entry in the data cap table for the + // address. + StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read + // StateVerifiedRegistryRootKey returns the address of the Verified Registry's root key + StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) //perm:read + // StateDealProviderCollateralBounds returns the min and max collateral a storage provider + // can issue. It takes the deal size and verified status as parameters. + StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (api.DealCollateralBounds, error) //perm:read + + // StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset. + // This is not used anywhere in the protocol itself, and is only for external consumption. + StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) //perm:read + // StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset. + // This is the value reported by the runtime interface to actors code. 
+ StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (api.CirculatingSupply, error) //perm:read + // StateNetworkVersion returns the network version at the given tipset + StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) //perm:read + + // MethodGroup: Msig + // The Msig methods are used to interact with multisig wallets on the + // filecoin network + + // MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent + MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read + // MsigGetVestingSchedule returns the vesting details of a given multisig. + MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (api.MsigVesting, error) //perm:read + // MsigGetVested returns the amount of FIL that vested in a multisig in a certain period. + // It takes the following params: , , + MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) //perm:read + + //MsigGetPending returns pending transactions for the given multisig + //wallet. Once pending transactions are fully approved, they will no longer + //appear here. 
+ MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error) //perm:read + + // MsigCreate creates a multisig wallet + // It takes the following params: , , + //, , + MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) //perm:sign + // MsigPropose proposes a multisig message + // It takes the following params: , , , + // , , + MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign + + // MsigApprove approves a previously-proposed multisig message by transaction ID + // It takes the following params: , + MsigApprove(context.Context, address.Address, uint64, address.Address) (cid.Cid, error) //perm:sign + + // MsigApproveTxnHash approves a previously-proposed multisig message, specified + // using both transaction ID and a hash of the parameters used in the + // proposal. This method of approval can be used to ensure you only approve + // exactly the transaction you think you are. 
+ // It takes the following params: , , , , , + // , , + MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign + + // MsigCancel cancels a previously-proposed multisig message + // It takes the following params: , , , , + // , , + MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign + // MsigAddPropose proposes adding a signer in the multisig + // It takes the following params: , , + // , + MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign + // MsigAddApprove approves a previously proposed AddSigner message + // It takes the following params: , , , + // , , + MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign + // MsigAddCancel cancels a previously proposed AddSigner message + // It takes the following params: , , , + // , + MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) //perm:sign + // MsigSwapPropose proposes swapping 2 signers in the multisig + // It takes the following params: , , + // , + MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign + // MsigSwapApprove approves a previously proposed SwapSigner + // It takes the following params: , , , + // , , + MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign + // MsigSwapCancel cancels a previously proposed SwapSigner message + // It takes the following params: , , , + // , + MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) 
//perm:sign + + // MsigRemoveSigner proposes the removal of a signer from the multisig. + // It accepts the multisig to make the change on, the proposer address to + // send the message from, the address to be removed, and a boolean + // indicating whether or not the signing threshold should be lowered by one + // along with the address removal. + MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) //perm:sign + + // MarketAddBalance adds funds to the market actor + MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign + // MarketGetReserved gets the amount of funds that are currently reserved for the address + MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) //perm:sign + // MarketReserveFunds reserves funds for a deal + MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign + // MarketReleaseFunds releases funds reserved by MarketReserveFunds + MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error //perm:sign + // MarketWithdraw withdraws unlocked funds from the market actor + MarketWithdraw(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign + + // MethodGroup: Paych + // The Paych methods are for interacting with and managing payment channels + + PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) //perm:sign + PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error) //perm:sign + PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) //perm:sign + PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) //perm:sign + PaychList(context.Context) ([]address.Address, error) 
//perm:read + PaychStatus(context.Context, address.Address) (*api.PaychStatus, error) //perm:read + PaychSettle(context.Context, address.Address) (cid.Cid, error) //perm:sign + PaychCollect(context.Context, address.Address) (cid.Cid, error) //perm:sign + PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) //perm:sign + PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) //perm:sign + PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error //perm:read + PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) //perm:read + PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*api.VoucherCreateResult, error) //perm:sign + PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) //perm:write + PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) //perm:write + PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) //perm:sign + + // CreateBackup creates node backup under the specified file name.
The + // method requires that the lotus daemon is running with the + // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that + // the path specified when calling CreateBackup is within the base path + CreateBackup(ctx context.Context, fpath string) error //perm:admin +} diff --git a/api/v0api/gateway.go b/api/v0api/gateway.go new file mode 100644 index 00000000000..18a5ec7d6e6 --- /dev/null +++ b/api/v0api/gateway.go @@ -0,0 +1,69 @@ +package v0api + +import ( + "context" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" +) + +// MODIFYING THE API INTERFACE +// +// NOTE: This is the V0 (Stable) API - when adding methods to this interface, +// you'll need to make sure they are also present on the V1 (Unstable) API +// +// This API is implemented in `v1_wrapper.go` as a compatibility layer backed +// by the V1 api +// +// When adding / changing methods in this file: +// * Do the change here +// * Adjust implementation in `node/impl/` +// * Run `make gen` - this will: +// * Generate proxy structs +// * Generate mocks +// * Generate markdown docs +// * Generate openrpc blobs + +type Gateway interface { + ChainHasObj(context.Context, cid.Cid) (bool, error) + ChainHead(ctx context.Context) (*types.TipSet, error) + ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) + ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) + ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) + ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) + ChainNotify(context.Context) (<-chan []*api.HeadChange, error) + 
ChainReadObj(context.Context, cid.Cid) ([]byte, error) + GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) + MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) + MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) + MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) + MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error) + StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) + StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) + StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) + StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) + StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) + StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) + StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) + StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) + StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) + StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) + StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) + StateSectorGetInfo(ctx context.Context, maddr 
address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) + StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) + StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) + WalletBalance(context.Context, address.Address) (types.BigInt, error) + Version(context.Context) (api.APIVersion, error) +} + +var _ Gateway = *new(FullNode) diff --git a/api/v0api/latest.go b/api/v0api/latest.go new file mode 100644 index 00000000000..d423f57bc86 --- /dev/null +++ b/api/v0api/latest.go @@ -0,0 +1,32 @@ +package v0api + +import ( + "github.com/filecoin-project/lotus/api" +) + +type Common = api.Common +type Net = api.Net +type CommonNet = api.CommonNet + +type CommonStruct = api.CommonStruct +type CommonStub = api.CommonStub +type NetStruct = api.NetStruct +type NetStub = api.NetStub +type CommonNetStruct = api.CommonNetStruct +type CommonNetStub = api.CommonNetStub + +type StorageMiner = api.StorageMiner +type StorageMinerStruct = api.StorageMinerStruct + +type Worker = api.Worker +type WorkerStruct = api.WorkerStruct + +type Wallet = api.Wallet + +func PermissionedStorMinerAPI(a StorageMiner) StorageMiner { + return api.PermissionedStorMinerAPI(a) +} + +func PermissionedWorkerAPI(a Worker) Worker { + return api.PermissionedWorkerAPI(a) +} diff --git a/api/v0api/permissioned.go b/api/v0api/permissioned.go new file mode 100644 index 00000000000..ad64bc29ede --- /dev/null +++ b/api/v0api/permissioned.go @@ -0,0 +1,13 @@ +package v0api + +import ( + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/filecoin-project/lotus/api" +) + +func PermissionedFullAPI(a FullNode) FullNode { + var out FullNodeStruct + auth.PermissionedProxy(api.AllPermissions, api.DefaultPerms, a, &out.Internal) + auth.PermissionedProxy(api.AllPermissions, api.DefaultPerms, a, &out.CommonStruct.Internal) + return &out +} diff --git a/api/v0api/proxy_gen.go 
b/api/v0api/proxy_gen.go new file mode 100644 index 00000000000..4cb96b53edf --- /dev/null +++ b/api/v0api/proxy_gen.go @@ -0,0 +1,2132 @@ +// Code generated by github.com/filecoin-project/lotus/gen/api. DO NOT EDIT. + +package v0api + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-multistore" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/api" + apitypes "github.com/filecoin-project/lotus/api/types" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/paych" + "github.com/filecoin-project/lotus/chain/types" + marketevents "github.com/filecoin-project/lotus/markets/loggers" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + "golang.org/x/xerrors" +) + +type FullNodeStruct struct { + CommonStruct + + NetStruct + + Internal struct { + BeaconGetEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"` + + ChainDeleteObj func(p0 context.Context, p1 cid.Cid) error `perm:"admin"` + + ChainExport func(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) `perm:"read"` + + ChainGetBlock func(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) `perm:"read"` + + ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) `perm:"read"` + + ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) `perm:"read"` + + ChainGetMessage func(p0 
context.Context, p1 cid.Cid) (*types.Message, error) `perm:"read"` + + ChainGetMessagesInTipset func(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) `perm:"read"` + + ChainGetNode func(p0 context.Context, p1 string) (*api.IpldObject, error) `perm:"read"` + + ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]api.Message, error) `perm:"read"` + + ChainGetParentReceipts func(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"` + + ChainGetPath func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"` + + ChainGetRandomnessFromBeacon func(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) `perm:"read"` + + ChainGetRandomnessFromTickets func(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) `perm:"read"` + + ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) `perm:"read"` + + ChainGetTipSetByHeight func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) `perm:"read"` + + ChainHasObj func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"read"` + + ChainHead func(p0 context.Context) (*types.TipSet, error) `perm:"read"` + + ChainNotify func(p0 context.Context) (<-chan []*api.HeadChange, error) `perm:"read"` + + ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) `perm:"read"` + + ChainSetHead func(p0 context.Context, p1 types.TipSetKey) error `perm:"admin"` + + ChainStatObj func(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (api.ObjStat, error) `perm:"read"` + + ChainTipSetWeight func(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + ClientCalcCommP func(p0 context.Context, p1 string) (*api.CommPRet, error) `perm:"write"` + + ClientCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 
bool) error `perm:"write"` + + ClientCancelRetrievalDeal func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"write"` + + ClientDataTransferUpdates func(p0 context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"` + + ClientDealPieceCID func(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) `perm:"read"` + + ClientDealSize func(p0 context.Context, p1 cid.Cid) (api.DataSize, error) `perm:"read"` + + ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) `perm:"read"` + + ClientGenCar func(p0 context.Context, p1 api.FileRef, p2 string) error `perm:"write"` + + ClientGetDealInfo func(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) `perm:"read"` + + ClientGetDealStatus func(p0 context.Context, p1 uint64) (string, error) `perm:"read"` + + ClientGetDealUpdates func(p0 context.Context) (<-chan api.DealInfo, error) `perm:"write"` + + ClientGetRetrievalUpdates func(p0 context.Context) (<-chan api.RetrievalInfo, error) `perm:"write"` + + ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"` + + ClientImport func(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) `perm:"admin"` + + ClientListDataTransfers func(p0 context.Context) ([]api.DataTransferChannel, error) `perm:"write"` + + ClientListDeals func(p0 context.Context) ([]api.DealInfo, error) `perm:"write"` + + ClientListImports func(p0 context.Context) ([]api.Import, error) `perm:"write"` + + ClientListRetrievals func(p0 context.Context) ([]api.RetrievalInfo, error) `perm:"write"` + + ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) `perm:"read"` + + ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"` + + ClientRemoveImport func(p0 context.Context, p1 multistore.StoreID) error `perm:"admin"` + + ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 
peer.ID, p3 bool) error `perm:"write"` + + ClientRetrieve func(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error `perm:"admin"` + + ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"` + + ClientRetrieveWithEvents func(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` + + ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"` + + ClientStatelessDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"write"` + + CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` + + GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + GasEstimateGasLimit func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) `perm:"read"` + + GasEstimateGasPremium func(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `perm:"read"` + + MarketAddBalance func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"` + + MarketGetReserved func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"sign"` + + MarketReleaseFunds func(p0 context.Context, p1 address.Address, p2 types.BigInt) error `perm:"sign"` + + MarketReserveFunds func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"` + + MarketWithdraw func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"` + + MinerCreateBlock func(p0 context.Context, p1 *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"` + + MinerGetBaseInfo func(p0 
context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"` + + MpoolBatchPush func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"` + + MpoolBatchPushMessage func(p0 context.Context, p1 []*types.Message, p2 *api.MessageSendSpec) ([]*types.SignedMessage, error) `perm:"sign"` + + MpoolBatchPushUntrusted func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"` + + MpoolClear func(p0 context.Context, p1 bool) error `perm:"write"` + + MpoolGetConfig func(p0 context.Context) (*types.MpoolConfig, error) `perm:"read"` + + MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"read"` + + MpoolPending func(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"` + + MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `perm:"write"` + + MpoolPushMessage func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec) (*types.SignedMessage, error) `perm:"sign"` + + MpoolPushUntrusted func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `perm:"write"` + + MpoolSelect func(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) `perm:"read"` + + MpoolSetConfig func(p0 context.Context, p1 *types.MpoolConfig) error `perm:"admin"` + + MpoolSub func(p0 context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"` + + MsigAddApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) `perm:"sign"` + + MsigAddCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) `perm:"sign"` + + MsigAddPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) `perm:"sign"` + + MsigApprove func(p0 context.Context, p1 address.Address, 
p2 uint64, p3 address.Address) (cid.Cid, error) `perm:"sign"` + + MsigApproveTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) `perm:"sign"` + + MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) `perm:"sign"` + + MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) `perm:"sign"` + + MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + MsigGetPending func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) `perm:"read"` + + MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + MsigGetVestingSchedule func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MsigVesting, error) `perm:"read"` + + MsigPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) `perm:"sign"` + + MsigRemoveSigner func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) `perm:"sign"` + + MsigSwapApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) `perm:"sign"` + + MsigSwapCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) `perm:"sign"` + + MsigSwapPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) `perm:"sign"` + + 
PaychAllocateLane func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"sign"` + + PaychAvailableFunds func(p0 context.Context, p1 address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"` + + PaychAvailableFundsByFromTo func(p0 context.Context, p1 address.Address, p2 address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"` + + PaychCollect func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"` + + PaychGet func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*api.ChannelInfo, error) `perm:"sign"` + + PaychGetWaitReady func(p0 context.Context, p1 cid.Cid) (address.Address, error) `perm:"sign"` + + PaychList func(p0 context.Context) ([]address.Address, error) `perm:"read"` + + PaychNewPayment func(p0 context.Context, p1 address.Address, p2 address.Address, p3 []api.VoucherSpec) (*api.PaymentInfo, error) `perm:"sign"` + + PaychSettle func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"` + + PaychStatus func(p0 context.Context, p1 address.Address) (*api.PaychStatus, error) `perm:"read"` + + PaychVoucherAdd func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) `perm:"write"` + + PaychVoucherCheckSpendable func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) `perm:"read"` + + PaychVoucherCheckValid func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error `perm:"read"` + + PaychVoucherCreate func(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*api.VoucherCreateResult, error) `perm:"sign"` + + PaychVoucherList func(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) `perm:"write"` + + PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"` + + StateAccountKey func(p0 context.Context, p1 address.Address, p2 
types.TipSetKey) (address.Address, error) `perm:"read"` + + StateAllMinerFaults func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*api.Fault, error) `perm:"read"` + + StateCall func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) `perm:"read"` + + StateChangedActors func(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) `perm:"read"` + + StateCirculatingSupply func(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) `perm:"read"` + + StateCompute func(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*api.ComputeStateOutput, error) `perm:"read"` + + StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) `perm:"read"` + + StateDecodeParams func(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) `perm:"read"` + + StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"` + + StateGetReceipt func(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) `perm:"read"` + + StateListActors func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `perm:"read"` + + StateListMessages func(p0 context.Context, p1 *api.MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"` + + StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `perm:"read"` + + StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"` + + StateMarketBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) `perm:"read"` + + StateMarketDeals func(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketDeal, error) `perm:"read"` + + StateMarketParticipants func(p0 
context.Context, p1 types.TipSetKey) (map[string]api.MarketBalance, error) `perm:"read"` + + StateMarketStorageDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) `perm:"read"` + + StateMinerActiveSectors func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"` + + StateMinerAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + StateMinerDeadlines func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]api.Deadline, error) `perm:"read"` + + StateMinerFaults func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) `perm:"read"` + + StateMinerInfo func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) `perm:"read"` + + StateMinerInitialPledgeCollateral func(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + StateMinerPartitions func(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]api.Partition, error) `perm:"read"` + + StateMinerPower func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) `perm:"read"` + + StateMinerPreCommitDepositForPower func(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` + + StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) `perm:"read"` + + StateMinerRecoveries func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) `perm:"read"` + + StateMinerSectorAllocated func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) `perm:"read"` + + StateMinerSectorCount func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) 
`perm:"read"` + + StateMinerSectors func(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"` + + StateNetworkName func(p0 context.Context) (dtypes.NetworkName, error) `perm:"read"` + + StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) `perm:"read"` + + StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.ActorState, error) `perm:"read"` + + StateReplay func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) `perm:"read"` + + StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) `perm:"read"` + + StateSearchMsgLimited func(p0 context.Context, p1 cid.Cid, p2 abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"` + + StateSectorExpiration func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) `perm:"read"` + + StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"` + + StateSectorPartition func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) `perm:"read"` + + StateSectorPreCommitInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"` + + StateVMCirculatingSupplyInternal func(p0 context.Context, p1 types.TipSetKey) (api.CirculatingSupply, error) `perm:"read"` + + StateVerifiedClientStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) `perm:"read"` + + StateVerifiedRegistryRootKey func(p0 context.Context, p1 types.TipSetKey) (address.Address, error) `perm:"read"` + + StateVerifierStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) `perm:"read"` + + StateWaitMsg 
func(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) `perm:"read"` + + StateWaitMsgLimited func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"` + + SyncCheckBad func(p0 context.Context, p1 cid.Cid) (string, error) `perm:"read"` + + SyncCheckpoint func(p0 context.Context, p1 types.TipSetKey) error `perm:"admin"` + + SyncIncomingBlocks func(p0 context.Context) (<-chan *types.BlockHeader, error) `perm:"read"` + + SyncMarkBad func(p0 context.Context, p1 cid.Cid) error `perm:"admin"` + + SyncState func(p0 context.Context) (*api.SyncState, error) `perm:"read"` + + SyncSubmitBlock func(p0 context.Context, p1 *types.BlockMsg) error `perm:"write"` + + SyncUnmarkAllBad func(p0 context.Context) error `perm:"admin"` + + SyncUnmarkBad func(p0 context.Context, p1 cid.Cid) error `perm:"admin"` + + SyncValidateTipset func(p0 context.Context, p1 types.TipSetKey) (bool, error) `perm:"read"` + + WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"read"` + + WalletDefaultAddress func(p0 context.Context) (address.Address, error) `perm:"write"` + + WalletDelete func(p0 context.Context, p1 address.Address) error `perm:"admin"` + + WalletExport func(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) `perm:"admin"` + + WalletHas func(p0 context.Context, p1 address.Address) (bool, error) `perm:"write"` + + WalletImport func(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) `perm:"admin"` + + WalletList func(p0 context.Context) ([]address.Address, error) `perm:"write"` + + WalletNew func(p0 context.Context, p1 types.KeyType) (address.Address, error) `perm:"write"` + + WalletSetDefault func(p0 context.Context, p1 address.Address) error `perm:"write"` + + WalletSign func(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) `perm:"sign"` + + WalletSignMessage func(p0 context.Context, p1 address.Address, p2 *types.Message) 
(*types.SignedMessage, error) `perm:"sign"` + + WalletValidateAddress func(p0 context.Context, p1 string) (address.Address, error) `perm:"read"` + + WalletVerify func(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) `perm:"read"` + } +} + +type FullNodeStub struct { + CommonStub + + NetStub +} + +type GatewayStruct struct { + Internal struct { + ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) `` + + ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `` + + ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) `` + + ChainGetTipSetByHeight func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) `` + + ChainHasObj func(p0 context.Context, p1 cid.Cid) (bool, error) `` + + ChainHead func(p0 context.Context) (*types.TipSet, error) `` + + ChainNotify func(p0 context.Context) (<-chan []*api.HeadChange, error) `` + + ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) `` + + GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `` + + MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `` + + MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `` + + MsigGetPending func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) `` + + MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) `` + + StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `` + + StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) `` + + StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) 
(*types.Actor, error) `` + + StateGetReceipt func(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) `` + + StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `` + + StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `` + + StateMarketBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) `` + + StateMarketStorageDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) `` + + StateMinerInfo func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) `` + + StateMinerPower func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) `` + + StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) `` + + StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (network.Version, error) `` + + StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) `` + + StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) `` + + StateVerifiedClientStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) `` + + StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) `` + + Version func(p0 context.Context) (api.APIVersion, error) `` + + WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) `` + } +} + +type GatewayStub struct { +} + +func (s *FullNodeStruct) BeaconGetEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) { + return s.Internal.BeaconGetEntry(p0, p1) +} + +func (s *FullNodeStub) BeaconGetEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) { + return nil, xerrors.New("method not supported") +} + +func (s 
*FullNodeStruct) ChainDeleteObj(p0 context.Context, p1 cid.Cid) error { + return s.Internal.ChainDeleteObj(p0, p1) +} + +func (s *FullNodeStub) ChainDeleteObj(p0 context.Context, p1 cid.Cid) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) { + return s.Internal.ChainExport(p0, p1, p2, p3) +} + +func (s *FullNodeStub) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) { + return s.Internal.ChainGetBlock(p0, p1) +} + +func (s *FullNodeStub) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) { + return s.Internal.ChainGetBlockMessages(p0, p1) +} + +func (s *FullNodeStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { + return s.Internal.ChainGetGenesis(p0) +} + +func (s *FullNodeStub) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + return s.Internal.ChainGetMessage(p0, p1) +} + +func (s *FullNodeStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) { + return s.Internal.ChainGetMessagesInTipset(p0, p1) +} + 
+func (s *FullNodeStub) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) { + return *new([]api.Message), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetNode(p0 context.Context, p1 string) (*api.IpldObject, error) { + return s.Internal.ChainGetNode(p0, p1) +} + +func (s *FullNodeStub) ChainGetNode(p0 context.Context, p1 string) (*api.IpldObject, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]api.Message, error) { + return s.Internal.ChainGetParentMessages(p0, p1) +} + +func (s *FullNodeStub) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]api.Message, error) { + return *new([]api.Message), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { + return s.Internal.ChainGetParentReceipts(p0, p1) +} + +func (s *FullNodeStub) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { + return *new([]*types.MessageReceipt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*api.HeadChange, error) { + return s.Internal.ChainGetPath(p0, p1, p2) +} + +func (s *FullNodeStub) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*api.HeadChange, error) { + return *new([]*api.HeadChange), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetRandomnessFromBeacon(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { + return s.Internal.ChainGetRandomnessFromBeacon(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) ChainGetRandomnessFromBeacon(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { + 
return *new(abi.Randomness), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { + return s.Internal.ChainGetRandomnessFromTickets(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) ChainGetRandomnessFromTickets(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) { + return *new(abi.Randomness), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { + return s.Internal.ChainGetTipSet(p0, p1) +} + +func (s *FullNodeStub) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + return s.Internal.ChainGetTipSetByHeight(p0, p1, p2) +} + +func (s *FullNodeStub) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + return s.Internal.ChainHasObj(p0, p1) +} + +func (s *FullNodeStub) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainHead(p0 context.Context) (*types.TipSet, error) { + return s.Internal.ChainHead(p0) +} + +func (s *FullNodeStub) ChainHead(p0 context.Context) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainNotify(p0 context.Context) (<-chan []*api.HeadChange, error) { + return s.Internal.ChainNotify(p0) +} + +func (s *FullNodeStub) ChainNotify(p0 context.Context) (<-chan 
[]*api.HeadChange, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + return s.Internal.ChainReadObj(p0, p1) +} + +func (s *FullNodeStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + return *new([]byte), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainSetHead(p0 context.Context, p1 types.TipSetKey) error { + return s.Internal.ChainSetHead(p0, p1) +} + +func (s *FullNodeStub) ChainSetHead(p0 context.Context, p1 types.TipSetKey) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainStatObj(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (api.ObjStat, error) { + return s.Internal.ChainStatObj(p0, p1, p2) +} + +func (s *FullNodeStub) ChainStatObj(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (api.ObjStat, error) { + return *new(api.ObjStat), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) { + return s.Internal.ChainTipSetWeight(p0, p1) +} + +func (s *FullNodeStub) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientCalcCommP(p0 context.Context, p1 string) (*api.CommPRet, error) { + return s.Internal.ClientCalcCommP(p0, p1) +} + +func (s *FullNodeStub) ClientCalcCommP(p0 context.Context, p1 string) (*api.CommPRet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return s.Internal.ClientCancelDataTransfer(p0, p1, p2, p3) +} + +func (s *FullNodeStub) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) 
ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error { + return s.Internal.ClientCancelRetrievalDeal(p0, p1) +} + +func (s *FullNodeStub) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientDataTransferUpdates(p0 context.Context) (<-chan api.DataTransferChannel, error) { + return s.Internal.ClientDataTransferUpdates(p0) +} + +func (s *FullNodeStub) ClientDataTransferUpdates(p0 context.Context) (<-chan api.DataTransferChannel, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) { + return s.Internal.ClientDealPieceCID(p0, p1) +} + +func (s *FullNodeStub) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) { + return *new(api.DataCIDSize), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientDealSize(p0 context.Context, p1 cid.Cid) (api.DataSize, error) { + return s.Internal.ClientDealSize(p0, p1) +} + +func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (api.DataSize, error) { + return *new(api.DataSize), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) { + return s.Internal.ClientFindData(p0, p1, p2) +} + +func (s *FullNodeStub) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) { + return *new([]api.QueryOffer), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientGenCar(p0 context.Context, p1 api.FileRef, p2 string) error { + return s.Internal.ClientGenCar(p0, p1, p2) +} + +func (s *FullNodeStub) ClientGenCar(p0 context.Context, p1 api.FileRef, p2 string) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) 
(*api.DealInfo, error) { + return s.Internal.ClientGetDealInfo(p0, p1) +} + +func (s *FullNodeStub) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) { + return s.Internal.ClientGetDealStatus(p0, p1) +} + +func (s *FullNodeStub) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) { + return "", xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientGetDealUpdates(p0 context.Context) (<-chan api.DealInfo, error) { + return s.Internal.ClientGetDealUpdates(p0) +} + +func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan api.DealInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) { + return s.Internal.ClientGetRetrievalUpdates(p0) +} + +func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { + return s.Internal.ClientHasLocal(p0, p1) +} + +func (s *FullNodeStub) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientImport(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) { + return s.Internal.ClientImport(p0, p1) +} + +func (s *FullNodeStub) ClientImport(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientListDataTransfers(p0 context.Context) ([]api.DataTransferChannel, error) { + return s.Internal.ClientListDataTransfers(p0) +} + +func (s *FullNodeStub) ClientListDataTransfers(p0 context.Context) ([]api.DataTransferChannel, error) { + 
return *new([]api.DataTransferChannel), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientListDeals(p0 context.Context) ([]api.DealInfo, error) { + return s.Internal.ClientListDeals(p0) +} + +func (s *FullNodeStub) ClientListDeals(p0 context.Context) ([]api.DealInfo, error) { + return *new([]api.DealInfo), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientListImports(p0 context.Context) ([]api.Import, error) { + return s.Internal.ClientListImports(p0) +} + +func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]api.Import, error) { + return *new([]api.Import), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) { + return s.Internal.ClientListRetrievals(p0) +} + +func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) { + return *new([]api.RetrievalInfo), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) { + return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3) +} + +func (s *FullNodeStub) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) { + return *new(api.QueryOffer), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) { + return s.Internal.ClientQueryAsk(p0, p1, p2) +} + +func (s *FullNodeStub) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error { + return s.Internal.ClientRemoveImport(p0, p1) +} + +func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 multistore.StoreID) error 
{ + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return s.Internal.ClientRestartDataTransfer(p0, p1, p2, p3) +} + +func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error { + return s.Internal.ClientRetrieve(p0, p1, p2) +} + +func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { + return s.Internal.ClientRetrieveTryRestartInsufficientFunds(p0, p1) +} + +func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { + return s.Internal.ClientRetrieveWithEvents(p0, p1, p2) +} + +func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) { + return s.Internal.ClientStartDeal(p0, p1) +} + +func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) { + return s.Internal.ClientStatelessDeal(p0, 
p1) +} + +func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error { + return s.Internal.CreateBackup(p0, p1) +} + +func (s *FullNodeStub) CreateBackup(p0 context.Context, p1 string) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) GasEstimateFeeCap(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) { + return s.Internal.GasEstimateFeeCap(p0, p1, p2, p3) +} + +func (s *FullNodeStub) GasEstimateFeeCap(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) GasEstimateGasLimit(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) { + return s.Internal.GasEstimateGasLimit(p0, p1, p2) +} + +func (s *FullNodeStub) GasEstimateGasLimit(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) { + return 0, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) { + return s.Internal.GasEstimateGasPremium(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { + return s.Internal.GasEstimateMessageGas(p0, p1, p2, p3) +} + +func (s *FullNodeStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { + 
return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return s.Internal.MarketAddBalance(p0, p1, p2, p3) +} + +func (s *FullNodeStub) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MarketGetReserved(p0 context.Context, p1 address.Address) (types.BigInt, error) { + return s.Internal.MarketGetReserved(p0, p1) +} + +func (s *FullNodeStub) MarketGetReserved(p0 context.Context, p1 address.Address) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MarketReleaseFunds(p0 context.Context, p1 address.Address, p2 types.BigInt) error { + return s.Internal.MarketReleaseFunds(p0, p1, p2) +} + +func (s *FullNodeStub) MarketReleaseFunds(p0 context.Context, p1 address.Address, p2 types.BigInt) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MarketReserveFunds(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return s.Internal.MarketReserveFunds(p0, p1, p2, p3) +} + +func (s *FullNodeStub) MarketReserveFunds(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MarketWithdraw(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return s.Internal.MarketWithdraw(p0, p1, p2, p3) +} + +func (s *FullNodeStub) MarketWithdraw(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MinerCreateBlock(p0 context.Context, p1 *api.BlockTemplate) 
(*types.BlockMsg, error) { + return s.Internal.MinerCreateBlock(p0, p1) +} + +func (s *FullNodeStub) MinerCreateBlock(p0 context.Context, p1 *api.BlockTemplate) (*types.BlockMsg, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) { + return s.Internal.MinerGetBaseInfo(p0, p1, p2, p3) +} + +func (s *FullNodeStub) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolBatchPush(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + return s.Internal.MpoolBatchPush(p0, p1) +} + +func (s *FullNodeStub) MpoolBatchPush(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + return *new([]cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolBatchPushMessage(p0 context.Context, p1 []*types.Message, p2 *api.MessageSendSpec) ([]*types.SignedMessage, error) { + return s.Internal.MpoolBatchPushMessage(p0, p1, p2) +} + +func (s *FullNodeStub) MpoolBatchPushMessage(p0 context.Context, p1 []*types.Message, p2 *api.MessageSendSpec) ([]*types.SignedMessage, error) { + return *new([]*types.SignedMessage), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + return s.Internal.MpoolBatchPushUntrusted(p0, p1) +} + +func (s *FullNodeStub) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) { + return *new([]cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolClear(p0 context.Context, p1 bool) error { + return s.Internal.MpoolClear(p0, p1) +} + +func (s *FullNodeStub) MpoolClear(p0 context.Context, p1 bool) error { + return 
xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolGetConfig(p0 context.Context) (*types.MpoolConfig, error) { + return s.Internal.MpoolGetConfig(p0) +} + +func (s *FullNodeStub) MpoolGetConfig(p0 context.Context) (*types.MpoolConfig, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) { + return s.Internal.MpoolGetNonce(p0, p1) +} + +func (s *FullNodeStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) { + return 0, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) { + return s.Internal.MpoolPending(p0, p1) +} + +func (s *FullNodeStub) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) { + return *new([]*types.SignedMessage), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return s.Internal.MpoolPush(p0, p1) +} + +func (s *FullNodeStub) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolPushMessage(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec) (*types.SignedMessage, error) { + return s.Internal.MpoolPushMessage(p0, p1, p2) +} + +func (s *FullNodeStub) MpoolPushMessage(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec) (*types.SignedMessage, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolPushUntrusted(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return s.Internal.MpoolPushUntrusted(p0, p1) +} + +func (s *FullNodeStub) MpoolPushUntrusted(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s 
*FullNodeStruct) MpoolSelect(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) { + return s.Internal.MpoolSelect(p0, p1, p2) +} + +func (s *FullNodeStub) MpoolSelect(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) { + return *new([]*types.SignedMessage), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolSetConfig(p0 context.Context, p1 *types.MpoolConfig) error { + return s.Internal.MpoolSetConfig(p0, p1) +} + +func (s *FullNodeStub) MpoolSetConfig(p0 context.Context, p1 *types.MpoolConfig) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolSub(p0 context.Context) (<-chan api.MpoolUpdate, error) { + return s.Internal.MpoolSub(p0) +} + +func (s *FullNodeStub) MpoolSub(p0 context.Context) (<-chan api.MpoolUpdate, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) { + return s.Internal.MsigAddApprove(p0, p1, p2, p3, p4, p5, p6) +} + +func (s *FullNodeStub) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) { + return s.Internal.MsigAddCancel(p0, p1, p2, p3, p4, p5) +} + +func (s *FullNodeStub) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { 
+ return s.Internal.MsigAddPropose(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) { + return s.Internal.MsigApprove(p0, p1, p2, p3) +} + +func (s *FullNodeStub) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) { + return s.Internal.MsigApproveTxnHash(p0, p1, p2, p3, p4, p5, p6, p7, p8) +} + +func (s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) { + return s.Internal.MsigCancel(p0, p1, p2, p3, p4, p5, p6, p7) +} + +func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) { + return s.Internal.MsigCreate(p0, p1, p2, p3, p4, p5, p6) +} + +func (s *FullNodeStub) 
MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + return s.Internal.MsigGetAvailableBalance(p0, p1, p2) +} + +func (s *FullNodeStub) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) { + return s.Internal.MsigGetPending(p0, p1, p2) +} + +func (s *FullNodeStub) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) { + return *new([]*api.MsigTransaction), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { + return s.Internal.MsigGetVested(p0, p1, p2, p3) +} + +func (s *FullNodeStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MsigVesting, error) { + return s.Internal.MsigGetVestingSchedule(p0, p1, p2) +} + +func (s *FullNodeStub) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MsigVesting, error) { + return *new(api.MsigVesting), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, 
error) { + return s.Internal.MsigPropose(p0, p1, p2, p3, p4, p5, p6) +} + +func (s *FullNodeStub) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { + return s.Internal.MsigRemoveSigner(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) { + return s.Internal.MsigSwapApprove(p0, p1, p2, p3, p4, p5, p6) +} + +func (s *FullNodeStub) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) { + return s.Internal.MsigSwapCancel(p0, p1, p2, p3, p4, p5) +} + +func (s *FullNodeStub) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) { + return s.Internal.MsigSwapPropose(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) MsigSwapPropose(p0 
context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) { + return s.Internal.PaychAllocateLane(p0, p1) +} + +func (s *FullNodeStub) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) { + return 0, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychAvailableFunds(p0 context.Context, p1 address.Address) (*api.ChannelAvailableFunds, error) { + return s.Internal.PaychAvailableFunds(p0, p1) +} + +func (s *FullNodeStub) PaychAvailableFunds(p0 context.Context, p1 address.Address) (*api.ChannelAvailableFunds, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychAvailableFundsByFromTo(p0 context.Context, p1 address.Address, p2 address.Address) (*api.ChannelAvailableFunds, error) { + return s.Internal.PaychAvailableFundsByFromTo(p0, p1, p2) +} + +func (s *FullNodeStub) PaychAvailableFundsByFromTo(p0 context.Context, p1 address.Address, p2 address.Address) (*api.ChannelAvailableFunds, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychCollect(p0 context.Context, p1 address.Address) (cid.Cid, error) { + return s.Internal.PaychCollect(p0, p1) +} + +func (s *FullNodeStub) PaychCollect(p0 context.Context, p1 address.Address) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*api.ChannelInfo, error) { + return s.Internal.PaychGet(p0, p1, p2, p3) +} + +func (s *FullNodeStub) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*api.ChannelInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychGetWaitReady(p0 
context.Context, p1 cid.Cid) (address.Address, error) { + return s.Internal.PaychGetWaitReady(p0, p1) +} + +func (s *FullNodeStub) PaychGetWaitReady(p0 context.Context, p1 cid.Cid) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychList(p0 context.Context) ([]address.Address, error) { + return s.Internal.PaychList(p0) +} + +func (s *FullNodeStub) PaychList(p0 context.Context) ([]address.Address, error) { + return *new([]address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychNewPayment(p0 context.Context, p1 address.Address, p2 address.Address, p3 []api.VoucherSpec) (*api.PaymentInfo, error) { + return s.Internal.PaychNewPayment(p0, p1, p2, p3) +} + +func (s *FullNodeStub) PaychNewPayment(p0 context.Context, p1 address.Address, p2 address.Address, p3 []api.VoucherSpec) (*api.PaymentInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychSettle(p0 context.Context, p1 address.Address) (cid.Cid, error) { + return s.Internal.PaychSettle(p0, p1) +} + +func (s *FullNodeStub) PaychSettle(p0 context.Context, p1 address.Address) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychStatus(p0 context.Context, p1 address.Address) (*api.PaychStatus, error) { + return s.Internal.PaychStatus(p0, p1) +} + +func (s *FullNodeStub) PaychStatus(p0 context.Context, p1 address.Address) (*api.PaychStatus, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychVoucherAdd(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) { + return s.Internal.PaychVoucherAdd(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) PaychVoucherAdd(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) { + return *new(types.BigInt), 
xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychVoucherCheckSpendable(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) { + return s.Internal.PaychVoucherCheckSpendable(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) PaychVoucherCheckSpendable(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychVoucherCheckValid(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error { + return s.Internal.PaychVoucherCheckValid(p0, p1, p2) +} + +func (s *FullNodeStub) PaychVoucherCheckValid(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychVoucherCreate(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*api.VoucherCreateResult, error) { + return s.Internal.PaychVoucherCreate(p0, p1, p2, p3) +} + +func (s *FullNodeStub) PaychVoucherCreate(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*api.VoucherCreateResult, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) { + return s.Internal.PaychVoucherList(p0, p1) +} + +func (s *FullNodeStub) PaychVoucherList(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) { + return *new([]*paych.SignedVoucher), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) PaychVoucherSubmit(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) { + return s.Internal.PaychVoucherSubmit(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) PaychVoucherSubmit(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) { + return *new(cid.Cid), 
xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return s.Internal.StateAccountKey(p0, p1, p2) +} + +func (s *FullNodeStub) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateAllMinerFaults(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*api.Fault, error) { + return s.Internal.StateAllMinerFaults(p0, p1, p2) +} + +func (s *FullNodeStub) StateAllMinerFaults(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*api.Fault, error) { + return *new([]*api.Fault), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) { + return s.Internal.StateCall(p0, p1, p2) +} + +func (s *FullNodeStub) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateChangedActors(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) { + return s.Internal.StateChangedActors(p0, p1, p2) +} + +func (s *FullNodeStub) StateChangedActors(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) { + return *new(map[string]types.Actor), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) { + return s.Internal.StateCirculatingSupply(p0, p1) +} + +func (s *FullNodeStub) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) { + return *new(abi.TokenAmount), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateCompute(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 
types.TipSetKey) (*api.ComputeStateOutput, error) { + return s.Internal.StateCompute(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateCompute(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*api.ComputeStateOutput, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) { + return s.Internal.StateDealProviderCollateralBounds(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) { + return *new(api.DealCollateralBounds), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) { + return s.Internal.StateDecodeParams(p0, p1, p2, p3, p4) +} + +func (s *FullNodeStub) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { + return s.Internal.StateGetActor(p0, p1, p2) +} + +func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) { + return s.Internal.StateGetReceipt(p0, p1, p2) +} + +func (s *FullNodeStub) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateListActors(p0 
context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return s.Internal.StateListActors(p0, p1) +} + +func (s *FullNodeStub) StateListActors(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return *new([]address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateListMessages(p0 context.Context, p1 *api.MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) { + return s.Internal.StateListMessages(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateListMessages(p0 context.Context, p1 *api.MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) { + return *new([]cid.Cid), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return s.Internal.StateListMiners(p0, p1) +} + +func (s *FullNodeStub) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return *new([]address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return s.Internal.StateLookupID(p0, p1, p2) +} + +func (s *FullNodeStub) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) { + return s.Internal.StateMarketBalance(p0, p1, p2) +} + +func (s *FullNodeStub) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) { + return *new(api.MarketBalance), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMarketDeals(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketDeal, error) { + return s.Internal.StateMarketDeals(p0, p1) +} 
+ +func (s *FullNodeStub) StateMarketDeals(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketDeal, error) { + return *new(map[string]api.MarketDeal), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMarketParticipants(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketBalance, error) { + return s.Internal.StateMarketParticipants(p0, p1) +} + +func (s *FullNodeStub) StateMarketParticipants(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketBalance, error) { + return *new(map[string]api.MarketBalance), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) { + return s.Internal.StateMarketStorageDeal(p0, p1, p2) +} + +func (s *FullNodeStub) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerActiveSectors(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + return s.Internal.StateMinerActiveSectors(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerActiveSectors(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + return *new([]*miner.SectorOnChainInfo), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + return s.Internal.StateMinerAvailableBalance(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]api.Deadline, error) { + return s.Internal.StateMinerDeadlines(p0, 
p1, p2) +} + +func (s *FullNodeStub) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]api.Deadline, error) { + return *new([]api.Deadline), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerFaults(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + return s.Internal.StateMinerFaults(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerFaults(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + return *new(bitfield.BitField), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { + return s.Internal.StateMinerInfo(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { + return *new(miner.MinerInfo), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerInitialPledgeCollateral(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { + return s.Internal.StateMinerInitialPledgeCollateral(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateMinerInitialPledgeCollateral(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerPartitions(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]api.Partition, error) { + return s.Internal.StateMinerPartitions(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateMinerPartitions(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]api.Partition, error) { + return *new([]api.Partition), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerPower(p0 context.Context, p1 address.Address, p2 
types.TipSetKey) (*api.MinerPower, error) { + return s.Internal.StateMinerPower(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerPreCommitDepositForPower(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { + return s.Internal.StateMinerPreCommitDepositForPower(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateMinerPreCommitDepositForPower(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { + return s.Internal.StateMinerProvingDeadline(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerRecoveries(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + return s.Internal.StateMinerRecoveries(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerRecoveries(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) { + return *new(bitfield.BitField), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerSectorAllocated(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) { + return s.Internal.StateMinerSectorAllocated(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateMinerSectorAllocated(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s 
*FullNodeStruct) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) { + return s.Internal.StateMinerSectorCount(p0, p1, p2) +} + +func (s *FullNodeStub) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) { + return *new(api.MinerSectors), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateMinerSectors(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + return s.Internal.StateMinerSectors(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateMinerSectors(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + return *new([]*miner.SectorOnChainInfo), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) { + return s.Internal.StateNetworkName(p0) +} + +func (s *FullNodeStub) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) { + return *new(dtypes.NetworkName), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) { + return s.Internal.StateNetworkVersion(p0, p1) +} + +func (s *FullNodeStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) { + return *new(apitypes.NetworkVersion), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.ActorState, error) { + return s.Internal.StateReadState(p0, p1, p2) +} + +func (s *FullNodeStub) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.ActorState, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) 
(*api.InvocResult, error) { + return s.Internal.StateReplay(p0, p1, p2) +} + +func (s *FullNodeStub) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) { + return s.Internal.StateSearchMsg(p0, p1) +} + +func (s *FullNodeStub) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateSearchMsgLimited(p0 context.Context, p1 cid.Cid, p2 abi.ChainEpoch) (*api.MsgLookup, error) { + return s.Internal.StateSearchMsgLimited(p0, p1, p2) +} + +func (s *FullNodeStub) StateSearchMsgLimited(p0 context.Context, p1 cid.Cid, p2 abi.ChainEpoch) (*api.MsgLookup, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) { + return s.Internal.StateSectorExpiration(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + return s.Internal.StateSectorGetInfo(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) { + return s.Internal.StateSectorPartition(p0, p1, 
p2, p3) +} + +func (s *FullNodeStub) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateSectorPreCommitInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { + return s.Internal.StateSectorPreCommitInfo(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateSectorPreCommitInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { + return *new(miner.SectorPreCommitOnChainInfo), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateVMCirculatingSupplyInternal(p0 context.Context, p1 types.TipSetKey) (api.CirculatingSupply, error) { + return s.Internal.StateVMCirculatingSupplyInternal(p0, p1) +} + +func (s *FullNodeStub) StateVMCirculatingSupplyInternal(p0 context.Context, p1 types.TipSetKey) (api.CirculatingSupply, error) { + return *new(api.CirculatingSupply), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return s.Internal.StateVerifiedClientStatus(p0, p1, p2) +} + +func (s *FullNodeStub) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateVerifiedRegistryRootKey(p0 context.Context, p1 types.TipSetKey) (address.Address, error) { + return s.Internal.StateVerifiedRegistryRootKey(p0, p1) +} + +func (s *FullNodeStub) StateVerifiedRegistryRootKey(p0 context.Context, p1 types.TipSetKey) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateVerifierStatus(p0 context.Context, p1 
address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return s.Internal.StateVerifierStatus(p0, p1, p2) +} + +func (s *FullNodeStub) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) { + return s.Internal.StateWaitMsg(p0, p1, p2) +} + +func (s *FullNodeStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) StateWaitMsgLimited(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch) (*api.MsgLookup, error) { + return s.Internal.StateWaitMsgLimited(p0, p1, p2, p3) +} + +func (s *FullNodeStub) StateWaitMsgLimited(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch) (*api.MsgLookup, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) { + return s.Internal.SyncCheckBad(p0, p1) +} + +func (s *FullNodeStub) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) { + return "", xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncCheckpoint(p0 context.Context, p1 types.TipSetKey) error { + return s.Internal.SyncCheckpoint(p0, p1) +} + +func (s *FullNodeStub) SyncCheckpoint(p0 context.Context, p1 types.TipSetKey) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncIncomingBlocks(p0 context.Context) (<-chan *types.BlockHeader, error) { + return s.Internal.SyncIncomingBlocks(p0) +} + +func (s *FullNodeStub) SyncIncomingBlocks(p0 context.Context) (<-chan *types.BlockHeader, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncMarkBad(p0 context.Context, p1 cid.Cid) error { + return s.Internal.SyncMarkBad(p0, p1) +} + +func (s 
*FullNodeStub) SyncMarkBad(p0 context.Context, p1 cid.Cid) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncState(p0 context.Context) (*api.SyncState, error) { + return s.Internal.SyncState(p0) +} + +func (s *FullNodeStub) SyncState(p0 context.Context) (*api.SyncState, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncSubmitBlock(p0 context.Context, p1 *types.BlockMsg) error { + return s.Internal.SyncSubmitBlock(p0, p1) +} + +func (s *FullNodeStub) SyncSubmitBlock(p0 context.Context, p1 *types.BlockMsg) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncUnmarkAllBad(p0 context.Context) error { + return s.Internal.SyncUnmarkAllBad(p0) +} + +func (s *FullNodeStub) SyncUnmarkAllBad(p0 context.Context) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncUnmarkBad(p0 context.Context, p1 cid.Cid) error { + return s.Internal.SyncUnmarkBad(p0, p1) +} + +func (s *FullNodeStub) SyncUnmarkBad(p0 context.Context, p1 cid.Cid) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) SyncValidateTipset(p0 context.Context, p1 types.TipSetKey) (bool, error) { + return s.Internal.SyncValidateTipset(p0, p1) +} + +func (s *FullNodeStub) SyncValidateTipset(p0 context.Context, p1 types.TipSetKey) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { + return s.Internal.WalletBalance(p0, p1) +} + +func (s *FullNodeStub) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletDefaultAddress(p0 context.Context) (address.Address, error) { + return s.Internal.WalletDefaultAddress(p0) +} + +func (s *FullNodeStub) WalletDefaultAddress(p0 context.Context) (address.Address, 
error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletDelete(p0 context.Context, p1 address.Address) error { + return s.Internal.WalletDelete(p0, p1) +} + +func (s *FullNodeStub) WalletDelete(p0 context.Context, p1 address.Address) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) { + return s.Internal.WalletExport(p0, p1) +} + +func (s *FullNodeStub) WalletExport(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { + return s.Internal.WalletHas(p0, p1) +} + +func (s *FullNodeStub) WalletHas(p0 context.Context, p1 address.Address) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { + return s.Internal.WalletImport(p0, p1) +} + +func (s *FullNodeStub) WalletImport(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletList(p0 context.Context) ([]address.Address, error) { + return s.Internal.WalletList(p0) +} + +func (s *FullNodeStub) WalletList(p0 context.Context) ([]address.Address, error) { + return *new([]address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) { + return s.Internal.WalletNew(p0, p1) +} + +func (s *FullNodeStub) WalletNew(p0 context.Context, p1 types.KeyType) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletSetDefault(p0 context.Context, p1 address.Address) error { + return 
s.Internal.WalletSetDefault(p0, p1) +} + +func (s *FullNodeStub) WalletSetDefault(p0 context.Context, p1 address.Address) error { + return xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletSign(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) { + return s.Internal.WalletSign(p0, p1, p2) +} + +func (s *FullNodeStub) WalletSign(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletSignMessage(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) { + return s.Internal.WalletSignMessage(p0, p1, p2) +} + +func (s *FullNodeStub) WalletSignMessage(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletValidateAddress(p0 context.Context, p1 string) (address.Address, error) { + return s.Internal.WalletValidateAddress(p0, p1) +} + +func (s *FullNodeStub) WalletValidateAddress(p0 context.Context, p1 string) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) WalletVerify(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) { + return s.Internal.WalletVerify(p0, p1, p2, p3) +} + +func (s *FullNodeStub) WalletVerify(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) { + return s.Internal.ChainGetBlockMessages(p0, p1) +} + +func (s *GatewayStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) 
(*types.Message, error) { + return s.Internal.ChainGetMessage(p0, p1) +} + +func (s *GatewayStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { + return s.Internal.ChainGetTipSet(p0, p1) +} + +func (s *GatewayStub) ChainGetTipSet(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + return s.Internal.ChainGetTipSetByHeight(p0, p1, p2) +} + +func (s *GatewayStub) ChainGetTipSetByHeight(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + return s.Internal.ChainHasObj(p0, p1) +} + +func (s *GatewayStub) ChainHasObj(p0 context.Context, p1 cid.Cid) (bool, error) { + return false, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainHead(p0 context.Context) (*types.TipSet, error) { + return s.Internal.ChainHead(p0) +} + +func (s *GatewayStub) ChainHead(p0 context.Context) (*types.TipSet, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainNotify(p0 context.Context) (<-chan []*api.HeadChange, error) { + return s.Internal.ChainNotify(p0) +} + +func (s *GatewayStub) ChainNotify(p0 context.Context) (<-chan []*api.HeadChange, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + return s.Internal.ChainReadObj(p0, p1) +} + +func (s *GatewayStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, error) { + return *new([]byte), xerrors.New("method not supported") +} 
+ +func (s *GatewayStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { + return s.Internal.GasEstimateMessageGas(p0, p1, p2, p3) +} + +func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return s.Internal.MpoolPush(p0, p1) +} + +func (s *GatewayStub) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { + return *new(cid.Cid), xerrors.New("method not supported") +} + +func (s *GatewayStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + return s.Internal.MsigGetAvailableBalance(p0, p1, p2) +} + +func (s *GatewayStub) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *GatewayStruct) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) { + return s.Internal.MsigGetPending(p0, p1, p2) +} + +func (s *GatewayStub) MsigGetPending(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) { + return *new([]*api.MsigTransaction), xerrors.New("method not supported") +} + +func (s *GatewayStruct) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { + return s.Internal.MsigGetVested(p0, p1, p2, p3) +} + +func (s *GatewayStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateAccountKey(p0 context.Context, 
p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return s.Internal.StateAccountKey(p0, p1, p2) +} + +func (s *GatewayStub) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) { + return s.Internal.StateDealProviderCollateralBounds(p0, p1, p2, p3) +} + +func (s *GatewayStub) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) { + return *new(api.DealCollateralBounds), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { + return s.Internal.StateGetActor(p0, p1, p2) +} + +func (s *GatewayStub) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) { + return s.Internal.StateGetReceipt(p0, p1, p2) +} + +func (s *GatewayStub) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return s.Internal.StateListMiners(p0, p1) +} + +func (s *GatewayStub) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) { + return *new([]address.Address), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return 
s.Internal.StateLookupID(p0, p1, p2) +} + +func (s *GatewayStub) StateLookupID(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { + return *new(address.Address), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) { + return s.Internal.StateMarketBalance(p0, p1, p2) +} + +func (s *GatewayStub) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) { + return *new(api.MarketBalance), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) { + return s.Internal.StateMarketStorageDeal(p0, p1, p2) +} + +func (s *GatewayStub) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { + return s.Internal.StateMinerInfo(p0, p1, p2) +} + +func (s *GatewayStub) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (miner.MinerInfo, error) { + return *new(miner.MinerInfo), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) { + return s.Internal.StateMinerPower(p0, p1, p2) +} + +func (s *GatewayStub) StateMinerPower(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { + return s.Internal.StateMinerProvingDeadline(p0, p1, p2) +} + +func (s *GatewayStub) StateMinerProvingDeadline(p0 
context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) { + return s.Internal.StateNetworkVersion(p0, p1) +} + +func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) { + return *new(network.Version), xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) { + return s.Internal.StateSearchMsg(p0, p1) +} + +func (s *GatewayStub) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + return s.Internal.StateSectorGetInfo(p0, p1, p2, p3) +} + +func (s *GatewayStub) StateSectorGetInfo(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return s.Internal.StateVerifiedClientStatus(p0, p1, p2) +} + +func (s *GatewayStub) StateVerifiedClientStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) { + return s.Internal.StateWaitMsg(p0, p1, p2) +} + +func (s *GatewayStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) { + return nil, xerrors.New("method not supported") +} + +func (s *GatewayStruct) Version(p0 context.Context) (api.APIVersion, 
error) { + return s.Internal.Version(p0) +} + +func (s *GatewayStub) Version(p0 context.Context) (api.APIVersion, error) { + return *new(api.APIVersion), xerrors.New("method not supported") +} + +func (s *GatewayStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { + return s.Internal.WalletBalance(p0, p1) +} + +func (s *GatewayStub) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { + return *new(types.BigInt), xerrors.New("method not supported") +} + +var _ FullNode = new(FullNodeStruct) +var _ Gateway = new(GatewayStruct) diff --git a/api/v0api/v0mocks/mock_full.go b/api/v0api/v0mocks/mock_full.go new file mode 100644 index 00000000000..6a4ef690ed1 --- /dev/null +++ b/api/v0api/v0mocks/mock_full.go @@ -0,0 +1,3079 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/lotus/api/v0api (interfaces: FullNode) + +// Package v0mocks is a generated GoMock package. +package v0mocks + +import ( + context "context" + reflect "reflect" + + address "github.com/filecoin-project/go-address" + bitfield "github.com/filecoin-project/go-bitfield" + datatransfer "github.com/filecoin-project/go-data-transfer" + retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" + storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" + auth "github.com/filecoin-project/go-jsonrpc/auth" + multistore "github.com/filecoin-project/go-multistore" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + crypto "github.com/filecoin-project/go-state-types/crypto" + dline "github.com/filecoin-project/go-state-types/dline" + network "github.com/filecoin-project/go-state-types/network" + api "github.com/filecoin-project/lotus/api" + apitypes "github.com/filecoin-project/lotus/api/types" + miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + types "github.com/filecoin-project/lotus/chain/types" + 
marketevents "github.com/filecoin-project/lotus/markets/loggers" + dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + paych "github.com/filecoin-project/specs-actors/actors/builtin/paych" + gomock "github.com/golang/mock/gomock" + uuid "github.com/google/uuid" + cid "github.com/ipfs/go-cid" + metrics "github.com/libp2p/go-libp2p-core/metrics" + network0 "github.com/libp2p/go-libp2p-core/network" + peer "github.com/libp2p/go-libp2p-core/peer" + protocol "github.com/libp2p/go-libp2p-core/protocol" +) + +// MockFullNode is a mock of FullNode interface. +type MockFullNode struct { + ctrl *gomock.Controller + recorder *MockFullNodeMockRecorder +} + +// MockFullNodeMockRecorder is the mock recorder for MockFullNode. +type MockFullNodeMockRecorder struct { + mock *MockFullNode +} + +// NewMockFullNode creates a new mock instance. +func NewMockFullNode(ctrl *gomock.Controller) *MockFullNode { + mock := &MockFullNode{ctrl: ctrl} + mock.recorder = &MockFullNodeMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFullNode) EXPECT() *MockFullNodeMockRecorder { + return m.recorder +} + +// AuthNew mocks base method. +func (m *MockFullNode) AuthNew(arg0 context.Context, arg1 []auth.Permission) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AuthNew", arg0, arg1) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AuthNew indicates an expected call of AuthNew. +func (mr *MockFullNodeMockRecorder) AuthNew(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthNew", reflect.TypeOf((*MockFullNode)(nil).AuthNew), arg0, arg1) +} + +// AuthVerify mocks base method. 
+func (m *MockFullNode) AuthVerify(arg0 context.Context, arg1 string) ([]auth.Permission, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AuthVerify", arg0, arg1) + ret0, _ := ret[0].([]auth.Permission) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AuthVerify indicates an expected call of AuthVerify. +func (mr *MockFullNodeMockRecorder) AuthVerify(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthVerify", reflect.TypeOf((*MockFullNode)(nil).AuthVerify), arg0, arg1) +} + +// BeaconGetEntry mocks base method. +func (m *MockFullNode) BeaconGetEntry(arg0 context.Context, arg1 abi.ChainEpoch) (*types.BeaconEntry, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeaconGetEntry", arg0, arg1) + ret0, _ := ret[0].(*types.BeaconEntry) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeaconGetEntry indicates an expected call of BeaconGetEntry. +func (mr *MockFullNodeMockRecorder) BeaconGetEntry(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeaconGetEntry", reflect.TypeOf((*MockFullNode)(nil).BeaconGetEntry), arg0, arg1) +} + +// ChainDeleteObj mocks base method. +func (m *MockFullNode) ChainDeleteObj(arg0 context.Context, arg1 cid.Cid) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainDeleteObj", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainDeleteObj indicates an expected call of ChainDeleteObj. +func (mr *MockFullNodeMockRecorder) ChainDeleteObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainDeleteObj", reflect.TypeOf((*MockFullNode)(nil).ChainDeleteObj), arg0, arg1) +} + +// ChainExport mocks base method. 
+func (m *MockFullNode) ChainExport(arg0 context.Context, arg1 abi.ChainEpoch, arg2 bool, arg3 types.TipSetKey) (<-chan []byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainExport", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(<-chan []byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainExport indicates an expected call of ChainExport. +func (mr *MockFullNodeMockRecorder) ChainExport(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExport", reflect.TypeOf((*MockFullNode)(nil).ChainExport), arg0, arg1, arg2, arg3) +} + +// ChainGetBlock mocks base method. +func (m *MockFullNode) ChainGetBlock(arg0 context.Context, arg1 cid.Cid) (*types.BlockHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetBlock", arg0, arg1) + ret0, _ := ret[0].(*types.BlockHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetBlock indicates an expected call of ChainGetBlock. +func (mr *MockFullNodeMockRecorder) ChainGetBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlock", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlock), arg0, arg1) +} + +// ChainGetBlockMessages mocks base method. +func (m *MockFullNode) ChainGetBlockMessages(arg0 context.Context, arg1 cid.Cid) (*api.BlockMessages, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetBlockMessages", arg0, arg1) + ret0, _ := ret[0].(*api.BlockMessages) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetBlockMessages indicates an expected call of ChainGetBlockMessages. 
+func (mr *MockFullNodeMockRecorder) ChainGetBlockMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlockMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlockMessages), arg0, arg1) +} + +// ChainGetGenesis mocks base method. +func (m *MockFullNode) ChainGetGenesis(arg0 context.Context) (*types.TipSet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetGenesis", arg0) + ret0, _ := ret[0].(*types.TipSet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetGenesis indicates an expected call of ChainGetGenesis. +func (mr *MockFullNodeMockRecorder) ChainGetGenesis(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetGenesis", reflect.TypeOf((*MockFullNode)(nil).ChainGetGenesis), arg0) +} + +// ChainGetMessage mocks base method. +func (m *MockFullNode) ChainGetMessage(arg0 context.Context, arg1 cid.Cid) (*types.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetMessage", arg0, arg1) + ret0, _ := ret[0].(*types.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetMessage indicates an expected call of ChainGetMessage. +func (mr *MockFullNodeMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1) +} + +// ChainGetMessagesInTipset mocks base method. +func (m *MockFullNode) ChainGetMessagesInTipset(arg0 context.Context, arg1 types.TipSetKey) ([]api.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetMessagesInTipset", arg0, arg1) + ret0, _ := ret[0].([]api.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetMessagesInTipset indicates an expected call of ChainGetMessagesInTipset. 
+func (mr *MockFullNodeMockRecorder) ChainGetMessagesInTipset(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessagesInTipset", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessagesInTipset), arg0, arg1) +} + +// ChainGetNode mocks base method. +func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.IpldObject, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetNode", arg0, arg1) + ret0, _ := ret[0].(*api.IpldObject) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetNode indicates an expected call of ChainGetNode. +func (mr *MockFullNodeMockRecorder) ChainGetNode(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetNode", reflect.TypeOf((*MockFullNode)(nil).ChainGetNode), arg0, arg1) +} + +// ChainGetParentMessages mocks base method. +func (m *MockFullNode) ChainGetParentMessages(arg0 context.Context, arg1 cid.Cid) ([]api.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetParentMessages", arg0, arg1) + ret0, _ := ret[0].([]api.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetParentMessages indicates an expected call of ChainGetParentMessages. +func (mr *MockFullNodeMockRecorder) ChainGetParentMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentMessages), arg0, arg1) +} + +// ChainGetParentReceipts mocks base method. 
+func (m *MockFullNode) ChainGetParentReceipts(arg0 context.Context, arg1 cid.Cid) ([]*types.MessageReceipt, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetParentReceipts", arg0, arg1) + ret0, _ := ret[0].([]*types.MessageReceipt) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetParentReceipts indicates an expected call of ChainGetParentReceipts. +func (mr *MockFullNodeMockRecorder) ChainGetParentReceipts(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentReceipts", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentReceipts), arg0, arg1) +} + +// ChainGetPath mocks base method. +func (m *MockFullNode) ChainGetPath(arg0 context.Context, arg1, arg2 types.TipSetKey) ([]*api.HeadChange, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetPath", arg0, arg1, arg2) + ret0, _ := ret[0].([]*api.HeadChange) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetPath indicates an expected call of ChainGetPath. +func (mr *MockFullNodeMockRecorder) ChainGetPath(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetPath", reflect.TypeOf((*MockFullNode)(nil).ChainGetPath), arg0, arg1, arg2) +} + +// ChainGetRandomnessFromBeacon mocks base method. +func (m *MockFullNode) ChainGetRandomnessFromBeacon(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetRandomnessFromBeacon", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(abi.Randomness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetRandomnessFromBeacon indicates an expected call of ChainGetRandomnessFromBeacon. 
func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromBeacon(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromBeacon", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromBeacon), arg0, arg1, arg2, arg3, arg4)
}

// ChainGetRandomnessFromTickets mocks base method.
func (m *MockFullNode) ChainGetRandomnessFromTickets(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ChainGetRandomnessFromTickets", arg0, arg1, arg2, arg3, arg4)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].(abi.Randomness)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ChainGetRandomnessFromTickets indicates an expected call of ChainGetRandomnessFromTickets.
func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromTickets(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromTickets", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromTickets), arg0, arg1, arg2, arg3, arg4)
}

// ChainGetTipSet mocks base method.
func (m *MockFullNode) ChainGetTipSet(arg0 context.Context, arg1 types.TipSetKey) (*types.TipSet, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ChainGetTipSet", arg0, arg1)
	ret0, _ := ret[0].(*types.TipSet)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ChainGetTipSet indicates an expected call of ChainGetTipSet.
func (mr *MockFullNodeMockRecorder) ChainGetTipSet(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSet", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSet), arg0, arg1)
}

// ChainGetTipSetByHeight mocks base method.
func (m *MockFullNode) ChainGetTipSetByHeight(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) (*types.TipSet, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ChainGetTipSetByHeight", arg0, arg1, arg2)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].(*types.TipSet)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ChainGetTipSetByHeight indicates an expected call of ChainGetTipSetByHeight.
func (mr *MockFullNodeMockRecorder) ChainGetTipSetByHeight(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSetByHeight", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSetByHeight), arg0, arg1, arg2)
}

// ChainHasObj mocks base method.
func (m *MockFullNode) ChainHasObj(arg0 context.Context, arg1 cid.Cid) (bool, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ChainHasObj", arg0, arg1)
	ret0, _ := ret[0].(bool)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ChainHasObj indicates an expected call of ChainHasObj.
func (mr *MockFullNodeMockRecorder) ChainHasObj(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHasObj", reflect.TypeOf((*MockFullNode)(nil).ChainHasObj), arg0, arg1)
}

// ChainHead mocks base method.
func (m *MockFullNode) ChainHead(arg0 context.Context) (*types.TipSet, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ChainHead", arg0)
	ret0, _ := ret[0].(*types.TipSet)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ChainHead indicates an expected call of ChainHead.
func (mr *MockFullNodeMockRecorder) ChainHead(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockFullNode)(nil).ChainHead), arg0)
}

// ChainNotify mocks base method.
func (m *MockFullNode) ChainNotify(arg0 context.Context) (<-chan []*api.HeadChange, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ChainNotify", arg0)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].(<-chan []*api.HeadChange)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ChainNotify indicates an expected call of ChainNotify.
func (mr *MockFullNodeMockRecorder) ChainNotify(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainNotify", reflect.TypeOf((*MockFullNode)(nil).ChainNotify), arg0)
}

// ChainReadObj mocks base method.
func (m *MockFullNode) ChainReadObj(arg0 context.Context, arg1 cid.Cid) ([]byte, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ChainReadObj", arg0, arg1)
	ret0, _ := ret[0].([]byte)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ChainReadObj indicates an expected call of ChainReadObj.
func (mr *MockFullNodeMockRecorder) ChainReadObj(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainReadObj", reflect.TypeOf((*MockFullNode)(nil).ChainReadObj), arg0, arg1)
}

// ChainSetHead mocks base method.
func (m *MockFullNode) ChainSetHead(arg0 context.Context, arg1 types.TipSetKey) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ChainSetHead", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// ChainSetHead indicates an expected call of ChainSetHead.
func (mr *MockFullNodeMockRecorder) ChainSetHead(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainSetHead", reflect.TypeOf((*MockFullNode)(nil).ChainSetHead), arg0, arg1)
}

// ChainStatObj mocks base method.
func (m *MockFullNode) ChainStatObj(arg0 context.Context, arg1, arg2 cid.Cid) (api.ObjStat, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ChainStatObj", arg0, arg1, arg2)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].(api.ObjStat)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ChainStatObj indicates an expected call of ChainStatObj.
func (mr *MockFullNodeMockRecorder) ChainStatObj(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainStatObj", reflect.TypeOf((*MockFullNode)(nil).ChainStatObj), arg0, arg1, arg2)
}

// ChainTipSetWeight mocks base method.
func (m *MockFullNode) ChainTipSetWeight(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ChainTipSetWeight", arg0, arg1)
	ret0, _ := ret[0].(big.Int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ChainTipSetWeight indicates an expected call of ChainTipSetWeight.
func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1)
}

// ClientCalcCommP mocks base method.
func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api.CommPRet, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientCalcCommP", arg0, arg1)
	ret0, _ := ret[0].(*api.CommPRet)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientCalcCommP indicates an expected call of ClientCalcCommP.
func (mr *MockFullNodeMockRecorder) ClientCalcCommP(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCalcCommP", reflect.TypeOf((*MockFullNode)(nil).ClientCalcCommP), arg0, arg1)
}

// ClientCancelDataTransfer mocks base method.
func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientCancelDataTransfer", arg0, arg1, arg2, arg3)
	// Blank-assigned type assertion: a missing/mismatched stubbed return falls back to nil.
	ret0, _ := ret[0].(error)
	return ret0
}

// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer.
func (mr *MockFullNodeMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3)
}

// ClientCancelRetrievalDeal mocks base method.
func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retrievalmarket.DealID) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientCancelRetrievalDeal", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal.
func (mr *MockFullNodeMockRecorder) ClientCancelRetrievalDeal(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelRetrievalDeal", reflect.TypeOf((*MockFullNode)(nil).ClientCancelRetrievalDeal), arg0, arg1)
}

// ClientDataTransferUpdates mocks base method.
func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan api.DataTransferChannel, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientDataTransferUpdates", arg0)
	ret0, _ := ret[0].(<-chan api.DataTransferChannel)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates.
func (mr *MockFullNodeMockRecorder) ClientDataTransferUpdates(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDataTransferUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientDataTransferUpdates), arg0)
}

// ClientDealPieceCID mocks base method.
func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (api.DataCIDSize, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientDealPieceCID", arg0, arg1)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].(api.DataCIDSize)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientDealPieceCID indicates an expected call of ClientDealPieceCID.
func (mr *MockFullNodeMockRecorder) ClientDealPieceCID(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealPieceCID", reflect.TypeOf((*MockFullNode)(nil).ClientDealPieceCID), arg0, arg1)
}

// ClientDealSize mocks base method.
func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.DataSize, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientDealSize", arg0, arg1)
	ret0, _ := ret[0].(api.DataSize)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientDealSize indicates an expected call of ClientDealSize.
func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1)
}

// ClientFindData mocks base method.
func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientFindData", arg0, arg1, arg2)
	ret0, _ := ret[0].([]api.QueryOffer)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientFindData indicates an expected call of ClientFindData.
func (mr *MockFullNodeMockRecorder) ClientFindData(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientFindData", reflect.TypeOf((*MockFullNode)(nil).ClientFindData), arg0, arg1, arg2)
}

// ClientGenCar mocks base method.
func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientGenCar", arg0, arg1, arg2)
	// Blank-assigned type assertion: a missing/mismatched stubbed return falls back to nil.
	ret0, _ := ret[0].(error)
	return ret0
}

// ClientGenCar indicates an expected call of ClientGenCar.
func (mr *MockFullNodeMockRecorder) ClientGenCar(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGenCar", reflect.TypeOf((*MockFullNode)(nil).ClientGenCar), arg0, arg1, arg2)
}

// ClientGetDealInfo mocks base method.
func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*api.DealInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientGetDealInfo", arg0, arg1)
	ret0, _ := ret[0].(*api.DealInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientGetDealInfo indicates an expected call of ClientGetDealInfo.
func (mr *MockFullNodeMockRecorder) ClientGetDealInfo(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealInfo", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealInfo), arg0, arg1)
}

// ClientGetDealStatus mocks base method.
func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientGetDealStatus", arg0, arg1)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientGetDealStatus indicates an expected call of ClientGetDealStatus.
func (mr *MockFullNodeMockRecorder) ClientGetDealStatus(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealStatus", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealStatus), arg0, arg1)
}

// ClientGetDealUpdates mocks base method.
func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.DealInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientGetDealUpdates", arg0)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].(<-chan api.DealInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates.
func (mr *MockFullNodeMockRecorder) ClientGetDealUpdates(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealUpdates), arg0)
}

// ClientGetRetrievalUpdates mocks base method.
func (m *MockFullNode) ClientGetRetrievalUpdates(arg0 context.Context) (<-chan api.RetrievalInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientGetRetrievalUpdates", arg0)
	ret0, _ := ret[0].(<-chan api.RetrievalInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientGetRetrievalUpdates indicates an expected call of ClientGetRetrievalUpdates.
func (mr *MockFullNodeMockRecorder) ClientGetRetrievalUpdates(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetRetrievalUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetRetrievalUpdates), arg0)
}

// ClientHasLocal mocks base method.
func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientHasLocal", arg0, arg1)
	ret0, _ := ret[0].(bool)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientHasLocal indicates an expected call of ClientHasLocal.
func (mr *MockFullNodeMockRecorder) ClientHasLocal(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientHasLocal", reflect.TypeOf((*MockFullNode)(nil).ClientHasLocal), arg0, arg1)
}

// ClientImport mocks base method.
func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*api.ImportRes, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientImport", arg0, arg1)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].(*api.ImportRes)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientImport indicates an expected call of ClientImport.
func (mr *MockFullNodeMockRecorder) ClientImport(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientImport", reflect.TypeOf((*MockFullNode)(nil).ClientImport), arg0, arg1)
}

// ClientListDataTransfers mocks base method.
func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.DataTransferChannel, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientListDataTransfers", arg0)
	ret0, _ := ret[0].([]api.DataTransferChannel)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientListDataTransfers indicates an expected call of ClientListDataTransfers.
func (mr *MockFullNodeMockRecorder) ClientListDataTransfers(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDataTransfers", reflect.TypeOf((*MockFullNode)(nil).ClientListDataTransfers), arg0)
}

// ClientListDeals mocks base method.
func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientListDeals", arg0)
	ret0, _ := ret[0].([]api.DealInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientListDeals indicates an expected call of ClientListDeals.
func (mr *MockFullNodeMockRecorder) ClientListDeals(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDeals", reflect.TypeOf((*MockFullNode)(nil).ClientListDeals), arg0)
}

// ClientListImports mocks base method.
func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientListImports", arg0)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].([]api.Import)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientListImports indicates an expected call of ClientListImports.
func (mr *MockFullNodeMockRecorder) ClientListImports(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListImports", reflect.TypeOf((*MockFullNode)(nil).ClientListImports), arg0)
}

// ClientListRetrievals mocks base method.
func (m *MockFullNode) ClientListRetrievals(arg0 context.Context) ([]api.RetrievalInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientListRetrievals", arg0)
	ret0, _ := ret[0].([]api.RetrievalInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientListRetrievals indicates an expected call of ClientListRetrievals.
func (mr *MockFullNodeMockRecorder) ClientListRetrievals(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListRetrievals", reflect.TypeOf((*MockFullNode)(nil).ClientListRetrievals), arg0)
}

// ClientMinerQueryOffer mocks base method.
func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 *cid.Cid) (api.QueryOffer, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientMinerQueryOffer", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(api.QueryOffer)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer.
func (mr *MockFullNodeMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientMinerQueryOffer", reflect.TypeOf((*MockFullNode)(nil).ClientMinerQueryOffer), arg0, arg1, arg2, arg3)
}

// ClientQueryAsk mocks base method.
func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*storagemarket.StorageAsk, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].(*storagemarket.StorageAsk)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientQueryAsk indicates an expected call of ClientQueryAsk.
func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientQueryAsk", reflect.TypeOf((*MockFullNode)(nil).ClientQueryAsk), arg0, arg1, arg2)
}

// ClientRemoveImport mocks base method.
func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 multistore.StoreID) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// ClientRemoveImport indicates an expected call of ClientRemoveImport.
func (mr *MockFullNodeMockRecorder) ClientRemoveImport(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRemoveImport", reflect.TypeOf((*MockFullNode)(nil).ClientRemoveImport), arg0, arg1)
}

// ClientRestartDataTransfer mocks base method.
func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientRestartDataTransfer", arg0, arg1, arg2, arg3)
	// Blank-assigned type assertion: a missing/mismatched stubbed return falls back to nil.
	ret0, _ := ret[0].(error)
	return ret0
}

// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer.
func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRestartDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientRestartDataTransfer), arg0, arg1, arg2, arg3)
}

// ClientRetrieve mocks base method.
func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// ClientRetrieve indicates an expected call of ClientRetrieve.
func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2)
}

// ClientRetrieveTryRestartInsufficientFunds mocks base method.
func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Context, arg1 address.Address) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientRetrieveTryRestartInsufficientFunds", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds.
func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1)
}

// ClientRetrieveWithEvents mocks base method.
func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents.
func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2)
}

// ClientStartDeal mocks base method.
func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientStartDeal", arg0, arg1)
	ret0, _ := ret[0].(*cid.Cid)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientStartDeal indicates an expected call of ClientStartDeal.
func (mr *MockFullNodeMockRecorder) ClientStartDeal(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStartDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStartDeal), arg0, arg1)
}

// ClientStatelessDeal mocks base method.
func (m *MockFullNode) ClientStatelessDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClientStatelessDeal", arg0, arg1)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].(*cid.Cid)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ClientStatelessDeal indicates an expected call of ClientStatelessDeal.
func (mr *MockFullNodeMockRecorder) ClientStatelessDeal(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStatelessDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStatelessDeal), arg0, arg1)
}

// Closing mocks base method.
func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Closing", arg0)
	ret0, _ := ret[0].(<-chan struct{})
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Closing indicates an expected call of Closing.
func (mr *MockFullNodeMockRecorder) Closing(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Closing", reflect.TypeOf((*MockFullNode)(nil).Closing), arg0)
}

// CreateBackup mocks base method.
func (m *MockFullNode) CreateBackup(arg0 context.Context, arg1 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CreateBackup", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// CreateBackup indicates an expected call of CreateBackup.
func (mr *MockFullNodeMockRecorder) CreateBackup(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBackup", reflect.TypeOf((*MockFullNode)(nil).CreateBackup), arg0, arg1)
}

// Discover mocks base method.
func (m *MockFullNode) Discover(arg0 context.Context) (apitypes.OpenRPCDocument, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Discover", arg0)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].(apitypes.OpenRPCDocument)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Discover indicates an expected call of Discover.
func (mr *MockFullNodeMockRecorder) Discover(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discover", reflect.TypeOf((*MockFullNode)(nil).Discover), arg0)
}

// GasEstimateFeeCap mocks base method.
func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *types.Message, arg2 int64, arg3 types.TipSetKey) (big.Int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GasEstimateFeeCap", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(big.Int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GasEstimateFeeCap indicates an expected call of GasEstimateFeeCap.
func (mr *MockFullNodeMockRecorder) GasEstimateFeeCap(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateFeeCap", reflect.TypeOf((*MockFullNode)(nil).GasEstimateFeeCap), arg0, arg1, arg2, arg3)
}

// GasEstimateGasLimit mocks base method.
func (m *MockFullNode) GasEstimateGasLimit(arg0 context.Context, arg1 *types.Message, arg2 types.TipSetKey) (int64, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GasEstimateGasLimit", arg0, arg1, arg2)
	ret0, _ := ret[0].(int64)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GasEstimateGasLimit indicates an expected call of GasEstimateGasLimit.
func (mr *MockFullNodeMockRecorder) GasEstimateGasLimit(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasLimit", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasLimit), arg0, arg1, arg2)
}

// GasEstimateGasPremium mocks base method.
func (m *MockFullNode) GasEstimateGasPremium(arg0 context.Context, arg1 uint64, arg2 address.Address, arg3 int64, arg4 types.TipSetKey) (big.Int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GasEstimateGasPremium", arg0, arg1, arg2, arg3, arg4)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].(big.Int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GasEstimateGasPremium indicates an expected call of GasEstimateGasPremium.
func (mr *MockFullNodeMockRecorder) GasEstimateGasPremium(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasPremium", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasPremium), arg0, arg1, arg2, arg3, arg4)
}

// GasEstimateMessageGas mocks base method.
func (m *MockFullNode) GasEstimateMessageGas(arg0 context.Context, arg1 *types.Message, arg2 *api.MessageSendSpec, arg3 types.TipSetKey) (*types.Message, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GasEstimateMessageGas", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(*types.Message)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GasEstimateMessageGas indicates an expected call of GasEstimateMessageGas.
func (mr *MockFullNodeMockRecorder) GasEstimateMessageGas(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3)
}

// ID mocks base method.
func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ID", arg0)
	ret0, _ := ret[0].(peer.ID)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ID indicates an expected call of ID.
func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockFullNode)(nil).ID), arg0)
}

// LogList mocks base method.
func (m *MockFullNode) LogList(arg0 context.Context) ([]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "LogList", arg0)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// LogList indicates an expected call of LogList.
func (mr *MockFullNodeMockRecorder) LogList(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogList", reflect.TypeOf((*MockFullNode)(nil).LogList), arg0)
}

// LogSetLevel mocks base method.
func (m *MockFullNode) LogSetLevel(arg0 context.Context, arg1, arg2 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "LogSetLevel", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// LogSetLevel indicates an expected call of LogSetLevel.
func (mr *MockFullNodeMockRecorder) LogSetLevel(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogSetLevel", reflect.TypeOf((*MockFullNode)(nil).LogSetLevel), arg0, arg1, arg2)
}

// MarketAddBalance mocks base method.
func (m *MockFullNode) MarketAddBalance(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "MarketAddBalance", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(cid.Cid)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// MarketAddBalance indicates an expected call of MarketAddBalance.
func (mr *MockFullNodeMockRecorder) MarketAddBalance(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketAddBalance", reflect.TypeOf((*MockFullNode)(nil).MarketAddBalance), arg0, arg1, arg2, arg3)
}

// MarketGetReserved mocks base method.
func (m *MockFullNode) MarketGetReserved(arg0 context.Context, arg1 address.Address) (big.Int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "MarketGetReserved", arg0, arg1)
	// Blank-assigned type assertions: a missing/mismatched stubbed return falls back to the zero value.
	ret0, _ := ret[0].(big.Int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// MarketGetReserved indicates an expected call of MarketGetReserved.
func (mr *MockFullNodeMockRecorder) MarketGetReserved(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketGetReserved", reflect.TypeOf((*MockFullNode)(nil).MarketGetReserved), arg0, arg1)
}

// MarketReleaseFunds mocks base method.
func (m *MockFullNode) MarketReleaseFunds(arg0 context.Context, arg1 address.Address, arg2 big.Int) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "MarketReleaseFunds", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// MarketReleaseFunds indicates an expected call of MarketReleaseFunds.
func (mr *MockFullNodeMockRecorder) MarketReleaseFunds(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReleaseFunds", reflect.TypeOf((*MockFullNode)(nil).MarketReleaseFunds), arg0, arg1, arg2)
}

// MarketReserveFunds mocks base method.
func (m *MockFullNode) MarketReserveFunds(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "MarketReserveFunds", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(cid.Cid)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// MarketReserveFunds indicates an expected call of MarketReserveFunds.
+func (mr *MockFullNodeMockRecorder) MarketReserveFunds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReserveFunds", reflect.TypeOf((*MockFullNode)(nil).MarketReserveFunds), arg0, arg1, arg2, arg3) +} + +// MarketWithdraw mocks base method. +func (m *MockFullNode) MarketWithdraw(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarketWithdraw", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarketWithdraw indicates an expected call of MarketWithdraw. +func (mr *MockFullNodeMockRecorder) MarketWithdraw(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketWithdraw", reflect.TypeOf((*MockFullNode)(nil).MarketWithdraw), arg0, arg1, arg2, arg3) +} + +// MinerCreateBlock mocks base method. +func (m *MockFullNode) MinerCreateBlock(arg0 context.Context, arg1 *api.BlockTemplate) (*types.BlockMsg, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MinerCreateBlock", arg0, arg1) + ret0, _ := ret[0].(*types.BlockMsg) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MinerCreateBlock indicates an expected call of MinerCreateBlock. +func (mr *MockFullNodeMockRecorder) MinerCreateBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerCreateBlock", reflect.TypeOf((*MockFullNode)(nil).MinerCreateBlock), arg0, arg1) +} + +// MinerGetBaseInfo mocks base method. 
+func (m *MockFullNode) MinerGetBaseInfo(arg0 context.Context, arg1 address.Address, arg2 abi.ChainEpoch, arg3 types.TipSetKey) (*api.MiningBaseInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MinerGetBaseInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*api.MiningBaseInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MinerGetBaseInfo indicates an expected call of MinerGetBaseInfo. +func (mr *MockFullNodeMockRecorder) MinerGetBaseInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerGetBaseInfo", reflect.TypeOf((*MockFullNode)(nil).MinerGetBaseInfo), arg0, arg1, arg2, arg3) +} + +// MpoolBatchPush mocks base method. +func (m *MockFullNode) MpoolBatchPush(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPush", arg0, arg1) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPush indicates an expected call of MpoolBatchPush. +func (mr *MockFullNodeMockRecorder) MpoolBatchPush(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPush", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPush), arg0, arg1) +} + +// MpoolBatchPushMessage mocks base method. +func (m *MockFullNode) MpoolBatchPushMessage(arg0 context.Context, arg1 []*types.Message, arg2 *api.MessageSendSpec) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPushMessage", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPushMessage indicates an expected call of MpoolBatchPushMessage. 
+func (mr *MockFullNodeMockRecorder) MpoolBatchPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushMessage), arg0, arg1, arg2) +} + +// MpoolBatchPushUntrusted mocks base method. +func (m *MockFullNode) MpoolBatchPushUntrusted(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolBatchPushUntrusted", arg0, arg1) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolBatchPushUntrusted indicates an expected call of MpoolBatchPushUntrusted. +func (mr *MockFullNodeMockRecorder) MpoolBatchPushUntrusted(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushUntrusted), arg0, arg1) +} + +// MpoolClear mocks base method. +func (m *MockFullNode) MpoolClear(arg0 context.Context, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolClear", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolClear indicates an expected call of MpoolClear. +func (mr *MockFullNodeMockRecorder) MpoolClear(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolClear", reflect.TypeOf((*MockFullNode)(nil).MpoolClear), arg0, arg1) +} + +// MpoolGetConfig mocks base method. +func (m *MockFullNode) MpoolGetConfig(arg0 context.Context) (*types.MpoolConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolGetConfig", arg0) + ret0, _ := ret[0].(*types.MpoolConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolGetConfig indicates an expected call of MpoolGetConfig. 
+func (mr *MockFullNodeMockRecorder) MpoolGetConfig(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolGetConfig), arg0) +} + +// MpoolGetNonce mocks base method. +func (m *MockFullNode) MpoolGetNonce(arg0 context.Context, arg1 address.Address) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolGetNonce", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolGetNonce indicates an expected call of MpoolGetNonce. +func (mr *MockFullNodeMockRecorder) MpoolGetNonce(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetNonce", reflect.TypeOf((*MockFullNode)(nil).MpoolGetNonce), arg0, arg1) +} + +// MpoolPending mocks base method. +func (m *MockFullNode) MpoolPending(arg0 context.Context, arg1 types.TipSetKey) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPending", arg0, arg1) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPending indicates an expected call of MpoolPending. +func (mr *MockFullNodeMockRecorder) MpoolPending(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPending", reflect.TypeOf((*MockFullNode)(nil).MpoolPending), arg0, arg1) +} + +// MpoolPush mocks base method. +func (m *MockFullNode) MpoolPush(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPush", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPush indicates an expected call of MpoolPush. 
+func (mr *MockFullNodeMockRecorder) MpoolPush(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPush", reflect.TypeOf((*MockFullNode)(nil).MpoolPush), arg0, arg1) +} + +// MpoolPushMessage mocks base method. +func (m *MockFullNode) MpoolPushMessage(arg0 context.Context, arg1 *types.Message, arg2 *api.MessageSendSpec) (*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPushMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPushMessage indicates an expected call of MpoolPushMessage. +func (mr *MockFullNodeMockRecorder) MpoolPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolPushMessage), arg0, arg1, arg2) +} + +// MpoolPushUntrusted mocks base method. +func (m *MockFullNode) MpoolPushUntrusted(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPushUntrusted", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPushUntrusted indicates an expected call of MpoolPushUntrusted. +func (mr *MockFullNodeMockRecorder) MpoolPushUntrusted(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolPushUntrusted), arg0, arg1) +} + +// MpoolSelect mocks base method. 
+func (m *MockFullNode) MpoolSelect(arg0 context.Context, arg1 types.TipSetKey, arg2 float64) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSelect", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolSelect indicates an expected call of MpoolSelect. +func (mr *MockFullNodeMockRecorder) MpoolSelect(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSelect", reflect.TypeOf((*MockFullNode)(nil).MpoolSelect), arg0, arg1, arg2) +} + +// MpoolSetConfig mocks base method. +func (m *MockFullNode) MpoolSetConfig(arg0 context.Context, arg1 *types.MpoolConfig) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSetConfig", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// MpoolSetConfig indicates an expected call of MpoolSetConfig. +func (mr *MockFullNodeMockRecorder) MpoolSetConfig(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolSetConfig), arg0, arg1) +} + +// MpoolSub mocks base method. +func (m *MockFullNode) MpoolSub(arg0 context.Context) (<-chan api.MpoolUpdate, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolSub", arg0) + ret0, _ := ret[0].(<-chan api.MpoolUpdate) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolSub indicates an expected call of MpoolSub. +func (mr *MockFullNodeMockRecorder) MpoolSub(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSub", reflect.TypeOf((*MockFullNode)(nil).MpoolSub), arg0) +} + +// MsigAddApprove mocks base method. 
+func (m *MockFullNode) MsigAddApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address, arg6 bool) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddApprove indicates an expected call of MsigAddApprove. +func (mr *MockFullNodeMockRecorder) MsigAddApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddApprove", reflect.TypeOf((*MockFullNode)(nil).MsigAddApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigAddCancel mocks base method. +func (m *MockFullNode) MsigAddCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4 address.Address, arg5 bool) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddCancel", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddCancel indicates an expected call of MsigAddCancel. +func (mr *MockFullNodeMockRecorder) MsigAddCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddCancel", reflect.TypeOf((*MockFullNode)(nil).MsigAddCancel), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// MsigAddPropose mocks base method. +func (m *MockFullNode) MsigAddPropose(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigAddPropose", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigAddPropose indicates an expected call of MsigAddPropose. 
+func (mr *MockFullNodeMockRecorder) MsigAddPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddPropose", reflect.TypeOf((*MockFullNode)(nil).MsigAddPropose), arg0, arg1, arg2, arg3, arg4) +} + +// MsigApprove mocks base method. +func (m *MockFullNode) MsigApprove(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigApprove", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigApprove indicates an expected call of MsigApprove. +func (mr *MockFullNodeMockRecorder) MsigApprove(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApprove", reflect.TypeOf((*MockFullNode)(nil).MsigApprove), arg0, arg1, arg2, arg3) +} + +// MsigApproveTxnHash mocks base method. +func (m *MockFullNode) MsigApproveTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3, arg4 address.Address, arg5 big.Int, arg6 address.Address, arg7 uint64, arg8 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigApproveTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigApproveTxnHash indicates an expected call of MsigApproveTxnHash. +func (mr *MockFullNodeMockRecorder) MsigApproveTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApproveTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigApproveTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) +} + +// MsigCancel mocks base method. 
+func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCancel indicates an expected call of MsigCancel. +func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) +} + +// MsigCreate mocks base method. +func (m *MockFullNode) MsigCreate(arg0 context.Context, arg1 uint64, arg2 []address.Address, arg3 abi.ChainEpoch, arg4 big.Int, arg5 address.Address, arg6 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCreate", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCreate indicates an expected call of MsigCreate. +func (mr *MockFullNodeMockRecorder) MsigCreate(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCreate", reflect.TypeOf((*MockFullNode)(nil).MsigCreate), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigGetAvailableBalance mocks base method. +func (m *MockFullNode) MsigGetAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigGetAvailableBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigGetAvailableBalance indicates an expected call of MsigGetAvailableBalance. 
+func (mr *MockFullNodeMockRecorder) MsigGetAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).MsigGetAvailableBalance), arg0, arg1, arg2) +} + +// MsigGetPending mocks base method. +func (m *MockFullNode) MsigGetPending(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*api.MsigTransaction, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigGetPending", arg0, arg1, arg2) + ret0, _ := ret[0].([]*api.MsigTransaction) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigGetPending indicates an expected call of MsigGetPending. +func (mr *MockFullNodeMockRecorder) MsigGetPending(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetPending", reflect.TypeOf((*MockFullNode)(nil).MsigGetPending), arg0, arg1, arg2) +} + +// MsigGetVested mocks base method. +func (m *MockFullNode) MsigGetVested(arg0 context.Context, arg1 address.Address, arg2, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigGetVested", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigGetVested indicates an expected call of MsigGetVested. +func (mr *MockFullNodeMockRecorder) MsigGetVested(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVested", reflect.TypeOf((*MockFullNode)(nil).MsigGetVested), arg0, arg1, arg2, arg3) +} + +// MsigGetVestingSchedule mocks base method. 
+func (m *MockFullNode) MsigGetVestingSchedule(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MsigVesting, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigGetVestingSchedule", arg0, arg1, arg2) + ret0, _ := ret[0].(api.MsigVesting) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigGetVestingSchedule indicates an expected call of MsigGetVestingSchedule. +func (mr *MockFullNodeMockRecorder) MsigGetVestingSchedule(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVestingSchedule", reflect.TypeOf((*MockFullNode)(nil).MsigGetVestingSchedule), arg0, arg1, arg2) +} + +// MsigPropose mocks base method. +func (m *MockFullNode) MsigPropose(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int, arg4 address.Address, arg5 uint64, arg6 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigPropose", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigPropose indicates an expected call of MsigPropose. +func (mr *MockFullNodeMockRecorder) MsigPropose(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigPropose", reflect.TypeOf((*MockFullNode)(nil).MsigPropose), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigRemoveSigner mocks base method. +func (m *MockFullNode) MsigRemoveSigner(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigRemoveSigner", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigRemoveSigner indicates an expected call of MsigRemoveSigner. 
+func (mr *MockFullNodeMockRecorder) MsigRemoveSigner(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigRemoveSigner", reflect.TypeOf((*MockFullNode)(nil).MsigRemoveSigner), arg0, arg1, arg2, arg3, arg4) +} + +// MsigSwapApprove mocks base method. +func (m *MockFullNode) MsigSwapApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5, arg6 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapApprove indicates an expected call of MsigSwapApprove. +func (mr *MockFullNodeMockRecorder) MsigSwapApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapApprove", reflect.TypeOf((*MockFullNode)(nil).MsigSwapApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// MsigSwapCancel mocks base method. +func (m *MockFullNode) MsigSwapCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapCancel", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapCancel indicates an expected call of MsigSwapCancel. +func (mr *MockFullNodeMockRecorder) MsigSwapCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapCancel", reflect.TypeOf((*MockFullNode)(nil).MsigSwapCancel), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// MsigSwapPropose mocks base method. 
+func (m *MockFullNode) MsigSwapPropose(arg0 context.Context, arg1, arg2, arg3, arg4 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigSwapPropose", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigSwapPropose indicates an expected call of MsigSwapPropose. +func (mr *MockFullNodeMockRecorder) MsigSwapPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapPropose", reflect.TypeOf((*MockFullNode)(nil).MsigSwapPropose), arg0, arg1, arg2, arg3, arg4) +} + +// NetAddrsListen mocks base method. +func (m *MockFullNode) NetAddrsListen(arg0 context.Context) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAddrsListen", arg0) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAddrsListen indicates an expected call of NetAddrsListen. +func (mr *MockFullNodeMockRecorder) NetAddrsListen(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAddrsListen", reflect.TypeOf((*MockFullNode)(nil).NetAddrsListen), arg0) +} + +// NetAgentVersion mocks base method. +func (m *MockFullNode) NetAgentVersion(arg0 context.Context, arg1 peer.ID) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAgentVersion", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAgentVersion indicates an expected call of NetAgentVersion. +func (mr *MockFullNodeMockRecorder) NetAgentVersion(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAgentVersion", reflect.TypeOf((*MockFullNode)(nil).NetAgentVersion), arg0, arg1) +} + +// NetAutoNatStatus mocks base method. 
+func (m *MockFullNode) NetAutoNatStatus(arg0 context.Context) (api.NatInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetAutoNatStatus", arg0) + ret0, _ := ret[0].(api.NatInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetAutoNatStatus indicates an expected call of NetAutoNatStatus. +func (mr *MockFullNodeMockRecorder) NetAutoNatStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAutoNatStatus", reflect.TypeOf((*MockFullNode)(nil).NetAutoNatStatus), arg0) +} + +// NetBandwidthStats mocks base method. +func (m *MockFullNode) NetBandwidthStats(arg0 context.Context) (metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStats", arg0) + ret0, _ := ret[0].(metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStats indicates an expected call of NetBandwidthStats. +func (mr *MockFullNodeMockRecorder) NetBandwidthStats(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStats", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStats), arg0) +} + +// NetBandwidthStatsByPeer mocks base method. +func (m *MockFullNode) NetBandwidthStatsByPeer(arg0 context.Context) (map[string]metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStatsByPeer", arg0) + ret0, _ := ret[0].(map[string]metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStatsByPeer indicates an expected call of NetBandwidthStatsByPeer. +func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByPeer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByPeer", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByPeer), arg0) +} + +// NetBandwidthStatsByProtocol mocks base method. 
+func (m *MockFullNode) NetBandwidthStatsByProtocol(arg0 context.Context) (map[protocol.ID]metrics.Stats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBandwidthStatsByProtocol", arg0) + ret0, _ := ret[0].(map[protocol.ID]metrics.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBandwidthStatsByProtocol indicates an expected call of NetBandwidthStatsByProtocol. +func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByProtocol(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByProtocol", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByProtocol), arg0) +} + +// NetBlockAdd mocks base method. +func (m *MockFullNode) NetBlockAdd(arg0 context.Context, arg1 api.NetBlockList) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBlockAdd", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetBlockAdd indicates an expected call of NetBlockAdd. +func (mr *MockFullNodeMockRecorder) NetBlockAdd(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockAdd", reflect.TypeOf((*MockFullNode)(nil).NetBlockAdd), arg0, arg1) +} + +// NetBlockList mocks base method. +func (m *MockFullNode) NetBlockList(arg0 context.Context) (api.NetBlockList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBlockList", arg0) + ret0, _ := ret[0].(api.NetBlockList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetBlockList indicates an expected call of NetBlockList. +func (mr *MockFullNodeMockRecorder) NetBlockList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockList", reflect.TypeOf((*MockFullNode)(nil).NetBlockList), arg0) +} + +// NetBlockRemove mocks base method. 
+func (m *MockFullNode) NetBlockRemove(arg0 context.Context, arg1 api.NetBlockList) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetBlockRemove", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetBlockRemove indicates an expected call of NetBlockRemove. +func (mr *MockFullNodeMockRecorder) NetBlockRemove(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockRemove", reflect.TypeOf((*MockFullNode)(nil).NetBlockRemove), arg0, arg1) +} + +// NetConnect mocks base method. +func (m *MockFullNode) NetConnect(arg0 context.Context, arg1 peer.AddrInfo) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetConnect", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetConnect indicates an expected call of NetConnect. +func (mr *MockFullNodeMockRecorder) NetConnect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnect", reflect.TypeOf((*MockFullNode)(nil).NetConnect), arg0, arg1) +} + +// NetConnectedness mocks base method. +func (m *MockFullNode) NetConnectedness(arg0 context.Context, arg1 peer.ID) (network0.Connectedness, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetConnectedness", arg0, arg1) + ret0, _ := ret[0].(network0.Connectedness) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetConnectedness indicates an expected call of NetConnectedness. +func (mr *MockFullNodeMockRecorder) NetConnectedness(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnectedness", reflect.TypeOf((*MockFullNode)(nil).NetConnectedness), arg0, arg1) +} + +// NetDisconnect mocks base method. 
+func (m *MockFullNode) NetDisconnect(arg0 context.Context, arg1 peer.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetDisconnect", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetDisconnect indicates an expected call of NetDisconnect. +func (mr *MockFullNodeMockRecorder) NetDisconnect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetDisconnect", reflect.TypeOf((*MockFullNode)(nil).NetDisconnect), arg0, arg1) +} + +// NetFindPeer mocks base method. +func (m *MockFullNode) NetFindPeer(arg0 context.Context, arg1 peer.ID) (peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetFindPeer", arg0, arg1) + ret0, _ := ret[0].(peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetFindPeer indicates an expected call of NetFindPeer. +func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1) +} + +// NetPeerInfo mocks base method. +func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPeerInfo", arg0, arg1) + ret0, _ := ret[0].(*api.ExtendedPeerInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPeerInfo indicates an expected call of NetPeerInfo. +func (mr *MockFullNodeMockRecorder) NetPeerInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeerInfo", reflect.TypeOf((*MockFullNode)(nil).NetPeerInfo), arg0, arg1) +} + +// NetPeers mocks base method. 
+func (m *MockFullNode) NetPeers(arg0 context.Context) ([]peer.AddrInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPeers", arg0) + ret0, _ := ret[0].([]peer.AddrInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPeers indicates an expected call of NetPeers. +func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0) +} + +// NetPubsubScores mocks base method. +func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetPubsubScores", arg0) + ret0, _ := ret[0].([]api.PubsubScore) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetPubsubScores indicates an expected call of NetPubsubScores. +func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0) +} + +// PaychAllocateLane mocks base method. +func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Address) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAllocateLane", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAllocateLane indicates an expected call of PaychAllocateLane. +func (mr *MockFullNodeMockRecorder) PaychAllocateLane(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAllocateLane", reflect.TypeOf((*MockFullNode)(nil).PaychAllocateLane), arg0, arg1) +} + +// PaychAvailableFunds mocks base method. 
+func (m *MockFullNode) PaychAvailableFunds(arg0 context.Context, arg1 address.Address) (*api.ChannelAvailableFunds, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAvailableFunds", arg0, arg1) + ret0, _ := ret[0].(*api.ChannelAvailableFunds) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAvailableFunds indicates an expected call of PaychAvailableFunds. +func (mr *MockFullNodeMockRecorder) PaychAvailableFunds(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFunds", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFunds), arg0, arg1) +} + +// PaychAvailableFundsByFromTo mocks base method. +func (m *MockFullNode) PaychAvailableFundsByFromTo(arg0 context.Context, arg1, arg2 address.Address) (*api.ChannelAvailableFunds, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychAvailableFundsByFromTo", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.ChannelAvailableFunds) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychAvailableFundsByFromTo indicates an expected call of PaychAvailableFundsByFromTo. +func (mr *MockFullNodeMockRecorder) PaychAvailableFundsByFromTo(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFundsByFromTo", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFundsByFromTo), arg0, arg1, arg2) +} + +// PaychCollect mocks base method. +func (m *MockFullNode) PaychCollect(arg0 context.Context, arg1 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychCollect", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychCollect indicates an expected call of PaychCollect. 
+func (mr *MockFullNodeMockRecorder) PaychCollect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychCollect", reflect.TypeOf((*MockFullNode)(nil).PaychCollect), arg0, arg1) +} + +// PaychGet mocks base method. +func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (*api.ChannelInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychGet", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*api.ChannelInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychGet indicates an expected call of PaychGet. +func (mr *MockFullNodeMockRecorder) PaychGet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGet", reflect.TypeOf((*MockFullNode)(nil).PaychGet), arg0, arg1, arg2, arg3) +} + +// PaychGetWaitReady mocks base method. +func (m *MockFullNode) PaychGetWaitReady(arg0 context.Context, arg1 cid.Cid) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychGetWaitReady", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychGetWaitReady indicates an expected call of PaychGetWaitReady. +func (mr *MockFullNodeMockRecorder) PaychGetWaitReady(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGetWaitReady", reflect.TypeOf((*MockFullNode)(nil).PaychGetWaitReady), arg0, arg1) +} + +// PaychList mocks base method. +func (m *MockFullNode) PaychList(arg0 context.Context) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychList", arg0) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychList indicates an expected call of PaychList. 
+func (mr *MockFullNodeMockRecorder) PaychList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychList", reflect.TypeOf((*MockFullNode)(nil).PaychList), arg0) +} + +// PaychNewPayment mocks base method. +func (m *MockFullNode) PaychNewPayment(arg0 context.Context, arg1, arg2 address.Address, arg3 []api.VoucherSpec) (*api.PaymentInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychNewPayment", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*api.PaymentInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychNewPayment indicates an expected call of PaychNewPayment. +func (mr *MockFullNodeMockRecorder) PaychNewPayment(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychNewPayment", reflect.TypeOf((*MockFullNode)(nil).PaychNewPayment), arg0, arg1, arg2, arg3) +} + +// PaychSettle mocks base method. +func (m *MockFullNode) PaychSettle(arg0 context.Context, arg1 address.Address) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychSettle", arg0, arg1) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychSettle indicates an expected call of PaychSettle. +func (mr *MockFullNodeMockRecorder) PaychSettle(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychSettle", reflect.TypeOf((*MockFullNode)(nil).PaychSettle), arg0, arg1) +} + +// PaychStatus mocks base method. +func (m *MockFullNode) PaychStatus(arg0 context.Context, arg1 address.Address) (*api.PaychStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychStatus", arg0, arg1) + ret0, _ := ret[0].(*api.PaychStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychStatus indicates an expected call of PaychStatus. 
+func (mr *MockFullNodeMockRecorder) PaychStatus(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychStatus", reflect.TypeOf((*MockFullNode)(nil).PaychStatus), arg0, arg1) +} + +// PaychVoucherAdd mocks base method. +func (m *MockFullNode) PaychVoucherAdd(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3 []byte, arg4 big.Int) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherAdd", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherAdd indicates an expected call of PaychVoucherAdd. +func (mr *MockFullNodeMockRecorder) PaychVoucherAdd(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherAdd", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherAdd), arg0, arg1, arg2, arg3, arg4) +} + +// PaychVoucherCheckSpendable mocks base method. +func (m *MockFullNode) PaychVoucherCheckSpendable(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherCheckSpendable", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherCheckSpendable indicates an expected call of PaychVoucherCheckSpendable. +func (mr *MockFullNodeMockRecorder) PaychVoucherCheckSpendable(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckSpendable", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckSpendable), arg0, arg1, arg2, arg3, arg4) +} + +// PaychVoucherCheckValid mocks base method. 
+func (m *MockFullNode) PaychVoucherCheckValid(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherCheckValid", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// PaychVoucherCheckValid indicates an expected call of PaychVoucherCheckValid. +func (mr *MockFullNodeMockRecorder) PaychVoucherCheckValid(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckValid", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckValid), arg0, arg1, arg2) +} + +// PaychVoucherCreate mocks base method. +func (m *MockFullNode) PaychVoucherCreate(arg0 context.Context, arg1 address.Address, arg2 big.Int, arg3 uint64) (*api.VoucherCreateResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherCreate", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*api.VoucherCreateResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherCreate indicates an expected call of PaychVoucherCreate. +func (mr *MockFullNodeMockRecorder) PaychVoucherCreate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCreate", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCreate), arg0, arg1, arg2, arg3) +} + +// PaychVoucherList mocks base method. +func (m *MockFullNode) PaychVoucherList(arg0 context.Context, arg1 address.Address) ([]*paych.SignedVoucher, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherList", arg0, arg1) + ret0, _ := ret[0].([]*paych.SignedVoucher) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherList indicates an expected call of PaychVoucherList. 
+func (mr *MockFullNodeMockRecorder) PaychVoucherList(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherList", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherList), arg0, arg1) +} + +// PaychVoucherSubmit mocks base method. +func (m *MockFullNode) PaychVoucherSubmit(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaychVoucherSubmit", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaychVoucherSubmit indicates an expected call of PaychVoucherSubmit. +func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4) +} + +// Session mocks base method. +func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Session", arg0) + ret0, _ := ret[0].(uuid.UUID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Session indicates an expected call of Session. +func (mr *MockFullNodeMockRecorder) Session(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Session", reflect.TypeOf((*MockFullNode)(nil).Session), arg0) +} + +// Shutdown mocks base method. +func (m *MockFullNode) Shutdown(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Shutdown", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Shutdown indicates an expected call of Shutdown. 
+func (mr *MockFullNodeMockRecorder) Shutdown(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockFullNode)(nil).Shutdown), arg0) +} + +// StateAccountKey mocks base method. +func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateAccountKey", arg0, arg1, arg2) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateAccountKey indicates an expected call of StateAccountKey. +func (mr *MockFullNodeMockRecorder) StateAccountKey(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", reflect.TypeOf((*MockFullNode)(nil).StateAccountKey), arg0, arg1, arg2) +} + +// StateAllMinerFaults mocks base method. +func (m *MockFullNode) StateAllMinerFaults(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) ([]*api.Fault, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateAllMinerFaults", arg0, arg1, arg2) + ret0, _ := ret[0].([]*api.Fault) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateAllMinerFaults indicates an expected call of StateAllMinerFaults. +func (mr *MockFullNodeMockRecorder) StateAllMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAllMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateAllMinerFaults), arg0, arg1, arg2) +} + +// StateCall mocks base method. 
+func (m *MockFullNode) StateCall(arg0 context.Context, arg1 *types.Message, arg2 types.TipSetKey) (*api.InvocResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateCall", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.InvocResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateCall indicates an expected call of StateCall. +func (mr *MockFullNodeMockRecorder) StateCall(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCall", reflect.TypeOf((*MockFullNode)(nil).StateCall), arg0, arg1, arg2) +} + +// StateChangedActors mocks base method. +func (m *MockFullNode) StateChangedActors(arg0 context.Context, arg1, arg2 cid.Cid) (map[string]types.Actor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateChangedActors", arg0, arg1, arg2) + ret0, _ := ret[0].(map[string]types.Actor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateChangedActors indicates an expected call of StateChangedActors. +func (mr *MockFullNodeMockRecorder) StateChangedActors(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateChangedActors", reflect.TypeOf((*MockFullNode)(nil).StateChangedActors), arg0, arg1, arg2) +} + +// StateCirculatingSupply mocks base method. +func (m *MockFullNode) StateCirculatingSupply(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateCirculatingSupply", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateCirculatingSupply indicates an expected call of StateCirculatingSupply. 
+func (mr *MockFullNodeMockRecorder) StateCirculatingSupply(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCirculatingSupply", reflect.TypeOf((*MockFullNode)(nil).StateCirculatingSupply), arg0, arg1) +} + +// StateCompute mocks base method. +func (m *MockFullNode) StateCompute(arg0 context.Context, arg1 abi.ChainEpoch, arg2 []*types.Message, arg3 types.TipSetKey) (*api.ComputeStateOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateCompute", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*api.ComputeStateOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateCompute indicates an expected call of StateCompute. +func (mr *MockFullNodeMockRecorder) StateCompute(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCompute", reflect.TypeOf((*MockFullNode)(nil).StateCompute), arg0, arg1, arg2, arg3) +} + +// StateDealProviderCollateralBounds mocks base method. +func (m *MockFullNode) StateDealProviderCollateralBounds(arg0 context.Context, arg1 abi.PaddedPieceSize, arg2 bool, arg3 types.TipSetKey) (api.DealCollateralBounds, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateDealProviderCollateralBounds", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(api.DealCollateralBounds) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateDealProviderCollateralBounds indicates an expected call of StateDealProviderCollateralBounds. +func (mr *MockFullNodeMockRecorder) StateDealProviderCollateralBounds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDealProviderCollateralBounds", reflect.TypeOf((*MockFullNode)(nil).StateDealProviderCollateralBounds), arg0, arg1, arg2, arg3) +} + +// StateDecodeParams mocks base method. 
+func (m *MockFullNode) StateDecodeParams(arg0 context.Context, arg1 address.Address, arg2 abi.MethodNum, arg3 []byte, arg4 types.TipSetKey) (interface{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateDecodeParams", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(interface{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateDecodeParams indicates an expected call of StateDecodeParams. +func (mr *MockFullNodeMockRecorder) StateDecodeParams(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDecodeParams", reflect.TypeOf((*MockFullNode)(nil).StateDecodeParams), arg0, arg1, arg2, arg3, arg4) +} + +// StateGetActor mocks base method. +func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.Actor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetActor", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.Actor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetActor indicates an expected call of StateGetActor. +func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2) +} + +// StateGetReceipt mocks base method. +func (m *MockFullNode) StateGetReceipt(arg0 context.Context, arg1 cid.Cid, arg2 types.TipSetKey) (*types.MessageReceipt, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetReceipt", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.MessageReceipt) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetReceipt indicates an expected call of StateGetReceipt. 
+func (mr *MockFullNodeMockRecorder) StateGetReceipt(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetReceipt", reflect.TypeOf((*MockFullNode)(nil).StateGetReceipt), arg0, arg1, arg2) +} + +// StateListActors mocks base method. +func (m *MockFullNode) StateListActors(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateListActors", arg0, arg1) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateListActors indicates an expected call of StateListActors. +func (mr *MockFullNodeMockRecorder) StateListActors(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListActors", reflect.TypeOf((*MockFullNode)(nil).StateListActors), arg0, arg1) +} + +// StateListMessages mocks base method. +func (m *MockFullNode) StateListMessages(arg0 context.Context, arg1 *api.MessageMatch, arg2 types.TipSetKey, arg3 abi.ChainEpoch) ([]cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateListMessages", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateListMessages indicates an expected call of StateListMessages. +func (mr *MockFullNodeMockRecorder) StateListMessages(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMessages", reflect.TypeOf((*MockFullNode)(nil).StateListMessages), arg0, arg1, arg2, arg3) +} + +// StateListMiners mocks base method. 
+func (m *MockFullNode) StateListMiners(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateListMiners", arg0, arg1) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateListMiners indicates an expected call of StateListMiners. +func (mr *MockFullNodeMockRecorder) StateListMiners(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMiners", reflect.TypeOf((*MockFullNode)(nil).StateListMiners), arg0, arg1) +} + +// StateLookupID mocks base method. +func (m *MockFullNode) StateLookupID(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateLookupID", arg0, arg1, arg2) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateLookupID indicates an expected call of StateLookupID. +func (mr *MockFullNodeMockRecorder) StateLookupID(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateLookupID", reflect.TypeOf((*MockFullNode)(nil).StateLookupID), arg0, arg1, arg2) +} + +// StateMarketBalance mocks base method. +func (m *MockFullNode) StateMarketBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MarketBalance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(api.MarketBalance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketBalance indicates an expected call of StateMarketBalance. 
+func (mr *MockFullNodeMockRecorder) StateMarketBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketBalance", reflect.TypeOf((*MockFullNode)(nil).StateMarketBalance), arg0, arg1, arg2) +} + +// StateMarketDeals mocks base method. +func (m *MockFullNode) StateMarketDeals(arg0 context.Context, arg1 types.TipSetKey) (map[string]api.MarketDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketDeals", arg0, arg1) + ret0, _ := ret[0].(map[string]api.MarketDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketDeals indicates an expected call of StateMarketDeals. +func (mr *MockFullNodeMockRecorder) StateMarketDeals(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketDeals", reflect.TypeOf((*MockFullNode)(nil).StateMarketDeals), arg0, arg1) +} + +// StateMarketParticipants mocks base method. +func (m *MockFullNode) StateMarketParticipants(arg0 context.Context, arg1 types.TipSetKey) (map[string]api.MarketBalance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketParticipants", arg0, arg1) + ret0, _ := ret[0].(map[string]api.MarketBalance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketParticipants indicates an expected call of StateMarketParticipants. +func (mr *MockFullNodeMockRecorder) StateMarketParticipants(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketParticipants", reflect.TypeOf((*MockFullNode)(nil).StateMarketParticipants), arg0, arg1) +} + +// StateMarketStorageDeal mocks base method. 
+func (m *MockFullNode) StateMarketStorageDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*api.MarketDeal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMarketStorageDeal", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.MarketDeal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMarketStorageDeal indicates an expected call of StateMarketStorageDeal. +func (mr *MockFullNodeMockRecorder) StateMarketStorageDeal(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketStorageDeal", reflect.TypeOf((*MockFullNode)(nil).StateMarketStorageDeal), arg0, arg1, arg2) +} + +// StateMinerActiveSectors mocks base method. +func (m *MockFullNode) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerActiveSectors", arg0, arg1, arg2) + ret0, _ := ret[0].([]*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors. +func (mr *MockFullNodeMockRecorder) StateMinerActiveSectors(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerActiveSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerActiveSectors), arg0, arg1, arg2) +} + +// StateMinerAvailableBalance mocks base method. +func (m *MockFullNode) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance. 
+func (mr *MockFullNodeMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).StateMinerAvailableBalance), arg0, arg1, arg2) +} + +// StateMinerDeadlines mocks base method. +func (m *MockFullNode) StateMinerDeadlines(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]api.Deadline, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerDeadlines", arg0, arg1, arg2) + ret0, _ := ret[0].([]api.Deadline) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerDeadlines indicates an expected call of StateMinerDeadlines. +func (mr *MockFullNodeMockRecorder) StateMinerDeadlines(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerDeadlines", reflect.TypeOf((*MockFullNode)(nil).StateMinerDeadlines), arg0, arg1, arg2) +} + +// StateMinerFaults mocks base method. +func (m *MockFullNode) StateMinerFaults(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerFaults", arg0, arg1, arg2) + ret0, _ := ret[0].(bitfield.BitField) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerFaults indicates an expected call of StateMinerFaults. +func (mr *MockFullNodeMockRecorder) StateMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateMinerFaults), arg0, arg1, arg2) +} + +// StateMinerInfo mocks base method. 
+func (m *MockFullNode) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (miner.MinerInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2) + ret0, _ := ret[0].(miner.MinerInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInfo indicates an expected call of StateMinerInfo. +func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockFullNode)(nil).StateMinerInfo), arg0, arg1, arg2) +} + +// StateMinerInitialPledgeCollateral mocks base method. +func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral. +func (mr *MockFullNodeMockRecorder) StateMinerInitialPledgeCollateral(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInitialPledgeCollateral", reflect.TypeOf((*MockFullNode)(nil).StateMinerInitialPledgeCollateral), arg0, arg1, arg2, arg3) +} + +// StateMinerPartitions mocks base method. +func (m *MockFullNode) StateMinerPartitions(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 types.TipSetKey) ([]api.Partition, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerPartitions", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]api.Partition) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerPartitions indicates an expected call of StateMinerPartitions. 
+func (mr *MockFullNodeMockRecorder) StateMinerPartitions(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPartitions", reflect.TypeOf((*MockFullNode)(nil).StateMinerPartitions), arg0, arg1, arg2, arg3) +} + +// StateMinerPower mocks base method. +func (m *MockFullNode) StateMinerPower(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*api.MinerPower, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerPower", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.MinerPower) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerPower indicates an expected call of StateMinerPower. +func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPower), arg0, arg1, arg2) +} + +// StateMinerPreCommitDepositForPower mocks base method. +func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerPreCommitDepositForPower indicates an expected call of StateMinerPreCommitDepositForPower. +func (mr *MockFullNodeMockRecorder) StateMinerPreCommitDepositForPower(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPreCommitDepositForPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPreCommitDepositForPower), arg0, arg1, arg2, arg3) +} + +// StateMinerProvingDeadline mocks base method. 
+func (m *MockFullNode) StateMinerProvingDeadline(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*dline.Info, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerProvingDeadline", arg0, arg1, arg2) + ret0, _ := ret[0].(*dline.Info) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerProvingDeadline indicates an expected call of StateMinerProvingDeadline. +func (mr *MockFullNodeMockRecorder) StateMinerProvingDeadline(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerProvingDeadline", reflect.TypeOf((*MockFullNode)(nil).StateMinerProvingDeadline), arg0, arg1, arg2) +} + +// StateMinerRecoveries mocks base method. +func (m *MockFullNode) StateMinerRecoveries(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerRecoveries", arg0, arg1, arg2) + ret0, _ := ret[0].(bitfield.BitField) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerRecoveries indicates an expected call of StateMinerRecoveries. +func (mr *MockFullNodeMockRecorder) StateMinerRecoveries(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerRecoveries", reflect.TypeOf((*MockFullNode)(nil).StateMinerRecoveries), arg0, arg1, arg2) +} + +// StateMinerSectorAllocated mocks base method. +func (m *MockFullNode) StateMinerSectorAllocated(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerSectorAllocated", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerSectorAllocated indicates an expected call of StateMinerSectorAllocated. 
+func (mr *MockFullNodeMockRecorder) StateMinerSectorAllocated(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorAllocated", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorAllocated), arg0, arg1, arg2, arg3) +} + +// StateMinerSectorCount mocks base method. +func (m *MockFullNode) StateMinerSectorCount(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MinerSectors, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerSectorCount", arg0, arg1, arg2) + ret0, _ := ret[0].(api.MinerSectors) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerSectorCount indicates an expected call of StateMinerSectorCount. +func (mr *MockFullNodeMockRecorder) StateMinerSectorCount(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorCount", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorCount), arg0, arg1, arg2) +} + +// StateMinerSectors mocks base method. +func (m *MockFullNode) StateMinerSectors(arg0 context.Context, arg1 address.Address, arg2 *bitfield.BitField, arg3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerSectors", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerSectors indicates an expected call of StateMinerSectors. +func (mr *MockFullNodeMockRecorder) StateMinerSectors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectors), arg0, arg1, arg2, arg3) +} + +// StateNetworkName mocks base method. 
+func (m *MockFullNode) StateNetworkName(arg0 context.Context) (dtypes.NetworkName, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateNetworkName", arg0) + ret0, _ := ret[0].(dtypes.NetworkName) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateNetworkName indicates an expected call of StateNetworkName. +func (mr *MockFullNodeMockRecorder) StateNetworkName(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkName", reflect.TypeOf((*MockFullNode)(nil).StateNetworkName), arg0) +} + +// StateNetworkVersion mocks base method. +func (m *MockFullNode) StateNetworkVersion(arg0 context.Context, arg1 types.TipSetKey) (network.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateNetworkVersion", arg0, arg1) + ret0, _ := ret[0].(network.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateNetworkVersion indicates an expected call of StateNetworkVersion. +func (mr *MockFullNodeMockRecorder) StateNetworkVersion(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkVersion", reflect.TypeOf((*MockFullNode)(nil).StateNetworkVersion), arg0, arg1) +} + +// StateReadState mocks base method. +func (m *MockFullNode) StateReadState(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*api.ActorState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateReadState", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.ActorState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateReadState indicates an expected call of StateReadState. +func (mr *MockFullNodeMockRecorder) StateReadState(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReadState", reflect.TypeOf((*MockFullNode)(nil).StateReadState), arg0, arg1, arg2) +} + +// StateReplay mocks base method. 
+func (m *MockFullNode) StateReplay(arg0 context.Context, arg1 types.TipSetKey, arg2 cid.Cid) (*api.InvocResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateReplay", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.InvocResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateReplay indicates an expected call of StateReplay. +func (mr *MockFullNodeMockRecorder) StateReplay(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReplay", reflect.TypeOf((*MockFullNode)(nil).StateReplay), arg0, arg1, arg2) +} + +// StateSearchMsg mocks base method. +func (m *MockFullNode) StateSearchMsg(arg0 context.Context, arg1 cid.Cid) (*api.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSearchMsg", arg0, arg1) + ret0, _ := ret[0].(*api.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSearchMsg indicates an expected call of StateSearchMsg. +func (mr *MockFullNodeMockRecorder) StateSearchMsg(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsg", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsg), arg0, arg1) +} + +// StateSearchMsgLimited mocks base method. +func (m *MockFullNode) StateSearchMsgLimited(arg0 context.Context, arg1 cid.Cid, arg2 abi.ChainEpoch) (*api.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSearchMsgLimited", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSearchMsgLimited indicates an expected call of StateSearchMsgLimited. 
+func (mr *MockFullNodeMockRecorder) StateSearchMsgLimited(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsgLimited", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsgLimited), arg0, arg1, arg2) +} + +// StateSectorExpiration mocks base method. +func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorExpiration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorExpiration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorExpiration indicates an expected call of StateSectorExpiration. +func (mr *MockFullNodeMockRecorder) StateSectorExpiration(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorExpiration", reflect.TypeOf((*MockFullNode)(nil).StateSectorExpiration), arg0, arg1, arg2, arg3) +} + +// StateSectorGetInfo mocks base method. +func (m *MockFullNode) StateSectorGetInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorGetInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorGetInfo indicates an expected call of StateSectorGetInfo. +func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorGetInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorGetInfo), arg0, arg1, arg2, arg3) +} + +// StateSectorPartition mocks base method. 
+func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorLocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorLocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorPartition indicates an expected call of StateSectorPartition. +func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPartition", reflect.TypeOf((*MockFullNode)(nil).StateSectorPartition), arg0, arg1, arg2, arg3) +} + +// StateSectorPreCommitInfo mocks base method. +func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(miner.SectorPreCommitOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo. +func (mr *MockFullNodeMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3) +} + +// StateVMCirculatingSupplyInternal mocks base method. 
+func (m *MockFullNode) StateVMCirculatingSupplyInternal(arg0 context.Context, arg1 types.TipSetKey) (api.CirculatingSupply, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVMCirculatingSupplyInternal", arg0, arg1) + ret0, _ := ret[0].(api.CirculatingSupply) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVMCirculatingSupplyInternal indicates an expected call of StateVMCirculatingSupplyInternal. +func (mr *MockFullNodeMockRecorder) StateVMCirculatingSupplyInternal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVMCirculatingSupplyInternal", reflect.TypeOf((*MockFullNode)(nil).StateVMCirculatingSupplyInternal), arg0, arg1) +} + +// StateVerifiedClientStatus mocks base method. +func (m *MockFullNode) StateVerifiedClientStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifiedClientStatus", arg0, arg1, arg2) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifiedClientStatus indicates an expected call of StateVerifiedClientStatus. +func (mr *MockFullNodeMockRecorder) StateVerifiedClientStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedClientStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedClientStatus), arg0, arg1, arg2) +} + +// StateVerifiedRegistryRootKey mocks base method. +func (m *MockFullNode) StateVerifiedRegistryRootKey(arg0 context.Context, arg1 types.TipSetKey) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifiedRegistryRootKey", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifiedRegistryRootKey indicates an expected call of StateVerifiedRegistryRootKey. 
+func (mr *MockFullNodeMockRecorder) StateVerifiedRegistryRootKey(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedRegistryRootKey", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedRegistryRootKey), arg0, arg1) +} + +// StateVerifierStatus mocks base method. +func (m *MockFullNode) StateVerifierStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVerifierStatus", arg0, arg1, arg2) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVerifierStatus indicates an expected call of StateVerifierStatus. +func (mr *MockFullNodeMockRecorder) StateVerifierStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifierStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifierStatus), arg0, arg1, arg2) +} + +// StateWaitMsg mocks base method. +func (m *MockFullNode) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uint64) (*api.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateWaitMsg", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateWaitMsg indicates an expected call of StateWaitMsg. +func (mr *MockFullNodeMockRecorder) StateWaitMsg(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2) +} + +// StateWaitMsgLimited mocks base method. 
+func (m *MockFullNode) StateWaitMsgLimited(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch) (*api.MsgLookup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateWaitMsgLimited", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*api.MsgLookup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateWaitMsgLimited indicates an expected call of StateWaitMsgLimited. +func (mr *MockFullNodeMockRecorder) StateWaitMsgLimited(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsgLimited", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsgLimited), arg0, arg1, arg2, arg3) +} + +// SyncCheckBad mocks base method. +func (m *MockFullNode) SyncCheckBad(arg0 context.Context, arg1 cid.Cid) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncCheckBad", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncCheckBad indicates an expected call of SyncCheckBad. +func (mr *MockFullNodeMockRecorder) SyncCheckBad(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCheckBad", reflect.TypeOf((*MockFullNode)(nil).SyncCheckBad), arg0, arg1) +} + +// SyncCheckpoint mocks base method. +func (m *MockFullNode) SyncCheckpoint(arg0 context.Context, arg1 types.TipSetKey) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncCheckpoint", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncCheckpoint indicates an expected call of SyncCheckpoint. +func (mr *MockFullNodeMockRecorder) SyncCheckpoint(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCheckpoint", reflect.TypeOf((*MockFullNode)(nil).SyncCheckpoint), arg0, arg1) +} + +// SyncIncomingBlocks mocks base method. 
+func (m *MockFullNode) SyncIncomingBlocks(arg0 context.Context) (<-chan *types.BlockHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncIncomingBlocks", arg0) + ret0, _ := ret[0].(<-chan *types.BlockHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncIncomingBlocks indicates an expected call of SyncIncomingBlocks. +func (mr *MockFullNodeMockRecorder) SyncIncomingBlocks(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncIncomingBlocks", reflect.TypeOf((*MockFullNode)(nil).SyncIncomingBlocks), arg0) +} + +// SyncMarkBad mocks base method. +func (m *MockFullNode) SyncMarkBad(arg0 context.Context, arg1 cid.Cid) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncMarkBad", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncMarkBad indicates an expected call of SyncMarkBad. +func (mr *MockFullNodeMockRecorder) SyncMarkBad(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncMarkBad", reflect.TypeOf((*MockFullNode)(nil).SyncMarkBad), arg0, arg1) +} + +// SyncState mocks base method. +func (m *MockFullNode) SyncState(arg0 context.Context) (*api.SyncState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncState", arg0) + ret0, _ := ret[0].(*api.SyncState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncState indicates an expected call of SyncState. +func (mr *MockFullNodeMockRecorder) SyncState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncState", reflect.TypeOf((*MockFullNode)(nil).SyncState), arg0) +} + +// SyncSubmitBlock mocks base method. 
+func (m *MockFullNode) SyncSubmitBlock(arg0 context.Context, arg1 *types.BlockMsg) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncSubmitBlock", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncSubmitBlock indicates an expected call of SyncSubmitBlock. +func (mr *MockFullNodeMockRecorder) SyncSubmitBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncSubmitBlock", reflect.TypeOf((*MockFullNode)(nil).SyncSubmitBlock), arg0, arg1) +} + +// SyncUnmarkAllBad mocks base method. +func (m *MockFullNode) SyncUnmarkAllBad(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncUnmarkAllBad", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncUnmarkAllBad indicates an expected call of SyncUnmarkAllBad. +func (mr *MockFullNodeMockRecorder) SyncUnmarkAllBad(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUnmarkAllBad", reflect.TypeOf((*MockFullNode)(nil).SyncUnmarkAllBad), arg0) +} + +// SyncUnmarkBad mocks base method. +func (m *MockFullNode) SyncUnmarkBad(arg0 context.Context, arg1 cid.Cid) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncUnmarkBad", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncUnmarkBad indicates an expected call of SyncUnmarkBad. +func (mr *MockFullNodeMockRecorder) SyncUnmarkBad(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUnmarkBad", reflect.TypeOf((*MockFullNode)(nil).SyncUnmarkBad), arg0, arg1) +} + +// SyncValidateTipset mocks base method. 
+func (m *MockFullNode) SyncValidateTipset(arg0 context.Context, arg1 types.TipSetKey) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncValidateTipset", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncValidateTipset indicates an expected call of SyncValidateTipset. +func (mr *MockFullNodeMockRecorder) SyncValidateTipset(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncValidateTipset", reflect.TypeOf((*MockFullNode)(nil).SyncValidateTipset), arg0, arg1) +} + +// Version mocks base method. +func (m *MockFullNode) Version(arg0 context.Context) (api.APIVersion, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0) + ret0, _ := ret[0].(api.APIVersion) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. +func (mr *MockFullNodeMockRecorder) Version(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockFullNode)(nil).Version), arg0) +} + +// WalletBalance mocks base method. +func (m *MockFullNode) WalletBalance(arg0 context.Context, arg1 address.Address) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletBalance", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletBalance indicates an expected call of WalletBalance. +func (mr *MockFullNodeMockRecorder) WalletBalance(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletBalance", reflect.TypeOf((*MockFullNode)(nil).WalletBalance), arg0, arg1) +} + +// WalletDefaultAddress mocks base method. 
+func (m *MockFullNode) WalletDefaultAddress(arg0 context.Context) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletDefaultAddress", arg0) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletDefaultAddress indicates an expected call of WalletDefaultAddress. +func (mr *MockFullNodeMockRecorder) WalletDefaultAddress(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDefaultAddress", reflect.TypeOf((*MockFullNode)(nil).WalletDefaultAddress), arg0) +} + +// WalletDelete mocks base method. +func (m *MockFullNode) WalletDelete(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletDelete", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WalletDelete indicates an expected call of WalletDelete. +func (mr *MockFullNodeMockRecorder) WalletDelete(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDelete", reflect.TypeOf((*MockFullNode)(nil).WalletDelete), arg0, arg1) +} + +// WalletExport mocks base method. +func (m *MockFullNode) WalletExport(arg0 context.Context, arg1 address.Address) (*types.KeyInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletExport", arg0, arg1) + ret0, _ := ret[0].(*types.KeyInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletExport indicates an expected call of WalletExport. +func (mr *MockFullNodeMockRecorder) WalletExport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletExport", reflect.TypeOf((*MockFullNode)(nil).WalletExport), arg0, arg1) +} + +// WalletHas mocks base method. 
+func (m *MockFullNode) WalletHas(arg0 context.Context, arg1 address.Address) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletHas", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletHas indicates an expected call of WalletHas. +func (mr *MockFullNodeMockRecorder) WalletHas(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletHas", reflect.TypeOf((*MockFullNode)(nil).WalletHas), arg0, arg1) +} + +// WalletImport mocks base method. +func (m *MockFullNode) WalletImport(arg0 context.Context, arg1 *types.KeyInfo) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletImport", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletImport indicates an expected call of WalletImport. +func (mr *MockFullNodeMockRecorder) WalletImport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletImport", reflect.TypeOf((*MockFullNode)(nil).WalletImport), arg0, arg1) +} + +// WalletList mocks base method. +func (m *MockFullNode) WalletList(arg0 context.Context) ([]address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletList", arg0) + ret0, _ := ret[0].([]address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletList indicates an expected call of WalletList. +func (mr *MockFullNodeMockRecorder) WalletList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletList", reflect.TypeOf((*MockFullNode)(nil).WalletList), arg0) +} + +// WalletNew mocks base method. 
+func (m *MockFullNode) WalletNew(arg0 context.Context, arg1 types.KeyType) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletNew", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletNew indicates an expected call of WalletNew. +func (mr *MockFullNodeMockRecorder) WalletNew(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletNew", reflect.TypeOf((*MockFullNode)(nil).WalletNew), arg0, arg1) +} + +// WalletSetDefault mocks base method. +func (m *MockFullNode) WalletSetDefault(arg0 context.Context, arg1 address.Address) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSetDefault", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WalletSetDefault indicates an expected call of WalletSetDefault. +func (mr *MockFullNodeMockRecorder) WalletSetDefault(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSetDefault", reflect.TypeOf((*MockFullNode)(nil).WalletSetDefault), arg0, arg1) +} + +// WalletSign mocks base method. +func (m *MockFullNode) WalletSign(arg0 context.Context, arg1 address.Address, arg2 []byte) (*crypto.Signature, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSign", arg0, arg1, arg2) + ret0, _ := ret[0].(*crypto.Signature) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletSign indicates an expected call of WalletSign. +func (mr *MockFullNodeMockRecorder) WalletSign(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSign", reflect.TypeOf((*MockFullNode)(nil).WalletSign), arg0, arg1, arg2) +} + +// WalletSignMessage mocks base method. 
+func (m *MockFullNode) WalletSignMessage(arg0 context.Context, arg1 address.Address, arg2 *types.Message) (*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletSignMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletSignMessage indicates an expected call of WalletSignMessage. +func (mr *MockFullNodeMockRecorder) WalletSignMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSignMessage", reflect.TypeOf((*MockFullNode)(nil).WalletSignMessage), arg0, arg1, arg2) +} + +// WalletValidateAddress mocks base method. +func (m *MockFullNode) WalletValidateAddress(arg0 context.Context, arg1 string) (address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletValidateAddress", arg0, arg1) + ret0, _ := ret[0].(address.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletValidateAddress indicates an expected call of WalletValidateAddress. +func (mr *MockFullNodeMockRecorder) WalletValidateAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletValidateAddress", reflect.TypeOf((*MockFullNode)(nil).WalletValidateAddress), arg0, arg1) +} + +// WalletVerify mocks base method. +func (m *MockFullNode) WalletVerify(arg0 context.Context, arg1 address.Address, arg2 []byte, arg3 *crypto.Signature) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalletVerify", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WalletVerify indicates an expected call of WalletVerify. 
+func (mr *MockFullNodeMockRecorder) WalletVerify(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletVerify", reflect.TypeOf((*MockFullNode)(nil).WalletVerify), arg0, arg1, arg2, arg3) +} diff --git a/api/v0api/v1_wrapper.go b/api/v0api/v1_wrapper.go new file mode 100644 index 00000000000..ff4474fe57a --- /dev/null +++ b/api/v0api/v1_wrapper.go @@ -0,0 +1,187 @@ +package v0api + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/types" + "golang.org/x/xerrors" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v1api" +) + +type WrapperV1Full struct { + v1api.FullNode +} + +func (w *WrapperV1Full) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) { + return w.FullNode.StateSearchMsg(ctx, types.EmptyTSK, msg, api.LookbackNoLimit, true) +} + +func (w *WrapperV1Full) StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*api.MsgLookup, error) { + return w.FullNode.StateSearchMsg(ctx, types.EmptyTSK, msg, limit, true) +} + +func (w *WrapperV1Full) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) { + return w.FullNode.StateWaitMsg(ctx, msg, confidence, api.LookbackNoLimit, true) +} + +func (w *WrapperV1Full) StateWaitMsgLimited(ctx context.Context, msg cid.Cid, confidence uint64, limit abi.ChainEpoch) (*api.MsgLookup, error) { + return w.FullNode.StateWaitMsg(ctx, msg, confidence, limit, true) +} + +func (w *WrapperV1Full) StateGetReceipt(ctx context.Context, msg cid.Cid, from types.TipSetKey) (*types.MessageReceipt, error) { + ml, err := w.FullNode.StateSearchMsg(ctx, from, msg, api.LookbackNoLimit, true) + if err != nil { + return nil, err + } + + if ml == nil { + return nil, nil + } + + return &ml.Receipt, nil +} + +func 
(w *WrapperV1Full) Version(ctx context.Context) (api.APIVersion, error) { + ver, err := w.FullNode.Version(ctx) + if err != nil { + return api.APIVersion{}, err + } + + ver.APIVersion = api.FullAPIVersion0 + + return ver, nil +} + +func (w *WrapperV1Full) executePrototype(ctx context.Context, p *api.MessagePrototype) (cid.Cid, error) { + sm, err := w.FullNode.MpoolPushMessage(ctx, &p.Message, nil) + if err != nil { + return cid.Undef, xerrors.Errorf("pushing message: %w", err) + } + + return sm.Cid(), nil +} +func (w *WrapperV1Full) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) { + + p, err := w.FullNode.MsigCreate(ctx, req, addrs, duration, val, src, gp) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { + + p, err := w.FullNode.MsigPropose(ctx, msig, to, amt, src, method, params) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} +func (w *WrapperV1Full) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) { + + p, err := w.FullNode.MsigApprove(ctx, msig, txID, src) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigApproveTxnHash(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { + p, err := w.FullNode.MsigApproveTxnHash(ctx, msig, txID, proposer, to, amt, src, method, params) + if err != nil { + return 
cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { + p, err := w.FullNode.MsigCancel(ctx, msig, txID, to, amt, src, method, params) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) { + + p, err := w.FullNode.MsigAddPropose(ctx, msig, src, newAdd, inc) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) { + + p, err := w.FullNode.MsigAddApprove(ctx, msig, src, txID, proposer, newAdd, inc) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) { + + p, err := w.FullNode.MsigAddCancel(ctx, msig, src, txID, newAdd, inc) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { + + p, err := w.FullNode.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + 
return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { + + p, err := w.FullNode.MsigSwapApprove(ctx, msig, src, txID, proposer, oldAdd, newAdd) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { + + p, err := w.FullNode.MsigSwapCancel(ctx, msig, src, txID, oldAdd, newAdd) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) { + + p, err := w.FullNode.MsigRemoveSigner(ctx, msig, proposer, toRemove, decrease) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +var _ FullNode = &WrapperV1Full{} diff --git a/api/v1api/latest.go b/api/v1api/latest.go new file mode 100644 index 00000000000..6f57d88262c --- /dev/null +++ b/api/v1api/latest.go @@ -0,0 +1,12 @@ +package v1api + +import ( + "github.com/filecoin-project/lotus/api" +) + +type FullNode = api.FullNode +type FullNodeStruct = api.FullNodeStruct + +func PermissionedFullAPI(a FullNode) FullNode { + return api.PermissionedFullAPI(a) +} diff --git a/api/version.go b/api/version.go new file mode 100644 index 00000000000..687f5135a89 --- /dev/null +++ b/api/version.go @@ -0,0 +1,73 @@ +package api + +import ( + "fmt" + + xerrors "golang.org/x/xerrors" +) + +type Version uint32 + +func newVer(major, minor, patch uint8) Version { + return 
Version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch)) +} + +// Ints returns (major, minor, patch) versions +func (ve Version) Ints() (uint32, uint32, uint32) { + v := uint32(ve) + return (v & majorOnlyMask) >> 16, (v & minorOnlyMask) >> 8, v & patchOnlyMask +} + +func (ve Version) String() string { + vmj, vmi, vp := ve.Ints() + return fmt.Sprintf("%d.%d.%d", vmj, vmi, vp) +} + +func (ve Version) EqMajorMinor(v2 Version) bool { + return ve&minorMask == v2&minorMask +} + +type NodeType int + +const ( + NodeUnknown NodeType = iota + + NodeFull + NodeMiner + NodeWorker +) + +var RunningNodeType NodeType + +func VersionForType(nodeType NodeType) (Version, error) { + switch nodeType { + case NodeFull: + return FullAPIVersion1, nil + case NodeMiner: + return MinerAPIVersion0, nil + case NodeWorker: + return WorkerAPIVersion0, nil + default: + return Version(0), xerrors.Errorf("unknown node type %d", nodeType) + } +} + +// semver versions of the rpc api exposed +var ( + FullAPIVersion0 = newVer(1, 3, 0) + FullAPIVersion1 = newVer(2, 1, 0) + + MinerAPIVersion0 = newVer(1, 2, 0) + WorkerAPIVersion0 = newVer(1, 1, 0) +) + +//nolint:varcheck,deadcode +const ( + majorMask = 0xff0000 + minorMask = 0xffff00 + patchMask = 0xffffff + + majorOnlyMask = 0xff0000 + minorOnlyMask = 0x00ff00 + patchOnlyMask = 0x0000ff +) diff --git a/api/wrap.go b/api/wrap.go new file mode 100644 index 00000000000..b26489a42d7 --- /dev/null +++ b/api/wrap.go @@ -0,0 +1,53 @@ +package api + +import ( + "reflect" +) + +// Wrap adapts partial api impl to another version +// proxyT is the proxy type used as input in wrapperT +// Usage: Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), eventsApi).(EventAPI) +func Wrap(proxyT, wrapperT, impl interface{}) interface{} { + proxy := reflect.New(reflect.TypeOf(proxyT).Elem()) + proxyMethods := proxy.Elem().FieldByName("Internal") + ri := reflect.ValueOf(impl) + + for i := 0; i < ri.NumMethod(); i++ { + mt := ri.Type().Method(i) + if 
proxyMethods.FieldByName(mt.Name).Kind() == reflect.Invalid { + continue + } + + fn := ri.Method(i) + of := proxyMethods.FieldByName(mt.Name) + + proxyMethods.FieldByName(mt.Name).Set(reflect.MakeFunc(of.Type(), func(args []reflect.Value) (results []reflect.Value) { + return fn.Call(args) + })) + } + + for i := 0; i < proxy.Elem().NumField(); i++ { + if proxy.Elem().Type().Field(i).Name == "Internal" { + continue + } + + subProxy := proxy.Elem().Field(i).FieldByName("Internal") + for i := 0; i < ri.NumMethod(); i++ { + mt := ri.Type().Method(i) + if subProxy.FieldByName(mt.Name).Kind() == reflect.Invalid { + continue + } + + fn := ri.Method(i) + of := subProxy.FieldByName(mt.Name) + + subProxy.FieldByName(mt.Name).Set(reflect.MakeFunc(of.Type(), func(args []reflect.Value) (results []reflect.Value) { + return fn.Call(args) + })) + } + } + + wp := reflect.New(reflect.TypeOf(wrapperT).Elem()) + wp.Elem().Field(0).Set(proxy) + return wp.Interface() +} diff --git a/blockstore/api.go b/blockstore/api.go new file mode 100644 index 00000000000..6715b476677 --- /dev/null +++ b/blockstore/api.go @@ -0,0 +1,66 @@ +package blockstore + +import ( + "context" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" +) + +type ChainIO interface { + ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainHasObj(context.Context, cid.Cid) (bool, error) +} + +type apiBlockstore struct { + api ChainIO +} + +// This blockstore is adapted in the constructor. +var _ BasicBlockstore = (*apiBlockstore)(nil) + +func NewAPIBlockstore(cio ChainIO) Blockstore { + bs := &apiBlockstore{api: cio} + return Adapt(bs) // return an adapted blockstore. 
+} + +func (a *apiBlockstore) DeleteBlock(cid.Cid) error { + return xerrors.New("not supported") +} + +func (a *apiBlockstore) Has(c cid.Cid) (bool, error) { + return a.api.ChainHasObj(context.TODO(), c) +} + +func (a *apiBlockstore) Get(c cid.Cid) (blocks.Block, error) { + bb, err := a.api.ChainReadObj(context.TODO(), c) + if err != nil { + return nil, err + } + return blocks.NewBlockWithCid(bb, c) +} + +func (a *apiBlockstore) GetSize(c cid.Cid) (int, error) { + bb, err := a.api.ChainReadObj(context.TODO(), c) + if err != nil { + return 0, err + } + return len(bb), nil +} + +func (a *apiBlockstore) Put(blocks.Block) error { + return xerrors.New("not supported") +} + +func (a *apiBlockstore) PutMany([]blocks.Block) error { + return xerrors.New("not supported") +} + +func (a *apiBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return nil, xerrors.New("not supported") +} + +func (a *apiBlockstore) HashOnRead(enabled bool) { + return +} diff --git a/blockstore/badger/blockstore.go b/blockstore/badger/blockstore.go new file mode 100644 index 00000000000..82f0e3360c3 --- /dev/null +++ b/blockstore/badger/blockstore.go @@ -0,0 +1,581 @@ +package badgerbs + +import ( + "context" + "fmt" + "io" + "runtime" + "sync" + + "github.com/dgraph-io/badger/v2" + "github.com/dgraph-io/badger/v2/options" + "github.com/multiformats/go-base32" + "go.uber.org/zap" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + logger "github.com/ipfs/go-log/v2" + pool "github.com/libp2p/go-buffer-pool" + + "github.com/filecoin-project/lotus/blockstore" +) + +var ( + // KeyPool is the buffer pool we use to compute storage keys. + KeyPool *pool.BufferPool = pool.GlobalPool +) + +var ( + // ErrBlockstoreClosed is returned from blockstore operations after + // the blockstore has been closed. + ErrBlockstoreClosed = fmt.Errorf("badger blockstore closed") + + log = logger.Logger("badgerbs") +) + +// aliases to mask badger dependencies. 
+const ( + // FileIO is equivalent to badger/options.FileIO. + FileIO = options.FileIO + // MemoryMap is equivalent to badger/options.MemoryMap. + MemoryMap = options.MemoryMap + // LoadToRAM is equivalent to badger/options.LoadToRAM. + LoadToRAM = options.LoadToRAM +) + +// Options embeds the badger options themselves, and augments them with +// blockstore-specific options. +type Options struct { + badger.Options + + // Prefix is an optional prefix to prepend to keys. Default: "". + Prefix string +} + +func DefaultOptions(path string) Options { + return Options{ + Options: badger.DefaultOptions(path), + Prefix: "", + } +} + +// badgerLogger is a local wrapper for go-log to make the interface +// compatible with badger.Logger (namely, aliasing Warnf to Warningf) +type badgerLogger struct { + *zap.SugaredLogger // skips 1 caller to get useful line info, skipping over badger.Options. + + skip2 *zap.SugaredLogger // skips 2 callers, just like above + this logger. +} + +// Warningf is required by the badger logger APIs. +func (b *badgerLogger) Warningf(format string, args ...interface{}) { + b.skip2.Warnf(format, args...) +} + +const ( + stateOpen = iota + stateClosing + stateClosed +) + +// Blockstore is a badger-backed IPLD blockstore. +type Blockstore struct { + stateLk sync.RWMutex + state int + viewers sync.WaitGroup + + DB *badger.DB + + prefixing bool + prefix []byte + prefixLen int +} + +var _ blockstore.Blockstore = (*Blockstore)(nil) +var _ blockstore.Viewer = (*Blockstore)(nil) +var _ blockstore.BlockstoreIterator = (*Blockstore)(nil) +var _ blockstore.BlockstoreGC = (*Blockstore)(nil) +var _ io.Closer = (*Blockstore)(nil) + +// Open creates a new badger-backed blockstore, with the supplied options. 
+func Open(opts Options) (*Blockstore, error) { + opts.Logger = &badgerLogger{ + SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(), + skip2: log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(), + } + + db, err := badger.Open(opts.Options) + if err != nil { + return nil, fmt.Errorf("failed to open badger blockstore: %w", err) + } + + bs := &Blockstore{DB: db} + if p := opts.Prefix; p != "" { + bs.prefixing = true + bs.prefix = []byte(p) + bs.prefixLen = len(bs.prefix) + } + + return bs, nil +} + +// Close closes the store. If the store has already been closed, this noops and +// returns an error, even if the first closure resulted in error. +func (b *Blockstore) Close() error { + b.stateLk.Lock() + if b.state != stateOpen { + b.stateLk.Unlock() + return nil + } + b.state = stateClosing + b.stateLk.Unlock() + + defer func() { + b.stateLk.Lock() + b.state = stateClosed + b.stateLk.Unlock() + }() + + // wait for all accesses to complete + b.viewers.Wait() + + return b.DB.Close() +} + +func (b *Blockstore) access() error { + b.stateLk.RLock() + defer b.stateLk.RUnlock() + + if b.state != stateOpen { + return ErrBlockstoreClosed + } + + b.viewers.Add(1) + return nil +} + +func (b *Blockstore) isOpen() bool { + b.stateLk.RLock() + defer b.stateLk.RUnlock() + + return b.state == stateOpen +} + +// CollectGarbage runs garbage collection on the value log +func (b *Blockstore) CollectGarbage() error { + if err := b.access(); err != nil { + return err + } + defer b.viewers.Done() + + // compact first to gather the necessary statistics for GC + nworkers := runtime.NumCPU() / 2 + if nworkers < 2 { + nworkers = 2 + } + + err := b.DB.Flatten(nworkers) + if err != nil { + return err + } + + for err == nil { + err = b.DB.RunValueLogGC(0.125) + } + + if err == badger.ErrNoRewrite { + // not really an error in this case, it signals the end of GC + return nil + } + + return err +} + +// View implements blockstore.Viewer, which leverages zero-copy read-only +// 
access to values. +func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error { + if err := b.access(); err != nil { + return err + } + defer b.viewers.Done() + + k, pooled := b.PooledStorageKey(cid) + if pooled { + defer KeyPool.Put(k) + } + + return b.DB.View(func(txn *badger.Txn) error { + switch item, err := txn.Get(k); err { + case nil: + return item.Value(fn) + case badger.ErrKeyNotFound: + return blockstore.ErrNotFound + default: + return fmt.Errorf("failed to view block from badger blockstore: %w", err) + } + }) +} + +// Has implements Blockstore.Has. +func (b *Blockstore) Has(cid cid.Cid) (bool, error) { + if err := b.access(); err != nil { + return false, err + } + defer b.viewers.Done() + + k, pooled := b.PooledStorageKey(cid) + if pooled { + defer KeyPool.Put(k) + } + + err := b.DB.View(func(txn *badger.Txn) error { + _, err := txn.Get(k) + return err + }) + + switch err { + case badger.ErrKeyNotFound: + return false, nil + case nil: + return true, nil + default: + return false, fmt.Errorf("failed to check if block exists in badger blockstore: %w", err) + } +} + +// Get implements Blockstore.Get. +func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) { + if !cid.Defined() { + return nil, blockstore.ErrNotFound + } + + if err := b.access(); err != nil { + return nil, err + } + defer b.viewers.Done() + + k, pooled := b.PooledStorageKey(cid) + if pooled { + defer KeyPool.Put(k) + } + + var val []byte + err := b.DB.View(func(txn *badger.Txn) error { + switch item, err := txn.Get(k); err { + case nil: + val, err = item.ValueCopy(nil) + return err + case badger.ErrKeyNotFound: + return blockstore.ErrNotFound + default: + return fmt.Errorf("failed to get block from badger blockstore: %w", err) + } + }) + if err != nil { + return nil, err + } + return blocks.NewBlockWithCid(val, cid) +} + +// GetSize implements Blockstore.GetSize. 
+func (b *Blockstore) GetSize(cid cid.Cid) (int, error) { + if err := b.access(); err != nil { + return 0, err + } + defer b.viewers.Done() + + k, pooled := b.PooledStorageKey(cid) + if pooled { + defer KeyPool.Put(k) + } + + var size int + err := b.DB.View(func(txn *badger.Txn) error { + switch item, err := txn.Get(k); err { + case nil: + size = int(item.ValueSize()) + case badger.ErrKeyNotFound: + return blockstore.ErrNotFound + default: + return fmt.Errorf("failed to get block size from badger blockstore: %w", err) + } + return nil + }) + if err != nil { + size = -1 + } + return size, err +} + +// Put implements Blockstore.Put. +func (b *Blockstore) Put(block blocks.Block) error { + if err := b.access(); err != nil { + return err + } + defer b.viewers.Done() + + k, pooled := b.PooledStorageKey(block.Cid()) + if pooled { + defer KeyPool.Put(k) + } + + err := b.DB.Update(func(txn *badger.Txn) error { + return txn.Set(k, block.RawData()) + }) + if err != nil { + err = fmt.Errorf("failed to put block in badger blockstore: %w", err) + } + return err +} + +// PutMany implements Blockstore.PutMany. +func (b *Blockstore) PutMany(blocks []blocks.Block) error { + if err := b.access(); err != nil { + return err + } + defer b.viewers.Done() + + // toReturn tracks the byte slices to return to the pool, if we're using key + // prefixing. we can't return each slice to the pool after each Set, because + // badger holds on to the slice. 
+ var toReturn [][]byte + if b.prefixing { + toReturn = make([][]byte, 0, len(blocks)) + defer func() { + for _, b := range toReturn { + KeyPool.Put(b) + } + }() + } + + batch := b.DB.NewWriteBatch() + defer batch.Cancel() + + for _, block := range blocks { + k, pooled := b.PooledStorageKey(block.Cid()) + if pooled { + toReturn = append(toReturn, k) + } + if err := batch.Set(k, block.RawData()); err != nil { + return err + } + } + + err := batch.Flush() + if err != nil { + err = fmt.Errorf("failed to put blocks in badger blockstore: %w", err) + } + return err +} + +// DeleteBlock implements Blockstore.DeleteBlock. +func (b *Blockstore) DeleteBlock(cid cid.Cid) error { + if err := b.access(); err != nil { + return err + } + defer b.viewers.Done() + + k, pooled := b.PooledStorageKey(cid) + if pooled { + defer KeyPool.Put(k) + } + + return b.DB.Update(func(txn *badger.Txn) error { + return txn.Delete(k) + }) +} + +func (b *Blockstore) DeleteMany(cids []cid.Cid) error { + if err := b.access(); err != nil { + return err + } + defer b.viewers.Done() + + // toReturn tracks the byte slices to return to the pool, if we're using key + // prefixing. we can't return each slice to the pool after each Set, because + // badger holds on to the slice. + var toReturn [][]byte + if b.prefixing { + toReturn = make([][]byte, 0, len(cids)) + defer func() { + for _, b := range toReturn { + KeyPool.Put(b) + } + }() + } + + batch := b.DB.NewWriteBatch() + defer batch.Cancel() + + for _, cid := range cids { + k, pooled := b.PooledStorageKey(cid) + if pooled { + toReturn = append(toReturn, k) + } + if err := batch.Delete(k); err != nil { + return err + } + } + + err := batch.Flush() + if err != nil { + err = fmt.Errorf("failed to delete blocks from badger blockstore: %w", err) + } + return err +} + +// AllKeysChan implements Blockstore.AllKeysChan. 
+func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + if err := b.access(); err != nil { + return nil, err + } + + txn := b.DB.NewTransaction(false) + opts := badger.IteratorOptions{PrefetchSize: 100} + if b.prefixing { + opts.Prefix = b.prefix + } + iter := txn.NewIterator(opts) + + ch := make(chan cid.Cid) + go func() { + defer b.viewers.Done() + defer close(ch) + defer iter.Close() + + // NewCidV1 makes a copy of the multihash buffer, so we can reuse it to + // contain allocs. + var buf []byte + for iter.Rewind(); iter.Valid(); iter.Next() { + if ctx.Err() != nil { + return // context has fired. + } + if !b.isOpen() { + // open iterators will run even after the database is closed... + return // closing, yield. + } + k := iter.Item().Key() + if b.prefixing { + k = k[b.prefixLen:] + } + + if reqlen := base32.RawStdEncoding.DecodedLen(len(k)); len(buf) < reqlen { + buf = make([]byte, reqlen) + } + if n, err := base32.RawStdEncoding.Decode(buf, k); err == nil { + select { + case ch <- cid.NewCidV1(cid.Raw, buf[:n]): + case <-ctx.Done(): + return + } + } else { + log.Warnf("failed to decode key %s in badger AllKeysChan; err: %s", k, err) + } + } + }() + + return ch, nil +} + +// Implementation of BlockstoreIterator interface +func (b *Blockstore) ForEachKey(f func(cid.Cid) error) error { + if err := b.access(); err != nil { + return err + } + defer b.viewers.Done() + + txn := b.DB.NewTransaction(false) + defer txn.Discard() + + opts := badger.IteratorOptions{PrefetchSize: 100} + if b.prefixing { + opts.Prefix = b.prefix + } + + iter := txn.NewIterator(opts) + defer iter.Close() + + var buf []byte + for iter.Rewind(); iter.Valid(); iter.Next() { + if !b.isOpen() { + return ErrBlockstoreClosed + } + + k := iter.Item().Key() + if b.prefixing { + k = k[b.prefixLen:] + } + + klen := base32.RawStdEncoding.DecodedLen(len(k)) + if klen > len(buf) { + buf = make([]byte, klen) + } + + n, err := base32.RawStdEncoding.Decode(buf, k) + if err != nil { 
+ return err + } + + c := cid.NewCidV1(cid.Raw, buf[:n]) + + err = f(c) + if err != nil { + return err + } + } + + return nil +} + +// HashOnRead implements Blockstore.HashOnRead. It is not supported by this +// blockstore. +func (b *Blockstore) HashOnRead(_ bool) { + log.Warnf("called HashOnRead on badger blockstore; function not supported; ignoring") +} + +// PooledStorageKey returns the storage key under which this CID is stored. +// +// The key is: prefix + base32_no_padding(cid.Hash) +// +// This method may return pooled byte slice, which MUST be returned to the +// KeyPool if pooled=true, or a leak will occur. +func (b *Blockstore) PooledStorageKey(cid cid.Cid) (key []byte, pooled bool) { + h := cid.Hash() + size := base32.RawStdEncoding.EncodedLen(len(h)) + if !b.prefixing { // optimize for branch prediction. + k := pool.Get(size) + base32.RawStdEncoding.Encode(k, h) + return k, true // slicing upto length unnecessary; the pool has already done this. + } + + size += b.prefixLen + k := pool.Get(size) + copy(k, b.prefix) + base32.RawStdEncoding.Encode(k[b.prefixLen:], h) + return k, true // slicing upto length unnecessary; the pool has already done this. +} + +// Storage acts like PooledStorageKey, but attempts to write the storage key +// into the provided slice. If the slice capacity is insufficient, it allocates +// a new byte slice with enough capacity to accommodate the result. This method +// returns the resulting slice. +func (b *Blockstore) StorageKey(dst []byte, cid cid.Cid) []byte { + h := cid.Hash() + reqsize := base32.RawStdEncoding.EncodedLen(len(h)) + b.prefixLen + if reqsize > cap(dst) { + // passed slice is smaller than required size; create new. + dst = make([]byte, reqsize) + } else if reqsize > len(dst) { + // passed slice has enough capacity, but its length is + // restricted, expand. + dst = dst[:cap(dst)] + } + + if b.prefixing { // optimize for branch prediction. 
+ copy(dst, b.prefix) + base32.RawStdEncoding.Encode(dst[b.prefixLen:], h) + } else { + base32.RawStdEncoding.Encode(dst, h) + } + return dst[:reqsize] +} diff --git a/blockstore/badger/blockstore_test.go b/blockstore/badger/blockstore_test.go new file mode 100644 index 00000000000..3221458d28f --- /dev/null +++ b/blockstore/badger/blockstore_test.go @@ -0,0 +1,91 @@ +package badgerbs + +import ( + "io/ioutil" + "os" + "testing" + + blocks "github.com/ipfs/go-block-format" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/blockstore" +) + +func TestBadgerBlockstore(t *testing.T) { + (&Suite{ + NewBlockstore: newBlockstore(DefaultOptions), + OpenBlockstore: openBlockstore(DefaultOptions), + }).RunTests(t, "non_prefixed") + + prefixed := func(path string) Options { + opts := DefaultOptions(path) + opts.Prefix = "/prefixed/" + return opts + } + + (&Suite{ + NewBlockstore: newBlockstore(prefixed), + OpenBlockstore: openBlockstore(prefixed), + }).RunTests(t, "prefixed") +} + +func TestStorageKey(t *testing.T) { + bs, _ := newBlockstore(DefaultOptions)(t) + bbs := bs.(*Blockstore) + defer bbs.Close() //nolint:errcheck + + cid1 := blocks.NewBlock([]byte("some data")).Cid() + cid2 := blocks.NewBlock([]byte("more data")).Cid() + cid3 := blocks.NewBlock([]byte("a little more data")).Cid() + require.NotEqual(t, cid1, cid2) // sanity check + require.NotEqual(t, cid2, cid3) // sanity check + + // nil slice; let StorageKey allocate for us. + k1 := bbs.StorageKey(nil, cid1) + require.Len(t, k1, 55) + require.True(t, cap(k1) == len(k1)) + + // k1's backing array is reused. + k2 := bbs.StorageKey(k1, cid2) + require.Len(t, k2, 55) + require.True(t, cap(k2) == len(k1)) + + // bring k2 to len=0, and verify that its backing array gets reused + // (i.e. k1 and k2 are overwritten) + k3 := bbs.StorageKey(k2[:0], cid3) + require.Len(t, k3, 55) + require.True(t, cap(k3) == len(k3)) + + // backing array of k1 and k2 has been modified, i.e. memory is shared. 
+ require.Equal(t, k3, k1) + require.Equal(t, k3, k2) +} + +func newBlockstore(optsSupplier func(path string) Options) func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) { + return func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) { + tb.Helper() + + path, err := ioutil.TempDir("", "") + if err != nil { + tb.Fatal(err) + } + + db, err := Open(optsSupplier(path)) + if err != nil { + tb.Fatal(err) + } + + tb.Cleanup(func() { + _ = os.RemoveAll(path) + }) + + return db, path + } +} + +func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error) { + return func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error) { + tb.Helper() + return Open(optsSupplier(path)) + } +} diff --git a/blockstore/badger/blockstore_test_suite.go b/blockstore/badger/blockstore_test_suite.go new file mode 100644 index 00000000000..93be82ac87e --- /dev/null +++ b/blockstore/badger/blockstore_test_suite.go @@ -0,0 +1,313 @@ +package badgerbs + +import ( + "context" + "fmt" + "io" + "reflect" + "strings" + "testing" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + u "github.com/ipfs/go-ipfs-util" + + "github.com/filecoin-project/lotus/blockstore" + + "github.com/stretchr/testify/require" +) + +// TODO: move this to go-ipfs-blockstore. 
+type Suite struct { + NewBlockstore func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) + OpenBlockstore func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error) +} + +func (s *Suite) RunTests(t *testing.T, prefix string) { + v := reflect.TypeOf(s) + f := func(t *testing.T) { + for i := 0; i < v.NumMethod(); i++ { + if m := v.Method(i); strings.HasPrefix(m.Name, "Test") { + f := m.Func.Interface().(func(*Suite, *testing.T)) + t.Run(m.Name, func(t *testing.T) { + f(s, t) + }) + } + } + } + + if prefix == "" { + f(t) + } else { + t.Run(prefix, f) + } +} + +func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + c := cid.NewCidV0(u.Hash([]byte("stuff"))) + bl, err := bs.Get(c) + require.Nil(t, bl) + require.Equal(t, blockstore.ErrNotFound, err) +} + +func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + _, err := bs.Get(cid.Undef) + require.Equal(t, blockstore.ErrNotFound, err) +} + +func (s *Suite) TestPutThenGetBlock(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + orig := blocks.NewBlock([]byte("some data")) + + err := bs.Put(orig) + require.NoError(t, err) + + fetched, err := bs.Get(orig.Cid()) + require.NoError(t, err) + require.Equal(t, orig.RawData(), fetched.RawData()) +} + +func (s *Suite) TestHas(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + orig := blocks.NewBlock([]byte("some data")) + + err := bs.Put(orig) + require.NoError(t, err) + + ok, err := bs.Has(orig.Cid()) + require.NoError(t, err) + require.True(t, ok) + + ok, err = bs.Has(blocks.NewBlock([]byte("another thing")).Cid()) + 
require.NoError(t, err) + require.False(t, ok) +} + +func (s *Suite) TestCidv0v1(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + orig := blocks.NewBlock([]byte("some data")) + + err := bs.Put(orig) + require.NoError(t, err) + + fetched, err := bs.Get(cid.NewCidV1(cid.DagProtobuf, orig.Cid().Hash())) + require.NoError(t, err) + require.Equal(t, orig.RawData(), fetched.RawData()) +} + +func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + block := blocks.NewBlock([]byte("some data")) + missingBlock := blocks.NewBlock([]byte("missingBlock")) + emptyBlock := blocks.NewBlock([]byte{}) + + err := bs.Put(block) + require.NoError(t, err) + + blockSize, err := bs.GetSize(block.Cid()) + require.NoError(t, err) + require.Len(t, block.RawData(), blockSize) + + err = bs.Put(emptyBlock) + require.NoError(t, err) + + emptySize, err := bs.GetSize(emptyBlock.Cid()) + require.NoError(t, err) + require.Zero(t, emptySize) + + missingSize, err := bs.GetSize(missingBlock.Cid()) + require.Equal(t, blockstore.ErrNotFound, err) + require.Equal(t, -1, missingSize) +} + +func (s *Suite) TestAllKeysSimple(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + keys := insertBlocks(t, bs, 100) + + ctx := context.Background() + ch, err := bs.AllKeysChan(ctx) + require.NoError(t, err) + actual := collect(ch) + + require.ElementsMatch(t, keys, actual) +} + +func (s *Suite) TestAllKeysRespectsContext(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + _ = insertBlocks(t, bs, 100) + + ctx, cancel := context.WithCancel(context.Background()) + ch, err := bs.AllKeysChan(ctx) + require.NoError(t, err) + + // consume 2, 
then cancel context. + v, ok := <-ch + require.NotEqual(t, cid.Undef, v) + require.True(t, ok) + + v, ok = <-ch + require.NotEqual(t, cid.Undef, v) + require.True(t, ok) + + cancel() + // pull one value out to avoid race + _, _ = <-ch + + v, ok = <-ch + require.Equal(t, cid.Undef, v) + require.False(t, ok) +} + +func (s *Suite) TestDoubleClose(t *testing.T) { + bs, _ := s.NewBlockstore(t) + c, ok := bs.(io.Closer) + if !ok { + t.SkipNow() + } + require.NoError(t, c.Close()) + require.NoError(t, c.Close()) +} + +func (s *Suite) TestReopenPutGet(t *testing.T) { + bs, path := s.NewBlockstore(t) + c, ok := bs.(io.Closer) + if !ok { + t.SkipNow() + } + + orig := blocks.NewBlock([]byte("some data")) + err := bs.Put(orig) + require.NoError(t, err) + + err = c.Close() + require.NoError(t, err) + + bs, err = s.OpenBlockstore(t, path) + require.NoError(t, err) + + fetched, err := bs.Get(orig.Cid()) + require.NoError(t, err) + require.Equal(t, orig.RawData(), fetched.RawData()) + + err = bs.(io.Closer).Close() + require.NoError(t, err) +} + +func (s *Suite) TestPutMany(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + blks := []blocks.Block{ + blocks.NewBlock([]byte("foo1")), + blocks.NewBlock([]byte("foo2")), + blocks.NewBlock([]byte("foo3")), + } + err := bs.PutMany(blks) + require.NoError(t, err) + + for _, blk := range blks { + fetched, err := bs.Get(blk.Cid()) + require.NoError(t, err) + require.Equal(t, blk.RawData(), fetched.RawData()) + + ok, err := bs.Has(blk.Cid()) + require.NoError(t, err) + require.True(t, ok) + } + + ch, err := bs.AllKeysChan(context.Background()) + require.NoError(t, err) + + cids := collect(ch) + require.Len(t, cids, 3) +} + +func (s *Suite) TestDelete(t *testing.T) { + bs, _ := s.NewBlockstore(t) + if c, ok := bs.(io.Closer); ok { + defer func() { require.NoError(t, c.Close()) }() + } + + blks := []blocks.Block{ + blocks.NewBlock([]byte("foo1")), + 
blocks.NewBlock([]byte("foo2")), + blocks.NewBlock([]byte("foo3")), + } + err := bs.PutMany(blks) + require.NoError(t, err) + + err = bs.DeleteBlock(blks[1].Cid()) + require.NoError(t, err) + + ch, err := bs.AllKeysChan(context.Background()) + require.NoError(t, err) + + cids := collect(ch) + require.Len(t, cids, 2) + require.ElementsMatch(t, cids, []cid.Cid{ + cid.NewCidV1(cid.Raw, blks[0].Cid().Hash()), + cid.NewCidV1(cid.Raw, blks[2].Cid().Hash()), + }) + + has, err := bs.Has(blks[1].Cid()) + require.NoError(t, err) + require.False(t, has) + +} + +func insertBlocks(t *testing.T, bs blockstore.BasicBlockstore, count int) []cid.Cid { + keys := make([]cid.Cid, count) + for i := 0; i < count; i++ { + block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i))) + err := bs.Put(block) + require.NoError(t, err) + // NewBlock assigns a CIDv0; we convert it to CIDv1 because that's what + // the store returns. + keys[i] = cid.NewCidV1(cid.Raw, block.Multihash()) + } + return keys +} + +func collect(ch <-chan cid.Cid) []cid.Cid { + var keys []cid.Cid + for k := range ch { + keys = append(keys, k) + } + return keys +} diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go new file mode 100644 index 00000000000..97f9f5f7b58 --- /dev/null +++ b/blockstore/blockstore.go @@ -0,0 +1,105 @@ +package blockstore + +import ( + cid "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + + blockstore "github.com/ipfs/go-ipfs-blockstore" +) + +var log = logging.Logger("blockstore") + +var ErrNotFound = blockstore.ErrNotFound + +// Blockstore is the blockstore interface used by Lotus. It is the union +// of the basic go-ipfs blockstore, with other capabilities required by Lotus, +// e.g. View or Sync. +type Blockstore interface { + blockstore.Blockstore + blockstore.Viewer + BatchDeleter +} + +// BasicBlockstore is an alias to the original IPFS Blockstore. 
+type BasicBlockstore = blockstore.Blockstore + +type Viewer = blockstore.Viewer + +type BatchDeleter interface { + DeleteMany(cids []cid.Cid) error +} + +// BlockstoreIterator is a trait for efficient iteration +type BlockstoreIterator interface { + ForEachKey(func(cid.Cid) error) error +} + +// BlockstoreGC is a trait for blockstores that support online garbage collection +type BlockstoreGC interface { + CollectGarbage() error +} + +// WrapIDStore wraps the underlying blockstore in an "identity" blockstore. +// The ID store filters out all puts for blocks with CIDs using the "identity" +// hash function. It also extracts inlined blocks from CIDs using the identity +// hash function and returns them on get/has, ignoring the contents of the +// blockstore. +func WrapIDStore(bstore blockstore.Blockstore) Blockstore { + if is, ok := bstore.(*idstore); ok { + // already wrapped + return is + } + + if bs, ok := bstore.(Blockstore); ok { + // we need to wrap our own because we don't want to neuter the DeleteMany method + // the underlying blockstore has implemented an (efficient) DeleteMany + return NewIDStore(bs) + } + + // The underlying blockstore does not implement DeleteMany, so we need to shim it. + // This is less efficient as it'll iterate and perform single deletes. + return NewIDStore(Adapt(bstore)) +} + +// FromDatastore creates a new blockstore backed by the given datastore. 
+func FromDatastore(dstore ds.Batching) Blockstore { + return WrapIDStore(blockstore.NewBlockstore(dstore)) +} + +type adaptedBlockstore struct { + blockstore.Blockstore +} + +var _ Blockstore = (*adaptedBlockstore)(nil) + +func (a *adaptedBlockstore) View(cid cid.Cid, callback func([]byte) error) error { + blk, err := a.Get(cid) + if err != nil { + return err + } + return callback(blk.RawData()) +} + +func (a *adaptedBlockstore) DeleteMany(cids []cid.Cid) error { + for _, cid := range cids { + err := a.DeleteBlock(cid) + if err != nil { + return err + } + } + + return nil +} + +// Adapt adapts a standard blockstore to a Lotus blockstore by +// enriching it with the extra methods that Lotus requires (e.g. View, Sync). +// +// View proxies over to Get and calls the callback with the value supplied by Get. +// Sync noops. +func Adapt(bs blockstore.Blockstore) Blockstore { + if ret, ok := bs.(Blockstore); ok { + return ret + } + return &adaptedBlockstore{bs} +} diff --git a/blockstore/buffered.go b/blockstore/buffered.go new file mode 100644 index 00000000000..5d3d38f78f9 --- /dev/null +++ b/blockstore/buffered.go @@ -0,0 +1,174 @@ +package blockstore + +import ( + "context" + "os" + + block "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +// buflog is a logger for the buffered blockstore. It is subscoped from the +// blockstore logger. 
+var buflog = log.Named("buf") + +type BufferedBlockstore struct { + read Blockstore + write Blockstore +} + +func NewBuffered(base Blockstore) *BufferedBlockstore { + var buf Blockstore + if os.Getenv("LOTUS_DISABLE_VM_BUF") == "iknowitsabadidea" { + buflog.Warn("VM BLOCKSTORE BUFFERING IS DISABLED") + buf = base + } else { + buf = NewMemory() + } + + bs := &BufferedBlockstore{ + read: base, + write: buf, + } + return bs +} + +func NewTieredBstore(r Blockstore, w Blockstore) *BufferedBlockstore { + return &BufferedBlockstore{ + read: r, + write: w, + } +} + +var ( + _ Blockstore = (*BufferedBlockstore)(nil) + _ Viewer = (*BufferedBlockstore)(nil) +) + +func (bs *BufferedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + a, err := bs.read.AllKeysChan(ctx) + if err != nil { + return nil, err + } + + b, err := bs.write.AllKeysChan(ctx) + if err != nil { + return nil, err + } + + out := make(chan cid.Cid) + go func() { + defer close(out) + for a != nil || b != nil { + select { + case val, ok := <-a: + if !ok { + a = nil + } else { + select { + case out <- val: + case <-ctx.Done(): + return + } + } + case val, ok := <-b: + if !ok { + b = nil + } else { + select { + case out <- val: + case <-ctx.Done(): + return + } + } + } + } + }() + + return out, nil +} + +func (bs *BufferedBlockstore) DeleteBlock(c cid.Cid) error { + if err := bs.read.DeleteBlock(c); err != nil { + return err + } + + return bs.write.DeleteBlock(c) +} + +func (bs *BufferedBlockstore) DeleteMany(cids []cid.Cid) error { + if err := bs.read.DeleteMany(cids); err != nil { + return err + } + + return bs.write.DeleteMany(cids) +} + +func (bs *BufferedBlockstore) View(c cid.Cid, callback func([]byte) error) error { + // both stores are viewable. + if err := bs.write.View(c, callback); err == ErrNotFound { + // not found in write blockstore; fall through. + } else { + return err // propagate errors, or nil, i.e. found. 
+ } + return bs.read.View(c, callback) +} + +func (bs *BufferedBlockstore) Get(c cid.Cid) (block.Block, error) { + if out, err := bs.write.Get(c); err != nil { + if err != ErrNotFound { + return nil, err + } + } else { + return out, nil + } + + return bs.read.Get(c) +} + +func (bs *BufferedBlockstore) GetSize(c cid.Cid) (int, error) { + s, err := bs.read.GetSize(c) + if err == ErrNotFound || s == 0 { + return bs.write.GetSize(c) + } + + return s, err +} + +func (bs *BufferedBlockstore) Put(blk block.Block) error { + has, err := bs.read.Has(blk.Cid()) // TODO: consider dropping this check + if err != nil { + return err + } + + if has { + return nil + } + + return bs.write.Put(blk) +} + +func (bs *BufferedBlockstore) Has(c cid.Cid) (bool, error) { + has, err := bs.write.Has(c) + if err != nil { + return false, err + } + if has { + return true, nil + } + + return bs.read.Has(c) +} + +func (bs *BufferedBlockstore) HashOnRead(hor bool) { + bs.read.HashOnRead(hor) + bs.write.HashOnRead(hor) +} + +func (bs *BufferedBlockstore) PutMany(blks []block.Block) error { + return bs.write.PutMany(blks) +} + +func (bs *BufferedBlockstore) Read() Blockstore { + return bs.read +} diff --git a/blockstore/discard.go b/blockstore/discard.go new file mode 100644 index 00000000000..afd0651bc07 --- /dev/null +++ b/blockstore/discard.go @@ -0,0 +1,66 @@ +package blockstore + +import ( + "context" + "io" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" +) + +var _ Blockstore = (*discardstore)(nil) + +type discardstore struct { + bs Blockstore +} + +func NewDiscardStore(bs Blockstore) Blockstore { + return &discardstore{bs: bs} +} + +func (b *discardstore) Has(cid cid.Cid) (bool, error) { + return b.bs.Has(cid) +} + +func (b *discardstore) HashOnRead(hor bool) { + b.bs.HashOnRead(hor) +} + +func (b *discardstore) Get(cid cid.Cid) (blocks.Block, error) { + return b.bs.Get(cid) +} + +func (b *discardstore) GetSize(cid cid.Cid) (int, error) { + return b.bs.GetSize(cid) 
+} + +func (b *discardstore) View(cid cid.Cid, f func([]byte) error) error { + return b.bs.View(cid, f) +} + +func (b *discardstore) Put(blk blocks.Block) error { + return nil +} + +func (b *discardstore) PutMany(blks []blocks.Block) error { + return nil +} + +func (b *discardstore) DeleteBlock(cid cid.Cid) error { + return nil +} + +func (b *discardstore) DeleteMany(cids []cid.Cid) error { + return nil +} + +func (b *discardstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return b.bs.AllKeysChan(ctx) +} + +func (b *discardstore) Close() error { + if c, ok := b.bs.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/blockstore/doc.go b/blockstore/doc.go new file mode 100644 index 00000000000..fea1126f5ca --- /dev/null +++ b/blockstore/doc.go @@ -0,0 +1,9 @@ +// Package blockstore and subpackages contain most of the blockstore +// implementations used by Lotus. +// +// Blockstores not ultimately constructed out of the building blocks in this +// package may not work properly. +// +// This package re-exports parts of the go-ipfs-blockstore package such that +// no other package needs to import it directly, for ergonomics and traceability. +package blockstore diff --git a/blockstore/fallback.go b/blockstore/fallback.go new file mode 100644 index 00000000000..5f220f941bb --- /dev/null +++ b/blockstore/fallback.go @@ -0,0 +1,106 @@ +package blockstore + +import ( + "context" + "sync" + "time" + + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +// UnwrapFallbackStore takes a blockstore, and returns the underlying blockstore +// if it was a FallbackStore. Otherwise, it just returns the supplied store +// unmodified. 
+func UnwrapFallbackStore(bs Blockstore) (Blockstore, bool) { + if fbs, ok := bs.(*FallbackStore); ok { + return fbs.Blockstore, true + } + return bs, false +} + +// FallbackStore is a read-through store that queries another (potentially +// remote) source if the block is not found locally. If the block is found +// during the fallback, it stores it in the local store. +type FallbackStore struct { + Blockstore + + lk sync.RWMutex + // missFn is the function that will be invoked on a local miss to pull the + // block from elsewhere. + missFn func(context.Context, cid.Cid) (blocks.Block, error) +} + +var _ Blockstore = (*FallbackStore)(nil) + +func (fbs *FallbackStore) SetFallback(missFn func(context.Context, cid.Cid) (blocks.Block, error)) { + fbs.lk.Lock() + defer fbs.lk.Unlock() + + fbs.missFn = missFn +} + +func (fbs *FallbackStore) getFallback(c cid.Cid) (blocks.Block, error) { + log.Warnf("fallbackstore: block not found locally, fetching from the network; cid: %s", c) + fbs.lk.RLock() + defer fbs.lk.RUnlock() + + if fbs.missFn == nil { + // FallbackStore wasn't configured yet (chainstore/bitswap aren't up yet) + // Wait for a bit and retry + fbs.lk.RUnlock() + time.Sleep(5 * time.Second) + fbs.lk.RLock() + + if fbs.missFn == nil { + log.Errorw("fallbackstore: missFn not configured yet") + return nil, ErrNotFound + } + } + + ctx, cancel := context.WithTimeout(context.TODO(), 120*time.Second) + defer cancel() + + b, err := fbs.missFn(ctx, c) + if err != nil { + return nil, err + } + + // chain bitswap puts blocks in temp blockstore which is cleaned up + // every few min (to drop any messages we fetched but don't want) + // in this case we want to keep this block around + if err := fbs.Put(b); err != nil { + return nil, xerrors.Errorf("persisting fallback-fetched block: %w", err) + } + return b, nil +} + +func (fbs *FallbackStore) Get(c cid.Cid) (blocks.Block, error) { + b, err := fbs.Blockstore.Get(c) + switch err { + case nil: + return b, nil + case ErrNotFound: 
+ return fbs.getFallback(c) + default: + return b, err + } +} + +func (fbs *FallbackStore) GetSize(c cid.Cid) (int, error) { + sz, err := fbs.Blockstore.GetSize(c) + switch err { + case nil: + return sz, nil + case ErrNotFound: + b, err := fbs.getFallback(c) + if err != nil { + return 0, err + } + return len(b.RawData()), nil + default: + return sz, err + } +} diff --git a/blockstore/idstore.go b/blockstore/idstore.go new file mode 100644 index 00000000000..e6148ff04e2 --- /dev/null +++ b/blockstore/idstore.go @@ -0,0 +1,174 @@ +package blockstore + +import ( + "context" + "io" + + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +var _ Blockstore = (*idstore)(nil) + +type idstore struct { + bs Blockstore +} + +func NewIDStore(bs Blockstore) Blockstore { + return &idstore{bs: bs} +} + +func decodeCid(cid cid.Cid) (inline bool, data []byte, err error) { + if cid.Prefix().MhType != mh.IDENTITY { + return false, nil, nil + } + + dmh, err := mh.Decode(cid.Hash()) + if err != nil { + return false, nil, err + } + + if dmh.Code == mh.IDENTITY { + return true, dmh.Digest, nil + } + + return false, nil, err +} + +func (b *idstore) Has(cid cid.Cid) (bool, error) { + inline, _, err := decodeCid(cid) + if err != nil { + return false, xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + return true, nil + } + + return b.bs.Has(cid) +} + +func (b *idstore) Get(cid cid.Cid) (blocks.Block, error) { + inline, data, err := decodeCid(cid) + if err != nil { + return nil, xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + return blocks.NewBlockWithCid(data, cid) + } + + return b.bs.Get(cid) +} + +func (b *idstore) GetSize(cid cid.Cid) (int, error) { + inline, data, err := decodeCid(cid) + if err != nil { + return 0, xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + return len(data), err + } + + return b.bs.GetSize(cid) +} + +func (b 
*idstore) View(cid cid.Cid, cb func([]byte) error) error { + inline, data, err := decodeCid(cid) + if err != nil { + return xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + return cb(data) + } + + return b.bs.View(cid, cb) +} + +func (b *idstore) Put(blk blocks.Block) error { + inline, _, err := decodeCid(blk.Cid()) + if err != nil { + return xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + return nil + } + + return b.bs.Put(blk) +} + +func (b *idstore) PutMany(blks []blocks.Block) error { + toPut := make([]blocks.Block, 0, len(blks)) + for _, blk := range blks { + inline, _, err := decodeCid(blk.Cid()) + if err != nil { + return xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + continue + } + toPut = append(toPut, blk) + } + + if len(toPut) > 0 { + return b.bs.PutMany(toPut) + } + + return nil +} + +func (b *idstore) DeleteBlock(cid cid.Cid) error { + inline, _, err := decodeCid(cid) + if err != nil { + return xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + return nil + } + + return b.bs.DeleteBlock(cid) +} + +func (b *idstore) DeleteMany(cids []cid.Cid) error { + toDelete := make([]cid.Cid, 0, len(cids)) + for _, cid := range cids { + inline, _, err := decodeCid(cid) + if err != nil { + return xerrors.Errorf("error decoding Cid: %w", err) + } + + if inline { + continue + } + toDelete = append(toDelete, cid) + } + + if len(toDelete) > 0 { + return b.bs.DeleteMany(toDelete) + } + + return nil +} + +func (b *idstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return b.bs.AllKeysChan(ctx) +} + +func (b *idstore) HashOnRead(enabled bool) { + b.bs.HashOnRead(enabled) +} + +func (b *idstore) Close() error { + if c, ok := b.bs.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/lib/ipfsbstore/ipfsbstore.go b/blockstore/ipfs.go similarity index 57% rename from lib/ipfsbstore/ipfsbstore.go rename to blockstore/ipfs.go index 748afee5187..51b4bd95123 100644 --- 
a/lib/ipfsbstore/ipfsbstore.go +++ b/blockstore/ipfs.go @@ -1,4 +1,4 @@ -package ipfsbstore +package blockstore import ( "bytes" @@ -16,53 +16,75 @@ import ( iface "github.com/ipfs/interface-go-ipfs-core" "github.com/ipfs/interface-go-ipfs-core/options" "github.com/ipfs/interface-go-ipfs-core/path" - - "github.com/filecoin-project/lotus/lib/blockstore" ) -type IpfsBstore struct { - ctx context.Context - api iface.CoreAPI +type IPFSBlockstore struct { + ctx context.Context + api, offlineAPI iface.CoreAPI } -func NewIpfsBstore(ctx context.Context) (*IpfsBstore, error) { +var _ BasicBlockstore = (*IPFSBlockstore)(nil) + +func NewLocalIPFSBlockstore(ctx context.Context, onlineMode bool) (Blockstore, error) { localApi, err := httpapi.NewLocalApi() if err != nil { return nil, xerrors.Errorf("getting local ipfs api: %w", err) } - api, err := localApi.WithOptions(options.Api.Offline(true)) + api, err := localApi.WithOptions(options.Api.Offline(!onlineMode)) if err != nil { return nil, xerrors.Errorf("setting offline mode: %s", err) } - return &IpfsBstore{ - ctx: ctx, - api: api, - }, nil + offlineAPI := api + if onlineMode { + offlineAPI, err = localApi.WithOptions(options.Api.Offline(true)) + if err != nil { + return nil, xerrors.Errorf("applying offline mode: %s", err) + } + } + + bs := &IPFSBlockstore{ + ctx: ctx, + api: api, + offlineAPI: offlineAPI, + } + + return Adapt(bs), nil } -func NewRemoteIpfsBstore(ctx context.Context, maddr multiaddr.Multiaddr) (*IpfsBstore, error) { +func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineMode bool) (Blockstore, error) { httpApi, err := httpapi.NewApi(maddr) if err != nil { return nil, xerrors.Errorf("setting remote ipfs api: %w", err) } - api, err := httpApi.WithOptions(options.Api.Offline(true)) + api, err := httpApi.WithOptions(options.Api.Offline(!onlineMode)) if err != nil { return nil, xerrors.Errorf("applying offline mode: %s", err) } - return &IpfsBstore{ - ctx: ctx, - api: api, - }, nil + 
offlineAPI := api + if onlineMode { + offlineAPI, err = httpApi.WithOptions(options.Api.Offline(true)) + if err != nil { + return nil, xerrors.Errorf("applying offline mode: %s", err) + } + } + + bs := &IPFSBlockstore{ + ctx: ctx, + api: api, + offlineAPI: offlineAPI, + } + + return Adapt(bs), nil } -func (i *IpfsBstore) DeleteBlock(cid cid.Cid) error { +func (i *IPFSBlockstore) DeleteBlock(cid cid.Cid) error { return xerrors.Errorf("not supported") } -func (i *IpfsBstore) Has(cid cid.Cid) (bool, error) { - _, err := i.api.Block().Stat(i.ctx, path.IpldPath(cid)) +func (i *IPFSBlockstore) Has(cid cid.Cid) (bool, error) { + _, err := i.offlineAPI.Block().Stat(i.ctx, path.IpldPath(cid)) if err != nil { // The underlying client is running in Offline mode. // Stat() will fail with an err if the block isn't in the @@ -77,7 +99,7 @@ func (i *IpfsBstore) Has(cid cid.Cid) (bool, error) { return true, nil } -func (i *IpfsBstore) Get(cid cid.Cid) (blocks.Block, error) { +func (i *IPFSBlockstore) Get(cid cid.Cid) (blocks.Block, error) { rd, err := i.api.Block().Get(i.ctx, path.IpldPath(cid)) if err != nil { return nil, xerrors.Errorf("getting ipfs block: %w", err) @@ -91,7 +113,7 @@ func (i *IpfsBstore) Get(cid cid.Cid) (blocks.Block, error) { return blocks.NewBlockWithCid(data, cid) } -func (i *IpfsBstore) GetSize(cid cid.Cid) (int, error) { +func (i *IPFSBlockstore) GetSize(cid cid.Cid) (int, error) { st, err := i.api.Block().Stat(i.ctx, path.IpldPath(cid)) if err != nil { return 0, xerrors.Errorf("getting ipfs block: %w", err) @@ -100,7 +122,7 @@ func (i *IpfsBstore) GetSize(cid cid.Cid) (int, error) { return st.Size(), nil } -func (i *IpfsBstore) Put(block blocks.Block) error { +func (i *IPFSBlockstore) Put(block blocks.Block) error { mhd, err := multihash.Decode(block.Cid().Hash()) if err != nil { return err @@ -112,7 +134,7 @@ func (i *IpfsBstore) Put(block blocks.Block) error { return err } -func (i *IpfsBstore) PutMany(blocks []blocks.Block) error { +func (i 
*IPFSBlockstore) PutMany(blocks []blocks.Block) error { // TODO: could be done in parallel for _, block := range blocks { @@ -124,12 +146,10 @@ func (i *IpfsBstore) PutMany(blocks []blocks.Block) error { return nil } -func (i *IpfsBstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { +func (i *IPFSBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { return nil, xerrors.Errorf("not supported") } -func (i *IpfsBstore) HashOnRead(enabled bool) { +func (i *IPFSBlockstore) HashOnRead(enabled bool) { return // TODO: We could technically support this, but.. } - -var _ blockstore.Blockstore = &IpfsBstore{} diff --git a/lib/blockstore/memstore.go b/blockstore/mem.go similarity index 56% rename from lib/blockstore/memstore.go rename to blockstore/mem.go index 9745d6f0395..8ea69d46a49 100644 --- a/lib/blockstore/memstore.go +++ b/blockstore/mem.go @@ -5,38 +5,60 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" ) -type MemStore map[cid.Cid]blocks.Block +// NewMemory returns a temporary memory-backed blockstore. +func NewMemory() MemBlockstore { + return make(MemBlockstore) +} + +// MemBlockstore is a terminal blockstore that keeps blocks in memory. 
+type MemBlockstore map[cid.Cid]blocks.Block -func (m MemStore) DeleteBlock(k cid.Cid) error { +func (m MemBlockstore) DeleteBlock(k cid.Cid) error { delete(m, k) return nil } -func (m MemStore) Has(k cid.Cid) (bool, error) { + +func (m MemBlockstore) DeleteMany(ks []cid.Cid) error { + for _, k := range ks { + delete(m, k) + } + return nil +} + +func (m MemBlockstore) Has(k cid.Cid) (bool, error) { _, ok := m[k] return ok, nil } -func (m MemStore) Get(k cid.Cid) (blocks.Block, error) { + +func (m MemBlockstore) View(k cid.Cid, callback func([]byte) error) error { + b, ok := m[k] + if !ok { + return ErrNotFound + } + return callback(b.RawData()) +} + +func (m MemBlockstore) Get(k cid.Cid) (blocks.Block, error) { b, ok := m[k] if !ok { - return nil, blockstore.ErrNotFound + return nil, ErrNotFound } return b, nil } // GetSize returns the CIDs mapped BlockSize -func (m MemStore) GetSize(k cid.Cid) (int, error) { +func (m MemBlockstore) GetSize(k cid.Cid) (int, error) { b, ok := m[k] if !ok { - return 0, blockstore.ErrNotFound + return 0, ErrNotFound } return len(b.RawData()), nil } // Put puts a given block to the underlying datastore -func (m MemStore) Put(b blocks.Block) error { +func (m MemBlockstore) Put(b blocks.Block) error { // Convert to a basic block for safety, but try to reuse the existing // block if it's already a basic block. k := b.Cid() @@ -54,7 +76,7 @@ func (m MemStore) Put(b blocks.Block) error { // PutMany puts a slice of blocks at the same time using batching // capabilities of the underlying datastore whenever possible. -func (m MemStore) PutMany(bs []blocks.Block) error { +func (m MemBlockstore) PutMany(bs []blocks.Block) error { for _, b := range bs { _ = m.Put(b) // can't fail } @@ -64,7 +86,7 @@ func (m MemStore) PutMany(bs []blocks.Block) error { // AllKeysChan returns a channel from which // the CIDs in the Blockstore can be read. It should respect // the given context, closing the channel if it becomes Done. 
-func (m MemStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { +func (m MemBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { ch := make(chan cid.Cid, len(m)) for k := range m { ch <- k @@ -75,6 +97,6 @@ func (m MemStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { // HashOnRead specifies if every read block should be // rehashed to make sure it matches its CID. -func (m MemStore) HashOnRead(enabled bool) { +func (m MemBlockstore) HashOnRead(enabled bool) { // no-op } diff --git a/blockstore/metrics.go b/blockstore/metrics.go new file mode 100644 index 00000000000..737690a1106 --- /dev/null +++ b/blockstore/metrics.go @@ -0,0 +1,154 @@ +package blockstore + +import ( + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// +// Currently unused, but kept in repo in case we introduce one of the candidate +// cache implementations (Freecache, Ristretto), both of which report these +// metrics. +// + +// CacheMetricsEmitInterval is the interval at which metrics are emitted onto +// OpenCensus. +var CacheMetricsEmitInterval = 5 * time.Second + +var ( + CacheName, _ = tag.NewKey("cache_name") +) + +// CacheMeasures groups all metrics emitted by the blockstore caches. 
+var CacheMeasures = struct { + HitRatio *stats.Float64Measure + Hits *stats.Int64Measure + Misses *stats.Int64Measure + Entries *stats.Int64Measure + QueriesServed *stats.Int64Measure + Adds *stats.Int64Measure + Updates *stats.Int64Measure + Evictions *stats.Int64Measure + CostAdded *stats.Int64Measure + CostEvicted *stats.Int64Measure + SetsDropped *stats.Int64Measure + SetsRejected *stats.Int64Measure + QueriesDropped *stats.Int64Measure +}{ + HitRatio: stats.Float64("blockstore/cache/hit_ratio", "Hit ratio of blockstore cache", stats.UnitDimensionless), + Hits: stats.Int64("blockstore/cache/hits", "Total number of hits at blockstore cache", stats.UnitDimensionless), + Misses: stats.Int64("blockstore/cache/misses", "Total number of misses at blockstore cache", stats.UnitDimensionless), + Entries: stats.Int64("blockstore/cache/entry_count", "Total number of entries currently in the blockstore cache", stats.UnitDimensionless), + QueriesServed: stats.Int64("blockstore/cache/queries_served", "Total number of queries served by the blockstore cache", stats.UnitDimensionless), + Adds: stats.Int64("blockstore/cache/adds", "Total number of adds to blockstore cache", stats.UnitDimensionless), + Updates: stats.Int64("blockstore/cache/updates", "Total number of updates in blockstore cache", stats.UnitDimensionless), + Evictions: stats.Int64("blockstore/cache/evictions", "Total number of evictions from blockstore cache", stats.UnitDimensionless), + CostAdded: stats.Int64("blockstore/cache/cost_added", "Total cost (byte size) of entries added into blockstore cache", stats.UnitBytes), + CostEvicted: stats.Int64("blockstore/cache/cost_evicted", "Total cost (byte size) of entries evicted by blockstore cache", stats.UnitBytes), + SetsDropped: stats.Int64("blockstore/cache/sets_dropped", "Total number of sets dropped by blockstore cache", stats.UnitDimensionless), + SetsRejected: stats.Int64("blockstore/cache/sets_rejected", "Total number of sets rejected by blockstore cache", 
stats.UnitDimensionless), + QueriesDropped: stats.Int64("blockstore/cache/queries_dropped", "Total number of queries dropped by blockstore cache", stats.UnitDimensionless), +} + +// CacheViews groups all cache-related default views. +var CacheViews = struct { + HitRatio *view.View + Hits *view.View + Misses *view.View + Entries *view.View + QueriesServed *view.View + Adds *view.View + Updates *view.View + Evictions *view.View + CostAdded *view.View + CostEvicted *view.View + SetsDropped *view.View + SetsRejected *view.View + QueriesDropped *view.View +}{ + HitRatio: &view.View{ + Measure: CacheMeasures.HitRatio, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + Hits: &view.View{ + Measure: CacheMeasures.Hits, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + Misses: &view.View{ + Measure: CacheMeasures.Misses, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + Entries: &view.View{ + Measure: CacheMeasures.Entries, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + QueriesServed: &view.View{ + Measure: CacheMeasures.QueriesServed, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + Adds: &view.View{ + Measure: CacheMeasures.Adds, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + Updates: &view.View{ + Measure: CacheMeasures.Updates, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + Evictions: &view.View{ + Measure: CacheMeasures.Evictions, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + CostAdded: &view.View{ + Measure: CacheMeasures.CostAdded, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + CostEvicted: &view.View{ + Measure: CacheMeasures.CostEvicted, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + SetsDropped: &view.View{ + Measure: CacheMeasures.SetsDropped, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + 
SetsRejected: &view.View{ + Measure: CacheMeasures.SetsRejected, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, + QueriesDropped: &view.View{ + Measure: CacheMeasures.QueriesDropped, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{CacheName}, + }, +} + +// DefaultViews exports all default views for this package. +var DefaultViews = []*view.View{ + CacheViews.HitRatio, + CacheViews.Hits, + CacheViews.Misses, + CacheViews.Entries, + CacheViews.QueriesServed, + CacheViews.Adds, + CacheViews.Updates, + CacheViews.Evictions, + CacheViews.CostAdded, + CacheViews.CostEvicted, + CacheViews.SetsDropped, + CacheViews.SetsRejected, + CacheViews.QueriesDropped, +} diff --git a/blockstore/splitstore/README.md b/blockstore/splitstore/README.md new file mode 100644 index 00000000000..1c6569a34e7 --- /dev/null +++ b/blockstore/splitstore/README.md @@ -0,0 +1,72 @@ +# SplitStore: An actively scalable blockstore for the Filecoin chain + +The SplitStore was first introduced in lotus v1.5.1, as an experiment +in reducing the performance impact of large blockstores. + +With lotus v1.11.1, we introduce the next iteration in design and +implementation, which we call SplitStore v1. + +The new design (see [#6474](https://github.com/filecoin-project/lotus/pull/6474) +evolves the splitstore to be a freestanding compacting blockstore that +allows us to keep a small (60-100GB) working set in a hot blockstore +and reliably archive out of scope objects in a coldstore. The +coldstore can also be a discard store, whereby out of scope objects +are discarded or a regular badger blockstore (the default), which can +be periodically garbage collected according to configurable user +retention policies. 
+
+To enable the splitstore, edit `.lotus/config.toml` and add the following:
+```
+[Chainstore]
+  EnableSplitstore = true
+```
+
+If you intend to use the discard coldstore, you also need to add the following:
+```
+  [Chainstore.Splitstore]
+  ColdStoreType = "discard"
+```
+In general you _should not_ have to use the discard store, unless you
+are running a network booster or have very constrained hardware with
+not enough disk space to maintain a coldstore, even with garbage
+collection.
+
+
+## Operation
+
+When the splitstore is first enabled, the existing blockstore becomes
+the coldstore and a fresh hotstore is initialized.
+
+The hotstore is warmed up on first startup so as to load all chain
+headers and state roots in the current head. This allows us to
+immediately gain the performance benefits of a smaller blockstore which
+can be substantial for full archival nodes.
+
+All new writes are directed to the hotstore, while reads first hit the
+hotstore, with fallback to the coldstore.
+
+Once 5 finalities have elapsed, and every finality henceforth, the
+blockstore _compacts_. Compaction is the process of moving all
+unreachable objects within the last 4 finalities from the hotstore to
+the coldstore. If the system is configured with a discard coldstore,
+these objects are discarded. Note that chain headers, all the way to
+genesis, are considered reachable. Stateroots and messages are
+considered reachable only within the last 4 finalities, unless there
+is a live reference to them.
+
+## Compaction
+
+Compaction works transactionally with the following algorithm:
+- We prepare a transaction, whereby all i/o referenced objects through the API are tracked.
+- We walk the chain and mark reachable objects, keeping 4 finalities of state roots and messages and all headers all the way to genesis.
+- Once the chain walk is complete, we begin full transaction protection with concurrent marking; we walk and mark all references created during the chain walk.
On the same time, all I/O through the API concurrently marks objects as live references. +- We collect cold objects by iterating through the hotstore and checking the mark set; if an object is not marked, then it is candidate for purge. +- When running with a coldstore, we next copy all cold objects to the coldstore. +- At this point we are ready to begin purging: + - We sort cold objects heaviest first, so as to never delete the consituents of a DAG before the DAG itself (which would leave dangling references) + - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live +- We then end the transaction and compact/gc the hotstore. + +## Coldstore Garbage Collection + +TBD -- see [#6577](https://github.com/filecoin-project/lotus/issues/6577) diff --git a/blockstore/splitstore/debug.go b/blockstore/splitstore/debug.go new file mode 100644 index 00000000000..2be85ebfe8d --- /dev/null +++ b/blockstore/splitstore/debug.go @@ -0,0 +1,273 @@ +package splitstore + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime/debug" + "strings" + "sync" + "time" + + "go.uber.org/multierr" + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" +) + +type debugLog struct { + readLog, writeLog, deleteLog, stackLog *debugLogOp + + stackMx sync.Mutex + stackMap map[string]string +} + +type debugLogOp struct { + path string + mx sync.Mutex + log *os.File + count int +} + +func openDebugLog(path string) (*debugLog, error) { + basePath := filepath.Join(path, "debug") + err := os.MkdirAll(basePath, 0755) + if err != nil { + return nil, err + } + + readLog, err := openDebugLogOp(basePath, "read.log") + if err != nil { + return nil, err + } + + writeLog, err := openDebugLogOp(basePath, "write.log") + if err != nil { + _ = readLog.Close() + return nil, err + } + + deleteLog, err := 
openDebugLogOp(basePath, "delete.log") + if err != nil { + _ = readLog.Close() + _ = writeLog.Close() + return nil, err + } + + stackLog, err := openDebugLogOp(basePath, "stack.log") + if err != nil { + _ = readLog.Close() + _ = writeLog.Close() + _ = deleteLog.Close() + return nil, xerrors.Errorf("error opening stack log: %w", err) + } + + return &debugLog{ + readLog: readLog, + writeLog: writeLog, + deleteLog: deleteLog, + stackLog: stackLog, + stackMap: make(map[string]string), + }, nil +} + +func (d *debugLog) LogReadMiss(cid cid.Cid) { + if d == nil { + return + } + + stack := d.getStack() + err := d.readLog.Log("%s %s %s\n", d.timestamp(), cid, stack) + if err != nil { + log.Warnf("error writing read log: %s", err) + } +} + +func (d *debugLog) LogWrite(blk blocks.Block) { + if d == nil { + return + } + + var stack string + if enableDebugLogWriteTraces { + stack = " " + d.getStack() + } + + err := d.writeLog.Log("%s %s%s\n", d.timestamp(), blk.Cid(), stack) + if err != nil { + log.Warnf("error writing write log: %s", err) + } +} + +func (d *debugLog) LogWriteMany(blks []blocks.Block) { + if d == nil { + return + } + + var stack string + if enableDebugLogWriteTraces { + stack = " " + d.getStack() + } + + now := d.timestamp() + for _, blk := range blks { + err := d.writeLog.Log("%s %s%s\n", now, blk.Cid(), stack) + if err != nil { + log.Warnf("error writing write log: %s", err) + break + } + } +} + +func (d *debugLog) LogDelete(cids []cid.Cid) { + if d == nil { + return + } + + now := d.timestamp() + for _, c := range cids { + err := d.deleteLog.Log("%s %s\n", now, c) + if err != nil { + log.Warnf("error writing delete log: %s", err) + break + } + } +} + +func (d *debugLog) Flush() { + if d == nil { + return + } + + // rotate non-empty logs + d.readLog.Rotate() + d.writeLog.Rotate() + d.deleteLog.Rotate() + d.stackLog.Rotate() +} + +func (d *debugLog) Close() error { + if d == nil { + return nil + } + + err1 := d.readLog.Close() + err2 := d.writeLog.Close() + 
err3 := d.deleteLog.Close() + err4 := d.stackLog.Close() + + return multierr.Combine(err1, err2, err3, err4) +} + +func (d *debugLog) getStack() string { + sk := d.getNormalizedStackTrace() + hash := sha256.Sum256([]byte(sk)) + key := string(hash[:]) + + d.stackMx.Lock() + repr, ok := d.stackMap[key] + if !ok { + repr = hex.EncodeToString(hash[:]) + d.stackMap[key] = repr + + err := d.stackLog.Log("%s\n%s\n", repr, sk) + if err != nil { + log.Warnf("error writing stack trace for %s: %s", repr, err) + } + } + d.stackMx.Unlock() + + return repr +} + +func (d *debugLog) getNormalizedStackTrace() string { + sk := string(debug.Stack()) + + // Normalization for deduplication + // skip first line -- it's the goroutine + // for each line that ends in a ), remove the call args -- these are the registers + lines := strings.Split(sk, "\n")[1:] + for i, line := range lines { + if len(line) > 0 && line[len(line)-1] == ')' { + idx := strings.LastIndex(line, "(") + if idx < 0 { + continue + } + lines[i] = line[:idx] + } + } + + return strings.Join(lines, "\n") +} + +func (d *debugLog) timestamp() string { + ts, _ := time.Now().MarshalText() + return string(ts) +} + +func openDebugLogOp(basePath, name string) (*debugLogOp, error) { + path := filepath.Join(basePath, name) + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644) + if err != nil { + return nil, xerrors.Errorf("error opening %s: %w", name, err) + } + + return &debugLogOp{path: path, log: file}, nil +} + +func (d *debugLogOp) Close() error { + d.mx.Lock() + defer d.mx.Unlock() + + return d.log.Close() +} + +func (d *debugLogOp) Log(template string, arg ...interface{}) error { + d.mx.Lock() + defer d.mx.Unlock() + + d.count++ + _, err := fmt.Fprintf(d.log, template, arg...) 
+ return err +} + +func (d *debugLogOp) Rotate() { + d.mx.Lock() + defer d.mx.Unlock() + + if d.count == 0 { + return + } + + err := d.log.Close() + if err != nil { + log.Warnf("error closing log (file: %s): %s", d.path, err) + return + } + + arxivPath := fmt.Sprintf("%s-%d", d.path, time.Now().Unix()) + err = os.Rename(d.path, arxivPath) + if err != nil { + log.Warnf("error moving log (file: %s): %s", d.path, err) + return + } + + go func() { + cmd := exec.Command("gzip", arxivPath) + err := cmd.Run() + if err != nil { + log.Warnf("error compressing log (file: %s): %s", arxivPath, err) + } + }() + + d.count = 0 + d.log, err = os.OpenFile(d.path, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + log.Warnf("error opening log (file: %s): %s", d.path, err) + return + } +} diff --git a/blockstore/splitstore/markset.go b/blockstore/splitstore/markset.go new file mode 100644 index 00000000000..a644e727955 --- /dev/null +++ b/blockstore/splitstore/markset.go @@ -0,0 +1,38 @@ +package splitstore + +import ( + "errors" + + "golang.org/x/xerrors" + + cid "github.com/ipfs/go-cid" +) + +var errMarkSetClosed = errors.New("markset closed") + +// MarkSet is a utility to keep track of seen CID, and later query for them. +// +// * If the expected dataset is large, it can be backed by a datastore (e.g. bbolt). 
+// * If a probabilistic result is acceptable, it can be backed by a bloom filter +type MarkSet interface { + Mark(cid.Cid) error + Has(cid.Cid) (bool, error) + Close() error + SetConcurrent() +} + +type MarkSetEnv interface { + Create(name string, sizeHint int64) (MarkSet, error) + Close() error +} + +func OpenMarkSetEnv(path string, mtype string) (MarkSetEnv, error) { + switch mtype { + case "bloom": + return NewBloomMarkSetEnv() + case "map": + return NewMapMarkSetEnv() + default: + return nil, xerrors.Errorf("unknown mark set type %s", mtype) + } +} diff --git a/blockstore/splitstore/markset_bloom.go b/blockstore/splitstore/markset_bloom.go new file mode 100644 index 00000000000..9261de7c753 --- /dev/null +++ b/blockstore/splitstore/markset_bloom.go @@ -0,0 +1,107 @@ +package splitstore + +import ( + "crypto/rand" + "crypto/sha256" + "sync" + + "golang.org/x/xerrors" + + bbloom "github.com/ipfs/bbloom" + cid "github.com/ipfs/go-cid" +) + +const ( + BloomFilterMinSize = 10_000_000 + BloomFilterProbability = 0.01 +) + +type BloomMarkSetEnv struct{} + +var _ MarkSetEnv = (*BloomMarkSetEnv)(nil) + +type BloomMarkSet struct { + salt []byte + mx sync.RWMutex + bf *bbloom.Bloom + ts bool +} + +var _ MarkSet = (*BloomMarkSet)(nil) + +func NewBloomMarkSetEnv() (*BloomMarkSetEnv, error) { + return &BloomMarkSetEnv{}, nil +} + +func (e *BloomMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) { + size := int64(BloomFilterMinSize) + for size < sizeHint { + size += BloomFilterMinSize + } + + salt := make([]byte, 4) + _, err := rand.Read(salt) + if err != nil { + return nil, xerrors.Errorf("error reading salt: %w", err) + } + + bf, err := bbloom.New(float64(size), BloomFilterProbability) + if err != nil { + return nil, xerrors.Errorf("error creating bloom filter: %w", err) + } + + return &BloomMarkSet{salt: salt, bf: bf}, nil +} + +func (e *BloomMarkSetEnv) Close() error { + return nil +} + +func (s *BloomMarkSet) saltedKey(cid cid.Cid) []byte { + hash := 
cid.Hash() + key := make([]byte, len(s.salt)+len(hash)) + n := copy(key, s.salt) + copy(key[n:], hash) + rehash := sha256.Sum256(key) + return rehash[:] +} + +func (s *BloomMarkSet) Mark(cid cid.Cid) error { + if s.ts { + s.mx.Lock() + defer s.mx.Unlock() + } + + if s.bf == nil { + return errMarkSetClosed + } + + s.bf.Add(s.saltedKey(cid)) + return nil +} + +func (s *BloomMarkSet) Has(cid cid.Cid) (bool, error) { + if s.ts { + s.mx.RLock() + defer s.mx.RUnlock() + } + + if s.bf == nil { + return false, errMarkSetClosed + } + + return s.bf.Has(s.saltedKey(cid)), nil +} + +func (s *BloomMarkSet) Close() error { + if s.ts { + s.mx.Lock() + defer s.mx.Unlock() + } + s.bf = nil + return nil +} + +func (s *BloomMarkSet) SetConcurrent() { + s.ts = true +} diff --git a/blockstore/splitstore/markset_map.go b/blockstore/splitstore/markset_map.go new file mode 100644 index 00000000000..197c824242a --- /dev/null +++ b/blockstore/splitstore/markset_map.go @@ -0,0 +1,75 @@ +package splitstore + +import ( + "sync" + + cid "github.com/ipfs/go-cid" +) + +type MapMarkSetEnv struct{} + +var _ MarkSetEnv = (*MapMarkSetEnv)(nil) + +type MapMarkSet struct { + mx sync.RWMutex + set map[string]struct{} + + ts bool +} + +var _ MarkSet = (*MapMarkSet)(nil) + +func NewMapMarkSetEnv() (*MapMarkSetEnv, error) { + return &MapMarkSetEnv{}, nil +} + +func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) { + return &MapMarkSet{ + set: make(map[string]struct{}, sizeHint), + }, nil +} + +func (e *MapMarkSetEnv) Close() error { + return nil +} + +func (s *MapMarkSet) Mark(cid cid.Cid) error { + if s.ts { + s.mx.Lock() + defer s.mx.Unlock() + } + + if s.set == nil { + return errMarkSetClosed + } + + s.set[string(cid.Hash())] = struct{}{} + return nil +} + +func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) { + if s.ts { + s.mx.RLock() + defer s.mx.RUnlock() + } + + if s.set == nil { + return false, errMarkSetClosed + } + + _, ok := s.set[string(cid.Hash())] + return ok, nil +} 
+ +func (s *MapMarkSet) Close() error { + if s.ts { + s.mx.Lock() + defer s.mx.Unlock() + } + s.set = nil + return nil +} + +func (s *MapMarkSet) SetConcurrent() { + s.ts = true +} diff --git a/blockstore/splitstore/markset_test.go b/blockstore/splitstore/markset_test.go new file mode 100644 index 00000000000..d5c01e22029 --- /dev/null +++ b/blockstore/splitstore/markset_test.go @@ -0,0 +1,138 @@ +package splitstore + +import ( + "io/ioutil" + "testing" + + cid "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" +) + +func TestMapMarkSet(t *testing.T) { + testMarkSet(t, "map") +} + +func TestBloomMarkSet(t *testing.T) { + testMarkSet(t, "bloom") +} + +func testMarkSet(t *testing.T, lsType string) { + t.Helper() + + path, err := ioutil.TempDir("", "sweep-test.*") + if err != nil { + t.Fatal(err) + } + + env, err := OpenMarkSetEnv(path, lsType) + if err != nil { + t.Fatal(err) + } + defer env.Close() //nolint:errcheck + + hotSet, err := env.Create("hot", 0) + if err != nil { + t.Fatal(err) + } + + coldSet, err := env.Create("cold", 0) + if err != nil { + t.Fatal(err) + } + + makeCid := func(key string) cid.Cid { + h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1) + if err != nil { + t.Fatal(err) + } + + return cid.NewCidV1(cid.Raw, h) + } + + mustHave := func(s MarkSet, cid cid.Cid) { + has, err := s.Has(cid) + if err != nil { + t.Fatal(err) + } + + if !has { + t.Fatal("mark not found") + } + } + + mustNotHave := func(s MarkSet, cid cid.Cid) { + has, err := s.Has(cid) + if err != nil { + t.Fatal(err) + } + + if has { + t.Fatal("unexpected mark") + } + } + + k1 := makeCid("a") + k2 := makeCid("b") + k3 := makeCid("c") + k4 := makeCid("d") + + hotSet.Mark(k1) //nolint + hotSet.Mark(k2) //nolint + coldSet.Mark(k3) //nolint + + mustHave(hotSet, k1) + mustHave(hotSet, k2) + mustNotHave(hotSet, k3) + mustNotHave(hotSet, k4) + + mustNotHave(coldSet, k1) + mustNotHave(coldSet, k2) + mustHave(coldSet, k3) + mustNotHave(coldSet, k4) + + // close them 
and reopen to redo the dance + + err = hotSet.Close() + if err != nil { + t.Fatal(err) + } + + err = coldSet.Close() + if err != nil { + t.Fatal(err) + } + + hotSet, err = env.Create("hot", 0) + if err != nil { + t.Fatal(err) + } + + coldSet, err = env.Create("cold", 0) + if err != nil { + t.Fatal(err) + } + + hotSet.Mark(k3) //nolint + hotSet.Mark(k4) //nolint + coldSet.Mark(k1) //nolint + + mustNotHave(hotSet, k1) + mustNotHave(hotSet, k2) + mustHave(hotSet, k3) + mustHave(hotSet, k4) + + mustHave(coldSet, k1) + mustNotHave(coldSet, k2) + mustNotHave(coldSet, k3) + mustNotHave(coldSet, k4) + + err = hotSet.Close() + if err != nil { + t.Fatal(err) + } + + err = coldSet.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go new file mode 100644 index 00000000000..821ebb2b6c2 --- /dev/null +++ b/blockstore/splitstore/splitstore.go @@ -0,0 +1,568 @@ +package splitstore + +import ( + "context" + "errors" + "os" + "sync" + "sync/atomic" + "time" + + "go.uber.org/multierr" + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + dstore "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/go-state-types/abi" + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/metrics" + + "go.opencensus.io/stats" +) + +var ( + // baseEpochKey stores the base epoch (last compaction epoch) in the + // metadata store. + baseEpochKey = dstore.NewKey("/splitstore/baseEpoch") + + // warmupEpochKey stores whether a hot store warmup has been performed. + // On first start, the splitstore will walk the state tree and will copy + // all active blocks into the hotstore. + warmupEpochKey = dstore.NewKey("/splitstore/warmupEpoch") + + // markSetSizeKey stores the current estimate for the mark set size. 
+ // this is first computed at warmup and updated in every compaction + markSetSizeKey = dstore.NewKey("/splitstore/markSetSize") + + // compactionIndexKey stores the compaction index (serial number) + compactionIndexKey = dstore.NewKey("/splitstore/compactionIndex") + + log = logging.Logger("splitstore") + + // set this to true if you are debugging the splitstore to enable debug logging + enableDebugLog = false + // set this to true if you want to track origin stack traces in the write log + enableDebugLogWriteTraces = false +) + +func init() { + if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG") == "1" { + enableDebugLog = true + } + + if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG_WRITE_TRACES") == "1" { + enableDebugLogWriteTraces = true + } +} + +type Config struct { + // MarkSetType is the type of mark set to use. + // + // Only current sane value is "map", but we may add an option for a disk-backed + // markset for memory-constrained situations. + MarkSetType string + + // DiscardColdBlocks indicates whether to skip moving cold blocks to the coldstore. + // If the splitstore is running with a noop coldstore then this option is set to true + // which skips moving (as it is a noop, but still takes time to read all the cold objects) + // and directly purges cold blocks. + DiscardColdBlocks bool + + // HotstoreMessageRetention indicates the hotstore retention policy for messages. + // It has the following semantics: + // - a value of 0 will only retain messages within the compaction boundary (4 finalities) + // - a positive integer indicates the number of finalities, outside the compaction boundary, + // for which messages will be retained in the hotstore. + HotStoreMessageRetention uint64 +} + +// ChainAccessor allows the Splitstore to access the chain. It will most likely +// be a ChainStore at runtime. 
+type ChainAccessor interface { + GetTipsetByHeight(context.Context, abi.ChainEpoch, *types.TipSet, bool) (*types.TipSet, error) + GetHeaviestTipSet() *types.TipSet + SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error) +} + +// hotstore is the interface that must be satisfied by the hot blockstore; it is an extension +// of the Blockstore interface with the traits we need for compaction. +type hotstore interface { + bstore.Blockstore + bstore.BlockstoreIterator +} + +type SplitStore struct { + compacting int32 // compaction/prune/warmup in progress + closing int32 // the splitstore is closing + + cfg *Config + + mx sync.Mutex + warmupEpoch abi.ChainEpoch // protected by mx + baseEpoch abi.ChainEpoch // protected by compaction lock + + headChangeMx sync.Mutex + + coldPurgeSize int + + chain ChainAccessor + ds dstore.Datastore + cold bstore.Blockstore + hot hotstore + + markSetEnv MarkSetEnv + markSetSize int64 + + compactionIndex int64 + + ctx context.Context + cancel func() + + debug *debugLog + + // transactional protection for concurrent read/writes during compaction + txnLk sync.RWMutex + txnViewsMx sync.Mutex + txnViewsCond sync.Cond + txnViews int + txnViewsWaiting bool + txnActive bool + txnProtect MarkSet + txnRefsMx sync.Mutex + txnRefs map[cid.Cid]struct{} + txnMissing map[cid.Cid]struct{} + + // registered protectors + protectors []func(func(cid.Cid) error) error +} + +var _ bstore.Blockstore = (*SplitStore)(nil) + +// Open opens an existing splistore, or creates a new splitstore. The splitstore +// is backed by the provided hot and cold stores. The returned SplitStore MUST be +// attached to the ChainStore with Start in order to trigger compaction. 
+func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Config) (*SplitStore, error) { + // hot blockstore must support the hotstore interface + hots, ok := hot.(hotstore) + if !ok { + // be specific about what is missing + if _, ok := hot.(bstore.BlockstoreIterator); !ok { + return nil, xerrors.Errorf("hot blockstore does not support efficient iteration: %T", hot) + } + + return nil, xerrors.Errorf("hot blockstore does not support the necessary traits: %T", hot) + } + + // the markset env + markSetEnv, err := OpenMarkSetEnv(path, cfg.MarkSetType) + if err != nil { + return nil, err + } + + // and now we can make a SplitStore + ss := &SplitStore{ + cfg: cfg, + ds: ds, + cold: cold, + hot: hots, + markSetEnv: markSetEnv, + + coldPurgeSize: defaultColdPurgeSize, + } + + ss.txnViewsCond.L = &ss.txnViewsMx + ss.ctx, ss.cancel = context.WithCancel(context.Background()) + + if enableDebugLog { + ss.debug, err = openDebugLog(path) + if err != nil { + return nil, err + } + } + + return ss, nil +} + +// Blockstore interface +func (s *SplitStore) DeleteBlock(_ cid.Cid) error { + // afaict we don't seem to be using this method, so it's not implemented + return errors.New("DeleteBlock not implemented on SplitStore; don't do this Luke!") //nolint +} + +func (s *SplitStore) DeleteMany(_ []cid.Cid) error { + // afaict we don't seem to be using this method, so it's not implemented + return errors.New("DeleteMany not implemented on SplitStore; don't do this Luke!") //nolint +} + +func (s *SplitStore) Has(cid cid.Cid) (bool, error) { + if isIdentiyCid(cid) { + return true, nil + } + + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + has, err := s.hot.Has(cid) + + if err != nil { + return has, err + } + + if has { + s.trackTxnRef(cid) + return true, nil + } + + return s.cold.Has(cid) +} + +func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { + if isIdentiyCid(cid) { + data, err := decodeIdentityCid(cid) + if err != nil { + return nil, err + } + + return 
blocks.NewBlockWithCid(data, cid) + } + + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + blk, err := s.hot.Get(cid) + + switch err { + case nil: + s.trackTxnRef(cid) + return blk, nil + + case bstore.ErrNotFound: + if s.isWarm() { + s.debug.LogReadMiss(cid) + } + + blk, err = s.cold.Get(cid) + if err == nil { + stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) + + } + return blk, err + + default: + return nil, err + } +} + +func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { + if isIdentiyCid(cid) { + data, err := decodeIdentityCid(cid) + if err != nil { + return 0, err + } + + return len(data), nil + } + + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + size, err := s.hot.GetSize(cid) + + switch err { + case nil: + s.trackTxnRef(cid) + return size, nil + + case bstore.ErrNotFound: + if s.isWarm() { + s.debug.LogReadMiss(cid) + } + + size, err = s.cold.GetSize(cid) + if err == nil { + stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) + } + return size, err + + default: + return 0, err + } +} + +func (s *SplitStore) Put(blk blocks.Block) error { + if isIdentiyCid(blk.Cid()) { + return nil + } + + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + err := s.hot.Put(blk) + if err != nil { + return err + } + + s.debug.LogWrite(blk) + + s.trackTxnRef(blk.Cid()) + return nil +} + +func (s *SplitStore) PutMany(blks []blocks.Block) error { + // filter identites + idcids := 0 + for _, blk := range blks { + if isIdentiyCid(blk.Cid()) { + idcids++ + } + } + + if idcids > 0 { + if idcids == len(blks) { + // it's all identities + return nil + } + + filtered := make([]blocks.Block, 0, len(blks)-idcids) + for _, blk := range blks { + if isIdentiyCid(blk.Cid()) { + continue + } + filtered = append(filtered, blk) + } + + blks = filtered + } + + batch := make([]cid.Cid, 0, len(blks)) + for _, blk := range blks { + batch = append(batch, blk.Cid()) + } + + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + err := s.hot.PutMany(blks) + if err != nil { + return err + } + + 
s.debug.LogWriteMany(blks) + + s.trackTxnRefMany(batch) + return nil +} + +func (s *SplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + ctx, cancel := context.WithCancel(ctx) + + chHot, err := s.hot.AllKeysChan(ctx) + if err != nil { + cancel() + return nil, err + } + + chCold, err := s.cold.AllKeysChan(ctx) + if err != nil { + cancel() + return nil, err + } + + seen := cid.NewSet() + ch := make(chan cid.Cid, 8) // buffer is arbitrary, just enough to avoid context switches + go func() { + defer cancel() + defer close(ch) + + for _, in := range []<-chan cid.Cid{chHot, chCold} { + for c := range in { + // ensure we only emit each key once + if !seen.Visit(c) { + continue + } + + select { + case ch <- c: + case <-ctx.Done(): + return + } + } + } + }() + + return ch, nil +} + +func (s *SplitStore) HashOnRead(enabled bool) { + s.hot.HashOnRead(enabled) + s.cold.HashOnRead(enabled) +} + +func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error { + if isIdentiyCid(cid) { + data, err := decodeIdentityCid(cid) + if err != nil { + return err + } + + return cb(data) + } + + // views are (optimistically) protected two-fold: + // - if there is an active transaction, then the reference is protected. + // - if there is no active transaction, active views are tracked in a + // wait group and compaction is inhibited from starting until they + // have all completed. this is necessary to ensure that a (very) long-running + // view can't have its data pointer deleted, which would be catastrophic. + // Note that we can't just RLock for the duration of the view, as this could + // lead to deadlock with recursive views. 
+ s.protectView(cid) + defer s.viewDone() + + err := s.hot.View(cid, cb) + switch err { + case bstore.ErrNotFound: + if s.isWarm() { + s.debug.LogReadMiss(cid) + } + + err = s.cold.View(cid, cb) + if err == nil { + stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) + } + return err + + default: + return err + } +} + +func (s *SplitStore) isWarm() bool { + s.mx.Lock() + defer s.mx.Unlock() + return s.warmupEpoch > 0 +} + +// State tracking +func (s *SplitStore) Start(chain ChainAccessor) error { + s.chain = chain + curTs := chain.GetHeaviestTipSet() + + // should we warmup + warmup := false + + // load base epoch from metadata ds + // if none, then use current epoch because it's a fresh start + bs, err := s.ds.Get(baseEpochKey) + switch err { + case nil: + s.baseEpoch = bytesToEpoch(bs) + + case dstore.ErrNotFound: + if curTs == nil { + // this can happen in some tests + break + } + + err = s.setBaseEpoch(curTs.Height()) + if err != nil { + return xerrors.Errorf("error saving base epoch: %w", err) + } + + default: + return xerrors.Errorf("error loading base epoch: %w", err) + } + + // load warmup epoch from metadata ds + bs, err = s.ds.Get(warmupEpochKey) + switch err { + case nil: + s.warmupEpoch = bytesToEpoch(bs) + + case dstore.ErrNotFound: + warmup = true + + default: + return xerrors.Errorf("error loading warmup epoch: %w", err) + } + + // load markSetSize from metadata ds to provide a size hint for marksets + bs, err = s.ds.Get(markSetSizeKey) + switch err { + case nil: + s.markSetSize = bytesToInt64(bs) + + case dstore.ErrNotFound: + default: + return xerrors.Errorf("error loading mark set size: %w", err) + } + + // load compactionIndex from metadata ds to provide a hint as to when to perform moving gc + bs, err = s.ds.Get(compactionIndexKey) + switch err { + case nil: + s.compactionIndex = bytesToInt64(bs) + + case dstore.ErrNotFound: + // this is potentially an upgrade from splitstore v0; schedule a warmup as v0 has + // some issues with hot references 
leaking into the coldstore. + warmup = true + default: + return xerrors.Errorf("error loading compaction index: %w", err) + } + + log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch) + + if warmup { + err = s.warmup(curTs) + if err != nil { + return xerrors.Errorf("error starting warmup: %w", err) + } + } + + // watch the chain + chain.SubscribeHeadChanges(s.HeadChange) + + return nil +} + +func (s *SplitStore) AddProtector(protector func(func(cid.Cid) error) error) { + s.mx.Lock() + defer s.mx.Unlock() + + s.protectors = append(s.protectors, protector) +} + +func (s *SplitStore) Close() error { + if !atomic.CompareAndSwapInt32(&s.closing, 0, 1) { + // already closing + return nil + } + + if atomic.LoadInt32(&s.compacting) == 1 { + log.Warn("close with ongoing compaction in progress; waiting for it to finish...") + for atomic.LoadInt32(&s.compacting) == 1 { + time.Sleep(time.Second) + } + } + + s.cancel() + return multierr.Combine(s.markSetEnv.Close(), s.debug.Close()) +} + +func (s *SplitStore) checkClosing() error { + if atomic.LoadInt32(&s.closing) == 1 { + return xerrors.Errorf("splitstore is closing") + } + + return nil +} + +func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error { + s.baseEpoch = epoch + return s.ds.Put(baseEpochKey, epochToBytes(epoch)) +} diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go new file mode 100644 index 00000000000..86f035e6f4a --- /dev/null +++ b/blockstore/splitstore/splitstore_compact.go @@ -0,0 +1,1144 @@ +package splitstore + +import ( + "bytes" + "errors" + "runtime" + "sort" + "sync/atomic" + "time" + + "golang.org/x/sync/errgroup" + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/abi" + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + 
"github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/metrics" + + "go.opencensus.io/stats" +) + +var ( + // CompactionThreshold is the number of epochs that need to have elapsed + // from the previously compacted epoch to trigger a new compaction. + // + // |················· CompactionThreshold ··················| + // | | + // =======‖≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡‖------------------------» + // | | chain --> ↑__ current epoch + // | archived epochs ___↑ + // ↑________ CompactionBoundary + // + // === :: cold (already archived) + // ≡≡≡ :: to be archived in this compaction + // --- :: hot + CompactionThreshold = 5 * build.Finality + + // CompactionBoundary is the number of epochs from the current epoch at which + // we will walk the chain for live objects. + CompactionBoundary = 4 * build.Finality + + // SyncGapTime is the time delay from a tipset's min timestamp before we decide + // there is a sync gap + SyncGapTime = time.Minute +) + +var ( + // used to signal end of walk + errStopWalk = errors.New("stop walk") +) + +const ( + batchSize = 16384 + + defaultColdPurgeSize = 7_000_000 +) + +func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { + s.headChangeMx.Lock() + defer s.headChangeMx.Unlock() + + // Revert only. + if len(apply) == 0 { + return nil + } + + curTs := apply[len(apply)-1] + epoch := curTs.Height() + + // NOTE: there is an implicit invariant assumption that HeadChange is invoked + // synchronously and no other HeadChange can be invoked while one is in + // progress. + // this is guaranteed by the chainstore, and it is pervasive in all lotus + // -- if that ever changes then all hell will break loose in general and + // we will have a rance to protectTipSets here. 
+ // Reagrdless, we put a mutex in HeadChange just to be safe + + if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) { + // we are currently compacting -- protect the new tipset(s) + s.protectTipSets(apply) + return nil + } + + // check if we are actually closing first + if atomic.LoadInt32(&s.closing) == 1 { + atomic.StoreInt32(&s.compacting, 0) + return nil + } + + timestamp := time.Unix(int64(curTs.MinTimestamp()), 0) + if time.Since(timestamp) > SyncGapTime { + // don't attempt compaction before we have caught up syncing + atomic.StoreInt32(&s.compacting, 0) + return nil + } + + if epoch-s.baseEpoch > CompactionThreshold { + // it's time to compact -- prepare the transaction and go! + s.beginTxnProtect() + go func() { + defer atomic.StoreInt32(&s.compacting, 0) + defer s.endTxnProtect() + + log.Info("compacting splitstore") + start := time.Now() + + s.compact(curTs) + + log.Infow("compaction done", "took", time.Since(start)) + }() + } else { + // no compaction necessary + atomic.StoreInt32(&s.compacting, 0) + } + + return nil +} + +// transactionally protect incoming tipsets +func (s *SplitStore) protectTipSets(apply []*types.TipSet) { + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + if !s.txnActive { + return + } + + var cids []cid.Cid + for _, ts := range apply { + cids = append(cids, ts.Cids()...) 
+ } + + s.trackTxnRefMany(cids) +} + +// transactionally protect a view +func (s *SplitStore) protectView(c cid.Cid) { + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + if s.txnActive { + s.trackTxnRef(c) + } + + s.txnViewsMx.Lock() + s.txnViews++ + s.txnViewsMx.Unlock() +} + +func (s *SplitStore) viewDone() { + s.txnViewsMx.Lock() + defer s.txnViewsMx.Unlock() + + s.txnViews-- + if s.txnViews == 0 && s.txnViewsWaiting { + s.txnViewsCond.Broadcast() + } +} + +func (s *SplitStore) viewWait() { + s.txnViewsMx.Lock() + defer s.txnViewsMx.Unlock() + + s.txnViewsWaiting = true + for s.txnViews > 0 { + s.txnViewsCond.Wait() + } + s.txnViewsWaiting = false +} + +// transactionally protect a reference to an object +func (s *SplitStore) trackTxnRef(c cid.Cid) { + if !s.txnActive { + // not compacting + return + } + + if isUnitaryObject(c) { + return + } + + if s.txnProtect != nil { + mark, err := s.txnProtect.Has(c) + if err != nil { + log.Warnf("error checking markset: %s", err) + // track it anyways + } else if mark { + return + } + } + + s.txnRefsMx.Lock() + s.txnRefs[c] = struct{}{} + s.txnRefsMx.Unlock() +} + +// transactionally protect a batch of references +func (s *SplitStore) trackTxnRefMany(cids []cid.Cid) { + if !s.txnActive { + // not compacting + return + } + + s.txnRefsMx.Lock() + defer s.txnRefsMx.Unlock() + + quiet := false + for _, c := range cids { + if isUnitaryObject(c) { + continue + } + + if s.txnProtect != nil { + mark, err := s.txnProtect.Has(c) + if err != nil { + if !quiet { + quiet = true + log.Warnf("error checking markset: %s", err) + } + // track it anyways + } + + if mark { + continue + } + } + + s.txnRefs[c] = struct{}{} + } + + return +} + +// protect all pending transactional references +func (s *SplitStore) protectTxnRefs(markSet MarkSet) error { + for { + var txnRefs map[cid.Cid]struct{} + + s.txnRefsMx.Lock() + if len(s.txnRefs) > 0 { + txnRefs = s.txnRefs + s.txnRefs = make(map[cid.Cid]struct{}) + } + s.txnRefsMx.Unlock() + + if 
len(txnRefs) == 0 { + return nil + } + + log.Infow("protecting transactional references", "refs", len(txnRefs)) + count := 0 + workch := make(chan cid.Cid, len(txnRefs)) + startProtect := time.Now() + + for c := range txnRefs { + mark, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking markset: %w", err) + } + + if mark { + continue + } + + workch <- c + count++ + } + close(workch) + + if count == 0 { + return nil + } + + workers := runtime.NumCPU() / 2 + if workers < 2 { + workers = 2 + } + if workers > count { + workers = count + } + + worker := func() error { + for c := range workch { + err := s.doTxnProtect(c, markSet) + if err != nil { + return xerrors.Errorf("error protecting transactional references to %s: %w", c, err) + } + } + return nil + } + + g := new(errgroup.Group) + for i := 0; i < workers; i++ { + g.Go(worker) + } + + if err := g.Wait(); err != nil { + return err + } + + log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count) + } +} + +// transactionally protect a reference by walking the object and marking. +// concurrent markings are short circuited by checking the markset. +func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error { + if err := s.checkClosing(); err != nil { + return err + } + + // Note: cold objects are deleted heaviest first, so the consituents of an object + // cannot be deleted before the object itself. 
+ return s.walkObjectIncomplete(root, cid.NewSet(), + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + mark, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking markset: %w", err) + } + + // it's marked, nothing to do + if mark { + return errStopWalk + } + + return markSet.Mark(c) + }, + func(c cid.Cid) error { + if s.txnMissing != nil { + log.Warnf("missing object reference %s in %s", c, root) + s.txnRefsMx.Lock() + s.txnMissing[c] = struct{}{} + s.txnRefsMx.Unlock() + } + return errStopWalk + }) +} + +func (s *SplitStore) applyProtectors() error { + s.mx.Lock() + defer s.mx.Unlock() + + count := 0 + for _, protect := range s.protectors { + err := protect(func(c cid.Cid) error { + s.trackTxnRef(c) + count++ + return nil + }) + + if err != nil { + return xerrors.Errorf("error applynig protector: %w", err) + } + } + + if count > 0 { + log.Infof("protected %d references through %d protectors", count, len(s.protectors)) + } + + return nil +} + +// --- Compaction --- +// Compaction works transactionally with the following algorithm: +// - We prepare a transaction, whereby all i/o referenced objects through the API are tracked. +// - We walk the chain and mark reachable objects, keeping 4 finalities of state roots and messages and all headers all the way to genesis. +// - Once the chain walk is complete, we begin full transaction protection with concurrent marking; we walk and mark all references created during the chain walk. On the same time, all I/O through the API concurrently marks objects as live references. +// - We collect cold objects by iterating through the hotstore and checking the mark set; if an object is not marked, then it is candidate for purge. +// - When running with a coldstore, we next copy all cold objects to the coldstore. 
+// - At this point we are ready to begin purging: +// - We sort cold objects heaviest first, so as to never delete the consituents of a DAG before the DAG itself (which would leave dangling references) +// - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live +// - We then end the transaction and compact/gc the hotstore. +func (s *SplitStore) compact(curTs *types.TipSet) { + log.Info("waiting for active views to complete") + start := time.Now() + s.viewWait() + log.Infow("waiting for active views done", "took", time.Since(start)) + + start = time.Now() + err := s.doCompact(curTs) + took := time.Since(start).Milliseconds() + stats.Record(s.ctx, metrics.SplitstoreCompactionTimeSeconds.M(float64(took)/1e3)) + + if err != nil { + log.Errorf("COMPACTION ERROR: %s", err) + } +} + +func (s *SplitStore) doCompact(curTs *types.TipSet) error { + currentEpoch := curTs.Height() + boundaryEpoch := currentEpoch - CompactionBoundary + + var inclMsgsEpoch abi.ChainEpoch + inclMsgsRange := abi.ChainEpoch(s.cfg.HotStoreMessageRetention) * build.Finality + if inclMsgsRange < boundaryEpoch { + inclMsgsEpoch = boundaryEpoch - inclMsgsRange + } + + log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "inclMsgsEpoch", inclMsgsEpoch, "compactionIndex", s.compactionIndex) + + markSet, err := s.markSetEnv.Create("live", s.markSetSize) + if err != nil { + return xerrors.Errorf("error creating mark set: %w", err) + } + defer markSet.Close() //nolint:errcheck + defer s.debug.Flush() + + if err := s.checkClosing(); err != nil { + return err + } + + // we are ready for concurrent marking + s.beginTxnMarking(markSet) + + // 0. 
track all protected references at beginning of compaction; anything added later should + // be transactionally protected by the write + log.Info("protecting references with registered protectors") + err = s.applyProtectors() + if err != nil { + return err + } + + // 1. mark reachable objects by walking the chain from the current epoch; we keep state roots + // and messages until the boundary epoch. + log.Info("marking reachable objects") + startMark := time.Now() + + var count int64 + err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + count++ + return markSet.Mark(c) + }) + + if err != nil { + return xerrors.Errorf("error marking: %w", err) + } + + s.markSetSize = count + count>>2 // overestimate a bit + + log.Infow("marking done", "took", time.Since(startMark), "marked", count) + + if err := s.checkClosing(); err != nil { + return err + } + + // 1.1 protect transactional refs + err = s.protectTxnRefs(markSet) + if err != nil { + return xerrors.Errorf("error protecting transactional refs: %w", err) + } + + if err := s.checkClosing(); err != nil { + return err + } + + // 2. iterate through the hotstore to collect cold objects + log.Info("collecting cold objects") + startCollect := time.Now() + + // some stats for logging + var hotCnt, coldCnt int + + cold := make([]cid.Cid, 0, s.coldPurgeSize) + err = s.hot.ForEachKey(func(c cid.Cid) error { + // was it marked? 
+ mark, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking mark set for %s: %w", c, err) + } + + if mark { + hotCnt++ + return nil + } + + // it's cold, mark it as candidate for move + cold = append(cold, c) + coldCnt++ + + return nil + }) + + if err != nil { + return xerrors.Errorf("error collecting cold objects: %w", err) + } + + log.Infow("cold collection done", "took", time.Since(startCollect)) + + if coldCnt > 0 { + s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit + } + + log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt) + stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt))) + stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt))) + + if err := s.checkClosing(); err != nil { + return err + } + + // now that we have collected cold objects, check for missing references from transactional i/o + // and disable further collection of such references (they will not be acted upon as we can't + // possibly delete objects we didn't have when we were collecting cold objects) + s.waitForMissingRefs(markSet) + + if err := s.checkClosing(); err != nil { + return err + } + + // 3. copy the cold objects to the coldstore -- if we have one + if !s.cfg.DiscardColdBlocks { + log.Info("moving cold objects to the coldstore") + startMove := time.Now() + err = s.moveColdBlocks(cold) + if err != nil { + return xerrors.Errorf("error moving cold objects: %w", err) + } + log.Infow("moving done", "took", time.Since(startMove)) + + if err := s.checkClosing(); err != nil { + return err + } + } + + // 4. sort cold objects so that the dags with most references are deleted first + // this ensures that we can't refer to a dag with its consituents already deleted, ie + // we lave no dangling references. 
+ log.Info("sorting cold objects") + startSort := time.Now() + err = s.sortObjects(cold) + if err != nil { + return xerrors.Errorf("error sorting objects: %w", err) + } + log.Infow("sorting done", "took", time.Since(startSort)) + + // 4.1 protect transactional refs once more + // strictly speaking, this is not necessary as purge will do it before deleting each + // batch. however, there is likely a largish number of references accumulated during + // ths sort and this protects before entering pruge context. + err = s.protectTxnRefs(markSet) + if err != nil { + return xerrors.Errorf("error protecting transactional refs: %w", err) + } + + if err := s.checkClosing(); err != nil { + return err + } + + // 5. purge cold objects from the hotstore, taking protected references into account + log.Info("purging cold objects from the hotstore") + startPurge := time.Now() + err = s.purge(cold, markSet) + if err != nil { + return xerrors.Errorf("error purging cold blocks: %w", err) + } + log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge)) + + // we are done; do some housekeeping + s.endTxnProtect() + s.gcHotstore() + + err = s.setBaseEpoch(boundaryEpoch) + if err != nil { + return xerrors.Errorf("error saving base epoch: %w", err) + } + + err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + if err != nil { + return xerrors.Errorf("error saving mark set size: %w", err) + } + + s.compactionIndex++ + err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex)) + if err != nil { + return xerrors.Errorf("error saving compaction index: %w", err) + } + + return nil +} + +func (s *SplitStore) beginTxnProtect() { + log.Info("preparing compaction transaction") + + s.txnLk.Lock() + defer s.txnLk.Unlock() + + s.txnActive = true + s.txnRefs = make(map[cid.Cid]struct{}) + s.txnMissing = make(map[cid.Cid]struct{}) +} + +func (s *SplitStore) beginTxnMarking(markSet MarkSet) { + markSet.SetConcurrent() + + s.txnLk.Lock() + s.txnProtect = markSet + 
s.txnLk.Unlock() +} + +func (s *SplitStore) endTxnProtect() { + s.txnLk.Lock() + defer s.txnLk.Unlock() + + if !s.txnActive { + return + } + + // release markset memory + if s.txnProtect != nil { + _ = s.txnProtect.Close() + } + + s.txnActive = false + s.txnProtect = nil + s.txnRefs = nil + s.txnMissing = nil +} + +func (s *SplitStore) walkChain(ts *types.TipSet, inclState abi.ChainEpoch, inclMsgs abi.ChainEpoch, + f func(cid.Cid) error) error { + visited := cid.NewSet() + walked := cid.NewSet() + toWalk := ts.Cids() + walkCnt := 0 + scanCnt := 0 + + walkBlock := func(c cid.Cid) error { + if !visited.Visit(c) { + return nil + } + + walkCnt++ + + if err := f(c); err != nil { + return err + } + + var hdr types.BlockHeader + err := s.view(c, func(data []byte) error { + return hdr.UnmarshalCBOR(bytes.NewBuffer(data)) + }) + + if err != nil { + return xerrors.Errorf("error unmarshaling block header (cid: %s): %w", c, err) + } + + // message are retained if within the inclMsgs boundary + if hdr.Height >= inclMsgs && hdr.Height > 0 { + if inclMsgs < inclState { + // we need to use walkObjectIncomplete here, as messages may be missing early on if we + // synced from snapshot and have a long HotStoreMessageRetentionPolicy. 
+ stopWalk := func(_ cid.Cid) error { return errStopWalk } + if err := s.walkObjectIncomplete(hdr.Messages, walked, f, stopWalk); err != nil { + return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) + } + } else { + if err := s.walkObject(hdr.Messages, walked, f); err != nil { + return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) + } + } + } + + // state and message receipts is only retained if within the inclState boundary + if hdr.Height >= inclState || hdr.Height == 0 { + if hdr.Height > 0 { + if err := s.walkObject(hdr.ParentMessageReceipts, walked, f); err != nil { + return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) + } + } + + if err := s.walkObject(hdr.ParentStateRoot, walked, f); err != nil { + return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err) + } + scanCnt++ + } + + if hdr.Height > 0 { + toWalk = append(toWalk, hdr.Parents...) + } + + return nil + } + + for len(toWalk) > 0 { + // walking can take a while, so check this with every opportunity + if err := s.checkClosing(); err != nil { + return err + } + + walking := toWalk + toWalk = nil + for _, c := range walking { + if err := walkBlock(c); err != nil { + return xerrors.Errorf("error walking block (cid: %s): %w", c, err) + } + } + } + + log.Infow("chain walk done", "walked", walkCnt, "scanned", scanCnt) + + return nil +} + +func (s *SplitStore) walkObject(c cid.Cid, walked *cid.Set, f func(cid.Cid) error) error { + if !walked.Visit(c) { + return nil + } + + if err := f(c); err != nil { + if err == errStopWalk { + return nil + } + + return err + } + + if c.Prefix().Codec != cid.DagCBOR { + return nil + } + + // check this before recursing + if err := s.checkClosing(); err != nil { + return err + } + + var links []cid.Cid + err := s.view(c, func(data []byte) error { + return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { + links = append(links, c) + 
}) + }) + + if err != nil { + return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) + } + + for _, c := range links { + err := s.walkObject(c, walked, f) + if err != nil { + return xerrors.Errorf("error walking link (cid: %s): %w", c, err) + } + } + + return nil +} + +// like walkObject, but the object may be potentially incomplete (references missing) +func (s *SplitStore) walkObjectIncomplete(c cid.Cid, walked *cid.Set, f, missing func(cid.Cid) error) error { + if !walked.Visit(c) { + return nil + } + + // occurs check -- only for DAGs + if c.Prefix().Codec == cid.DagCBOR { + has, err := s.has(c) + if err != nil { + return xerrors.Errorf("error occur checking %s: %w", c, err) + } + + if !has { + err = missing(c) + if err == errStopWalk { + return nil + } + + return err + } + } + + if err := f(c); err != nil { + if err == errStopWalk { + return nil + } + + return err + } + + if c.Prefix().Codec != cid.DagCBOR { + return nil + } + + // check this before recursing + if err := s.checkClosing(); err != nil { + return err + } + + var links []cid.Cid + err := s.view(c, func(data []byte) error { + return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { + links = append(links, c) + }) + }) + + if err != nil { + return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) + } + + for _, c := range links { + err := s.walkObjectIncomplete(c, walked, f, missing) + if err != nil { + return xerrors.Errorf("error walking link (cid: %s): %w", c, err) + } + } + + return nil +} + +// internal version used by walk +func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error { + if isIdentiyCid(c) { + data, err := decodeIdentityCid(c) + if err != nil { + return err + } + + return cb(data) + } + + err := s.hot.View(c, cb) + switch err { + case bstore.ErrNotFound: + return s.cold.View(c, cb) + + default: + return err + } +} + +func (s *SplitStore) has(c cid.Cid) (bool, error) { + if isIdentiyCid(c) { + return true, nil + } + + has, err 
:= s.hot.Has(c) + + if has || err != nil { + return has, err + } + + return s.cold.Has(c) +} + +func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { + batch := make([]blocks.Block, 0, batchSize) + + for _, c := range cold { + if err := s.checkClosing(); err != nil { + return err + } + + blk, err := s.hot.Get(c) + if err != nil { + if err == bstore.ErrNotFound { + log.Warnf("hotstore missing block %s", c) + continue + } + + return xerrors.Errorf("error retrieving block %s from hotstore: %w", c, err) + } + + batch = append(batch, blk) + if len(batch) == batchSize { + err = s.cold.PutMany(batch) + if err != nil { + return xerrors.Errorf("error putting batch to coldstore: %w", err) + } + batch = batch[:0] + } + } + + if len(batch) > 0 { + err := s.cold.PutMany(batch) + if err != nil { + return xerrors.Errorf("error putting batch to coldstore: %w", err) + } + } + + return nil +} + +// sorts a slice of objects heaviest first -- it's a little expensive but worth the +// guarantee that we don't leave dangling references behind, e.g. if we die in the middle +// of a purge. +func (s *SplitStore) sortObjects(cids []cid.Cid) error { + // we cache the keys to avoid making a gazillion of strings + keys := make(map[cid.Cid]string) + key := func(c cid.Cid) string { + s, ok := keys[c] + if !ok { + s = string(c.Hash()) + keys[c] = s + } + return s + } + + // compute sorting weights as the cumulative number of DAG links + weights := make(map[string]int) + for _, c := range cids { + // this can take quite a while, so check for shutdown with every opportunity + if err := s.checkClosing(); err != nil { + return err + } + + w := s.getObjectWeight(c, weights, key) + weights[key(c)] = w + } + + // sort! 
+ sort.Slice(cids, func(i, j int) bool { + wi := weights[key(cids[i])] + wj := weights[key(cids[j])] + if wi == wj { + return bytes.Compare(cids[i].Hash(), cids[j].Hash()) > 0 + } + + return wi > wj + }) + + return nil +} + +func (s *SplitStore) getObjectWeight(c cid.Cid, weights map[string]int, key func(cid.Cid) string) int { + w, ok := weights[key(c)] + if ok { + return w + } + + // we treat block headers specially to avoid walking the entire chain + var hdr types.BlockHeader + err := s.view(c, func(data []byte) error { + return hdr.UnmarshalCBOR(bytes.NewBuffer(data)) + }) + if err == nil { + w1 := s.getObjectWeight(hdr.ParentStateRoot, weights, key) + weights[key(hdr.ParentStateRoot)] = w1 + + w2 := s.getObjectWeight(hdr.Messages, weights, key) + weights[key(hdr.Messages)] = w2 + + return 1 + w1 + w2 + } + + var links []cid.Cid + err = s.view(c, func(data []byte) error { + return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { + links = append(links, c) + }) + }) + if err != nil { + return 1 + } + + w = 1 + for _, c := range links { + // these are internal refs, so dags will be dags + if c.Prefix().Codec != cid.DagCBOR { + w++ + continue + } + + wc := s.getObjectWeight(c, weights, key) + weights[key(c)] = wc + + w += wc + } + + return w +} + +func (s *SplitStore) purgeBatch(cids []cid.Cid, deleteBatch func([]cid.Cid) error) error { + if len(cids) == 0 { + return nil + } + + // we don't delete one giant batch of millions of objects, but rather do smaller batches + // so that we don't stop the world for an extended period of time + done := false + for i := 0; !done; i++ { + start := i * batchSize + end := start + batchSize + if end >= len(cids) { + end = len(cids) + done = true + } + + err := deleteBatch(cids[start:end]) + if err != nil { + return xerrors.Errorf("error deleting batch: %w", err) + } + } + + return nil +} + +func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSet) error { + deadCids := make([]cid.Cid, 0, batchSize) + var purgeCnt, 
liveCnt int + defer func() { + log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt) + }() + + return s.purgeBatch(cids, + func(cids []cid.Cid) error { + deadCids := deadCids[:0] + + for { + if err := s.checkClosing(); err != nil { + return err + } + + s.txnLk.Lock() + if len(s.txnRefs) == 0 { + // keep the lock! + break + } + + // unlock and protect + s.txnLk.Unlock() + + err := s.protectTxnRefs(markSet) + if err != nil { + return xerrors.Errorf("error protecting transactional refs: %w", err) + } + } + + defer s.txnLk.Unlock() + + for _, c := range cids { + live, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking for liveness: %w", err) + } + + if live { + liveCnt++ + continue + } + + deadCids = append(deadCids, c) + } + + err := s.hot.DeleteMany(deadCids) + if err != nil { + return xerrors.Errorf("error purging cold objects: %w", err) + } + + s.debug.LogDelete(deadCids) + + purgeCnt += len(deadCids) + return nil + }) +} + +// I really don't like having this code, but we seem to have some occasional DAG references with +// missing constituents. During testing in mainnet *some* of these references *sometimes* appeared +// after a little bit. +// We need to figure out where they are coming from and eliminate that vector, but until then we +// have this gem[TM]. +// My best guess is that they are parent message receipts or yet to be computed state roots; magik +// thinks the cause may be block validation. 
+func (s *SplitStore) waitForMissingRefs(markSet MarkSet) { + s.txnLk.Lock() + missing := s.txnMissing + s.txnMissing = nil + s.txnLk.Unlock() + + if len(missing) == 0 { + return + } + + log.Info("waiting for missing references") + start := time.Now() + count := 0 + defer func() { + log.Infow("waiting for missing references done", "took", time.Since(start), "marked", count) + }() + + for i := 0; i < 3 && len(missing) > 0; i++ { + if err := s.checkClosing(); err != nil { + return + } + + wait := time.Duration(i) * time.Minute + log.Infof("retrying for %d missing references in %s (attempt: %d)", len(missing), wait, i+1) + if wait > 0 { + time.Sleep(wait) + } + + towalk := missing + walked := cid.NewSet() + missing = make(map[cid.Cid]struct{}) + + for c := range towalk { + err := s.walkObjectIncomplete(c, walked, + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + mark, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking markset for %s: %w", c, err) + } + + if mark { + return errStopWalk + } + + count++ + return markSet.Mark(c) + }, + func(c cid.Cid) error { + missing[c] = struct{}{} + return errStopWalk + }) + + if err != nil { + log.Warnf("error marking: %s", err) + } + } + } + + if len(missing) > 0 { + log.Warnf("still missing %d references", len(missing)) + for c := range missing { + log.Warnf("unresolved missing reference: %s", c) + } + } +} diff --git a/blockstore/splitstore/splitstore_expose.go b/blockstore/splitstore/splitstore_expose.go new file mode 100644 index 00000000000..1065e460c2d --- /dev/null +++ b/blockstore/splitstore/splitstore_expose.go @@ -0,0 +1,114 @@ +package splitstore + +import ( + "context" + "errors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + + bstore "github.com/filecoin-project/lotus/blockstore" +) + +type exposedSplitStore struct { + s *SplitStore +} + +var _ bstore.Blockstore = (*exposedSplitStore)(nil) + +func (s *SplitStore) Expose() 
bstore.Blockstore { + return &exposedSplitStore{s: s} +} + +func (es *exposedSplitStore) DeleteBlock(_ cid.Cid) error { + return errors.New("DeleteBlock: operation not supported") +} + +func (es *exposedSplitStore) DeleteMany(_ []cid.Cid) error { + return errors.New("DeleteMany: operation not supported") +} + +func (es *exposedSplitStore) Has(c cid.Cid) (bool, error) { + if isIdentiyCid(c) { + return true, nil + } + + has, err := es.s.hot.Has(c) + if has || err != nil { + return has, err + } + + return es.s.cold.Has(c) +} + +func (es *exposedSplitStore) Get(c cid.Cid) (blocks.Block, error) { + if isIdentiyCid(c) { + data, err := decodeIdentityCid(c) + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(data, c) + } + + blk, err := es.s.hot.Get(c) + switch err { + case bstore.ErrNotFound: + return es.s.cold.Get(c) + default: + return blk, err + } +} + +func (es *exposedSplitStore) GetSize(c cid.Cid) (int, error) { + if isIdentiyCid(c) { + data, err := decodeIdentityCid(c) + if err != nil { + return 0, err + } + + return len(data), nil + } + + size, err := es.s.hot.GetSize(c) + switch err { + case bstore.ErrNotFound: + return es.s.cold.GetSize(c) + default: + return size, err + } +} + +func (es *exposedSplitStore) Put(blk blocks.Block) error { + return es.s.Put(blk) +} + +func (es *exposedSplitStore) PutMany(blks []blocks.Block) error { + return es.s.PutMany(blks) +} + +func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return es.s.AllKeysChan(ctx) +} + +func (es *exposedSplitStore) HashOnRead(enabled bool) {} + +func (es *exposedSplitStore) View(c cid.Cid, f func([]byte) error) error { + if isIdentiyCid(c) { + data, err := decodeIdentityCid(c) + if err != nil { + return err + } + + return f(data) + } + + err := es.s.hot.View(c, f) + switch err { + case bstore.ErrNotFound: + return es.s.cold.View(c, f) + + default: + return err + } +} diff --git a/blockstore/splitstore/splitstore_gc.go 
b/blockstore/splitstore/splitstore_gc.go new file mode 100644 index 00000000000..46668167ccc --- /dev/null +++ b/blockstore/splitstore/splitstore_gc.go @@ -0,0 +1,30 @@ +package splitstore + +import ( + "fmt" + "time" + + bstore "github.com/filecoin-project/lotus/blockstore" +) + +func (s *SplitStore) gcHotstore() { + if err := s.gcBlockstoreOnline(s.hot); err != nil { + log.Warnf("error garbage collecting hostore: %s", err) + } +} + +func (s *SplitStore) gcBlockstoreOnline(b bstore.Blockstore) error { + if gc, ok := b.(bstore.BlockstoreGC); ok { + log.Info("garbage collecting blockstore") + startGC := time.Now() + + if err := gc.CollectGarbage(); err != nil { + return err + } + + log.Infow("garbage collecting hotstore done", "took", time.Since(startGC)) + return nil + } + + return fmt.Errorf("blockstore doesn't support online gc: %T", b) +} diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go new file mode 100644 index 00000000000..26e5c3cc0b6 --- /dev/null +++ b/blockstore/splitstore/splitstore_test.go @@ -0,0 +1,381 @@ +package splitstore + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/mock" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + datastore "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + logging "github.com/ipfs/go-log/v2" +) + +func init() { + CompactionThreshold = 5 + CompactionBoundary = 2 + logging.SetLogLevel("splitstore", "DEBUG") +} + +func testSplitStore(t *testing.T, cfg *Config) { + chain := &mockChain{t: t} + + // the myriads of stores + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + hot := newMockStore() + cold := newMockStore() + + // this is necessary to avoid the garbage mock puts in the 
blocks + garbage := blocks.NewBlock([]byte{1, 2, 3}) + err := cold.Put(garbage) + if err != nil { + t.Fatal(err) + } + + // genesis + genBlock := mock.MkBlock(nil, 0, 0) + genBlock.Messages = garbage.Cid() + genBlock.ParentMessageReceipts = garbage.Cid() + genBlock.ParentStateRoot = garbage.Cid() + genBlock.Timestamp = uint64(time.Now().Unix()) + + genTs := mock.TipSet(genBlock) + chain.push(genTs) + + // put the genesis block to cold store + blk, err := genBlock.ToStorageBlock() + if err != nil { + t.Fatal(err) + } + + err = cold.Put(blk) + if err != nil { + t.Fatal(err) + } + + // create a garbage block that is protected with a rgistered protector + protected := blocks.NewBlock([]byte("protected!")) + err = hot.Put(protected) + if err != nil { + t.Fatal(err) + } + + // and another one that is not protected + unprotected := blocks.NewBlock([]byte("unprotected!")) + err = hot.Put(unprotected) + if err != nil { + t.Fatal(err) + } + + // open the splitstore + ss, err := Open("", ds, hot, cold, cfg) + if err != nil { + t.Fatal(err) + } + defer ss.Close() //nolint + + // register our protector + ss.AddProtector(func(protect func(cid.Cid) error) error { + return protect(protected.Cid()) + }) + + err = ss.Start(chain) + if err != nil { + t.Fatal(err) + } + + // make some tipsets, but not enough to cause compaction + mkBlock := func(curTs *types.TipSet, i int, stateRoot blocks.Block) *types.TipSet { + blk := mock.MkBlock(curTs, uint64(i), uint64(i)) + + blk.Messages = garbage.Cid() + blk.ParentMessageReceipts = garbage.Cid() + blk.ParentStateRoot = stateRoot.Cid() + blk.Timestamp = uint64(time.Now().Unix()) + + sblk, err := blk.ToStorageBlock() + if err != nil { + t.Fatal(err) + } + err = ss.Put(stateRoot) + if err != nil { + t.Fatal(err) + } + err = ss.Put(sblk) + if err != nil { + t.Fatal(err) + } + ts := mock.TipSet(blk) + chain.push(ts) + + return ts + } + + waitForCompaction := func() { + for atomic.LoadInt32(&ss.compacting) == 1 { + time.Sleep(100 * 
time.Millisecond) + } + } + + curTs := genTs + for i := 1; i < 5; i++ { + stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7}) + curTs = mkBlock(curTs, i, stateRoot) + waitForCompaction() + } + + // count objects in the cold and hot stores + countBlocks := func(bs blockstore.Blockstore) int { + count := 0 + _ = bs.(blockstore.BlockstoreIterator).ForEachKey(func(_ cid.Cid) error { + count++ + return nil + }) + return count + } + + coldCnt := countBlocks(cold) + hotCnt := countBlocks(hot) + + if coldCnt != 2 { + t.Errorf("expected %d blocks, but got %d", 2, coldCnt) + } + + if hotCnt != 12 { + t.Errorf("expected %d blocks, but got %d", 12, hotCnt) + } + + // trigger a compaction + for i := 5; i < 10; i++ { + stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7}) + curTs = mkBlock(curTs, i, stateRoot) + waitForCompaction() + } + + coldCnt = countBlocks(cold) + hotCnt = countBlocks(hot) + + if coldCnt != 6 { + t.Errorf("expected %d cold blocks, but got %d", 6, coldCnt) + } + + if hotCnt != 18 { + t.Errorf("expected %d hot blocks, but got %d", 18, hotCnt) + } + + // ensure our protected block is still there + has, err := hot.Has(protected.Cid()) + if err != nil { + t.Fatal(err) + } + + if !has { + t.Fatal("protected block is missing from hotstore") + } + + // ensure our unprotected block is in the coldstore now + has, err = hot.Has(unprotected.Cid()) + if err != nil { + t.Fatal(err) + } + + if has { + t.Fatal("unprotected block is still in hotstore") + } + + has, err = cold.Has(unprotected.Cid()) + if err != nil { + t.Fatal(err) + } + + if !has { + t.Fatal("unprotected block is missing from coldstore") + } + + // Make sure we can revert without panicking. 
+ chain.revert(2) +} + +func TestSplitStoreCompaction(t *testing.T) { + testSplitStore(t, &Config{MarkSetType: "map"}) +} + +type mockChain struct { + t testing.TB + + sync.Mutex + genesis *types.BlockHeader + tipsets []*types.TipSet + listener func(revert []*types.TipSet, apply []*types.TipSet) error +} + +func (c *mockChain) push(ts *types.TipSet) { + c.Lock() + c.tipsets = append(c.tipsets, ts) + if c.genesis == nil { + c.genesis = ts.Blocks()[0] + } + c.Unlock() + + if c.listener != nil { + err := c.listener(nil, []*types.TipSet{ts}) + if err != nil { + c.t.Errorf("mockchain: error dispatching listener: %s", err) + } + } +} + +func (c *mockChain) revert(count int) { + c.Lock() + revert := make([]*types.TipSet, count) + if count > len(c.tipsets) { + c.Unlock() + c.t.Fatalf("not enough tipsets to revert") + } + copy(revert, c.tipsets[len(c.tipsets)-count:]) + c.tipsets = c.tipsets[:len(c.tipsets)-count] + c.Unlock() + + if c.listener != nil { + err := c.listener(revert, nil) + if err != nil { + c.t.Errorf("mockchain: error dispatching listener: %s", err) + } + } +} + +func (c *mockChain) GetTipsetByHeight(_ context.Context, epoch abi.ChainEpoch, _ *types.TipSet, _ bool) (*types.TipSet, error) { + c.Lock() + defer c.Unlock() + + iEpoch := int(epoch) + if iEpoch > len(c.tipsets) { + return nil, fmt.Errorf("bad epoch %d", epoch) + } + + return c.tipsets[iEpoch], nil +} + +func (c *mockChain) GetHeaviestTipSet() *types.TipSet { + c.Lock() + defer c.Unlock() + + return c.tipsets[len(c.tipsets)-1] +} + +func (c *mockChain) SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error) { + c.listener = change +} + +type mockStore struct { + mx sync.Mutex + set map[cid.Cid]blocks.Block +} + +func newMockStore() *mockStore { + return &mockStore{set: make(map[cid.Cid]blocks.Block)} +} + +func (b *mockStore) Has(cid cid.Cid) (bool, error) { + b.mx.Lock() + defer b.mx.Unlock() + _, ok := b.set[cid] + return ok, nil +} + +func (b *mockStore) 
HashOnRead(hor bool) {} + +func (b *mockStore) Get(cid cid.Cid) (blocks.Block, error) { + b.mx.Lock() + defer b.mx.Unlock() + + blk, ok := b.set[cid] + if !ok { + return nil, blockstore.ErrNotFound + } + return blk, nil +} + +func (b *mockStore) GetSize(cid cid.Cid) (int, error) { + blk, err := b.Get(cid) + if err != nil { + return 0, err + } + + return len(blk.RawData()), nil +} + +func (b *mockStore) View(cid cid.Cid, f func([]byte) error) error { + blk, err := b.Get(cid) + if err != nil { + return err + } + return f(blk.RawData()) +} + +func (b *mockStore) Put(blk blocks.Block) error { + b.mx.Lock() + defer b.mx.Unlock() + + b.set[blk.Cid()] = blk + return nil +} + +func (b *mockStore) PutMany(blks []blocks.Block) error { + b.mx.Lock() + defer b.mx.Unlock() + + for _, blk := range blks { + b.set[blk.Cid()] = blk + } + return nil +} + +func (b *mockStore) DeleteBlock(cid cid.Cid) error { + b.mx.Lock() + defer b.mx.Unlock() + + delete(b.set, cid) + return nil +} + +func (b *mockStore) DeleteMany(cids []cid.Cid) error { + b.mx.Lock() + defer b.mx.Unlock() + + for _, c := range cids { + delete(b.set, c) + } + return nil +} + +func (b *mockStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return nil, errors.New("not implemented") +} + +func (b *mockStore) ForEachKey(f func(cid.Cid) error) error { + b.mx.Lock() + defer b.mx.Unlock() + + for c := range b.set { + err := f(c) + if err != nil { + return err + } + } + return nil +} + +func (b *mockStore) Close() error { + return nil +} diff --git a/blockstore/splitstore/splitstore_util.go b/blockstore/splitstore/splitstore_util.go new file mode 100644 index 00000000000..aef845832c0 --- /dev/null +++ b/blockstore/splitstore/splitstore_util.go @@ -0,0 +1,67 @@ +package splitstore + +import ( + "encoding/binary" + + "golang.org/x/xerrors" + + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + + "github.com/filecoin-project/go-state-types/abi" +) + +func epochToBytes(epoch 
abi.ChainEpoch) []byte { + return uint64ToBytes(uint64(epoch)) +} + +func bytesToEpoch(buf []byte) abi.ChainEpoch { + return abi.ChainEpoch(bytesToUint64(buf)) +} + +func int64ToBytes(i int64) []byte { + return uint64ToBytes(uint64(i)) +} + +func bytesToInt64(buf []byte) int64 { + return int64(bytesToUint64(buf)) +} + +func uint64ToBytes(i uint64) []byte { + buf := make([]byte, 16) + n := binary.PutUvarint(buf, i) + return buf[:n] +} + +func bytesToUint64(buf []byte) uint64 { + i, _ := binary.Uvarint(buf) + return i +} + +func isUnitaryObject(c cid.Cid) bool { + pre := c.Prefix() + switch pre.Codec { + case cid.FilCommitmentSealed, cid.FilCommitmentUnsealed: + return true + default: + return pre.MhType == mh.IDENTITY + } +} + +func isIdentiyCid(c cid.Cid) bool { + return c.Prefix().MhType == mh.IDENTITY +} + +func decodeIdentityCid(c cid.Cid) ([]byte, error) { + dmh, err := mh.Decode(c.Hash()) + if err != nil { + return nil, xerrors.Errorf("error decoding identity cid %s: %w", c, err) + } + + // sanity check + if dmh.Code != mh.IDENTITY { + return nil, xerrors.Errorf("error decoding identity cid %s: hash type is not identity", c) + } + + return dmh.Digest, nil +} diff --git a/blockstore/splitstore/splitstore_warmup.go b/blockstore/splitstore/splitstore_warmup.go new file mode 100644 index 00000000000..55fa94c6ffb --- /dev/null +++ b/blockstore/splitstore/splitstore_warmup.go @@ -0,0 +1,126 @@ +package splitstore + +import ( + "sync/atomic" + "time" + + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/types" +) + +// warmup acuiqres the compaction lock and spawns a goroutine to warm up the hotstore; +// this is necessary when we sync from a snapshot or when we enable the splitstore +// on top of an existing blockstore (which becomes the coldstore). 
+func (s *SplitStore) warmup(curTs *types.TipSet) error { + if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) { + return xerrors.Errorf("error locking compaction") + } + + go func() { + defer atomic.StoreInt32(&s.compacting, 0) + + log.Info("warming up hotstore") + start := time.Now() + + err := s.doWarmup(curTs) + if err != nil { + log.Errorf("error warming up hotstore: %s", err) + return + } + + log.Infow("warm up done", "took", time.Since(start)) + }() + + return nil +} + +// the actual warmup procedure; it walks the chain loading all state roots at the boundary +// and headers all the way up to genesis. +// objects are written in batches so as to minimize overhead. +func (s *SplitStore) doWarmup(curTs *types.TipSet) error { + epoch := curTs.Height() + batchHot := make([]blocks.Block, 0, batchSize) + count := int64(0) + xcount := int64(0) + missing := int64(0) + err := s.walkChain(curTs, epoch, epoch+1, // we don't load messages in warmup + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + count++ + + has, err := s.hot.Has(c) + if err != nil { + return err + } + + if has { + return nil + } + + blk, err := s.cold.Get(c) + if err != nil { + if err == bstore.ErrNotFound { + missing++ + return nil + } + return err + } + + xcount++ + + batchHot = append(batchHot, blk) + if len(batchHot) == batchSize { + err = s.hot.PutMany(batchHot) + if err != nil { + return err + } + batchHot = batchHot[:0] + } + + return nil + }) + + if err != nil { + return err + } + + if len(batchHot) > 0 { + err = s.hot.PutMany(batchHot) + if err != nil { + return err + } + } + + log.Infow("warmup stats", "visited", count, "warm", xcount, "missing", missing) + + s.markSetSize = count + count>>2 // overestimate a bit + err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + if err != nil { + log.Warnf("error saving mark set size: %s", err) + } + + // save the warmup epoch + err = s.ds.Put(warmupEpochKey, epochToBytes(epoch)) + if err != nil { + return 
xerrors.Errorf("error saving warm up epoch: %w", err) + } + s.mx.Lock() + s.warmupEpoch = epoch + s.mx.Unlock() + + // also save the compactionIndex, as this is used as an indicator of warmup for upgraded nodes + err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex)) + if err != nil { + return xerrors.Errorf("error saving compaction index: %w", err) + } + + return nil +} diff --git a/blockstore/sync.go b/blockstore/sync.go new file mode 100644 index 00000000000..848ccd19d2b --- /dev/null +++ b/blockstore/sync.go @@ -0,0 +1,81 @@ +package blockstore + +import ( + "context" + "sync" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +// NewMemorySync returns a thread-safe in-memory blockstore. +func NewMemorySync() *SyncBlockstore { + return &SyncBlockstore{bs: make(MemBlockstore)} +} + +// SyncBlockstore is a terminal blockstore that is a synchronized version +// of MemBlockstore. +type SyncBlockstore struct { + mu sync.RWMutex + bs MemBlockstore // specifically use a memStore to save indirection overhead. 
+} + +func (m *SyncBlockstore) DeleteBlock(k cid.Cid) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.DeleteBlock(k) +} + +func (m *SyncBlockstore) DeleteMany(ks []cid.Cid) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.DeleteMany(ks) +} + +func (m *SyncBlockstore) Has(k cid.Cid) (bool, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return m.bs.Has(k) +} + +func (m *SyncBlockstore) View(k cid.Cid, callback func([]byte) error) error { + m.mu.RLock() + defer m.mu.RUnlock() + + return m.bs.View(k, callback) +} + +func (m *SyncBlockstore) Get(k cid.Cid) (blocks.Block, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return m.bs.Get(k) +} + +func (m *SyncBlockstore) GetSize(k cid.Cid) (int, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return m.bs.GetSize(k) +} + +func (m *SyncBlockstore) Put(b blocks.Block) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.Put(b) +} + +func (m *SyncBlockstore) PutMany(bs []blocks.Block) error { + m.mu.Lock() + defer m.mu.Unlock() + return m.bs.PutMany(bs) +} + +func (m *SyncBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + m.mu.RLock() + defer m.mu.RUnlock() + // this blockstore implementation doesn't do any async work. + return m.bs.AllKeysChan(ctx) +} + +func (m *SyncBlockstore) HashOnRead(enabled bool) { + // noop +} diff --git a/lib/timedbs/timedbs.go b/blockstore/timed.go similarity index 51% rename from lib/timedbs/timedbs.go rename to blockstore/timed.go index c5c1a8fe003..80e6c8a080f 100644 --- a/lib/timedbs/timedbs.go +++ b/blockstore/timed.go @@ -1,4 +1,4 @@ -package timedbs +package blockstore import ( "context" @@ -10,37 +10,37 @@ import ( "github.com/ipfs/go-cid" "github.com/raulk/clock" "go.uber.org/multierr" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/lib/blockstore" ) -// TimedCacheBS is a blockstore that keeps blocks for at least the specified -// caching interval before discarding them. 
Garbage collection must be started -// and stopped by calling Start/Stop. +// TimedCacheBlockstore is a blockstore that keeps blocks for at least the +// specified caching interval before discarding them. Garbage collection must +// be started and stopped by calling Start/Stop. // // Under the covers, it's implemented with an active and an inactive blockstore // that are rotated every cache time interval. This means all blocks will be // stored at most 2x the cache interval. -type TimedCacheBS struct { +// +// Create a new instance by calling the NewTimedCacheBlockstore constructor. +type TimedCacheBlockstore struct { mu sync.RWMutex - active, inactive blockstore.MemStore + active, inactive MemBlockstore clock clock.Clock interval time.Duration closeCh chan struct{} doneRotatingCh chan struct{} } -func NewTimedCacheBS(cacheTime time.Duration) *TimedCacheBS { - return &TimedCacheBS{ - active: blockstore.NewTemporary(), - inactive: blockstore.NewTemporary(), - interval: cacheTime, - clock: build.Clock, +func NewTimedCacheBlockstore(interval time.Duration) *TimedCacheBlockstore { + b := &TimedCacheBlockstore{ + active: NewMemory(), + inactive: NewMemory(), + interval: interval, + clock: clock.New(), } + return b } -func (t *TimedCacheBS) Start(ctx context.Context) error { +func (t *TimedCacheBlockstore) Start(_ context.Context) error { t.mu.Lock() defer t.mu.Unlock() if t.closeCh != nil { @@ -65,11 +65,11 @@ func (t *TimedCacheBS) Start(ctx context.Context) error { return nil } -func (t *TimedCacheBS) Stop(ctx context.Context) error { +func (t *TimedCacheBlockstore) Stop(_ context.Context) error { t.mu.Lock() defer t.mu.Unlock() if t.closeCh == nil { - return fmt.Errorf("not started started") + return fmt.Errorf("not started") } select { case <-t.closeCh: @@ -80,15 +80,15 @@ func (t *TimedCacheBS) Stop(ctx context.Context) error { return nil } -func (t *TimedCacheBS) rotate() { - newBs := blockstore.NewTemporary() +func (t *TimedCacheBlockstore) rotate() { + newBs := 
NewMemory() t.mu.Lock() t.inactive, t.active = t.active, newBs t.mu.Unlock() } -func (t *TimedCacheBS) Put(b blocks.Block) error { +func (t *TimedCacheBlockstore) Put(b blocks.Block) error { // Don't check the inactive set here. We want to keep this block for at // least one interval. t.mu.Lock() @@ -96,33 +96,50 @@ func (t *TimedCacheBS) Put(b blocks.Block) error { return t.active.Put(b) } -func (t *TimedCacheBS) PutMany(bs []blocks.Block) error { +func (t *TimedCacheBlockstore) PutMany(bs []blocks.Block) error { t.mu.Lock() defer t.mu.Unlock() return t.active.PutMany(bs) } -func (t *TimedCacheBS) Get(k cid.Cid) (blocks.Block, error) { +func (t *TimedCacheBlockstore) View(k cid.Cid, callback func([]byte) error) error { + // The underlying blockstore is always a "mem" blockstore so there's no difference, + // from a performance perspective, between view & get. So we call Get to avoid + // calling an arbitrary callback while holding a lock. + t.mu.RLock() + block, err := t.active.Get(k) + if err == ErrNotFound { + block, err = t.inactive.Get(k) + } + t.mu.RUnlock() + + if err != nil { + return err + } + return callback(block.RawData()) +} + +func (t *TimedCacheBlockstore) Get(k cid.Cid) (blocks.Block, error) { t.mu.RLock() defer t.mu.RUnlock() b, err := t.active.Get(k) - if err == blockstore.ErrNotFound { + if err == ErrNotFound { b, err = t.inactive.Get(k) } return b, err } -func (t *TimedCacheBS) GetSize(k cid.Cid) (int, error) { +func (t *TimedCacheBlockstore) GetSize(k cid.Cid) (int, error) { t.mu.RLock() defer t.mu.RUnlock() size, err := t.active.GetSize(k) - if err == blockstore.ErrNotFound { + if err == ErrNotFound { size, err = t.inactive.GetSize(k) } return size, err } -func (t *TimedCacheBS) Has(k cid.Cid) (bool, error) { +func (t *TimedCacheBlockstore) Has(k cid.Cid) (bool, error) { t.mu.RLock() defer t.mu.RUnlock() if has, err := t.active.Has(k); err != nil { @@ -133,17 +150,23 @@ func (t *TimedCacheBS) Has(k cid.Cid) (bool, error) { return 
t.inactive.Has(k) } -func (t *TimedCacheBS) HashOnRead(_ bool) { +func (t *TimedCacheBlockstore) HashOnRead(_ bool) { // no-op } -func (t *TimedCacheBS) DeleteBlock(k cid.Cid) error { +func (t *TimedCacheBlockstore) DeleteBlock(k cid.Cid) error { t.mu.Lock() defer t.mu.Unlock() return multierr.Combine(t.active.DeleteBlock(k), t.inactive.DeleteBlock(k)) } -func (t *TimedCacheBS) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { +func (t *TimedCacheBlockstore) DeleteMany(ks []cid.Cid) error { + t.mu.Lock() + defer t.mu.Unlock() + return multierr.Combine(t.active.DeleteMany(ks), t.inactive.DeleteMany(ks)) +} + +func (t *TimedCacheBlockstore) AllKeysChan(_ context.Context) (<-chan cid.Cid, error) { t.mu.RLock() defer t.mu.RUnlock() diff --git a/lib/timedbs/timedbs_test.go b/blockstore/timed_test.go similarity index 93% rename from lib/timedbs/timedbs_test.go rename to blockstore/timed_test.go index e01215bbdb0..d5fefff9461 100644 --- a/lib/timedbs/timedbs_test.go +++ b/blockstore/timed_test.go @@ -1,4 +1,4 @@ -package timedbs +package blockstore import ( "context" @@ -12,8 +12,8 @@ import ( "github.com/ipfs/go-cid" ) -func TestTimedBSSimple(t *testing.T) { - tc := NewTimedCacheBS(10 * time.Millisecond) +func TestTimedCacheBlockstoreSimple(t *testing.T) { + tc := NewTimedCacheBlockstore(10 * time.Millisecond) mClock := clock.NewMock() mClock.Set(time.Now()) tc.clock = mClock diff --git a/blockstore/union.go b/blockstore/union.go new file mode 100644 index 00000000000..a99ba259133 --- /dev/null +++ b/blockstore/union.go @@ -0,0 +1,119 @@ +package blockstore + +import ( + "context" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +type unionBlockstore []Blockstore + +// Union returns an unioned blockstore. +// +// * Reads return from the first blockstore that has the value, querying in the +// supplied order. +// * Writes (puts and deltes) are broadcast to all stores. 
+// +func Union(stores ...Blockstore) Blockstore { + return unionBlockstore(stores) +} + +func (m unionBlockstore) Has(cid cid.Cid) (has bool, err error) { + for _, bs := range m { + if has, err = bs.Has(cid); has || err != nil { + break + } + } + return has, err +} + +func (m unionBlockstore) Get(cid cid.Cid) (blk blocks.Block, err error) { + for _, bs := range m { + if blk, err = bs.Get(cid); err == nil || err != ErrNotFound { + break + } + } + return blk, err +} + +func (m unionBlockstore) View(cid cid.Cid, callback func([]byte) error) (err error) { + for _, bs := range m { + if err = bs.View(cid, callback); err == nil || err != ErrNotFound { + break + } + } + return err +} + +func (m unionBlockstore) GetSize(cid cid.Cid) (size int, err error) { + for _, bs := range m { + if size, err = bs.GetSize(cid); err == nil || err != ErrNotFound { + break + } + } + return size, err +} + +func (m unionBlockstore) Put(block blocks.Block) (err error) { + for _, bs := range m { + if err = bs.Put(block); err != nil { + break + } + } + return err +} + +func (m unionBlockstore) PutMany(blks []blocks.Block) (err error) { + for _, bs := range m { + if err = bs.PutMany(blks); err != nil { + break + } + } + return err +} + +func (m unionBlockstore) DeleteBlock(cid cid.Cid) (err error) { + for _, bs := range m { + if err = bs.DeleteBlock(cid); err != nil { + break + } + } + return err +} + +func (m unionBlockstore) DeleteMany(cids []cid.Cid) (err error) { + for _, bs := range m { + if err = bs.DeleteMany(cids); err != nil { + break + } + } + return err +} + +func (m unionBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + // this does not deduplicate; this interface needs to be revisited. 
+ outCh := make(chan cid.Cid) + + go func() { + defer close(outCh) + + for _, bs := range m { + ch, err := bs.AllKeysChan(ctx) + if err != nil { + return + } + for cid := range ch { + outCh <- cid + } + } + }() + + return outCh, nil +} + +func (m unionBlockstore) HashOnRead(enabled bool) { + for _, bs := range m { + bs.HashOnRead(enabled) + } +} diff --git a/blockstore/union_test.go b/blockstore/union_test.go new file mode 100644 index 00000000000..b6202689227 --- /dev/null +++ b/blockstore/union_test.go @@ -0,0 +1,102 @@ +package blockstore + +import ( + "context" + "testing" + + blocks "github.com/ipfs/go-block-format" + "github.com/stretchr/testify/require" +) + +var ( + b0 = blocks.NewBlock([]byte("abc")) + b1 = blocks.NewBlock([]byte("foo")) + b2 = blocks.NewBlock([]byte("bar")) +) + +func TestUnionBlockstore_Get(t *testing.T) { + m1 := NewMemory() + m2 := NewMemory() + + _ = m1.Put(b1) + _ = m2.Put(b2) + + u := Union(m1, m2) + + v1, err := u.Get(b1.Cid()) + require.NoError(t, err) + require.Equal(t, b1.RawData(), v1.RawData()) + + v2, err := u.Get(b2.Cid()) + require.NoError(t, err) + require.Equal(t, b2.RawData(), v2.RawData()) +} + +func TestUnionBlockstore_Put_PutMany_Delete_AllKeysChan(t *testing.T) { + m1 := NewMemory() + m2 := NewMemory() + + u := Union(m1, m2) + + err := u.Put(b0) + require.NoError(t, err) + + var has bool + + // write was broadcasted to all stores. + has, _ = m1.Has(b0.Cid()) + require.True(t, has) + + has, _ = m2.Has(b0.Cid()) + require.True(t, has) + + has, _ = u.Has(b0.Cid()) + require.True(t, has) + + // put many. + err = u.PutMany([]blocks.Block{b1, b2}) + require.NoError(t, err) + + // write was broadcasted to all stores. + has, _ = m1.Has(b1.Cid()) + require.True(t, has) + + has, _ = m1.Has(b2.Cid()) + require.True(t, has) + + has, _ = m2.Has(b1.Cid()) + require.True(t, has) + + has, _ = m2.Has(b2.Cid()) + require.True(t, has) + + // also in the union store. 
+ has, _ = u.Has(b1.Cid()) + require.True(t, has) + + has, _ = u.Has(b2.Cid()) + require.True(t, has) + + // deleted from all stores. + err = u.DeleteBlock(b1.Cid()) + require.NoError(t, err) + + has, _ = u.Has(b1.Cid()) + require.False(t, has) + + has, _ = m1.Has(b1.Cid()) + require.False(t, has) + + has, _ = m2.Has(b1.Cid()) + require.False(t, has) + + // check that AllKeysChan returns b0 and b2, twice (once per backing store) + ch, err := u.AllKeysChan(context.Background()) + require.NoError(t, err) + + var i int + for range ch { + i++ + } + require.Equal(t, 4, i) +} diff --git a/build/bootstrap.go b/build/bootstrap.go index 80c1529ff6c..98fa2e2f9cf 100644 --- a/build/bootstrap.go +++ b/build/bootstrap.go @@ -2,39 +2,33 @@ package build import ( "context" - "os" + "embed" + "path" "strings" "github.com/filecoin-project/lotus/lib/addrutil" - "golang.org/x/xerrors" - rice "github.com/GeertJohan/go.rice" "github.com/libp2p/go-libp2p-core/peer" ) +//go:embed bootstrap +var bootstrapfs embed.FS + func BuiltinBootstrap() ([]peer.AddrInfo, error) { if DisableBuiltinAssets { return nil, nil } - - var out []peer.AddrInfo - - b := rice.MustFindBox("bootstrap") - err := b.Walk("", func(path string, info os.FileInfo, err error) error { + if BootstrappersFile != "" { + spi, err := bootstrapfs.ReadFile(path.Join("bootstrap", BootstrappersFile)) if err != nil { - return xerrors.Errorf("failed to walk box: %w", err) - } - - if !strings.HasSuffix(path, ".pi") { - return nil + return nil, err } - spi := b.MustString(path) - if spi == "" { - return nil + if len(spi) == 0 { + return nil, nil } - pi, err := addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(spi), "\n")) - out = append(out, pi...) 
- return err - }) - return out, err + + return addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(string(spi)), "\n")) + } + + return nil, nil } diff --git a/build/bootstrap/butterflynet.pi b/build/bootstrap/butterflynet.pi new file mode 100644 index 00000000000..cc4ce4f1d22 --- /dev/null +++ b/build/bootstrap/butterflynet.pi @@ -0,0 +1,2 @@ +/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBbZd7Su9XfLUQ12RynGQ3ZmGY1nGqFntmqop9pLNJE6g +/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWGKRzEY4tJFTmAmrYUpa1CVVohmV9YjJbC9v5XWY2gUji diff --git a/build/bootstrap/calibnet.pi b/build/bootstrap/calibnet.pi new file mode 100644 index 00000000000..20473eaaa61 --- /dev/null +++ b/build/bootstrap/calibnet.pi @@ -0,0 +1,4 @@ +/dns4/bootstrap-0.calibration.fildev.network/tcp/1347/p2p/12D3KooWJkikQQkxS58spo76BYzFt4fotaT5NpV2zngvrqm4u5ow +/dns4/bootstrap-1.calibration.fildev.network/tcp/1347/p2p/12D3KooWLce5FDHR4EX4CrYavphA5xS3uDsX6aoowXh5tzDUxJav +/dns4/bootstrap-2.calibration.fildev.network/tcp/1347/p2p/12D3KooWA9hFfQG9GjP6bHeuQQbMD3FDtZLdW1NayxKXUT26PQZu +/dns4/bootstrap-3.calibration.fildev.network/tcp/1347/p2p/12D3KooWMHDi3LVTFG8Szqogt7RkNXvonbQYqSazxBx41A5aeuVz diff --git a/build/bootstrap/interopnet.pi b/build/bootstrap/interopnet.pi new file mode 100644 index 00000000000..923653d94e3 --- /dev/null +++ b/build/bootstrap/interopnet.pi @@ -0,0 +1,2 @@ +/dns4/bootstrap-0.interop.fildev.network/tcp/1347/p2p/12D3KooWLGPq9JL1xwL6gHok7HSNxtK1Q5kyfg4Hk69ifRPghn4i +/dns4/bootstrap-1.interop.fildev.network/tcp/1347/p2p/12D3KooWFYS1f31zafv8mqqYu8U3hEqYvaZ6avWzYU3BmZdpyH3h diff --git a/build/bootstrap/bootstrappers.pi b/build/bootstrap/mainnet.pi similarity index 93% rename from build/bootstrap/bootstrappers.pi rename to build/bootstrap/mainnet.pi index 886ac8e9991..370e954bd4a 100644 --- a/build/bootstrap/bootstrappers.pi +++ b/build/bootstrap/mainnet.pi @@ -7,7 +7,7 @@ 
/dns4/bootstrap-6.mainnet.filops.net/tcp/1347/p2p/12D3KooWP5MwCiqdMETF9ub1P3MbCvQCcfconnYHbWg6sUJcDRQQ /dns4/bootstrap-7.mainnet.filops.net/tcp/1347/p2p/12D3KooWRs3aY1p3juFjPy8gPN95PEQChm2QKGUCAdcDCC4EBMKf /dns4/bootstrap-8.mainnet.filops.net/tcp/1347/p2p/12D3KooWScFR7385LTyR4zU1bYdzSiiAb5rnNABfVahPvVSzyTkR -/dns4/lotus-bootstrap.forceup.cn/tcp/41778/p2p/12D3KooWFQsv3nRMUevZNWWsY1Wu6NUzUbawnWU5NcRhgKuJA37C +/dns4/lotus-bootstrap.ipfsforce.com/tcp/41778/p2p/12D3KooWGhufNmZHF3sv48aQeS13ng5XVJZ9E6qy2Ms4VzqeUsHk /dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz /dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u /dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt diff --git a/build/bootstrap/nerpanet.pi b/build/bootstrap/nerpanet.pi new file mode 100644 index 00000000000..83ad1d184b8 --- /dev/null +++ b/build/bootstrap/nerpanet.pi @@ -0,0 +1,4 @@ +/dns4/bootstrap-2.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWQcL6ReWmR6ASWx4iT7EiAmxKDQpvgq1MKNTQZp5NPnWW +/dns4/bootstrap-0.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWGyJCwCm7EfupM15CFPXM4c7zRVHwwwjcuy9umaGeztMX +/dns4/bootstrap-3.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWNK9RmfksKXSCQj7ZwAM7L6roqbN4kwJteihq7yPvSgPs +/dns4/bootstrap-1.nerpa.interplanetary.dev/tcp/1347/p2p/12D3KooWCWSaH6iUyXYspYxELjDfzToBsyVGVz3QvC7ysXv7wESo diff --git a/build/genesis.go b/build/genesis.go index dc4ded27365..6d94b38cf68 100644 --- a/build/genesis.go +++ b/build/genesis.go @@ -1,23 +1,23 @@ package build import ( - rice "github.com/GeertJohan/go.rice" + "embed" + "path" + logging "github.com/ipfs/go-log/v2" ) // moved from now-defunct build/paramfetch.go var log = logging.Logger("build") +//go:embed genesis +var genesisfs embed.FS + func MaybeGenesis() []byte { - builtinGen, err := rice.FindBox("genesis") + genBytes, err := genesisfs.ReadFile(path.Join("genesis", GenesisFile)) if err != nil { log.Warnf("loading 
built-in genesis: %s", err) return nil } - genBytes, err := builtinGen.Bytes("devnet.car") - if err != nil { - log.Warnf("loading built-in genesis: %s", err) - } - return genBytes } diff --git a/build/genesis/butterflynet.car b/build/genesis/butterflynet.car new file mode 100644 index 00000000000..7c2d19251f7 Binary files /dev/null and b/build/genesis/butterflynet.car differ diff --git a/build/genesis/calibnet.car b/build/genesis/calibnet.car new file mode 100644 index 00000000000..cbade953f86 Binary files /dev/null and b/build/genesis/calibnet.car differ diff --git a/build/genesis/interopnet.car b/build/genesis/interopnet.car new file mode 100644 index 00000000000..2c7c2a49873 Binary files /dev/null and b/build/genesis/interopnet.car differ diff --git a/build/genesis/devnet.car b/build/genesis/mainnet.car similarity index 100% rename from build/genesis/devnet.car rename to build/genesis/mainnet.car diff --git a/build/genesis/nerpanet.car b/build/genesis/nerpanet.car new file mode 100644 index 00000000000..c32e0171bce Binary files /dev/null and b/build/genesis/nerpanet.car differ diff --git a/build/isnearupgrade.go b/build/isnearupgrade.go new file mode 100644 index 00000000000..4273f0e9e3f --- /dev/null +++ b/build/isnearupgrade.go @@ -0,0 +1,9 @@ +package build + +import ( + "github.com/filecoin-project/go-state-types/abi" +) + +func IsNearUpgrade(epoch, upgradeEpoch abi.ChainEpoch) bool { + return epoch > upgradeEpoch-Finality && epoch < upgradeEpoch+Finality +} diff --git a/build/openrpc.go b/build/openrpc.go new file mode 100644 index 00000000000..ac951c17287 --- /dev/null +++ b/build/openrpc.go @@ -0,0 +1,54 @@ +package build + +import ( + "bytes" + "compress/gzip" + "embed" + "encoding/json" + + apitypes "github.com/filecoin-project/lotus/api/types" +) + +//go:embed openrpc +var openrpcfs embed.FS + +func mustReadGzippedOpenRPCDocument(data []byte) apitypes.OpenRPCDocument { + zr, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + 
log.Fatal(err) + } + m := apitypes.OpenRPCDocument{} + err = json.NewDecoder(zr).Decode(&m) + if err != nil { + log.Fatal(err) + } + err = zr.Close() + if err != nil { + log.Fatal(err) + } + return m +} + +func OpenRPCDiscoverJSON_Full() apitypes.OpenRPCDocument { + data, err := openrpcfs.ReadFile("openrpc/full.json.gz") + if err != nil { + panic(err) + } + return mustReadGzippedOpenRPCDocument(data) +} + +func OpenRPCDiscoverJSON_Miner() apitypes.OpenRPCDocument { + data, err := openrpcfs.ReadFile("openrpc/miner.json.gz") + if err != nil { + panic(err) + } + return mustReadGzippedOpenRPCDocument(data) +} + +func OpenRPCDiscoverJSON_Worker() apitypes.OpenRPCDocument { + data, err := openrpcfs.ReadFile("openrpc/worker.json.gz") + if err != nil { + panic(err) + } + return mustReadGzippedOpenRPCDocument(data) +} diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz new file mode 100644 index 00000000000..56feb6ee5e4 Binary files /dev/null and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz new file mode 100644 index 00000000000..aa8ba625d7a Binary files /dev/null and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz new file mode 100644 index 00000000000..593a45b6acd Binary files /dev/null and b/build/openrpc/worker.json.gz differ diff --git a/build/openrpc_test.go b/build/openrpc_test.go new file mode 100644 index 00000000000..20c77533193 --- /dev/null +++ b/build/openrpc_test.go @@ -0,0 +1,23 @@ +package build + +import ( + "testing" + + apitypes "github.com/filecoin-project/lotus/api/types" +) + +func TestOpenRPCDiscoverJSON_Version(t *testing.T) { + // openRPCDocVersion is the current OpenRPC version of the API docs. 
+ openRPCDocVersion := "1.2.6" + + for i, docFn := range []func() apitypes.OpenRPCDocument{ + OpenRPCDiscoverJSON_Full, + OpenRPCDiscoverJSON_Miner, + OpenRPCDiscoverJSON_Worker, + } { + doc := docFn() + if got, ok := doc["openrpc"]; !ok || got != openRPCDocVersion { + t.Fatalf("case: %d, want: %s, got: %v, doc: %v", i, openRPCDocVersion, got, doc) + } + } +} diff --git a/build/parameters.go b/build/parameters.go index 7d34a783122..9e60f12a6a3 100644 --- a/build/parameters.go +++ b/build/parameters.go @@ -1,7 +1,19 @@ package build -import rice "github.com/GeertJohan/go.rice" +import ( + _ "embed" +) + +//go:embed proof-params/parameters.json +var params []byte + +//go:embed proof-params/srs-inner-product.json +var srs []byte func ParametersJSON() []byte { - return rice.MustFindBox("proof-params").MustBytes("parameters.json") + return params +} + +func SrsJSON() []byte { + return srs } diff --git a/build/params_2k.go b/build/params_2k.go index 5a0e8fd612c..387d2da0bbd 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -3,41 +3,85 @@ package build import ( - "math" "os" + "strconv" - "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/actors/policy" ) -const UpgradeBreezeHeight = -1 +const BootstrappersFile = "" +const GenesisFile = "" + +var UpgradeBreezeHeight = abi.ChainEpoch(-1) + const BreezeGasTampingDuration = 0 -const UpgradeSmokeHeight = -1 -const UpgradeIgnitionHeight = -2 -const UpgradeRefuelHeight = -3 -const UpgradeTapeHeight = -4 +var UpgradeSmokeHeight = abi.ChainEpoch(-1) +var UpgradeIgnitionHeight = abi.ChainEpoch(-2) +var UpgradeRefuelHeight = abi.ChainEpoch(-3) +var UpgradeTapeHeight = abi.ChainEpoch(-4) + +var UpgradeAssemblyHeight = abi.ChainEpoch(-5) +var UpgradeLiftoffHeight = abi.ChainEpoch(-6) + +var UpgradeKumquatHeight = abi.ChainEpoch(-7) +var UpgradeCalicoHeight = abi.ChainEpoch(-8) +var UpgradePersianHeight = 
abi.ChainEpoch(-9) +var UpgradeOrangeHeight = abi.ChainEpoch(-10) +var UpgradeClausHeight = abi.ChainEpoch(-11) + +var UpgradeTrustHeight = abi.ChainEpoch(-12) -var UpgradeActorsV2Height = abi.ChainEpoch(10) -var UpgradeLiftoffHeight = abi.ChainEpoch(-5) +var UpgradeNorwegianHeight = abi.ChainEpoch(-13) -const UpgradeKumquatHeight = -6 +var UpgradeTurboHeight = abi.ChainEpoch(-14) + +var UpgradeHyperdriveHeight = abi.ChainEpoch(-15) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, } func init() { - policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1, abi.RegisteredSealProof_StackedDrg8MiBV1) policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) + policy.SetPreCommitChallengeDelay(abi.ChainEpoch(10)) + + getUpgradeHeight := func(ev string, def abi.ChainEpoch) abi.ChainEpoch { + hs, found := os.LookupEnv(ev) + if found { + h, err := strconv.Atoi(hs) + if err != nil { + log.Panicf("failed to parse %s env var", ev) + } + + return abi.ChainEpoch(h) + } - if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" { - UpgradeActorsV2Height = math.MaxInt64 - UpgradeLiftoffHeight = 11 + return def } + UpgradeBreezeHeight = getUpgradeHeight("LOTUS_BREEZE_HEIGHT", UpgradeBreezeHeight) + UpgradeSmokeHeight = getUpgradeHeight("LOTUS_SMOKE_HEIGHT", UpgradeSmokeHeight) + UpgradeIgnitionHeight = getUpgradeHeight("LOTUS_IGNITION_HEIGHT", UpgradeIgnitionHeight) + UpgradeRefuelHeight = getUpgradeHeight("LOTUS_REFUEL_HEIGHT", UpgradeRefuelHeight) + UpgradeTapeHeight = getUpgradeHeight("LOTUS_TAPE_HEIGHT", UpgradeTapeHeight) + UpgradeAssemblyHeight = getUpgradeHeight("LOTUS_ACTORSV2_HEIGHT", UpgradeAssemblyHeight) + UpgradeLiftoffHeight = getUpgradeHeight("LOTUS_LIFTOFF_HEIGHT", UpgradeLiftoffHeight) + UpgradeKumquatHeight = getUpgradeHeight("LOTUS_KUMQUAT_HEIGHT", UpgradeKumquatHeight) + UpgradeCalicoHeight = 
getUpgradeHeight("LOTUS_CALICO_HEIGHT", UpgradeCalicoHeight) + UpgradePersianHeight = getUpgradeHeight("LOTUS_PERSIAN_HEIGHT", UpgradePersianHeight) + UpgradeOrangeHeight = getUpgradeHeight("LOTUS_ORANGE_HEIGHT", UpgradeOrangeHeight) + UpgradeClausHeight = getUpgradeHeight("LOTUS_CLAUS_HEIGHT", UpgradeClausHeight) + UpgradeTrustHeight = getUpgradeHeight("LOTUS_ACTORSV3_HEIGHT", UpgradeTrustHeight) + UpgradeNorwegianHeight = getUpgradeHeight("LOTUS_NORWEGIAN_HEIGHT", UpgradeNorwegianHeight) + UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight) + UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight) + BuildType |= Build2k } @@ -53,3 +97,7 @@ const SlashablePowerDelay = 20 // Epochs const InteractivePoRepConfidence = 6 + +const BootstrapPeerThreshold = 1 + +var WhitelistedBlock = cid.Undef diff --git a/build/params_butterfly.go b/build/params_butterfly.go new file mode 100644 index 00000000000..258f6ab0f2e --- /dev/null +++ b/build/params_butterfly.go @@ -0,0 +1,58 @@ +// +build butterflynet + +package build + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/policy" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + "github.com/ipfs/go-cid" +) + +var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, +} + +const BootstrappersFile = "butterflynet.pi" +const GenesisFile = "butterflynet.car" + +const UpgradeBreezeHeight = -1 +const BreezeGasTampingDuration = 120 +const UpgradeSmokeHeight = -2 +const UpgradeIgnitionHeight = -3 +const UpgradeRefuelHeight = -4 + +var UpgradeAssemblyHeight = abi.ChainEpoch(30) + +const UpgradeTapeHeight = 60 +const UpgradeLiftoffHeight = -5 +const UpgradeKumquatHeight = 90 +const UpgradeCalicoHeight = 120 +const UpgradePersianHeight = 150 +const UpgradeClausHeight = 180 +const UpgradeOrangeHeight = 210 +const UpgradeTrustHeight = 
240 +const UpgradeNorwegianHeight = UpgradeTrustHeight + (builtin2.EpochsInHour * 12) +const UpgradeTurboHeight = 8922 +const UpgradeHyperdriveHeight = 9999999 + +func init() { + policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30)) + policy.SetSupportedProofTypes( + abi.RegisteredSealProof_StackedDrg512MiBV1, + ) + + SetAddressNetwork(address.Testnet) + + Devnet = true +} + +const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) + +const PropagationDelaySecs = uint64(6) + +// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start +const BootstrapPeerThreshold = 2 + +var WhitelistedBlock = cid.Undef diff --git a/build/params_calibnet.go b/build/params_calibnet.go new file mode 100644 index 00000000000..df334a516a7 --- /dev/null +++ b/build/params_calibnet.go @@ -0,0 +1,72 @@ +// +build calibnet + +package build + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/policy" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + "github.com/ipfs/go-cid" +) + +var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, +} + +const BootstrappersFile = "calibnet.pi" +const GenesisFile = "calibnet.car" + +const UpgradeBreezeHeight = -1 +const BreezeGasTampingDuration = 120 + +const UpgradeSmokeHeight = -2 + +const UpgradeIgnitionHeight = -3 +const UpgradeRefuelHeight = -4 + +var UpgradeAssemblyHeight = abi.ChainEpoch(30) + +const UpgradeTapeHeight = 60 + +const UpgradeLiftoffHeight = -5 + +const UpgradeKumquatHeight = 90 + +const UpgradeCalicoHeight = 120 +const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 1) + +const UpgradeClausHeight = 270 + +const UpgradeOrangeHeight = 300 + +const UpgradeTrustHeight = 330 + +const UpgradeNorwegianHeight = 360 + +const UpgradeTurboHeight = 390 + +const UpgradeHyperdriveHeight = 420 + +func init() { + 
policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30)) + policy.SetSupportedProofTypes( + abi.RegisteredSealProof_StackedDrg32GiBV1, + abi.RegisteredSealProof_StackedDrg64GiBV1, + ) + + SetAddressNetwork(address.Testnet) + + Devnet = true + + BuildType = BuildCalibnet +} + +const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) + +const PropagationDelaySecs = uint64(6) + +// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start +const BootstrapPeerThreshold = 4 + +var WhitelistedBlock = cid.Undef diff --git a/build/params_interop.go b/build/params_interop.go new file mode 100644 index 00000000000..73cc1c7d9ca --- /dev/null +++ b/build/params_interop.go @@ -0,0 +1,104 @@ +// +build interopnet + +package build + +import ( + "os" + "strconv" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors/policy" +) + +const BootstrappersFile = "interopnet.pi" +const GenesisFile = "interopnet.car" + +var UpgradeBreezeHeight = abi.ChainEpoch(-1) + +const BreezeGasTampingDuration = 0 + +var UpgradeSmokeHeight = abi.ChainEpoch(-1) +var UpgradeIgnitionHeight = abi.ChainEpoch(-2) +var UpgradeRefuelHeight = abi.ChainEpoch(-3) +var UpgradeTapeHeight = abi.ChainEpoch(-4) + +var UpgradeAssemblyHeight = abi.ChainEpoch(-5) +var UpgradeLiftoffHeight = abi.ChainEpoch(-6) + +var UpgradeKumquatHeight = abi.ChainEpoch(-7) +var UpgradeCalicoHeight = abi.ChainEpoch(-8) +var UpgradePersianHeight = abi.ChainEpoch(-9) +var UpgradeOrangeHeight = abi.ChainEpoch(-10) +var UpgradeClausHeight = abi.ChainEpoch(-11) + +var UpgradeTrustHeight = abi.ChainEpoch(-12) + +var UpgradeNorwegianHeight = abi.ChainEpoch(-13) + +var UpgradeTurboHeight = abi.ChainEpoch(-14) + +var UpgradeHyperdriveHeight = abi.ChainEpoch(-15) + +var DrandSchedule = 
map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, +} + +func init() { + policy.SetSupportedProofTypes( + abi.RegisteredSealProof_StackedDrg2KiBV1, + abi.RegisteredSealProof_StackedDrg8MiBV1, + abi.RegisteredSealProof_StackedDrg512MiBV1, + ) + policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) + policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) + policy.SetPreCommitChallengeDelay(abi.ChainEpoch(10)) + + getUpgradeHeight := func(ev string, def abi.ChainEpoch) abi.ChainEpoch { + hs, found := os.LookupEnv(ev) + if found { + h, err := strconv.Atoi(hs) + if err != nil { + log.Panicf("failed to parse %s env var", ev) + } + + return abi.ChainEpoch(h) + } + + return def + } + + UpgradeBreezeHeight = getUpgradeHeight("LOTUS_BREEZE_HEIGHT", UpgradeBreezeHeight) + UpgradeSmokeHeight = getUpgradeHeight("LOTUS_SMOKE_HEIGHT", UpgradeSmokeHeight) + UpgradeIgnitionHeight = getUpgradeHeight("LOTUS_IGNITION_HEIGHT", UpgradeIgnitionHeight) + UpgradeRefuelHeight = getUpgradeHeight("LOTUS_REFUEL_HEIGHT", UpgradeRefuelHeight) + UpgradeTapeHeight = getUpgradeHeight("LOTUS_TAPE_HEIGHT", UpgradeTapeHeight) + UpgradeAssemblyHeight = getUpgradeHeight("LOTUS_ACTORSV2_HEIGHT", UpgradeAssemblyHeight) + UpgradeLiftoffHeight = getUpgradeHeight("LOTUS_LIFTOFF_HEIGHT", UpgradeLiftoffHeight) + UpgradeKumquatHeight = getUpgradeHeight("LOTUS_KUMQUAT_HEIGHT", UpgradeKumquatHeight) + UpgradeCalicoHeight = getUpgradeHeight("LOTUS_CALICO_HEIGHT", UpgradeCalicoHeight) + UpgradePersianHeight = getUpgradeHeight("LOTUS_PERSIAN_HEIGHT", UpgradePersianHeight) + UpgradeOrangeHeight = getUpgradeHeight("LOTUS_ORANGE_HEIGHT", UpgradeOrangeHeight) + UpgradeClausHeight = getUpgradeHeight("LOTUS_CLAUS_HEIGHT", UpgradeClausHeight) + UpgradeTrustHeight = getUpgradeHeight("LOTUS_ACTORSV3_HEIGHT", UpgradeTrustHeight) + UpgradeNorwegianHeight = getUpgradeHeight("LOTUS_NORWEGIAN_HEIGHT", UpgradeNorwegianHeight) + UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight) + 
UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight) + + BuildType |= BuildInteropnet + SetAddressNetwork(address.Testnet) + Devnet = true +} + +const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) + +const PropagationDelaySecs = uint64(6) + +// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start +const BootstrapPeerThreshold = 2 + +var WhitelistedBlock = cid.Undef diff --git a/build/params_mainnet.go b/build/params_mainnet.go index 94deedfec33..1c9b6946267 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ -1,6 +1,10 @@ // +build !debug // +build !2k // +build !testground +// +build !calibnet +// +build !nerpanet +// +build !butterflynet +// +build !interopnet package build @@ -10,8 +14,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/actors/policy" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" ) @@ -20,7 +22,11 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ UpgradeSmokeHeight: DrandMainnet, } +const BootstrappersFile = "mainnet.pi" +const GenesisFile = "mainnet.car" + const UpgradeBreezeHeight = 41280 + const BreezeGasTampingDuration = 120 const UpgradeSmokeHeight = 51000 @@ -28,7 +34,7 @@ const UpgradeSmokeHeight = 51000 const UpgradeIgnitionHeight = 94000 const UpgradeRefuelHeight = 130800 -var UpgradeActorsV2Height = abi.ChainEpoch(138720) +const UpgradeAssemblyHeight = 138720 const UpgradeTapeHeight = 140760 @@ -39,24 +45,46 @@ const UpgradeLiftoffHeight = 148888 const UpgradeKumquatHeight = 170000 -func init() { - policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40)) - policy.SetSupportedProofTypes( - abi.RegisteredSealProof_StackedDrg32GiBV1, - abi.RegisteredSealProof_StackedDrg64GiBV1, - ) +const UpgradeCalicoHeight = 265200 +const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60) + 
+const UpgradeOrangeHeight = 336458 + +// 2020-12-22T02:00:00Z +var UpgradeClausHeight = abi.ChainEpoch(343200) + +// 2021-03-04T00:00:30Z +const UpgradeTrustHeight = 550321 +// 2021-04-12T22:00:00Z +const UpgradeNorwegianHeight = 665280 + +// 2021-04-29T06:00:00Z +const UpgradeTurboHeight = 712320 + +// 2021-06-30T22:00:00Z +var UpgradeHyperdriveHeight = abi.ChainEpoch(892800) + +func init() { if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" { SetAddressNetwork(address.Mainnet) } - if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" { - UpgradeActorsV2Height = math.MaxInt64 + if os.Getenv("LOTUS_DISABLE_HYPERDRIVE") == "1" { + UpgradeHyperdriveHeight = math.MaxInt64 } Devnet = false + + BuildType = BuildMainnet } const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) const PropagationDelaySecs = uint64(6) + +// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start +const BootstrapPeerThreshold = 4 + +// we skip checks on message validity in this block to sidestep the zero-bls signature +var WhitelistedBlock = MustParseCid("bafy2bzaceapyg2uyzk7vueh3xccxkuwbz3nxewjyguoxvhx77malc2lzn2ybi") diff --git a/build/params_nerpanet.go b/build/params_nerpanet.go new file mode 100644 index 00000000000..6663a91628a --- /dev/null +++ b/build/params_nerpanet.go @@ -0,0 +1,78 @@ +// +build nerpanet + +package build + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/ipfs/go-cid" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" +) + +var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, +} + +const BootstrappersFile = "nerpanet.pi" +const GenesisFile = "nerpanet.car" + +const UpgradeBreezeHeight = -1 +const BreezeGasTampingDuration = 0 + +const UpgradeSmokeHeight = -1 + +const UpgradeIgnitionHeight = -2 +const UpgradeRefuelHeight = -3 + +const UpgradeLiftoffHeight = -5 + +const UpgradeAssemblyHeight = 30 // 
critical: the network can bootstrap from v1 only +const UpgradeTapeHeight = 60 + +const UpgradeKumquatHeight = 90 + +const UpgradeCalicoHeight = 100 +const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 1) + +const UpgradeClausHeight = 250 + +const UpgradeOrangeHeight = 300 + +const UpgradeTrustHeight = 600 +const UpgradeNorwegianHeight = 201000 +const UpgradeTurboHeight = 203000 +const UpgradeHyperdriveHeight = 999999999 + +func init() { + // Minimum block production power is set to 4 TiB + // Rationale is to discourage small-scale miners from trying to take over the network + // One needs to invest in ~2.3x the compute to break consensus, making it not worth it + // + // DOWNSIDE: the fake-seals need to be kept alive/protected, otherwise network will seize + // + policy.SetConsensusMinerMinPower(abi.NewStoragePower(4 << 40)) + + policy.SetSupportedProofTypes( + abi.RegisteredSealProof_StackedDrg512MiBV1, + abi.RegisteredSealProof_StackedDrg32GiBV1, + abi.RegisteredSealProof_StackedDrg64GiBV1, + ) + + // Lower the most time-consuming parts of PoRep + policy.SetPreCommitChallengeDelay(10) + + // TODO - make this a variable + //miner.WPoStChallengeLookback = abi.ChainEpoch(2) + + Devnet = false +} + +const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) + +const PropagationDelaySecs = uint64(6) + +// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start +const BootstrapPeerThreshold = 4 + +var WhitelistedBlock = cid.Undef diff --git a/build/params_shared_funcs.go b/build/params_shared_funcs.go index 77fd9256d44..f59fee653e9 100644 --- a/build/params_shared_funcs.go +++ b/build/params_shared_funcs.go @@ -2,6 +2,7 @@ package build import ( "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/protocol" @@ -19,3 +20,21 @@ func DhtProtocolName(netName dtypes.NetworkName) protocol.ID { func SetAddressNetwork(n address.Network) { address.CurrentNetwork = 
n } + +func MustParseAddress(addr string) address.Address { + ret, err := address.NewFromString(addr) + if err != nil { + panic(err) + } + + return ret +} + +func MustParseCid(c string) cid.Cid { + ret, err := cid.Decode(c) + if err != nil { + panic(err) + } + + return ret +} diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 5070777bd6f..e4240ccce12 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -25,7 +25,7 @@ const UnixfsLinksPerLevel = 1024 // Consensus / Network const AllowableClockDriftSecs = uint64(1) -const NewestNetworkVersion = network.Version6 +const NewestNetworkVersion = network.Version13 const ActorUpgradeNetworkVersion = network.Version4 // Epochs @@ -61,6 +61,9 @@ const TicketRandomnessLookback = abi.ChainEpoch(1) const AddressMainnetEnvVar = "_mainnet_" +// the 'f' prefix doesn't matter +var ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a") + // ///// // Devnet settings @@ -115,5 +118,5 @@ const PackingEfficiencyNum = 4 const PackingEfficiencyDenom = 5 // Actor consts -// TODO: Pull from actors when its made not private -var MinDealDuration = abi.ChainEpoch(180 * builtin2.EpochsInDay) +// TODO: pieceSize unused from actors +var MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0) diff --git a/build/params_testground.go b/build/params_testground.go index d9893a5f5ea..252d23e759e 100644 --- a/build/params_testground.go +++ b/build/params_testground.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -72,8 +73,8 @@ var ( }() // Actor consts - // TODO: Pull from actors when its made not private - MinDealDuration = abi.ChainEpoch(180 * builtin2.EpochsInDay) + // TODO: pieceSize unused from actors + MinDealDuration, MaxDealDuration = 
policy.DealDurationBounds(0) PackingEfficiencyNum int64 = 4 PackingEfficiencyDenom int64 = 5 @@ -81,20 +82,35 @@ var ( UpgradeBreezeHeight abi.ChainEpoch = -1 BreezeGasTampingDuration abi.ChainEpoch = 0 - UpgradeSmokeHeight abi.ChainEpoch = -1 - UpgradeIgnitionHeight abi.ChainEpoch = -2 - UpgradeRefuelHeight abi.ChainEpoch = -3 - UpgradeTapeHeight abi.ChainEpoch = -4 - UpgradeActorsV2Height abi.ChainEpoch = 10 - UpgradeLiftoffHeight abi.ChainEpoch = -5 - UpgradeKumquatHeight abi.ChainEpoch = -6 + UpgradeSmokeHeight abi.ChainEpoch = -1 + UpgradeIgnitionHeight abi.ChainEpoch = -2 + UpgradeRefuelHeight abi.ChainEpoch = -3 + UpgradeTapeHeight abi.ChainEpoch = -4 + UpgradeAssemblyHeight abi.ChainEpoch = 10 + UpgradeLiftoffHeight abi.ChainEpoch = -5 + UpgradeKumquatHeight abi.ChainEpoch = -6 + UpgradeCalicoHeight abi.ChainEpoch = -7 + UpgradePersianHeight abi.ChainEpoch = -8 + UpgradeOrangeHeight abi.ChainEpoch = -9 + UpgradeClausHeight abi.ChainEpoch = -10 + UpgradeTrustHeight abi.ChainEpoch = -11 + UpgradeNorwegianHeight abi.ChainEpoch = -12 + UpgradeTurboHeight abi.ChainEpoch = -13 + UpgradeHyperdriveHeight abi.ChainEpoch = -13 DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, } - NewestNetworkVersion = network.Version5 + NewestNetworkVersion = network.Version11 ActorUpgradeNetworkVersion = network.Version4 - Devnet = true + Devnet = true + ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a") + + WhitelistedBlock = cid.Undef + BootstrappersFile = "" + GenesisFile = "" ) + +const BootstrapPeerThreshold = 1 diff --git a/build/proof-params/srs-inner-product.json b/build/proof-params/srs-inner-product.json new file mode 100644 index 00000000000..8566bf5fd89 --- /dev/null +++ b/build/proof-params/srs-inner-product.json @@ -0,0 +1,7 @@ +{ + "v28-fil-inner-product-v1.srs": { + "cid": "Qmdq44DjcQnFfU3PJcdX7J49GCqcUYszr1TxMbHtAkvQ3g", + "digest": "ae20310138f5ba81451d723f858e3797", + 
"sector_size": 0 + } +} diff --git a/build/tools.go b/build/tools.go index 638c335a669..57b6e7d1f36 100644 --- a/build/tools.go +++ b/build/tools.go @@ -3,5 +3,8 @@ package build import ( + _ "github.com/GeertJohan/go.rice/rice" + _ "github.com/golang/mock/mockgen" _ "github.com/whyrusleeping/bencher" + _ "golang.org/x/tools/cmd/stringer" ) diff --git a/build/version.go b/build/version.go index 80977f2f147..c6a1be3e2dc 100644 --- a/build/version.go +++ b/build/version.go @@ -1,100 +1,45 @@ package build -import ( - "fmt" - - "golang.org/x/xerrors" -) +import "os" var CurrentCommit string var BuildType int const ( - BuildDefault = 0 - Build2k = 0x1 - BuildDebug = 0x3 + BuildDefault = 0 + BuildMainnet = 0x1 + Build2k = 0x2 + BuildDebug = 0x3 + BuildCalibnet = 0x4 + BuildInteropnet = 0x5 ) func buildType() string { switch BuildType { case BuildDefault: return "" - case BuildDebug: - return "+debug" + case BuildMainnet: + return "+mainnet" case Build2k: return "+2k" + case BuildDebug: + return "+debug" + case BuildCalibnet: + return "+calibnet" + case BuildInteropnet: + return "+interopnet" default: return "+huh?" 
} } // BuildVersion is the local build version, set by build system -const BuildVersion = "1.1.2" +const BuildVersion = "1.11.1-dev" func UserVersion() string { - return BuildVersion + buildType() + CurrentCommit -} - -type Version uint32 - -func newVer(major, minor, patch uint8) Version { - return Version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch)) -} - -// Ints returns (major, minor, patch) versions -func (ve Version) Ints() (uint32, uint32, uint32) { - v := uint32(ve) - return (v & majorOnlyMask) >> 16, (v & minorOnlyMask) >> 8, v & patchOnlyMask -} - -func (ve Version) String() string { - vmj, vmi, vp := ve.Ints() - return fmt.Sprintf("%d.%d.%d", vmj, vmi, vp) -} - -func (ve Version) EqMajorMinor(v2 Version) bool { - return ve&minorMask == v2&minorMask -} - -type NodeType int - -const ( - NodeUnknown NodeType = iota - - NodeFull - NodeMiner - NodeWorker -) - -var RunningNodeType NodeType - -func VersionForType(nodeType NodeType) (Version, error) { - switch nodeType { - case NodeFull: - return FullAPIVersion, nil - case NodeMiner: - return MinerAPIVersion, nil - case NodeWorker: - return WorkerAPIVersion, nil - default: - return Version(0), xerrors.Errorf("unknown node type %d", nodeType) + if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { + return BuildVersion } -} - -// semver versions of the rpc api exposed -var ( - FullAPIVersion = newVer(0, 17, 0) - MinerAPIVersion = newVer(0, 17, 0) - WorkerAPIVersion = newVer(0, 16, 0) -) - -//nolint:varcheck,deadcode -const ( - majorMask = 0xff0000 - minorMask = 0xffff00 - patchMask = 0xffffff - majorOnlyMask = 0xff0000 - minorOnlyMask = 0x00ff00 - patchOnlyMask = 0x0000ff -) + return BuildVersion + buildType() + CurrentCommit +} diff --git a/chain/actors/adt/adt.go b/chain/actors/adt/adt.go index 6a454ac2657..084471bb8ff 100644 --- a/chain/actors/adt/adt.go +++ b/chain/actors/adt/adt.go @@ -2,16 +2,9 @@ package adt import ( "github.com/ipfs/go-cid" - "golang.org/x/xerrors" 
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/cbor" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/lotus/chain/actors" - - adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" - adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" ) type Map interface { @@ -24,26 +17,6 @@ type Map interface { ForEach(v cbor.Unmarshaler, fn func(key string) error) error } -func AsMap(store Store, root cid.Cid, version actors.Version) (Map, error) { - switch version { - case actors.Version0: - return adt0.AsMap(store, root) - case actors.Version2: - return adt2.AsMap(store, root) - } - return nil, xerrors.Errorf("unknown network version: %d", version) -} - -func NewMap(store Store, version actors.Version) (Map, error) { - switch version { - case actors.Version0: - return adt0.MakeEmptyMap(store), nil - case actors.Version2: - return adt2.MakeEmptyMap(store), nil - } - return nil, xerrors.Errorf("unknown network version: %d", version) -} - type Array interface { Root() (cid.Cid, error) @@ -54,23 +27,3 @@ type Array interface { ForEach(v cbor.Unmarshaler, fn func(idx int64) error) error } - -func AsArray(store Store, root cid.Cid, version network.Version) (Array, error) { - switch actors.VersionForNetwork(version) { - case actors.Version0: - return adt0.AsArray(store, root) - case actors.Version2: - return adt2.AsArray(store, root) - } - return nil, xerrors.Errorf("unknown network version: %d", version) -} - -func NewArray(store Store, version actors.Version) (Array, error) { - switch version { - case actors.Version0: - return adt0.MakeEmptyArray(store), nil - case actors.Version2: - return adt2.MakeEmptyArray(store), nil - } - return nil, xerrors.Errorf("unknown network version: %d", version) -} diff --git a/chain/actors/adt/diff_adt_test.go b/chain/actors/adt/diff_adt_test.go index a187c9f3568..b0e01b78d31 100644 --- a/chain/actors/adt/diff_adt_test.go +++ 
b/chain/actors/adt/diff_adt_test.go @@ -16,7 +16,7 @@ import ( builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" - bstore "github.com/filecoin-project/lotus/lib/blockstore" + bstore "github.com/filecoin-project/lotus/blockstore" ) func TestDiffAdtArray(t *testing.T) { @@ -295,7 +295,7 @@ func (t *TestDiffArray) Remove(key uint64, val *typegen.Deferred) error { func newContextStore() Store { ctx := context.Background() - bs := bstore.NewTemporarySync() + bs := bstore.NewMemorySync() store := cbornode.NewCborStore(bs) return WrapStore(ctx, store) } diff --git a/chain/actors/agen/main.go b/chain/actors/agen/main.go new file mode 100644 index 00000000000..9a3b8fd20f8 --- /dev/null +++ b/chain/actors/agen/main.go @@ -0,0 +1,224 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "text/template" + + lotusactors "github.com/filecoin-project/lotus/chain/actors" + + "golang.org/x/xerrors" +) + +var actors = map[string][]int{ + "account": lotusactors.Versions, + "cron": lotusactors.Versions, + "init": lotusactors.Versions, + "market": lotusactors.Versions, + "miner": lotusactors.Versions, + "multisig": lotusactors.Versions, + "paych": lotusactors.Versions, + "power": lotusactors.Versions, + "system": lotusactors.Versions, + "reward": lotusactors.Versions, + "verifreg": lotusactors.Versions, +} + +func main() { + if err := generateAdapters(); err != nil { + fmt.Println(err) + return + } + + if err := generatePolicy("chain/actors/policy/policy.go"); err != nil { + fmt.Println(err) + return + } + + if err := generateBuiltin("chain/actors/builtin/builtin.go"); err != nil { + fmt.Println(err) + return + } +} + +func generateAdapters() error { + for act, versions := range actors { + actDir := filepath.Join("chain/actors/builtin", act) + + if err := generateState(actDir); err != nil { + return err + } + + if err := generateMessages(actDir); err != 
nil { + return err + } + + { + af, err := ioutil.ReadFile(filepath.Join(actDir, "actor.go.template")) + if err != nil { + return xerrors.Errorf("loading actor template: %w", err) + } + + tpl := template.Must(template.New("").Funcs(template.FuncMap{ + "import": func(v int) string { return getVersionImports()[v] }, + }).Parse(string(af))) + + var b bytes.Buffer + + err = tpl.Execute(&b, map[string]interface{}{ + "versions": versions, + "latestVersion": lotusactors.LatestVersion, + }) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(actDir, fmt.Sprintf("%s.go", act)), b.Bytes(), 0666); err != nil { + return err + } + } + } + + return nil +} + +func generateState(actDir string) error { + af, err := ioutil.ReadFile(filepath.Join(actDir, "state.go.template")) + if err != nil { + if os.IsNotExist(err) { + return nil // skip + } + + return xerrors.Errorf("loading state adapter template: %w", err) + } + + for _, version := range lotusactors.Versions { + tpl := template.Must(template.New("").Funcs(template.FuncMap{}).Parse(string(af))) + + var b bytes.Buffer + + err := tpl.Execute(&b, map[string]interface{}{ + "v": version, + "import": getVersionImports()[version], + }) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(actDir, fmt.Sprintf("v%d.go", version)), b.Bytes(), 0666); err != nil { + return err + } + } + + return nil +} + +func generateMessages(actDir string) error { + af, err := ioutil.ReadFile(filepath.Join(actDir, "message.go.template")) + if err != nil { + if os.IsNotExist(err) { + return nil // skip + } + + return xerrors.Errorf("loading message adapter template: %w", err) + } + + for _, version := range lotusactors.Versions { + tpl := template.Must(template.New("").Funcs(template.FuncMap{}).Parse(string(af))) + + var b bytes.Buffer + + err := tpl.Execute(&b, map[string]interface{}{ + "v": version, + "import": getVersionImports()[version], + }) + if err != nil { + return err + } + + if err := 
ioutil.WriteFile(filepath.Join(actDir, fmt.Sprintf("message%d.go", version)), b.Bytes(), 0666); err != nil { + return err + } + } + + return nil +} + +func generatePolicy(policyPath string) error { + + pf, err := ioutil.ReadFile(policyPath + ".template") + if err != nil { + if os.IsNotExist(err) { + return nil // skip + } + + return xerrors.Errorf("loading policy template file: %w", err) + } + + tpl := template.Must(template.New("").Funcs(template.FuncMap{ + "import": func(v int) string { return getVersionImports()[v] }, + }).Parse(string(pf))) + var b bytes.Buffer + + err = tpl.Execute(&b, map[string]interface{}{ + "versions": lotusactors.Versions, + "latestVersion": lotusactors.LatestVersion, + }) + if err != nil { + return err + } + + if err := ioutil.WriteFile(policyPath, b.Bytes(), 0666); err != nil { + return err + } + + return nil +} + +func generateBuiltin(builtinPath string) error { + + bf, err := ioutil.ReadFile(builtinPath + ".template") + if err != nil { + if os.IsNotExist(err) { + return nil // skip + } + + return xerrors.Errorf("loading builtin template file: %w", err) + } + + tpl := template.Must(template.New("").Funcs(template.FuncMap{ + "import": func(v int) string { return getVersionImports()[v] }, + }).Parse(string(bf))) + var b bytes.Buffer + + err = tpl.Execute(&b, map[string]interface{}{ + "versions": lotusactors.Versions, + "latestVersion": lotusactors.LatestVersion, + }) + if err != nil { + return err + } + + if err := ioutil.WriteFile(builtinPath, b.Bytes(), 0666); err != nil { + return err + } + + return nil +} + +func getVersionImports() map[int]string { + versionImports := make(map[int]string, lotusactors.LatestVersion) + for _, v := range lotusactors.Versions { + if v == 0 { + versionImports[v] = "/" + } else { + versionImports[v] = "/v" + strconv.Itoa(v) + "/" + } + } + + return versionImports +} diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go index 38ed2654b7b..04c82b340f4 100644 --- 
a/chain/actors/builtin/account/account.go +++ b/chain/actors/builtin/account/account.go @@ -1,6 +1,7 @@ package account import ( + "github.com/filecoin-project/lotus/chain/actors" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -12,32 +13,111 @@ import ( "github.com/filecoin-project/lotus/chain/types" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" ) func init() { + builtin.RegisterActorState(builtin0.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load0(store, root) }) + builtin.RegisterActorState(builtin2.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + + builtin.RegisterActorState(builtin3.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) + + builtin.RegisterActorState(builtin4.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load4(store, root) + }) + + builtin.RegisterActorState(builtin5.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) } -var Methods = builtin2.MethodsAccount +var Methods = builtin4.MethodsAccount func Load(store adt.Store, act *types.Actor) (State, error) { switch act.Code { + case builtin0.AccountActorCodeID: return load0(store, act.Head) + case builtin2.AccountActorCodeID: return load2(store, act.Head) + + case builtin3.AccountActorCodeID: + return load3(store, act.Head) + + case builtin4.AccountActorCodeID: + return load4(store, act.Head) + + case builtin5.AccountActorCodeID: + return load5(store, act.Head) + } return nil, 
xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version, addr address.Address) (State, error) { + switch av { + + case actors.Version0: + return make0(store, addr) + + case actors.Version2: + return make2(store, addr) + + case actors.Version3: + return make3(store, addr) + + case actors.Version4: + return make4(store, addr) + + case actors.Version5: + return make5(store, addr) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.AccountActorCodeID, nil + + case actors.Version2: + return builtin2.AccountActorCodeID, nil + + case actors.Version3: + return builtin3.AccountActorCodeID, nil + + case actors.Version4: + return builtin4.AccountActorCodeID, nil + + case actors.Version5: + return builtin5.AccountActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler PubkeyAddress() (address.Address, error) + GetState() interface{} } diff --git a/chain/actors/builtin/account/actor.go.template b/chain/actors/builtin/account/actor.go.template new file mode 100644 index 00000000000..53962cc9412 --- /dev/null +++ b/chain/actors/builtin/account/actor.go.template @@ -0,0 +1,64 @@ +package account + +import ( + "github.com/filecoin-project/lotus/chain/actors" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +{{range .versions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" +{{end}} +) + +func init() { +{{range .versions}} + builtin.RegisterActorState(builtin{{.}}.AccountActorCodeID, func(store adt.Store, root cid.Cid) 
(cbor.Marshaler, error) { + return load{{.}}(store, root) + }) +{{end}}} + +var Methods = builtin4.MethodsAccount + +func Load(store adt.Store, act *types.Actor) (State, error) { + switch act.Code { +{{range .versions}} + case builtin{{.}}.AccountActorCodeID: + return load{{.}}(store, act.Head) +{{end}} + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actors.Version, addr address.Address) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store, addr) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.AccountActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + PubkeyAddress() (address.Address, error) + GetState() interface{} +} diff --git a/chain/actors/builtin/account/state.go.template b/chain/actors/builtin/account/state.go.template new file mode 100644 index 00000000000..5be262eceb9 --- /dev/null +++ b/chain/actors/builtin/account/state.go.template @@ -0,0 +1,40 @@ +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + account{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/account" +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store, addr address.Address) (State, error) { + out := state{{.v}}{store: store} + out.State = account{{.v}}.State{Address:addr} + return &out, nil +} + +type state{{.v}} struct { + account{{.v}}.State + store 
adt.Store +} + +func (s *state{{.v}}) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} \ No newline at end of file diff --git a/chain/actors/builtin/account/v0.go b/chain/actors/builtin/account/v0.go index 67c555c5dcf..bdfca2fd705 100644 --- a/chain/actors/builtin/account/v0.go +++ b/chain/actors/builtin/account/v0.go @@ -20,6 +20,12 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store, addr address.Address) (State, error) { + out := state0{store: store} + out.State = account0.State{Address: addr} + return &out, nil +} + type state0 struct { account0.State store adt.Store @@ -28,3 +34,7 @@ type state0 struct { func (s *state0) PubkeyAddress() (address.Address, error) { return s.Address, nil } + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/account/v2.go b/chain/actors/builtin/account/v2.go index 2664631bc92..66618e06a4e 100644 --- a/chain/actors/builtin/account/v2.go +++ b/chain/actors/builtin/account/v2.go @@ -20,6 +20,12 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store, addr address.Address) (State, error) { + out := state2{store: store} + out.State = account2.State{Address: addr} + return &out, nil +} + type state2 struct { account2.State store adt.Store @@ -28,3 +34,7 @@ type state2 struct { func (s *state2) PubkeyAddress() (address.Address, error) { return s.Address, nil } + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/account/v3.go b/chain/actors/builtin/account/v3.go new file mode 100644 index 00000000000..dbe100a4f83 --- /dev/null +++ b/chain/actors/builtin/account/v3.go @@ -0,0 +1,40 @@ +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + 
account3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/account" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store, addr address.Address) (State, error) { + out := state3{store: store} + out.State = account3.State{Address: addr} + return &out, nil +} + +type state3 struct { + account3.State + store adt.Store +} + +func (s *state3) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/account/v4.go b/chain/actors/builtin/account/v4.go new file mode 100644 index 00000000000..53f71dcc5e9 --- /dev/null +++ b/chain/actors/builtin/account/v4.go @@ -0,0 +1,40 @@ +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + account4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/account" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store, addr address.Address) (State, error) { + out := state4{store: store} + out.State = account4.State{Address: addr} + return &out, nil +} + +type state4 struct { + account4.State + store adt.Store +} + +func (s *state4) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/account/v5.go b/chain/actors/builtin/account/v5.go new file mode 100644 index 00000000000..538f5698750 --- /dev/null +++ b/chain/actors/builtin/account/v5.go @@ -0,0 +1,40 @@ +package account + 
+import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + account5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/account" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, addr address.Address) (State, error) { + out := state5{store: store} + out.State = account5.State{Address: addr} + return &out, nil +} + +type state5 struct { + account5.State + store adt.Store +} + +func (s *state5) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/builtin.go b/chain/actors/builtin/builtin.go index afba8efe88e..74d6228193b 100644 --- a/chain/actors/builtin/builtin.go +++ b/chain/actors/builtin/builtin.go @@ -2,12 +2,23 @@ package builtin import ( "github.com/filecoin-project/go-address" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing" "github.com/ipfs/go-cid" "golang.org/x/xerrors" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + smoothing3 "github.com/filecoin-project/specs-actors/v3/actors/util/smoothing" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + smoothing4 "github.com/filecoin-project/specs-actors/v4/actors/util/smoothing" + + builtin5 
"github.com/filecoin-project/specs-actors/v5/actors/builtin" + smoothing5 "github.com/filecoin-project/specs-actors/v5/actors/util/smoothing" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/cbor" @@ -15,49 +26,70 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" - miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" - proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof" - smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" ) -var SystemActorAddr = builtin0.SystemActorAddr -var BurntFundsActorAddr = builtin0.BurntFundsActorAddr -var CronActorAddr = builtin0.CronActorAddr +var SystemActorAddr = builtin5.SystemActorAddr +var BurntFundsActorAddr = builtin5.BurntFundsActorAddr +var CronActorAddr = builtin5.CronActorAddr var SaftAddress = makeAddress("t0122") var ReserveAddress = makeAddress("t090") var RootVerifierAddress = makeAddress("t080") var ( - ExpectedLeadersPerEpoch = builtin0.ExpectedLeadersPerEpoch + ExpectedLeadersPerEpoch = builtin5.ExpectedLeadersPerEpoch ) const ( - EpochDurationSeconds = builtin0.EpochDurationSeconds - EpochsInDay = builtin0.EpochsInDay - SecondsInDay = builtin0.SecondsInDay + EpochDurationSeconds = builtin5.EpochDurationSeconds + EpochsInDay = builtin5.EpochsInDay + SecondsInDay = builtin5.SecondsInDay ) const ( - MethodSend = builtin2.MethodSend - MethodConstructor = builtin2.MethodConstructor + MethodSend = builtin5.MethodSend + MethodConstructor = builtin5.MethodConstructor ) -// TODO: Why does actors have 2 different versions of this? -type SectorInfo = proof0.SectorInfo -type PoStProof = proof0.PoStProof +// These are all just type aliases across actor versions. 
In the future, that might change +// and we might need to do something fancier. +type SectorInfo = proof5.SectorInfo +type PoStProof = proof5.PoStProof type FilterEstimate = smoothing0.FilterEstimate +func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { + return miner5.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) +} + func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate { - return (FilterEstimate)(v0) + + return (FilterEstimate)(v0) //nolint:unconvert + } -// Doesn't change between actors v0 and v1 -func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { - return miner0.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) +func FromV2FilterEstimate(v2 smoothing2.FilterEstimate) FilterEstimate { + + return (FilterEstimate)(v2) + +} + +func FromV3FilterEstimate(v3 smoothing3.FilterEstimate) FilterEstimate { + + return (FilterEstimate)(v3) + } -func FromV2FilterEstimate(v1 smoothing2.FilterEstimate) FilterEstimate { - return (FilterEstimate)(v1) +func FromV4FilterEstimate(v4 smoothing4.FilterEstimate) FilterEstimate { + + return (FilterEstimate)(v4) + +} + +func FromV5FilterEstimate(v5 smoothing5.FilterEstimate) FilterEstimate { + + return (FilterEstimate)(v5) + } type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) @@ -78,34 +110,150 @@ func Load(store adt.Store, act *types.Actor) (cbor.Marshaler, error) { func ActorNameByCode(c cid.Cid) string { switch { + case builtin0.IsBuiltinActor(c): return builtin0.ActorNameByCode(c) + case builtin2.IsBuiltinActor(c): return builtin2.ActorNameByCode(c) + + case builtin3.IsBuiltinActor(c): + return builtin3.ActorNameByCode(c) + + case builtin4.IsBuiltinActor(c): + return builtin4.ActorNameByCode(c) + + case builtin5.IsBuiltinActor(c): + return builtin5.ActorNameByCode(c) + default: return "" } } func 
IsBuiltinActor(c cid.Cid) bool { - return builtin0.IsBuiltinActor(c) || builtin2.IsBuiltinActor(c) + + if builtin0.IsBuiltinActor(c) { + return true + } + + if builtin2.IsBuiltinActor(c) { + return true + } + + if builtin3.IsBuiltinActor(c) { + return true + } + + if builtin4.IsBuiltinActor(c) { + return true + } + + if builtin5.IsBuiltinActor(c) { + return true + } + + return false } func IsAccountActor(c cid.Cid) bool { - return c == builtin0.AccountActorCodeID || c == builtin2.AccountActorCodeID + + if c == builtin0.AccountActorCodeID { + return true + } + + if c == builtin2.AccountActorCodeID { + return true + } + + if c == builtin3.AccountActorCodeID { + return true + } + + if c == builtin4.AccountActorCodeID { + return true + } + + if c == builtin5.AccountActorCodeID { + return true + } + + return false } func IsStorageMinerActor(c cid.Cid) bool { - return c == builtin0.StorageMinerActorCodeID || c == builtin2.StorageMinerActorCodeID + + if c == builtin0.StorageMinerActorCodeID { + return true + } + + if c == builtin2.StorageMinerActorCodeID { + return true + } + + if c == builtin3.StorageMinerActorCodeID { + return true + } + + if c == builtin4.StorageMinerActorCodeID { + return true + } + + if c == builtin5.StorageMinerActorCodeID { + return true + } + + return false } func IsMultisigActor(c cid.Cid) bool { - return c == builtin0.MultisigActorCodeID || c == builtin2.MultisigActorCodeID + if c == builtin0.MultisigActorCodeID { + return true + } + + if c == builtin2.MultisigActorCodeID { + return true + } + + if c == builtin3.MultisigActorCodeID { + return true + } + + if c == builtin4.MultisigActorCodeID { + return true + } + + if c == builtin5.MultisigActorCodeID { + return true + } + + return false } func IsPaymentChannelActor(c cid.Cid) bool { - return c == builtin0.PaymentChannelActorCodeID || c == builtin2.PaymentChannelActorCodeID + + if c == builtin0.PaymentChannelActorCodeID { + return true + } + + if c == builtin2.PaymentChannelActorCodeID { + 
return true + } + + if c == builtin3.PaymentChannelActorCodeID { + return true + } + + if c == builtin4.PaymentChannelActorCodeID { + return true + } + + if c == builtin5.PaymentChannelActorCodeID { + return true + } + + return false } func makeAddress(addr string) address.Address { diff --git a/chain/actors/builtin/builtin.go.template b/chain/actors/builtin/builtin.go.template new file mode 100644 index 00000000000..031c05182e4 --- /dev/null +++ b/chain/actors/builtin/builtin.go.template @@ -0,0 +1,144 @@ +package builtin + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + {{range .versions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + smoothing{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/util/smoothing" + {{end}} + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" + + miner{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/miner" + proof{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/runtime/proof" +) + +var SystemActorAddr = builtin{{.latestVersion}}.SystemActorAddr +var BurntFundsActorAddr = builtin{{.latestVersion}}.BurntFundsActorAddr +var CronActorAddr = builtin{{.latestVersion}}.CronActorAddr +var SaftAddress = makeAddress("t0122") +var ReserveAddress = makeAddress("t090") +var RootVerifierAddress = makeAddress("t080") + +var ( + ExpectedLeadersPerEpoch = builtin{{.latestVersion}}.ExpectedLeadersPerEpoch +) + +const ( + EpochDurationSeconds = builtin{{.latestVersion}}.EpochDurationSeconds + EpochsInDay = builtin{{.latestVersion}}.EpochsInDay + SecondsInDay = builtin{{.latestVersion}}.SecondsInDay +) + +const ( + MethodSend = builtin{{.latestVersion}}.MethodSend + MethodConstructor = 
builtin{{.latestVersion}}.MethodConstructor +) + +// These are all just type aliases across actor versions. In the future, that might change +// and we might need to do something fancier. +type SectorInfo = proof{{.latestVersion}}.SectorInfo +type PoStProof = proof{{.latestVersion}}.PoStProof +type FilterEstimate = smoothing0.FilterEstimate + +func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { + return miner{{.latestVersion}}.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) +} + +{{range .versions}} + func FromV{{.}}FilterEstimate(v{{.}} smoothing{{.}}.FilterEstimate) FilterEstimate { + {{if (eq . 0)}} + return (FilterEstimate)(v{{.}}) //nolint:unconvert + {{else}} + return (FilterEstimate)(v{{.}}) + {{end}} + } +{{end}} + +type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) + +var ActorStateLoaders = make(map[cid.Cid]ActorStateLoader) + +func RegisterActorState(code cid.Cid, loader ActorStateLoader) { + ActorStateLoaders[code] = loader +} + +func Load(store adt.Store, act *types.Actor) (cbor.Marshaler, error) { + loader, found := ActorStateLoaders[act.Code] + if !found { + return nil, xerrors.Errorf("unknown actor code %s", act.Code) + } + return loader(store, act.Head) +} + +func ActorNameByCode(c cid.Cid) string { + switch { + {{range .versions}} + case builtin{{.}}.IsBuiltinActor(c): + return builtin{{.}}.ActorNameByCode(c) + {{end}} + default: + return "" + } +} + +func IsBuiltinActor(c cid.Cid) bool { + {{range .versions}} + if builtin{{.}}.IsBuiltinActor(c) { + return true + } + {{end}} + return false +} + +func IsAccountActor(c cid.Cid) bool { + {{range .versions}} + if c == builtin{{.}}.AccountActorCodeID { + return true + } + {{end}} + return false +} + +func IsStorageMinerActor(c cid.Cid) bool { + {{range .versions}} + if c == builtin{{.}}.StorageMinerActorCodeID { + return true + } + {{end}} + return false +} + +func IsMultisigActor(c 
cid.Cid) bool { + {{range .versions}} + if c == builtin{{.}}.MultisigActorCodeID { + return true + } + {{end}} + return false +} + +func IsPaymentChannelActor(c cid.Cid) bool { + {{range .versions}} + if c == builtin{{.}}.PaymentChannelActorCodeID { + return true + } + {{end}} + return false +} + +func makeAddress(addr string) address.Address { + ret, err := address.NewFromString(addr) + if err != nil { + panic(err) + } + + return ret +} diff --git a/chain/actors/builtin/cron/actor.go.template b/chain/actors/builtin/cron/actor.go.template new file mode 100644 index 00000000000..d7380855632 --- /dev/null +++ b/chain/actors/builtin/cron/actor.go.template @@ -0,0 +1,42 @@ +package cron + +import ( + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "golang.org/x/xerrors" + "github.com/ipfs/go-cid" +{{range .versions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" +{{end}} +) + +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.CronActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + +var ( + Address = builtin{{.latestVersion}}.CronActorAddr + Methods = builtin{{.latestVersion}}.MethodsCron +) + + +type State interface { + GetState() interface{} +} diff --git a/chain/actors/builtin/cron/cron.go b/chain/actors/builtin/cron/cron.go index 65bfd992f75..2275e747f36 100644 --- a/chain/actors/builtin/cron/cron.go +++ b/chain/actors/builtin/cron/cron.go @@ -1,10 +1,72 @@ package cron import ( + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + 
"github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" ) +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { + + case actors.Version0: + return make0(store) + + case actors.Version2: + return make2(store) + + case actors.Version3: + return make3(store) + + case actors.Version4: + return make4(store) + + case actors.Version5: + return make5(store) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.CronActorCodeID, nil + + case actors.Version2: + return builtin2.CronActorCodeID, nil + + case actors.Version3: + return builtin3.CronActorCodeID, nil + + case actors.Version4: + return builtin4.CronActorCodeID, nil + + case actors.Version5: + return builtin5.CronActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + var ( - Address = builtin2.CronActorAddr - Methods = builtin2.MethodsCron + Address = builtin5.CronActorAddr + Methods = builtin5.MethodsCron ) + +type State interface { + GetState() interface{} +} diff --git a/chain/actors/builtin/cron/state.go.template b/chain/actors/builtin/cron/state.go.template new file mode 100644 index 00000000000..99a06d7f806 --- /dev/null +++ b/chain/actors/builtin/cron/state.go.template @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/cron" +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, 
root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + out.State = *cron{{.v}}.ConstructState(cron{{.v}}.BuiltInEntries()) + return &out, nil +} + +type state{{.v}} struct { + cron{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} \ No newline at end of file diff --git a/chain/actors/builtin/cron/v0.go b/chain/actors/builtin/cron/v0.go new file mode 100644 index 00000000000..6147b858c10 --- /dev/null +++ b/chain/actors/builtin/cron/v0.go @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron0 "github.com/filecoin-project/specs-actors/actors/builtin/cron" +) + +var _ State = (*state0)(nil) + +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store) (State, error) { + out := state0{store: store} + out.State = *cron0.ConstructState(cron0.BuiltInEntries()) + return &out, nil +} + +type state0 struct { + cron0.State + store adt.Store +} + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/cron/v2.go b/chain/actors/builtin/cron/v2.go new file mode 100644 index 00000000000..51ca179d9ce --- /dev/null +++ b/chain/actors/builtin/cron/v2.go @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/cron" +) + +var _ State = (*state2)(nil) + +func load2(store adt.Store, root cid.Cid) (State, error) { + out := state2{store: store} + err := store.Get(store.Context(), root, &out) + if err 
!= nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store) (State, error) { + out := state2{store: store} + out.State = *cron2.ConstructState(cron2.BuiltInEntries()) + return &out, nil +} + +type state2 struct { + cron2.State + store adt.Store +} + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/cron/v3.go b/chain/actors/builtin/cron/v3.go new file mode 100644 index 00000000000..ff74d511de5 --- /dev/null +++ b/chain/actors/builtin/cron/v3.go @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/cron" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store) (State, error) { + out := state3{store: store} + out.State = *cron3.ConstructState(cron3.BuiltInEntries()) + return &out, nil +} + +type state3 struct { + cron3.State + store adt.Store +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/cron/v4.go b/chain/actors/builtin/cron/v4.go new file mode 100644 index 00000000000..1cff8cc2813 --- /dev/null +++ b/chain/actors/builtin/cron/v4.go @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/cron" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store) (State, error) { + out := state4{store: store} + out.State = 
*cron4.ConstructState(cron4.BuiltInEntries()) + return &out, nil +} + +type state4 struct { + cron4.State + store adt.Store +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/cron/v5.go b/chain/actors/builtin/cron/v5.go new file mode 100644 index 00000000000..2bb00dc21da --- /dev/null +++ b/chain/actors/builtin/cron/v5.go @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/cron" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = *cron5.ConstructState(cron5.BuiltInEntries()) + return &out, nil +} + +type state5 struct { + cron5.State + store adt.Store +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/init/actor.go.template b/chain/actors/builtin/init/actor.go.template new file mode 100644 index 00000000000..f825eb9fa45 --- /dev/null +++ b/chain/actors/builtin/init/actor.go.template @@ -0,0 +1,89 @@ +package init + +import ( + "github.com/filecoin-project/lotus/chain/actors" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/modules/dtypes" +{{range .versions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" +{{end}} +) + +func init() { +{{range .versions}} + 
builtin.RegisterActorState(builtin{{.}}.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load{{.}}(store, root) + }) +{{end}}} + +var ( + Address = builtin{{.latestVersion}}.InitActorAddr + Methods = builtin{{.latestVersion}}.MethodsInit +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + switch act.Code { +{{range .versions}} + case builtin{{.}}.InitActorCodeID: + return load{{.}}(store, act.Head) +{{end}} + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actors.Version, networkName string) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store, networkName) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.InitActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + ResolveAddress(address address.Address) (address.Address, bool, error) + MapAddressToNewID(address address.Address) (address.Address, error) + NetworkName() (dtypes.NetworkName, error) + + ForEachActor(func(id abi.ActorID, address address.Address) error) error + + // Remove exists to support tooling that manipulates state for testing. + // It should not be used in production code, as init actor entries are + // immutable. + Remove(addrs ...address.Address) error + + // Sets the network's name. This should only be used on upgrade/fork. + SetNetworkName(name string) error + + // Sets the next ID for the init actor. This should only be used for testing. + SetNextID(id abi.ActorID) error + + // Sets the address map for the init actor. This should only be used for testing. 
+ SetAddressMap(mcid cid.Cid) error + + AddressMap() (adt.Map, error) + GetState() interface{} +} diff --git a/chain/actors/builtin/init/diff.go b/chain/actors/builtin/init/diff.go new file mode 100644 index 00000000000..5eb8f3c75b6 --- /dev/null +++ b/chain/actors/builtin/init/diff.go @@ -0,0 +1,152 @@ +package init + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + typegen "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +func DiffAddressMap(pre, cur State) (*AddressMapChanges, error) { + prem, err := pre.AddressMap() + if err != nil { + return nil, err + } + + curm, err := cur.AddressMap() + if err != nil { + return nil, err + } + + preRoot, err := prem.Root() + if err != nil { + return nil, err + } + + curRoot, err := curm.Root() + if err != nil { + return nil, err + } + + results := new(AddressMapChanges) + // no change. + if curRoot.Equals(preRoot) { + return results, nil + } + + err = adt.DiffAdtMap(prem, curm, &addressMapDiffer{results, pre, cur}) + if err != nil { + return nil, err + } + + return results, nil +} + +type addressMapDiffer struct { + Results *AddressMapChanges + pre, adter State +} + +type AddressMapChanges struct { + Added []AddressPair + Modified []AddressChange + Removed []AddressPair +} + +func (i *addressMapDiffer) AsKey(key string) (abi.Keyer, error) { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return nil, err + } + return abi.AddrKey(addr), nil +} + +func (i *addressMapDiffer) Add(key string, val *typegen.Deferred) error { + pkAddr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + id := new(typegen.CborInt) + if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return err + } + idAddr, err := address.NewIDAddress(uint64(*id)) + if err != nil { + return err + } + i.Results.Added = append(i.Results.Added, AddressPair{ + ID: idAddr, + PK: pkAddr, + }) + 
return nil +} + +func (i *addressMapDiffer) Modify(key string, from, to *typegen.Deferred) error { + pkAddr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + + fromID := new(typegen.CborInt) + if err := fromID.UnmarshalCBOR(bytes.NewReader(from.Raw)); err != nil { + return err + } + fromIDAddr, err := address.NewIDAddress(uint64(*fromID)) + if err != nil { + return err + } + + toID := new(typegen.CborInt) + if err := toID.UnmarshalCBOR(bytes.NewReader(to.Raw)); err != nil { + return err + } + toIDAddr, err := address.NewIDAddress(uint64(*toID)) + if err != nil { + return err + } + + i.Results.Modified = append(i.Results.Modified, AddressChange{ + From: AddressPair{ + ID: fromIDAddr, + PK: pkAddr, + }, + To: AddressPair{ + ID: toIDAddr, + PK: pkAddr, + }, + }) + return nil +} + +func (i *addressMapDiffer) Remove(key string, val *typegen.Deferred) error { + pkAddr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + id := new(typegen.CborInt) + if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return err + } + idAddr, err := address.NewIDAddress(uint64(*id)) + if err != nil { + return err + } + i.Results.Removed = append(i.Results.Removed, AddressPair{ + ID: idAddr, + PK: pkAddr, + }) + return nil +} + +type AddressChange struct { + From AddressPair + To AddressPair +} + +type AddressPair struct { + ID address.Address + PK address.Address +} diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go index 60dbdf4fea4..e1bd6f3711c 100644 --- a/chain/actors/builtin/init/init.go +++ b/chain/actors/builtin/init/init.go @@ -1,6 +1,7 @@ package init import ( + "github.com/filecoin-project/lotus/chain/actors" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -14,33 +15,111 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 
"github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" ) func init() { + builtin.RegisterActorState(builtin0.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load0(store, root) }) + builtin.RegisterActorState(builtin2.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + + builtin.RegisterActorState(builtin3.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) + + builtin.RegisterActorState(builtin4.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load4(store, root) + }) + + builtin.RegisterActorState(builtin5.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) } var ( - Address = builtin2.InitActorAddr - Methods = builtin2.MethodsInit + Address = builtin5.InitActorAddr + Methods = builtin5.MethodsInit ) func Load(store adt.Store, act *types.Actor) (State, error) { switch act.Code { + case builtin0.InitActorCodeID: return load0(store, act.Head) + case builtin2.InitActorCodeID: return load2(store, act.Head) + + case builtin3.InitActorCodeID: + return load3(store, act.Head) + + case builtin4.InitActorCodeID: + return load4(store, act.Head) + + case builtin5.InitActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version, networkName string) (State, error) { + switch av { + + case actors.Version0: + return make0(store, networkName) + + case actors.Version2: + return make2(store, networkName) + + case actors.Version3: + return make3(store, networkName) + + case actors.Version4: + return 
make4(store, networkName) + + case actors.Version5: + return make5(store, networkName) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.InitActorCodeID, nil + + case actors.Version2: + return builtin2.InitActorCodeID, nil + + case actors.Version3: + return builtin3.InitActorCodeID, nil + + case actors.Version4: + return builtin4.InitActorCodeID, nil + + case actors.Version5: + return builtin5.InitActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -57,4 +136,13 @@ type State interface { // Sets the network's name. This should only be used on upgrade/fork. SetNetworkName(name string) error + + // Sets the next ID for the init actor. This should only be used for testing. + SetNextID(id abi.ActorID) error + + // Sets the address map for the init actor. This should only be used for testing. 
+ SetAddressMap(mcid cid.Cid) error + + AddressMap() (adt.Map, error) + GetState() interface{} } diff --git a/chain/actors/builtin/init/state.go.template b/chain/actors/builtin/init/state.go.template new file mode 100644 index 00000000000..482ad4df526 --- /dev/null +++ b/chain/actors/builtin/init/state.go.template @@ -0,0 +1,123 @@ +package init + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/node/modules/dtypes" + +{{if (ge .v 3)}} + builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" +{{end}} + + init{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/init" + adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store, networkName string) (State, error) { + out := state{{.v}}{store: store} + {{if (le .v 2)}} + mr, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *init{{.v}}.ConstructState(mr, networkName) + {{else}} + s, err := init{{.v}}.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + {{end}} + return &out, nil +} + +type state{{.v}} struct { + init{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state{{.v}}) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, 
address) +} + +func (s *state{{.v}}) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt{{.v}}.AsMap(s.store, s.State.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state{{.v}}) NetworkName() (dtypes.NetworkName, error) { + return dtypes.NetworkName(s.State.NetworkName), nil +} + +func (s *state{{.v}}) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state{{.v}}) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state{{.v}}) Remove(addrs ...address.Address) (err error) { + m, err := adt{{.v}}.AsMap(s.store, s.State.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return xerrors.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state{{.v}}) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state{{.v}}) AddressMap() (adt.Map, error) { + return adt{{.v}}.AsMap(s.store, s.State.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} \ No newline at end of file diff --git a/chain/actors/builtin/init/v0.go b/chain/actors/builtin/init/v0.go index ceb87f97083..ddd2dab94f2 100644 --- a/chain/actors/builtin/init/v0.go +++ b/chain/actors/builtin/init/v0.go @@ -25,6 +25,19 @@ func load0(store adt.Store, root cid.Cid) 
(State, error) { return &out, nil } +func make0(store adt.Store, networkName string) (State, error) { + out := state0{store: store} + + mr, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *init0.ConstructState(mr, networkName) + + return &out, nil +} + type state0 struct { init0.State store adt.Store @@ -62,6 +75,11 @@ func (s *state0) SetNetworkName(name string) error { return nil } +func (s *state0) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + func (s *state0) Remove(addrs ...address.Address) (err error) { m, err := adt0.AsMap(s.store, s.State.AddressMap) if err != nil { @@ -79,3 +97,16 @@ func (s *state0) Remove(addrs ...address.Address) (err error) { s.State.AddressMap = amr return nil } + +func (s *state0) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state0) AddressMap() (adt.Map, error) { + return adt0.AsMap(s.store, s.State.AddressMap) +} + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/init/v2.go b/chain/actors/builtin/init/v2.go index 5aa0ddc1839..72e2d56a522 100644 --- a/chain/actors/builtin/init/v2.go +++ b/chain/actors/builtin/init/v2.go @@ -25,6 +25,19 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store, networkName string) (State, error) { + out := state2{store: store} + + mr, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *init2.ConstructState(mr, networkName) + + return &out, nil +} + type state2 struct { init2.State store adt.Store @@ -62,6 +75,11 @@ func (s *state2) SetNetworkName(name string) error { return nil } +func (s *state2) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + func (s *state2) Remove(addrs ...address.Address) (err error) { m, err := adt2.AsMap(s.store, s.State.AddressMap) if err != nil { @@ -79,3 +97,16 @@ func (s *state2) 
Remove(addrs ...address.Address) (err error) { s.State.AddressMap = amr return nil } + +func (s *state2) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state2) AddressMap() (adt.Map, error) { + return adt2.AsMap(s.store, s.State.AddressMap) +} + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/init/v3.go b/chain/actors/builtin/init/v3.go new file mode 100644 index 00000000000..4609c94a372 --- /dev/null +++ b/chain/actors/builtin/init/v3.go @@ -0,0 +1,114 @@ +package init + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/node/modules/dtypes" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store, networkName string) (State, error) { + out := state3{store: store} + + s, err := init3.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state3 struct { + init3.State + store adt.Store +} + +func (s *state3) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state3) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state3) ForEachActor(cb func(id abi.ActorID, address 
address.Address) error) error { + addrs, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state3) NetworkName() (dtypes.NetworkName, error) { + return dtypes.NetworkName(s.State.NetworkName), nil +} + +func (s *state3) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state3) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state3) Remove(addrs ...address.Address) (err error) { + m, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return xerrors.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state3) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state3) AddressMap() (adt.Map, error) { + return adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/init/v4.go b/chain/actors/builtin/init/v4.go new file mode 100644 index 00000000000..dc56d1f196c --- /dev/null +++ b/chain/actors/builtin/init/v4.go @@ -0,0 +1,114 @@ +package init + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + 
"github.com/filecoin-project/lotus/node/modules/dtypes" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + init4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/init" + adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store, networkName string) (State, error) { + out := state4{store: store} + + s, err := init4.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state4 struct { + init4.State + store adt.Store +} + +func (s *state4) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state4) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state4) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt4.AsMap(s.store, s.State.AddressMap, builtin4.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state4) NetworkName() (dtypes.NetworkName, error) { + return dtypes.NetworkName(s.State.NetworkName), nil +} + +func (s *state4) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state4) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state4) Remove(addrs ...address.Address) (err error) { + m, err := adt4.AsMap(s.store, s.State.AddressMap, builtin4.DefaultHamtBitwidth) + 
if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return xerrors.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state4) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state4) AddressMap() (adt.Map, error) { + return adt4.AsMap(s.store, s.State.AddressMap, builtin4.DefaultHamtBitwidth) +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/init/v5.go b/chain/actors/builtin/init/v5.go new file mode 100644 index 00000000000..107366de536 --- /dev/null +++ b/chain/actors/builtin/init/v5.go @@ -0,0 +1,114 @@ +package init + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/node/modules/dtypes" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + init5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/init" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, networkName string) (State, error) { + out := state5{store: store} + + s, err := init5.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state5 struct { + init5.State + store adt.Store +} + +func (s *state5) ResolveAddress(address 
address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state5) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state5) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state5) NetworkName() (dtypes.NetworkName, error) { + return dtypes.NetworkName(s.State.NetworkName), nil +} + +func (s *state5) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state5) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state5) Remove(addrs ...address.Address) (err error) { + m, err := adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return xerrors.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state5) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state5) AddressMap() (adt.Map, error) { + return adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/market/actor.go.template b/chain/actors/builtin/market/actor.go.template new file mode 100644 index 00000000000..f78c84b8f92 --- /dev/null +++ 
b/chain/actors/builtin/market/actor.go.template @@ -0,0 +1,182 @@ +package market + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" +{{range .versions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" +{{end}} + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +) + +func init() { +{{range .versions}} + builtin.RegisterActorState(builtin{{.}}.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load{{.}}(store, root) + }) +{{end}}} + +var ( + Address = builtin{{.latestVersion}}.StorageMarketActorAddr + Methods = builtin{{.latestVersion}}.MethodsMarket +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + switch act.Code { +{{range .versions}} + case builtin{{.}}.StorageMarketActorCodeID: + return load{{.}}(store, act.Head) +{{end}} + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.StorageMarketActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + BalancesChanged(State) (bool, error) + 
EscrowTable() (BalanceTable, error) + LockedTable() (BalanceTable, error) + TotalLocked() (abi.TokenAmount, error) + StatesChanged(State) (bool, error) + States() (DealStates, error) + ProposalsChanged(State) (bool, error) + Proposals() (DealProposals, error) + VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, + ) (weight, verifiedWeight abi.DealWeight, err error) + NextID() (abi.DealID, error) + GetState() interface{} +} + +type BalanceTable interface { + ForEach(cb func(address.Address, abi.TokenAmount) error) error + Get(key address.Address) (abi.TokenAmount, error) +} + +type DealStates interface { + ForEach(cb func(id abi.DealID, ds DealState) error) error + Get(id abi.DealID) (*DealState, bool, error) + + array() adt.Array + decode(*cbg.Deferred) (*DealState, error) +} + +type DealProposals interface { + ForEach(cb func(id abi.DealID, dp DealProposal) error) error + Get(id abi.DealID) (*DealProposal, bool, error) + + array() adt.Array + decode(*cbg.Deferred) (*DealProposal, error) +} + +type PublishStorageDealsParams = market0.PublishStorageDealsParams +type PublishStorageDealsReturn = market0.PublishStorageDealsReturn +type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams +type WithdrawBalanceParams = market0.WithdrawBalanceParams + +type ClientDealProposal = market0.ClientDealProposal + +type DealState struct { + SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector + LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated + SlashEpoch abi.ChainEpoch // -1 if deal never slashed +} + +type DealProposal struct { + PieceCID cid.Cid + PieceSize abi.PaddedPieceSize + VerifiedDeal bool + Client address.Address + Provider address.Address + Label string + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch + StoragePricePerEpoch abi.TokenAmount + ProviderCollateral abi.TokenAmount + ClientCollateral abi.TokenAmount +} + +type DealStateChanges 
struct { + Added []DealIDState + Modified []DealStateChange + Removed []DealIDState +} + +type DealIDState struct { + ID abi.DealID + Deal DealState +} + +// DealStateChange is a change in deal state from -> to +type DealStateChange struct { + ID abi.DealID + From *DealState + To *DealState +} + +type DealProposalChanges struct { + Added []ProposalIDState + Removed []ProposalIDState +} + +type ProposalIDState struct { + ID abi.DealID + Proposal DealProposal +} + +func EmptyDealState() *DealState { + return &DealState{ + SectorStartEpoch: -1, + SlashEpoch: -1, + LastUpdatedEpoch: -1, + } +} + +// returns the earned fees and pending fees for a given deal +func (deal DealProposal) GetDealFees(height abi.ChainEpoch) (abi.TokenAmount, abi.TokenAmount) { + tf := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(deal.EndEpoch-deal.StartEpoch))) + + ef := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(height-deal.StartEpoch))) + if ef.LessThan(big.Zero()) { + ef = big.Zero() + } + + if ef.GreaterThan(tf) { + ef = tf + } + + return ef, big.Sub(tf, ef) +} diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go index 195ca40b930..026e35d4e2f 100644 --- a/chain/actors/builtin/market/market.go +++ b/chain/actors/builtin/market/market.go @@ -5,43 +5,124 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/cbor" "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 
"github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" ) func init() { + builtin.RegisterActorState(builtin0.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load0(store, root) }) + builtin.RegisterActorState(builtin2.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + + builtin.RegisterActorState(builtin3.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) + + builtin.RegisterActorState(builtin4.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load4(store, root) + }) + + builtin.RegisterActorState(builtin5.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) } var ( - Address = builtin2.StorageMarketActorAddr - Methods = builtin2.MethodsMarket + Address = builtin5.StorageMarketActorAddr + Methods = builtin5.MethodsMarket ) -func Load(store adt.Store, act *types.Actor) (st State, err error) { +func Load(store adt.Store, act *types.Actor) (State, error) { switch act.Code { + case builtin0.StorageMarketActorCodeID: return load0(store, act.Head) + case builtin2.StorageMarketActorCodeID: return load2(store, act.Head) + + case builtin3.StorageMarketActorCodeID: + return load3(store, act.Head) + + case builtin4.StorageMarketActorCodeID: + return load4(store, act.Head) + + case builtin5.StorageMarketActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { + 
+ case actors.Version0: + return make0(store) + + case actors.Version2: + return make2(store) + + case actors.Version3: + return make3(store) + + case actors.Version4: + return make4(store) + + case actors.Version5: + return make5(store) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.StorageMarketActorCodeID, nil + + case actors.Version2: + return builtin2.StorageMarketActorCodeID, nil + + case actors.Version3: + return builtin3.StorageMarketActorCodeID, nil + + case actors.Version4: + return builtin4.StorageMarketActorCodeID, nil + + case actors.Version5: + return builtin5.StorageMarketActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler BalancesChanged(State) (bool, error) @@ -55,6 +136,8 @@ type State interface { VerifyDealsForActivation( minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, ) (weight, verifiedWeight abi.DealWeight, err error) + NextID() (abi.DealID, error) + GetState() interface{} } type BalanceTable interface { @@ -81,6 +164,7 @@ type DealProposals interface { type PublishStorageDealsParams = market0.PublishStorageDealsParams type PublishStorageDealsReturn = market0.PublishStorageDealsReturn type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams +type WithdrawBalanceParams = market0.WithdrawBalanceParams type ClientDealProposal = market0.ClientDealProposal @@ -139,3 +223,19 @@ func EmptyDealState() *DealState { LastUpdatedEpoch: -1, } } + +// returns the earned fees and pending fees for a given deal +func (deal DealProposal) GetDealFees(height abi.ChainEpoch) (abi.TokenAmount, abi.TokenAmount) { + tf := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(deal.EndEpoch-deal.StartEpoch))) + + ef := big.Mul(deal.StoragePricePerEpoch, 
big.NewInt(int64(height-deal.StartEpoch))) + if ef.LessThan(big.Zero()) { + ef = big.Zero() + } + + if ef.GreaterThan(tf) { + ef = tf + } + + return ef, big.Sub(tf, ef) +} diff --git a/chain/actors/builtin/market/state.go.template b/chain/actors/builtin/market/state.go.template new file mode 100644 index 00000000000..70b73114843 --- /dev/null +++ b/chain/actors/builtin/market/state.go.template @@ -0,0 +1,238 @@ +package market + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" + + market{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/market" + adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + {{if (le .v 2)}} + ea, err := adt{{.v}}.MakeEmptyArray(store).Root() + if err != nil { + return nil, err + } + + em, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *market{{.v}}.ConstructState(ea, em, em) + {{else}} + s, err := market{{.v}}.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + {{end}} + return &out, nil +} + +type state{{.v}} struct { + market{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state{{.v}}) BalancesChanged(otherState 
State) (bool, error) { + otherState{{.v}}, ok := otherState.(*state{{.v}}) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState{{.v}}.State.EscrowTable) || !s.State.LockedTable.Equals(otherState{{.v}}.State.LockedTable), nil +} + +func (s *state{{.v}}) StatesChanged(otherState State) (bool, error) { + otherState{{.v}}, ok := otherState.(*state{{.v}}) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState{{.v}}.State.States), nil +} + +func (s *state{{.v}}) States() (DealStates, error) { + stateArray, err := adt{{.v}}.AsArray(s.store, s.State.States{{if (ge .v 3)}}, market{{.v}}.StatesAmtBitwidth{{end}}) + if err != nil { + return nil, err + } + return &dealStates{{.v}}{stateArray}, nil +} + +func (s *state{{.v}}) ProposalsChanged(otherState State) (bool, error) { + otherState{{.v}}, ok := otherState.(*state{{.v}}) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState{{.v}}.State.Proposals), nil +} + +func (s *state{{.v}}) Proposals() (DealProposals, error) { + proposalArray, err := adt{{.v}}.AsArray(s.store, s.State.Proposals{{if (ge .v 3)}}, market{{.v}}.ProposalsAmtBitwidth{{end}}) + if err != nil { + return nil, err + } + return &dealProposals{{.v}}{proposalArray}, nil +} + +func (s *state{{.v}}) EscrowTable() (BalanceTable, error) { + bt, err := adt{{.v}}.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable{{.v}}{bt}, nil +} + +func (s *state{{.v}}) LockedTable() (BalanceTable, error) { + bt, err := adt{{.v}}.AsBalanceTable(s.store, 
s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable{{.v}}{bt}, nil +} + +func (s *state{{.v}}) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw{{if (ge .v 2)}}, _{{end}}, err := market{{.v}}.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state{{.v}}) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable{{.v}} struct { + *adt{{.v}}.BalanceTable +} + +func (bt *balanceTable{{.v}}) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt{{.v}}.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates{{.v}} struct { + adt.Array +} + +func (s *dealStates{{.v}}) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal{{.v}} market{{.v}}.DealState + found, err := s.Array.Get(uint64(dealID), &deal{{.v}}) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV{{.v}}DealState(deal{{.v}}) + return &deal, true, nil +} + +func (s *dealStates{{.v}}) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds{{.v}} market{{.v}}.DealState + return s.Array.ForEach(&ds{{.v}}, func(idx int64) error { + return cb(abi.DealID(idx), fromV{{.v}}DealState(ds{{.v}})) + }) +} + +func (s *dealStates{{.v}}) decode(val *cbg.Deferred) (*DealState, error) { + var ds{{.v}} market{{.v}}.DealState + if err := ds{{.v}}.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV{{.v}}DealState(ds{{.v}}) + return &ds, nil +} + +func (s *dealStates{{.v}}) array() adt.Array { + return s.Array +} + +func fromV{{.v}}DealState(v{{.v}} 
market{{.v}}.DealState) DealState { + return (DealState)(v{{.v}}) +} + +type dealProposals{{.v}} struct { + adt.Array +} + +func (s *dealProposals{{.v}}) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal{{.v}} market{{.v}}.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal{{.v}}) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + proposal := fromV{{.v}}DealProposal(proposal{{.v}}) + return &proposal, true, nil +} + +func (s *dealProposals{{.v}}) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp{{.v}} market{{.v}}.DealProposal + return s.Array.ForEach(&dp{{.v}}, func(idx int64) error { + return cb(abi.DealID(idx), fromV{{.v}}DealProposal(dp{{.v}})) + }) +} + +func (s *dealProposals{{.v}}) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp{{.v}} market{{.v}}.DealProposal + if err := dp{{.v}}.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + dp := fromV{{.v}}DealProposal(dp{{.v}}) + return &dp, nil +} + +func (s *dealProposals{{.v}}) array() adt.Array { + return s.Array +} + +func fromV{{.v}}DealProposal(v{{.v}} market{{.v}}.DealProposal) DealProposal { + return (DealProposal)(v{{.v}}) +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/market/v0.go b/chain/actors/builtin/market/v0.go index 20d38b5f125..b3093b54b0f 100644 --- a/chain/actors/builtin/market/v0.go +++ b/chain/actors/builtin/market/v0.go @@ -26,6 +26,24 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store) (State, error) { + out := state0{store: store} + + ea, err := adt0.MakeEmptyArray(store).Root() + if err != nil { + return nil, err + } + + em, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *market0.ConstructState(ea, em, em) + + return &out, nil +} + type state0 struct { market0.State store adt.Store @@ 
-102,7 +120,12 @@ func (s *state0) LockedTable() (BalanceTable, error) { func (s *state0) VerifyDealsForActivation( minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, ) (weight, verifiedWeight abi.DealWeight, err error) { - return market0.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + w, vw, err := market0.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state0) NextID() (abi.DealID, error) { + return s.State.NextID, nil } type balanceTable0 struct { @@ -202,3 +225,7 @@ func (s *dealProposals0) array() adt.Array { func fromV0DealProposal(v0 market0.DealProposal) DealProposal { return (DealProposal)(v0) } + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/market/v2.go b/chain/actors/builtin/market/v2.go index a5e5c7b45d4..fdedcce8547 100644 --- a/chain/actors/builtin/market/v2.go +++ b/chain/actors/builtin/market/v2.go @@ -26,6 +26,24 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store) (State, error) { + out := state2{store: store} + + ea, err := adt2.MakeEmptyArray(store).Root() + if err != nil { + return nil, err + } + + em, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *market2.ConstructState(ea, em, em) + + return &out, nil +} + type state2 struct { market2.State store adt.Store @@ -106,6 +124,10 @@ func (s *state2) VerifyDealsForActivation( return w, vw, err } +func (s *state2) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + type balanceTable2 struct { *adt2.BalanceTable } @@ -140,18 +162,18 @@ func (s *dealStates2) Get(dealID abi.DealID) (*DealState, bool, error) { } func (s *dealStates2) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { - var ds1 market2.DealState - return s.Array.ForEach(&ds1, func(idx int64) error { - 
return cb(abi.DealID(idx), fromV2DealState(ds1)) + var ds2 market2.DealState + return s.Array.ForEach(&ds2, func(idx int64) error { + return cb(abi.DealID(idx), fromV2DealState(ds2)) }) } func (s *dealStates2) decode(val *cbg.Deferred) (*DealState, error) { - var ds1 market2.DealState - if err := ds1.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + var ds2 market2.DealState + if err := ds2.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } - ds := fromV2DealState(ds1) + ds := fromV2DealState(ds2) return &ds, nil } @@ -159,8 +181,8 @@ func (s *dealStates2) array() adt.Array { return s.Array } -func fromV2DealState(v1 market2.DealState) DealState { - return (DealState)(v1) +func fromV2DealState(v2 market2.DealState) DealState { + return (DealState)(v2) } type dealProposals2 struct { @@ -181,18 +203,18 @@ func (s *dealProposals2) Get(dealID abi.DealID) (*DealProposal, bool, error) { } func (s *dealProposals2) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { - var dp1 market2.DealProposal - return s.Array.ForEach(&dp1, func(idx int64) error { - return cb(abi.DealID(idx), fromV2DealProposal(dp1)) + var dp2 market2.DealProposal + return s.Array.ForEach(&dp2, func(idx int64) error { + return cb(abi.DealID(idx), fromV2DealProposal(dp2)) }) } func (s *dealProposals2) decode(val *cbg.Deferred) (*DealProposal, error) { - var dp1 market2.DealProposal - if err := dp1.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + var dp2 market2.DealProposal + if err := dp2.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } - dp := fromV2DealProposal(dp1) + dp := fromV2DealProposal(dp2) return &dp, nil } @@ -200,6 +222,10 @@ func (s *dealProposals2) array() adt.Array { return s.Array } -func fromV2DealProposal(v1 market2.DealProposal) DealProposal { - return (DealProposal)(v1) +func fromV2DealProposal(v2 market2.DealProposal) DealProposal { + return (DealProposal)(v2) +} + +func (s *state2) GetState() interface{} { + 
return &s.State } diff --git a/chain/actors/builtin/market/v3.go b/chain/actors/builtin/market/v3.go new file mode 100644 index 00000000000..53d26644380 --- /dev/null +++ b/chain/actors/builtin/market/v3.go @@ -0,0 +1,226 @@ +package market + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" + + market3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/market" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store) (State, error) { + out := state3{store: store} + + s, err := market3.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state3 struct { + market3.State + store adt.Store +} + +func (s *state3) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state3) BalancesChanged(otherState State) (bool, error) { + otherState3, ok := otherState.(*state3) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState3.State.EscrowTable) || !s.State.LockedTable.Equals(otherState3.State.LockedTable), nil +} + +func (s *state3) StatesChanged(otherState State) (bool, error) { + otherState3, ok := otherState.(*state3) + if !ok { + // there's no way to compare different versions of the 
state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState3.State.States), nil +} + +func (s *state3) States() (DealStates, error) { + stateArray, err := adt3.AsArray(s.store, s.State.States, market3.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates3{stateArray}, nil +} + +func (s *state3) ProposalsChanged(otherState State) (bool, error) { + otherState3, ok := otherState.(*state3) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState3.State.Proposals), nil +} + +func (s *state3) Proposals() (DealProposals, error) { + proposalArray, err := adt3.AsArray(s.store, s.State.Proposals, market3.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals3{proposalArray}, nil +} + +func (s *state3) EscrowTable() (BalanceTable, error) { + bt, err := adt3.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable3{bt}, nil +} + +func (s *state3) LockedTable() (BalanceTable, error) { + bt, err := adt3.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable3{bt}, nil +} + +func (s *state3) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market3.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state3) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable3 struct { + *adt3.BalanceTable +} + +func (bt *balanceTable3) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt3.Map)(bt.BalanceTable) + var ta abi.TokenAmount + 
return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates3 struct { + adt.Array +} + +func (s *dealStates3) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal3 market3.DealState + found, err := s.Array.Get(uint64(dealID), &deal3) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV3DealState(deal3) + return &deal, true, nil +} + +func (s *dealStates3) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds3 market3.DealState + return s.Array.ForEach(&ds3, func(idx int64) error { + return cb(abi.DealID(idx), fromV3DealState(ds3)) + }) +} + +func (s *dealStates3) decode(val *cbg.Deferred) (*DealState, error) { + var ds3 market3.DealState + if err := ds3.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV3DealState(ds3) + return &ds, nil +} + +func (s *dealStates3) array() adt.Array { + return s.Array +} + +func fromV3DealState(v3 market3.DealState) DealState { + return (DealState)(v3) +} + +type dealProposals3 struct { + adt.Array +} + +func (s *dealProposals3) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal3 market3.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal3) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + proposal := fromV3DealProposal(proposal3) + return &proposal, true, nil +} + +func (s *dealProposals3) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp3 market3.DealProposal + return s.Array.ForEach(&dp3, func(idx int64) error { + return cb(abi.DealID(idx), fromV3DealProposal(dp3)) + }) +} + +func (s *dealProposals3) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp3 market3.DealProposal + if err := dp3.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + dp := 
fromV3DealProposal(dp3) + return &dp, nil +} + +func (s *dealProposals3) array() adt.Array { + return s.Array +} + +func fromV3DealProposal(v3 market3.DealProposal) DealProposal { + return (DealProposal)(v3) +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/market/v4.go b/chain/actors/builtin/market/v4.go new file mode 100644 index 00000000000..30aa2692057 --- /dev/null +++ b/chain/actors/builtin/market/v4.go @@ -0,0 +1,226 @@ +package market + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" + + market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market" + adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store) (State, error) { + out := state4{store: store} + + s, err := market4.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state4 struct { + market4.State + store adt.Store +} + +func (s *state4) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state4) BalancesChanged(otherState State) (bool, error) { + otherState4, ok := otherState.(*state4) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return 
!s.State.EscrowTable.Equals(otherState4.State.EscrowTable) || !s.State.LockedTable.Equals(otherState4.State.LockedTable), nil +} + +func (s *state4) StatesChanged(otherState State) (bool, error) { + otherState4, ok := otherState.(*state4) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState4.State.States), nil +} + +func (s *state4) States() (DealStates, error) { + stateArray, err := adt4.AsArray(s.store, s.State.States, market4.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates4{stateArray}, nil +} + +func (s *state4) ProposalsChanged(otherState State) (bool, error) { + otherState4, ok := otherState.(*state4) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState4.State.Proposals), nil +} + +func (s *state4) Proposals() (DealProposals, error) { + proposalArray, err := adt4.AsArray(s.store, s.State.Proposals, market4.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals4{proposalArray}, nil +} + +func (s *state4) EscrowTable() (BalanceTable, error) { + bt, err := adt4.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable4{bt}, nil +} + +func (s *state4) LockedTable() (BalanceTable, error) { + bt, err := adt4.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable4{bt}, nil +} + +func (s *state4) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market4.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, 
err +} + +func (s *state4) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable4 struct { + *adt4.BalanceTable +} + +func (bt *balanceTable4) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt4.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates4 struct { + adt.Array +} + +func (s *dealStates4) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal4 market4.DealState + found, err := s.Array.Get(uint64(dealID), &deal4) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV4DealState(deal4) + return &deal, true, nil +} + +func (s *dealStates4) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds4 market4.DealState + return s.Array.ForEach(&ds4, func(idx int64) error { + return cb(abi.DealID(idx), fromV4DealState(ds4)) + }) +} + +func (s *dealStates4) decode(val *cbg.Deferred) (*DealState, error) { + var ds4 market4.DealState + if err := ds4.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV4DealState(ds4) + return &ds, nil +} + +func (s *dealStates4) array() adt.Array { + return s.Array +} + +func fromV4DealState(v4 market4.DealState) DealState { + return (DealState)(v4) +} + +type dealProposals4 struct { + adt.Array +} + +func (s *dealProposals4) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal4 market4.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal4) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + proposal := fromV4DealProposal(proposal4) + return &proposal, true, nil +} + +func (s *dealProposals4) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp4 market4.DealProposal + return s.Array.ForEach(&dp4, func(idx 
int64) error { + return cb(abi.DealID(idx), fromV4DealProposal(dp4)) + }) +} + +func (s *dealProposals4) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp4 market4.DealProposal + if err := dp4.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + dp := fromV4DealProposal(dp4) + return &dp, nil +} + +func (s *dealProposals4) array() adt.Array { + return s.Array +} + +func fromV4DealProposal(v4 market4.DealProposal) DealProposal { + return (DealProposal)(v4) +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/market/v5.go b/chain/actors/builtin/market/v5.go new file mode 100644 index 00000000000..12378c76dc1 --- /dev/null +++ b/chain/actors/builtin/market/v5.go @@ -0,0 +1,226 @@ +package market + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" + + market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + + s, err := market5.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state5 struct { + market5.State + store adt.Store +} + +func (s *state5) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state5) BalancesChanged(otherState 
State) (bool, error) { + otherState5, ok := otherState.(*state5) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState5.State.EscrowTable) || !s.State.LockedTable.Equals(otherState5.State.LockedTable), nil +} + +func (s *state5) StatesChanged(otherState State) (bool, error) { + otherState5, ok := otherState.(*state5) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState5.State.States), nil +} + +func (s *state5) States() (DealStates, error) { + stateArray, err := adt5.AsArray(s.store, s.State.States, market5.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates5{stateArray}, nil +} + +func (s *state5) ProposalsChanged(otherState State) (bool, error) { + otherState5, ok := otherState.(*state5) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState5.State.Proposals), nil +} + +func (s *state5) Proposals() (DealProposals, error) { + proposalArray, err := adt5.AsArray(s.store, s.State.Proposals, market5.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals5{proposalArray}, nil +} + +func (s *state5) EscrowTable() (BalanceTable, error) { + bt, err := adt5.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable5{bt}, nil +} + +func (s *state5) LockedTable() (BalanceTable, error) { + bt, err := adt5.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable5{bt}, nil +} + +func (s *state5) VerifyDealsForActivation( + minerAddr address.Address, deals 
[]abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market5.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state5) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable5 struct { + *adt5.BalanceTable +} + +func (bt *balanceTable5) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt5.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates5 struct { + adt.Array +} + +func (s *dealStates5) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal5 market5.DealState + found, err := s.Array.Get(uint64(dealID), &deal5) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV5DealState(deal5) + return &deal, true, nil +} + +func (s *dealStates5) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds5 market5.DealState + return s.Array.ForEach(&ds5, func(idx int64) error { + return cb(abi.DealID(idx), fromV5DealState(ds5)) + }) +} + +func (s *dealStates5) decode(val *cbg.Deferred) (*DealState, error) { + var ds5 market5.DealState + if err := ds5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV5DealState(ds5) + return &ds, nil +} + +func (s *dealStates5) array() adt.Array { + return s.Array +} + +func fromV5DealState(v5 market5.DealState) DealState { + return (DealState)(v5) +} + +type dealProposals5 struct { + adt.Array +} + +func (s *dealProposals5) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal5 market5.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal5) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + 
proposal := fromV5DealProposal(proposal5) + return &proposal, true, nil +} + +func (s *dealProposals5) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp5 market5.DealProposal + return s.Array.ForEach(&dp5, func(idx int64) error { + return cb(abi.DealID(idx), fromV5DealProposal(dp5)) + }) +} + +func (s *dealProposals5) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp5 market5.DealProposal + if err := dp5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + dp := fromV5DealProposal(dp5) + return &dp, nil +} + +func (s *dealProposals5) array() adt.Array { + return s.Array +} + +func fromV5DealProposal(v5 market5.DealProposal) DealProposal { + return (DealProposal)(v5) +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/miner/actor.go.template b/chain/actors/builtin/miner/actor.go.template new file mode 100644 index 00000000000..12f418b3784 --- /dev/null +++ b/chain/actors/builtin/miner/actor.go.template @@ -0,0 +1,305 @@ +package miner + +import ( + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/dline" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" + + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + miner3 
"github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" +{{range .versions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" +{{end}} +) + +func init() { +{{range .versions}} + builtin.RegisterActorState(builtin{{.}}.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load{{.}}(store, root) + }) +{{end}} +} + +var Methods = builtin{{.latestVersion}}.MethodsMiner + +// Unchanged between v0, v2, v3, and v4 actors +var WPoStProvingPeriod = miner0.WPoStProvingPeriod +var WPoStPeriodDeadlines = miner0.WPoStPeriodDeadlines +var WPoStChallengeWindow = miner0.WPoStChallengeWindow +var WPoStChallengeLookback = miner0.WPoStChallengeLookback +var FaultDeclarationCutoff = miner0.FaultDeclarationCutoff + +const MinSectorExpiration = miner0.MinSectorExpiration + +// Not used / checked in v0 +// TODO: Abstract over network versions +var DeclarationsMax = miner2.DeclarationsMax +var AddressedSectorsMax = miner2.AddressedSectorsMax + +func Load(store adt.Store, act *types.Actor) (State, error) { + switch act.Code { +{{range .versions}} + case builtin{{.}}.StorageMinerActorCodeID: + return load{{.}}(store, act.Head) +{{end}} +} + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.StorageMinerActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + // Total available balance to spend. 
+ AvailableBalance(abi.TokenAmount) (abi.TokenAmount, error) + // Funds that will vest by the given epoch. + VestedFunds(abi.ChainEpoch) (abi.TokenAmount, error) + // Funds locked for various reasons. + LockedFunds() (LockedFunds, error) + FeeDebt() (abi.TokenAmount, error) + + GetSector(abi.SectorNumber) (*SectorOnChainInfo, error) + FindSector(abi.SectorNumber) (*SectorLocation, error) + GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error) + GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) + ForEachPrecommittedSector(func(SectorPreCommitOnChainInfo) error) error + LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error) + NumLiveSectors() (uint64, error) + IsAllocated(abi.SectorNumber) (bool, error) + // UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than + // count if there aren't enough). + UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) + GetAllocatedSectors() (*bitfield.BitField, error) + + // Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors + GetProvingPeriodStart() (abi.ChainEpoch, error) + // Testing only + EraseAllUnproven() error + + LoadDeadline(idx uint64) (Deadline, error) + ForEachDeadline(cb func(idx uint64, dl Deadline) error) error + NumDeadlines() (uint64, error) + DeadlinesChanged(State) (bool, error) + + Info() (MinerInfo, error) + MinerInfoChanged(State) (bool, error) + + DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) + DeadlineCronActive() (bool, error) + + // Diff helpers. Used by Diff* functions internally. 
+ sectors() (adt.Array, error) + decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error) + precommits() (adt.Map, error) + decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (SectorPreCommitOnChainInfo, error) + GetState() interface{} +} + +type Deadline interface { + LoadPartition(idx uint64) (Partition, error) + ForEachPartition(cb func(idx uint64, part Partition) error) error + PartitionsPoSted() (bitfield.BitField, error) + + PartitionsChanged(Deadline) (bool, error) + DisputableProofCount() (uint64, error) +} + +type Partition interface { + AllSectors() (bitfield.BitField, error) + FaultySectors() (bitfield.BitField, error) + RecoveringSectors() (bitfield.BitField, error) + LiveSectors() (bitfield.BitField, error) + ActiveSectors() (bitfield.BitField, error) +} + +type SectorOnChainInfo struct { + SectorNumber abi.SectorNumber + SealProof abi.RegisteredSealProof + SealedCID cid.Cid + DealIDs []abi.DealID + Activation abi.ChainEpoch + Expiration abi.ChainEpoch + DealWeight abi.DealWeight + VerifiedDealWeight abi.DealWeight + InitialPledge abi.TokenAmount + ExpectedDayReward abi.TokenAmount + ExpectedStoragePledge abi.TokenAmount +} + +type SectorPreCommitInfo = miner0.SectorPreCommitInfo + +type SectorPreCommitOnChainInfo struct { + Info SectorPreCommitInfo + PreCommitDeposit abi.TokenAmount + PreCommitEpoch abi.ChainEpoch + DealWeight abi.DealWeight + VerifiedDealWeight abi.DealWeight +} + +type PoStPartition = miner0.PoStPartition +type RecoveryDeclaration = miner0.RecoveryDeclaration +type FaultDeclaration = miner0.FaultDeclaration + +// Params +type DeclareFaultsParams = miner0.DeclareFaultsParams +type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams +type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams +type ProveCommitSectorParams = miner0.ProveCommitSectorParams +type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams +type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams + +func 
PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { + // We added support for the new proofs in network version 7, and removed support for the old + // ones in network version 8. + if nver < network.Version7 { + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredSealProof_StackedDrg2KiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return abi.RegisteredSealProof_StackedDrg8MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredSealProof_StackedDrg512MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return abi.RegisteredSealProof_StackedDrg32GiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredSealProof_StackedDrg64GiBV1, nil + default: + return -1, xerrors.Errorf("unrecognized window post type: %d", proof) + } + } + + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil + default: + return -1, xerrors.Errorf("unrecognized window post type: %d", proof) + } +} + +func WinningPoStProofTypeFromWindowPoStProofType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredPoStProof, error) { + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return 
abi.RegisteredPoStProof_StackedDrgWinning8MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning512MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning32GiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning64GiBV1, nil + default: + return -1, xerrors.Errorf("unknown proof type %d", proof) + } +} + +type MinerInfo struct { + Owner address.Address // Must be an ID-address. + Worker address.Address // Must be an ID-address. + NewWorker address.Address // Must be an ID-address. + ControlAddresses []address.Address // Must be an ID-addresses. + WorkerChangeEpoch abi.ChainEpoch + PeerId *peer.ID + Multiaddrs []abi.Multiaddrs + WindowPoStProofType abi.RegisteredPoStProof + SectorSize abi.SectorSize + WindowPoStPartitionSectors uint64 + ConsensusFaultElapsed abi.ChainEpoch +} + +func (mi MinerInfo) IsController(addr address.Address) bool { + if addr == mi.Owner || addr == mi.Worker { + return true + } + + for _, ca := range mi.ControlAddresses { + if addr == ca { + return true + } + } + + return false +} + +type SectorExpiration struct { + OnTime abi.ChainEpoch + + // non-zero if sector is faulty, epoch at which it will be permanently + // removed if it doesn't recover + Early abi.ChainEpoch +} + +type SectorLocation struct { + Deadline uint64 + Partition uint64 +} + +type SectorChanges struct { + Added []SectorOnChainInfo + Extended []SectorExtensions + Removed []SectorOnChainInfo +} + +type SectorExtensions struct { + From SectorOnChainInfo + To SectorOnChainInfo +} + +type PreCommitChanges struct { + Added []SectorPreCommitOnChainInfo + Removed []SectorPreCommitOnChainInfo +} + +type LockedFunds struct { + VestingFunds abi.TokenAmount + InitialPledgeRequirement abi.TokenAmount + PreCommitDeposits abi.TokenAmount +} + +func (lf LockedFunds) TotalLockedFunds() abi.TokenAmount { + 
return big.Add(lf.VestingFunds, big.Add(lf.InitialPledgeRequirement, lf.PreCommitDeposits)) +} diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index 48236ffd0a4..fc1d60e718a 100644 --- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -2,6 +2,8 @@ package miner import ( "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/actors" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" cbg "github.com/whyrusleeping/cbor-gen" @@ -17,23 +19,49 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" ) func init() { + builtin.RegisterActorState(builtin0.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load0(store, root) }) + builtin.RegisterActorState(builtin2.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + + builtin.RegisterActorState(builtin3.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) + + 
builtin.RegisterActorState(builtin4.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load4(store, root) + }) + + builtin.RegisterActorState(builtin5.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) + } -var Methods = builtin2.MethodsMiner +var Methods = builtin5.MethodsMiner -// Unchanged between v0 and v2 actors +// Unchanged between v0, v2, v3, and v4 actors var WPoStProvingPeriod = miner0.WPoStProvingPeriod var WPoStPeriodDeadlines = miner0.WPoStPeriodDeadlines var WPoStChallengeWindow = miner0.WPoStChallengeWindow @@ -42,16 +70,78 @@ var FaultDeclarationCutoff = miner0.FaultDeclarationCutoff const MinSectorExpiration = miner0.MinSectorExpiration -func Load(store adt.Store, act *types.Actor) (st State, err error) { +// Not used / checked in v0 +// TODO: Abstract over network versions +var DeclarationsMax = miner2.DeclarationsMax +var AddressedSectorsMax = miner2.AddressedSectorsMax + +func Load(store adt.Store, act *types.Actor) (State, error) { switch act.Code { + case builtin0.StorageMinerActorCodeID: return load0(store, act.Head) + case builtin2.StorageMinerActorCodeID: return load2(store, act.Head) + + case builtin3.StorageMinerActorCodeID: + return load3(store, act.Head) + + case builtin4.StorageMinerActorCodeID: + return load4(store, act.Head) + + case builtin5.StorageMinerActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { + + case actors.Version0: + return make0(store) + + case actors.Version2: + return make2(store) + + case actors.Version3: + return make3(store) + + case actors.Version4: + return make4(store) + + case actors.Version5: + return make5(store) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av 
{ + + case actors.Version0: + return builtin0.StorageMinerActorCodeID, nil + + case actors.Version2: + return builtin2.StorageMinerActorCodeID, nil + + case actors.Version3: + return builtin3.StorageMinerActorCodeID, nil + + case actors.Version4: + return builtin4.StorageMinerActorCodeID, nil + + case actors.Version5: + return builtin5.StorageMinerActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -67,11 +157,20 @@ type State interface { FindSector(abi.SectorNumber) (*SectorLocation, error) GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error) GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) + ForEachPrecommittedSector(func(SectorPreCommitOnChainInfo) error) error LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error) NumLiveSectors() (uint64, error) IsAllocated(abi.SectorNumber) (bool, error) + // UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than + // count if there aren't enough). + UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) GetAllocatedSectors() (*bitfield.BitField, error) + // Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors + GetProvingPeriodStart() (abi.ChainEpoch, error) + // Testing only + EraseAllUnproven() error + LoadDeadline(idx uint64) (Deadline, error) ForEachDeadline(cb func(idx uint64, dl Deadline) error) error NumDeadlines() (uint64, error) @@ -81,20 +180,23 @@ type State interface { MinerInfoChanged(State) (bool, error) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) + DeadlineCronActive() (bool, error) // Diff helpers. Used by Diff* functions internally. 
sectors() (adt.Array, error) decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error) precommits() (adt.Map, error) decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (SectorPreCommitOnChainInfo, error) + GetState() interface{} } type Deadline interface { LoadPartition(idx uint64) (Partition, error) ForEachPartition(cb func(idx uint64, part Partition) error) error - PostSubmissions() (bitfield.BitField, error) + PartitionsPoSted() (bitfield.BitField, error) PartitionsChanged(Deadline) (bool, error) + DisputableProofCount() (uint64, error) } type Partition interface { @@ -138,6 +240,61 @@ type DeclareFaultsParams = miner0.DeclareFaultsParams type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams type ProveCommitSectorParams = miner0.ProveCommitSectorParams +type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams +type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams + +func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { + // We added support for the new proofs in network version 7, and removed support for the old + // ones in network version 8. 
+ if nver < network.Version7 { + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredSealProof_StackedDrg2KiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return abi.RegisteredSealProof_StackedDrg8MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredSealProof_StackedDrg512MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return abi.RegisteredSealProof_StackedDrg32GiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredSealProof_StackedDrg64GiBV1, nil + default: + return -1, xerrors.Errorf("unrecognized window post type: %d", proof) + } + } + + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil + default: + return -1, xerrors.Errorf("unrecognized window post type: %d", proof) + } +} + +func WinningPoStProofTypeFromWindowPoStProofType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredPoStProof, error) { + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning8MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning512MiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: + return 
abi.RegisteredPoStProof_StackedDrgWinning32GiBV1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: + return abi.RegisteredPoStProof_StackedDrgWinning64GiBV1, nil + default: + return -1, xerrors.Errorf("unknown proof type %d", proof) + } +} type MinerInfo struct { Owner address.Address // Must be an ID-address. @@ -147,7 +304,7 @@ type MinerInfo struct { WorkerChangeEpoch abi.ChainEpoch PeerId *peer.ID Multiaddrs []abi.Multiaddrs - SealProofType abi.RegisteredSealProof + WindowPoStProofType abi.RegisteredPoStProof SectorSize abi.SectorSize WindowPoStPartitionSectors uint64 ConsensusFaultElapsed abi.ChainEpoch diff --git a/chain/actors/builtin/miner/state.go.template b/chain/actors/builtin/miner/state.go.template new file mode 100644 index 00000000000..09c1202d95e --- /dev/null +++ b/chain/actors/builtin/miner/state.go.template @@ -0,0 +1,585 @@ +package miner + +import ( + "bytes" + "errors" +{{if (le .v 1)}} + "github.com/filecoin-project/go-state-types/big" +{{end}} + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + +{{if (ge .v 3)}} + builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" +{{end}} + miner{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/miner" + adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store) 
(State, error) { + out := state{{.v}}{store: store} + out.State = miner{{.v}}.State{} + return &out, nil +} + +type state{{.v}} struct { + miner{{.v}}.State + store adt.Store +} + +type deadline{{.v}} struct { + miner{{.v}}.Deadline + store adt.Store +} + +type partition{{.v}} struct { + miner{{.v}}.Partition + store adt.Store +} + +func (s *state{{.v}}) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = xerrors.Errorf("failed to get available balance: %w", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available{{if (ge .v 2)}}, err{{end}} = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state{{.v}}) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state{{.v}}) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge{{if (le .v 1)}}Requirement{{end}}, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state{{.v}}) FeeDebt() (abi.TokenAmount, error) { + return {{if (ge .v 2)}}s.State.FeeDebt{{else}}big.Zero(){{end}}, nil +} + +func (s *state{{.v}}) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge{{if (le .v 1)}}Requirement{{end}}, nil +} + +func (s *state{{.v}}) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state{{.v}}) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV{{.v}}SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state{{.v}}) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err 
+ } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state{{.v}}) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner{{.v}}.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state{{.v}}) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will either expire on-time (can be + // learned from the sector info), or in the next quantized expiration + // epoch (i.e., the first element in the partition's expiration queue. + // 2. If it's faulty, it will expire early within the first 14 entries + // of the expiration queue. 
+ stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner{{.v}}.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner{{.v}}.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner{{.v}}.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant{{if (ge .v 3)}}, miner{{.v}}.PartitionExpirationAmtBitwidth{{end}}) + if err != nil { + return err + } + var exp miner{{.v}}.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, xerrors.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state{{.v}}) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV{{.v}}SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state{{.v}}) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { +{{if (ge .v 3) -}} + precommitted, err := adt{{.v}}.AsMap(s.store, s.State.PreCommittedSectors, builtin{{.v}}.DefaultHamtBitwidth) +{{- 
else -}} + precommitted, err := adt{{.v}}.AsMap(s.store, s.State.PreCommittedSectors) +{{- end}} + if err != nil { + return err + } + + var info miner{{.v}}.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV{{.v}}SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state{{.v}}) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner{{.v}}.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info{{.v}} miner{{.v}}.SectorOnChainInfo + if err := sectors.ForEach(&info{{.v}}, func(_ int64) error { + info := fromV{{.v}}SectorOnChainInfo(info{{.v}}) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. + infos{{.v}}, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos{{.v}})) + for i, info{{.v}} := range infos{{.v}} { + info := fromV{{.v}}SectorOnChainInfo(*info{{.v}}) + infos[i] = &info + } + return infos, nil +} + +func (s *state{{.v}}) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state{{.v}}) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state{{.v}}) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state{{.v}}) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := 
s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{ {Val: true, Len: abi.MaxSectorNumber} }}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state{{.v}}) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state{{.v}}) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline{{.v}}{*dl, s.store}, nil +} + +func (s *state{{.v}}) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner{{.v}}.Deadline) error { + return cb(i, &deadline{{.v}}{*dl, s.store}) + }) +} + +func (s *state{{.v}}) NumDeadlines() (uint64, error) { + return miner{{.v}}.WPoStPeriodDeadlines, nil +} + +func (s *state{{.v}}) DeadlinesChanged(other State) (bool, error) { + other{{.v}}, ok := other.(*state{{.v}}) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other{{.v}}.Deadlines), nil +} + +func (s *state{{.v}}) MinerInfoChanged(other State) (bool, error) { + 
other0, ok := other.(*state{{.v}}) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state{{.v}}) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + var pid *peer.ID + if peerID, err := peer.IDFromBytes(info.PeerId); err == nil { + pid = &peerID + } +{{if (le .v 2)}} + wpp, err := info.SealProofType.RegisteredWindowPoStProof() + if err != nil { + return MinerInfo{}, err + } +{{end}} + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + NewWorker: address.Undef, + WorkerChangeEpoch: -1, + + PeerId: pid, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: {{if (ge .v 3)}}info.WindowPoStProofType{{else}}wpp{{end}}, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: {{if (ge .v 2)}}info.ConsensusFaultElapsed{{else}}-1{{end}}, + } + + if info.PendingWorkerKey != nil { + mi.NewWorker = info.PendingWorkerKey.NewWorker + mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt + } + + return mi, nil +} + +func (s *state{{.v}}) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.{{if (ge .v 4)}}Recorded{{end}}DeadlineInfo(epoch), nil +} + +func (s *state{{.v}}) DeadlineCronActive() (bool, error) { + return {{if (ge .v 4)}}s.State.DeadlineCronActive{{else}}true{{end}}, nil{{if (lt .v 4)}} // always active in this version{{end}} +} + +func (s *state{{.v}}) sectors() (adt.Array, error) { + return adt{{.v}}.AsArray(s.store, s.Sectors{{if (ge .v 3)}}, miner{{.v}}.SectorsAmtBitwidth{{end}}) +} + +func (s *state{{.v}}) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner{{.v}}.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return 
fromV{{.v}}SectorOnChainInfo(si), nil +} + +func (s *state{{.v}}) precommits() (adt.Map, error) { + return adt{{.v}}.AsMap(s.store, s.PreCommittedSectors{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) +} + +func (s *state{{.v}}) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) { + var sp miner{{.v}}.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorPreCommitOnChainInfo{}, err + } + + return fromV{{.v}}SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state{{.v}}) EraseAllUnproven() error { + {{if (ge .v 2)}} + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner{{.v}}.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner{{.v}}.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + + return s.State.SaveDeadlines(s.store, dls) + {{else}} + // field doesn't exist until v2 + {{end}} + return nil +} + +func (d *deadline{{.v}}) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition{{.v}}{*p, d.store}, nil +} + +func (d *deadline{{.v}}) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner{{.v}}.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition{{.v}}{part, d.store}) + }) +} + +func (d *deadline{{.v}}) PartitionsChanged(other Deadline) (bool, error) { + other{{.v}}, ok := other.(*deadline{{.v}}) + if !ok { + // treat an 
upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other{{.v}}.Deadline.Partitions), nil +} + +func (d *deadline{{.v}}) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.{{if (ge .v 3)}}PartitionsPoSted{{else}}PostSubmissions{{end}}, nil +} + +func (d *deadline{{.v}}) DisputableProofCount() (uint64, error) { +{{if (ge .v 3)}} + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil +{{else}} + // field doesn't exist until v3 + return 0, nil +{{end}} +} + +func (p *partition{{.v}}) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition{{.v}}) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition{{.v}}) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorOnChainInfo { +{{if (ge .v 2)}} + return SectorOnChainInfo{ + SectorNumber: v{{.v}}.SectorNumber, + SealProof: v{{.v}}.SealProof, + SealedCID: v{{.v}}.SealedCID, + DealIDs: v{{.v}}.DealIDs, + Activation: v{{.v}}.Activation, + Expiration: v{{.v}}.Expiration, + DealWeight: v{{.v}}.DealWeight, + VerifiedDealWeight: v{{.v}}.VerifiedDealWeight, + InitialPledge: v{{.v}}.InitialPledge, + ExpectedDayReward: v{{.v}}.ExpectedDayReward, + ExpectedStoragePledge: v{{.v}}.ExpectedStoragePledge, + } +{{else}} + return (SectorOnChainInfo)(v0) +{{end}} +} + +func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { +{{if (ge .v 2)}} + return SectorPreCommitOnChainInfo{ + Info: (SectorPreCommitInfo)(v{{.v}}.Info), + PreCommitDeposit: v{{.v}}.PreCommitDeposit, + PreCommitEpoch: v{{.v}}.PreCommitEpoch, + DealWeight: v{{.v}}.DealWeight, + VerifiedDealWeight: v{{.v}}.VerifiedDealWeight, + } +{{else}} + return (SectorPreCommitOnChainInfo)(v0) 
+{{end}} +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/miner/utils.go b/chain/actors/builtin/miner/utils.go index f9c6b3da332..2f24e845401 100644 --- a/chain/actors/builtin/miner/utils.go +++ b/chain/actors/builtin/miner/utils.go @@ -4,6 +4,8 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" ) func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) (bitfield.BitField, error) { @@ -26,3 +28,42 @@ func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) return bitfield.MultiMerge(parts...) } + +// SealProofTypeFromSectorSize returns preferred seal proof type for creating +// new miner actors and new sectors +func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.RegisteredSealProof, error) { + switch { + case nv < network.Version7: + switch ssize { + case 2 << 10: + return abi.RegisteredSealProof_StackedDrg2KiBV1, nil + case 8 << 20: + return abi.RegisteredSealProof_StackedDrg8MiBV1, nil + case 512 << 20: + return abi.RegisteredSealProof_StackedDrg512MiBV1, nil + case 32 << 30: + return abi.RegisteredSealProof_StackedDrg32GiBV1, nil + case 64 << 30: + return abi.RegisteredSealProof_StackedDrg64GiBV1, nil + default: + return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) + } + case nv >= network.Version7: + switch ssize { + case 2 << 10: + return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil + case 8 << 20: + return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil + case 512 << 20: + return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil + case 32 << 30: + return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil + case 64 << 30: + return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil + default: + return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) + } + } + + 
return 0, xerrors.Errorf("unsupported network version") +} diff --git a/chain/actors/builtin/miner/v0.go b/chain/actors/builtin/miner/v0.go index a34cd5b43bc..cd922645ea4 100644 --- a/chain/actors/builtin/miner/v0.go +++ b/chain/actors/builtin/miner/v0.go @@ -8,6 +8,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/dline" "github.com/ipfs/go-cid" @@ -32,6 +33,12 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store) (State, error) { + out := state0{store: store} + out.State = miner0.State{} + return &out, nil +} + type state0 struct { miner0.State store adt.Store @@ -196,9 +203,26 @@ func (s *state0) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn } ret := fromV0SectorPreCommitOnChainInfo(*info) + return &ret, nil } +func (s *state0) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt0.AsMap(s.store, s.State.PreCommittedSectors) + if err != nil { + return err + } + + var info miner0.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV0SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { sectors, err := miner0.LoadSectors(s.store, s.State.Sectors) if err != nil { @@ -232,15 +256,61 @@ func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err return infos, nil } -func (s *state0) IsAllocated(num abi.SectorNumber) (bool, error) { +func (s *state0) loadAllocatedSectorNumbers() (bitfield.BitField, error) { var allocatedSectors bitfield.BitField - if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + err := 
s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state0) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { return false, err } return allocatedSectors.IsSet(uint64(num)) } +func (s *state0) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state0) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + func (s *state0) GetAllocatedSectors() (*bitfield.BitField, error) { var allocatedSectors bitfield.BitField if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { @@ -306,6 +376,11 @@ func (s *state0) Info() (MinerInfo, error) { pid = &peerID } + wpp, err := info.SealProofType.RegisteredWindowPoStProof() + if err != nil { + return MinerInfo{}, err + } + mi := MinerInfo{ Owner: info.Owner, Worker: info.Worker, @@ -316,7 +391,7 @@ func (s *state0) Info() (MinerInfo, error) { PeerId: pid, Multiaddrs: info.Multiaddrs, - SealProofType: info.SealProofType, + WindowPoStProofType: wpp, SectorSize: info.SectorSize, WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, ConsensusFaultElapsed: -1, @@ -334,6 
+409,10 @@ func (s *state0) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { return s.State.DeadlineInfo(epoch), nil } +func (s *state0) DeadlineCronActive() (bool, error) { + return true, nil // always active in this version +} + func (s *state0) sectors() (adt.Array, error) { return adt0.AsArray(s.store, s.Sectors) } @@ -362,6 +441,13 @@ func (s *state0) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreC return fromV0SectorPreCommitOnChainInfo(sp), nil } +func (s *state0) EraseAllUnproven() error { + + // field doesn't exist until v2 + + return nil +} + func (d *deadline0) LoadPartition(idx uint64) (Partition, error) { p, err := d.Deadline.LoadPartition(d.store, idx) if err != nil { @@ -391,10 +477,17 @@ func (d *deadline0) PartitionsChanged(other Deadline) (bool, error) { return !d.Deadline.Partitions.Equals(other0.Deadline.Partitions), nil } -func (d *deadline0) PostSubmissions() (bitfield.BitField, error) { +func (d *deadline0) PartitionsPoSted() (bitfield.BitField, error) { return d.Deadline.PostSubmissions, nil } +func (d *deadline0) DisputableProofCount() (uint64, error) { + + // field doesn't exist until v3 + return 0, nil + +} + func (p *partition0) AllSectors() (bitfield.BitField, error) { return p.Partition.Sectors, nil } @@ -408,9 +501,17 @@ func (p *partition0) RecoveringSectors() (bitfield.BitField, error) { } func fromV0SectorOnChainInfo(v0 miner0.SectorOnChainInfo) SectorOnChainInfo { + return (SectorOnChainInfo)(v0) + } func fromV0SectorPreCommitOnChainInfo(v0 miner0.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { + return (SectorPreCommitOnChainInfo)(v0) + +} + +func (s *state0) GetState() interface{} { + return &s.State } diff --git a/chain/actors/builtin/miner/v2.go b/chain/actors/builtin/miner/v2.go index 118ac87d65f..5de653fe4e2 100644 --- a/chain/actors/builtin/miner/v2.go +++ b/chain/actors/builtin/miner/v2.go @@ -6,6 +6,7 @@ import ( "github.com/filecoin-project/go-address" 
"github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/dline" "github.com/ipfs/go-cid" @@ -30,6 +31,12 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store) (State, error) { + out := state2{store: store} + out.State = miner2.State{} + return &out, nil +} + type state2 struct { miner2.State store adt.Store @@ -198,6 +205,22 @@ func (s *state2) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn return &ret, nil } +func (s *state2) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt2.AsMap(s.store, s.State.PreCommittedSectors) + if err != nil { + return err + } + + var info miner2.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV2SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + func (s *state2) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { sectors, err := miner2.LoadSectors(s.store, s.State.Sectors) if err != nil { @@ -231,22 +254,68 @@ func (s *state2) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err return infos, nil } -func (s *state2) GetAllocatedSectors() (*bitfield.BitField, error) { +func (s *state2) loadAllocatedSectorNumbers() (bitfield.BitField, error) { var allocatedSectors bitfield.BitField - if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state2) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state2) GetProvingPeriodStart() 
(abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state2) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { return nil, err } - return &allocatedSectors, nil + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil } -func (s *state2) IsAllocated(num abi.SectorNumber) (bool, error) { +func (s *state2) GetAllocatedSectors() (*bitfield.BitField, error) { var allocatedSectors bitfield.BitField if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { - return false, err + return nil, err } - return allocatedSectors.IsSet(uint64(num)) + return &allocatedSectors, nil } func (s *state2) LoadDeadline(idx uint64) (Deadline, error) { @@ -305,6 +374,11 @@ func (s *state2) Info() (MinerInfo, error) { pid = &peerID } + wpp, err := info.SealProofType.RegisteredWindowPoStProof() + if err != nil { + return MinerInfo{}, err + } + mi := MinerInfo{ Owner: info.Owner, Worker: info.Worker, @@ -315,7 +389,7 @@ func (s *state2) Info() (MinerInfo, error) { PeerId: pid, Multiaddrs: info.Multiaddrs, - SealProofType: info.SealProofType, + WindowPoStProofType: wpp, SectorSize: info.SectorSize, WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, ConsensusFaultElapsed: info.ConsensusFaultElapsed, @@ -333,6 +407,10 @@ func (s *state2) DeadlineInfo(epoch abi.ChainEpoch) 
(*dline.Info, error) {
 	return s.State.DeadlineInfo(epoch), nil
 }
 
+func (s *state2) DeadlineCronActive() (bool, error) {
+	return true, nil // always active in this version
+}
+
 func (s *state2) sectors() (adt.Array, error) {
 	return adt2.AsArray(s.store, s.Sectors)
 }
@@ -361,6 +439,43 @@ func (s *state2) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreC
 	return fromV2SectorPreCommitOnChainInfo(sp), nil
 }
 
+func (s *state2) EraseAllUnproven() error {
+
+	dls, err := s.State.LoadDeadlines(s.store)
+	if err != nil {
+		return err
+	}
+
+	err = dls.ForEach(s.store, func(dindx uint64, dl *miner2.Deadline) error {
+		ps, err := dl.PartitionsArray(s.store)
+		if err != nil {
+			return err
+		}
+
+		var part miner2.Partition
+		err = ps.ForEach(&part, func(pindx int64) error {
+			_ = part.ActivateUnproven()
+			err = ps.Set(uint64(pindx), &part)
+			return err
+		})
+
+		if err != nil {
+			return err
+		}
+
+		dl.Partitions, err = ps.Root()
+		if err != nil {
+			return err
+		}
+
+		return dls.UpdateDeadline(s.store, dindx, dl)
+	})
+	if err != nil {
+		return err
+	}
+	return s.State.SaveDeadlines(s.store, dls)
+}
+
 func (d *deadline2) LoadPartition(idx uint64) (Partition, error) {
 	p, err := d.Deadline.LoadPartition(d.store, idx)
 	if err != nil {
@@ -390,10 +505,17 @@ func (d *deadline2) PartitionsChanged(other Deadline) (bool, error) {
 	return !d.Deadline.Partitions.Equals(other2.Deadline.Partitions), nil
 }
 
-func (d *deadline2) PostSubmissions() (bitfield.BitField, error) {
+func (d *deadline2) PartitionsPoSted() (bitfield.BitField, error) {
 	return d.Deadline.PostSubmissions, nil
 }
 
+func (d *deadline2) DisputableProofCount() (uint64, error) {
+
+	// field doesn't exist until v3
+	return 0, nil
+
+}
+
 func (p *partition2) AllSectors() (bitfield.BitField, error) {
 	return p.Partition.Sectors, nil
 }
@@ -407,6 +529,7 @@ func (p *partition2) RecoveringSectors() (bitfield.BitField, error) {
 }
 
 func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo {
+
 	return SectorOnChainInfo{
SectorNumber: v2.SectorNumber, SealProof: v2.SealProof, @@ -420,9 +543,11 @@ func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v2.ExpectedDayReward, ExpectedStoragePledge: v2.ExpectedStoragePledge, } + } func fromV2SectorPreCommitOnChainInfo(v2 miner2.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { + return SectorPreCommitOnChainInfo{ Info: (SectorPreCommitInfo)(v2.Info), PreCommitDeposit: v2.PreCommitDeposit, @@ -430,4 +555,9 @@ func fromV2SectorPreCommitOnChainInfo(v2 miner2.SectorPreCommitOnChainInfo) Sect DealWeight: v2.DealWeight, VerifiedDealWeight: v2.VerifiedDealWeight, } + +} + +func (s *state2) GetState() interface{} { + return &s.State } diff --git a/chain/actors/builtin/miner/v3.go b/chain/actors/builtin/miner/v3.go new file mode 100644 index 00000000000..1819428a6d3 --- /dev/null +++ b/chain/actors/builtin/miner/v3.go @@ -0,0 +1,564 @@ +package miner + +import ( + "bytes" + "errors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store) (State, error) { + out := state3{store: store} + out.State = miner3.State{} + return &out, nil +} + +type 
state3 struct { + miner3.State + store adt.Store +} + +type deadline3 struct { + miner3.Deadline + store adt.Store +} + +type partition3 struct { + miner3.Partition + store adt.Store +} + +func (s *state3) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = xerrors.Errorf("failed to get available balance: %w", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state3) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state3) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state3) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state3) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state3) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state3) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV3SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state3) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state3) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error { + 
total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state3) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will either expire on-time (can be + // learned from the sector info), or in the next quantized expiration + // epoch (i.e., the first element in the partition's expiration queue. + // 2. If it's faulty, it will expire early within the first 14 entries + // of the expiration queue. + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner3.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner3.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner3.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner3.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + 
}) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, xerrors.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state3) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV3SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state3) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt3.AsMap(s.store, s.State.PreCommittedSectors, builtin3.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner3.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV3SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state3) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner3.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info3 miner3.SectorOnChainInfo + if err := sectors.ForEach(&info3, func(_ int64) error { + info := fromV3SectorOnChainInfo(info3) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos3, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos3)) + for i, info3 := range infos3 { + info := fromV3SectorOnChainInfo(*info3) + infos[i] = &info + } + return infos, nil +} + +func (s *state3) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state3) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state3) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state3) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state3) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state3) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := 
s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline3{*dl, s.store}, nil +} + +func (s *state3) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner3.Deadline) error { + return cb(i, &deadline3{*dl, s.store}) + }) +} + +func (s *state3) NumDeadlines() (uint64, error) { + return miner3.WPoStPeriodDeadlines, nil +} + +func (s *state3) DeadlinesChanged(other State) (bool, error) { + other3, ok := other.(*state3) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other3.Deadlines), nil +} + +func (s *state3) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state3) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state3) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + var pid *peer.ID + if peerID, err := peer.IDFromBytes(info.PeerId); err == nil { + pid = &peerID + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + NewWorker: address.Undef, + WorkerChangeEpoch: -1, + + PeerId: pid, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + if info.PendingWorkerKey != nil { + mi.NewWorker = info.PendingWorkerKey.NewWorker + mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt + } + + return mi, nil +} + +func (s *state3) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.DeadlineInfo(epoch), nil +} + +func 
(s *state3) DeadlineCronActive() (bool, error) {
+	return true, nil // always active in this version
+}
+
+func (s *state3) sectors() (adt.Array, error) {
+	return adt3.AsArray(s.store, s.Sectors, miner3.SectorsAmtBitwidth)
+}
+
+func (s *state3) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
+	var si miner3.SectorOnChainInfo
+	err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
+	if err != nil {
+		return SectorOnChainInfo{}, err
+	}
+
+	return fromV3SectorOnChainInfo(si), nil
+}
+
+func (s *state3) precommits() (adt.Map, error) {
+	return adt3.AsMap(s.store, s.PreCommittedSectors, builtin3.DefaultHamtBitwidth)
+}
+
+func (s *state3) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
+	var sp miner3.SectorPreCommitOnChainInfo
+	err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
+	if err != nil {
+		return SectorPreCommitOnChainInfo{}, err
+	}
+
+	return fromV3SectorPreCommitOnChainInfo(sp), nil
+}
+
+func (s *state3) EraseAllUnproven() error {
+
+	dls, err := s.State.LoadDeadlines(s.store)
+	if err != nil {
+		return err
+	}
+
+	err = dls.ForEach(s.store, func(dindx uint64, dl *miner3.Deadline) error {
+		ps, err := dl.PartitionsArray(s.store)
+		if err != nil {
+			return err
+		}
+
+		var part miner3.Partition
+		err = ps.ForEach(&part, func(pindx int64) error {
+			_ = part.ActivateUnproven()
+			err = ps.Set(uint64(pindx), &part)
+			return err
+		})
+
+		if err != nil {
+			return err
+		}
+
+		dl.Partitions, err = ps.Root()
+		if err != nil {
+			return err
+		}
+
+		return dls.UpdateDeadline(s.store, dindx, dl)
+	})
+	if err != nil {
+		return err
+	}
+	return s.State.SaveDeadlines(s.store, dls)
+}
+
+func (d *deadline3) LoadPartition(idx uint64) (Partition, error) {
+	p, err := d.Deadline.LoadPartition(d.store, idx)
+	if err != nil {
+		return nil, err
+	}
+	return &partition3{*p, d.store}, nil
+}
+
+func (d *deadline3) ForEachPartition(cb func(uint64, Partition) error) error {
+	ps, err := d.Deadline.PartitionsArray(d.store)
+	if err != nil
{ + return err + } + var part miner3.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition3{part, d.store}) + }) +} + +func (d *deadline3) PartitionsChanged(other Deadline) (bool, error) { + other3, ok := other.(*deadline3) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other3.Deadline.Partitions), nil +} + +func (d *deadline3) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline3) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition3) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition3) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition3) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo { + + return SectorOnChainInfo{ + SectorNumber: v3.SectorNumber, + SealProof: v3.SealProof, + SealedCID: v3.SealedCID, + DealIDs: v3.DealIDs, + Activation: v3.Activation, + Expiration: v3.Expiration, + DealWeight: v3.DealWeight, + VerifiedDealWeight: v3.VerifiedDealWeight, + InitialPledge: v3.InitialPledge, + ExpectedDayReward: v3.ExpectedDayReward, + ExpectedStoragePledge: v3.ExpectedStoragePledge, + } + +} + +func fromV3SectorPreCommitOnChainInfo(v3 miner3.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { + + return SectorPreCommitOnChainInfo{ + Info: (SectorPreCommitInfo)(v3.Info), + PreCommitDeposit: v3.PreCommitDeposit, + PreCommitEpoch: v3.PreCommitEpoch, + DealWeight: v3.DealWeight, + VerifiedDealWeight: v3.VerifiedDealWeight, + } + +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/miner/v4.go 
b/chain/actors/builtin/miner/v4.go new file mode 100644 index 00000000000..5a3a75053c3 --- /dev/null +++ b/chain/actors/builtin/miner/v4.go @@ -0,0 +1,564 @@ +package miner + +import ( + "bytes" + "errors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner" + adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store) (State, error) { + out := state4{store: store} + out.State = miner4.State{} + return &out, nil +} + +type state4 struct { + miner4.State + store adt.Store +} + +type deadline4 struct { + miner4.Deadline + store adt.Store +} + +type partition4 struct { + miner4.Partition + store adt.Store +} + +func (s *state4) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = xerrors.Errorf("failed to get available balance: %w", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state4) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state4) 
LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state4) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state4) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state4) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state4) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV4SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state4) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state4) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner4.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state4) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will either expire on-time (can be + // learned from the sector info), or in the next quantized expiration + // epoch (i.e., the first element in the partition's expiration queue. + // 2. 
If it's faulty, it will expire early within the first 14 entries + // of the expiration queue. + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner4.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner4.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner4.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner4.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner4.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, xerrors.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state4) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV4SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state4) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt4.AsMap(s.store, s.State.PreCommittedSectors, builtin4.DefaultHamtBitwidth) + if 
err != nil { + return err + } + + var info miner4.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV4SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state4) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner4.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info4 miner4.SectorOnChainInfo + if err := sectors.ForEach(&info4, func(_ int64) error { + info := fromV4SectorOnChainInfo(info4) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. + infos4, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos4)) + for i, info4 := range infos4 { + info := fromV4SectorOnChainInfo(*info4) + infos[i] = &info + } + return infos, nil +} + +func (s *state4) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state4) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state4) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state4) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + 
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state4) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state4) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline4{*dl, s.store}, nil +} + +func (s *state4) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner4.Deadline) error { + return cb(i, &deadline4{*dl, s.store}) + }) +} + +func (s *state4) NumDeadlines() (uint64, error) { + return miner4.WPoStPeriodDeadlines, nil +} + +func (s *state4) DeadlinesChanged(other State) (bool, error) { + other4, ok := other.(*state4) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other4.Deadlines), nil +} + +func (s *state4) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state4) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state4) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return 
MinerInfo{}, err + } + + var pid *peer.ID + if peerID, err := peer.IDFromBytes(info.PeerId); err == nil { + pid = &peerID + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + NewWorker: address.Undef, + WorkerChangeEpoch: -1, + + PeerId: pid, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + if info.PendingWorkerKey != nil { + mi.NewWorker = info.PendingWorkerKey.NewWorker + mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt + } + + return mi, nil +} + +func (s *state4) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + +func (s *state4) DeadlineCronActive() (bool, error) { + return s.State.DeadlineCronActive, nil +} + +func (s *state4) sectors() (adt.Array, error) { + return adt4.AsArray(s.store, s.Sectors, miner4.SectorsAmtBitwidth) +} + +func (s *state4) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner4.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV4SectorOnChainInfo(si), nil +} + +func (s *state4) precommits() (adt.Map, error) { + return adt4.AsMap(s.store, s.PreCommittedSectors, builtin4.DefaultHamtBitwidth) +} + +func (s *state4) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) { + var sp miner4.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorPreCommitOnChainInfo{}, err + } + + return fromV4SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state4) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl 
*miner4.Deadline) error {
+		ps, err := dl.PartitionsArray(s.store)
+		if err != nil {
+			return err
+		}
+
+		var part miner4.Partition
+		err = ps.ForEach(&part, func(pindx int64) error {
+			_ = part.ActivateUnproven()
+			err = ps.Set(uint64(pindx), &part)
+			return err
+		})
+
+		if err != nil {
+			return err
+		}
+
+		dl.Partitions, err = ps.Root()
+		if err != nil {
+			return err
+		}
+
+		return dls.UpdateDeadline(s.store, dindx, dl)
+	})
+	if err != nil {
+		return err
+	}
+	return s.State.SaveDeadlines(s.store, dls)
+}
+
+func (d *deadline4) LoadPartition(idx uint64) (Partition, error) {
+	p, err := d.Deadline.LoadPartition(d.store, idx)
+	if err != nil {
+		return nil, err
+	}
+	return &partition4{*p, d.store}, nil
+}
+
+func (d *deadline4) ForEachPartition(cb func(uint64, Partition) error) error {
+	ps, err := d.Deadline.PartitionsArray(d.store)
+	if err != nil {
+		return err
+	}
+	var part miner4.Partition
+	return ps.ForEach(&part, func(i int64) error {
+		return cb(uint64(i), &partition4{part, d.store})
+	})
+}
+
+func (d *deadline4) PartitionsChanged(other Deadline) (bool, error) {
+	other4, ok := other.(*deadline4)
+	if !ok {
+		// treat an upgrade as a change, always
+		return true, nil
+	}
+
+	return !d.Deadline.Partitions.Equals(other4.Deadline.Partitions), nil
+}
+
+func (d *deadline4) PartitionsPoSted() (bitfield.BitField, error) {
+	return d.Deadline.PartitionsPoSted, nil
+}
+
+func (d *deadline4) DisputableProofCount() (uint64, error) {
+
+	ops, err := d.OptimisticProofsSnapshotArray(d.store)
+	if err != nil {
+		return 0, err
+	}
+
+	return ops.Length(), nil
+
+}
+
+func (p *partition4) AllSectors() (bitfield.BitField, error) {
+	return p.Partition.Sectors, nil
+}
+
+func (p *partition4) FaultySectors() (bitfield.BitField, error) {
+	return p.Partition.Faults, nil
+}
+
+func (p *partition4) RecoveringSectors() (bitfield.BitField, error) {
+	return p.Partition.Recoveries, nil
+}
+
+func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo {
+
+	return
SectorOnChainInfo{ + SectorNumber: v4.SectorNumber, + SealProof: v4.SealProof, + SealedCID: v4.SealedCID, + DealIDs: v4.DealIDs, + Activation: v4.Activation, + Expiration: v4.Expiration, + DealWeight: v4.DealWeight, + VerifiedDealWeight: v4.VerifiedDealWeight, + InitialPledge: v4.InitialPledge, + ExpectedDayReward: v4.ExpectedDayReward, + ExpectedStoragePledge: v4.ExpectedStoragePledge, + } + +} + +func fromV4SectorPreCommitOnChainInfo(v4 miner4.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { + + return SectorPreCommitOnChainInfo{ + Info: (SectorPreCommitInfo)(v4.Info), + PreCommitDeposit: v4.PreCommitDeposit, + PreCommitEpoch: v4.PreCommitEpoch, + DealWeight: v4.DealWeight, + VerifiedDealWeight: v4.VerifiedDealWeight, + } + +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/miner/v5.go b/chain/actors/builtin/miner/v5.go new file mode 100644 index 00000000000..82e98c2ef06 --- /dev/null +++ b/chain/actors/builtin/miner/v5.go @@ -0,0 +1,564 @@ +package miner + +import ( + "bytes" + "errors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, 
error) { + out := state5{store: store} + out.State = miner5.State{} + return &out, nil +} + +type state5 struct { + miner5.State + store adt.Store +} + +type deadline5 struct { + miner5.Deadline + store adt.Store +} + +type partition5 struct { + miner5.Partition + store adt.Store +} + +func (s *state5) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = xerrors.Errorf("failed to get available balance: %w", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state5) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state5) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state5) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state5) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state5) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state5) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV5SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state5) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state5) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + 
var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state5) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will either expire on-time (can be + // learned from the sector info), or in the next quantized expiration + // epoch (i.e., the first element in the partition's expiration queue. + // 2. If it's faulty, it will expire early within the first 14 entries + // of the expiration queue. + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner5.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner5.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner5.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner5.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err 
+ } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, xerrors.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state5) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV5SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state5) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt5.AsMap(s.store, s.State.PreCommittedSectors, builtin5.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner5.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV5SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state5) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner5.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info5 miner5.SectorOnChainInfo + if err := sectors.ForEach(&info5, func(_ int64) error { + info := fromV5SectorOnChainInfo(info5) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos5, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos5)) + for i, info5 := range infos5 { + info := fromV5SectorOnChainInfo(*info5) + infos[i] = &info + } + return infos, nil +} + +func (s *state5) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state5) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state5) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state5) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state5) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state5) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := 
s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline5{*dl, s.store}, nil +} + +func (s *state5) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner5.Deadline) error { + return cb(i, &deadline5{*dl, s.store}) + }) +} + +func (s *state5) NumDeadlines() (uint64, error) { + return miner5.WPoStPeriodDeadlines, nil +} + +func (s *state5) DeadlinesChanged(other State) (bool, error) { + other5, ok := other.(*state5) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other5.Deadlines), nil +} + +func (s *state5) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state5) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state5) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + var pid *peer.ID + if peerID, err := peer.IDFromBytes(info.PeerId); err == nil { + pid = &peerID + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + NewWorker: address.Undef, + WorkerChangeEpoch: -1, + + PeerId: pid, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + if info.PendingWorkerKey != nil { + mi.NewWorker = info.PendingWorkerKey.NewWorker + mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt + } + + return mi, nil +} + +func (s *state5) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + 
+func (s *state5) DeadlineCronActive() (bool, error) { + return s.State.DeadlineCronActive, nil +} + +func (s *state5) sectors() (adt.Array, error) { + return adt5.AsArray(s.store, s.Sectors, miner5.SectorsAmtBitwidth) +} + +func (s *state5) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner5.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV5SectorOnChainInfo(si), nil +} + +func (s *state5) precommits() (adt.Map, error) { + return adt5.AsMap(s.store, s.PreCommittedSectors, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) { + var sp miner5.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorPreCommitOnChainInfo{}, err + } + + return fromV5SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state5) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner5.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner5.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + + return s.State.SaveDeadlines(s.store, dls) + + return nil +} + +func (d *deadline5) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition5{*p, d.store}, nil +} + +func (d *deadline5) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + 
return err + } + var part miner5.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition5{part, d.store}) + }) +} + +func (d *deadline5) PartitionsChanged(other Deadline) (bool, error) { + other5, ok := other.(*deadline5) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other5.Deadline.Partitions), nil +} + +func (d *deadline5) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline5) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition5) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition5) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition5) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo { + + return SectorOnChainInfo{ + SectorNumber: v5.SectorNumber, + SealProof: v5.SealProof, + SealedCID: v5.SealedCID, + DealIDs: v5.DealIDs, + Activation: v5.Activation, + Expiration: v5.Expiration, + DealWeight: v5.DealWeight, + VerifiedDealWeight: v5.VerifiedDealWeight, + InitialPledge: v5.InitialPledge, + ExpectedDayReward: v5.ExpectedDayReward, + ExpectedStoragePledge: v5.ExpectedStoragePledge, + } + +} + +func fromV5SectorPreCommitOnChainInfo(v5 miner5.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { + + return SectorPreCommitOnChainInfo{ + Info: (SectorPreCommitInfo)(v5.Info), + PreCommitDeposit: v5.PreCommitDeposit, + PreCommitEpoch: v5.PreCommitEpoch, + DealWeight: v5.DealWeight, + VerifiedDealWeight: v5.VerifiedDealWeight, + } + +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git 
a/chain/actors/builtin/multisig/actor.go.template b/chain/actors/builtin/multisig/actor.go.template new file mode 100644 index 00000000000..b899815a668 --- /dev/null +++ b/chain/actors/builtin/multisig/actor.go.template @@ -0,0 +1,141 @@ +package multisig + +import ( + "fmt" + + "github.com/minio/blake2b-simd" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/ipfs/go-cid" + + msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + msig{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/multisig" +{{range .versions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" +{{end}} + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +) + +func init() { +{{range .versions}} + builtin.RegisterActorState(builtin{{.}}.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load{{.}}(store, root) + }) +{{end}}} + +func Load(store adt.Store, act *types.Actor) (State, error) { + switch act.Code { +{{range .versions}} + case builtin{{.}}.MultisigActorCodeID: + return load{{.}}(store, act.Head) +{{end}} + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actors.Version, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store, signers, threshold, startEpoch, unlockDuration, initialBalance) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func 
GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.MultisigActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + LockedBalance(epoch abi.ChainEpoch) (abi.TokenAmount, error) + StartEpoch() (abi.ChainEpoch, error) + UnlockDuration() (abi.ChainEpoch, error) + InitialBalance() (abi.TokenAmount, error) + Threshold() (uint64, error) + Signers() ([]address.Address, error) + + ForEachPendingTxn(func(id int64, txn Transaction) error) error + PendingTxnChanged(State) (bool, error) + + transactions() (adt.Map, error) + decodeTransaction(val *cbg.Deferred) (Transaction, error) + GetState() interface{} +} + +type Transaction = msig0.Transaction + +var Methods = builtin{{.latestVersion}}.MethodsMultisig + +func Message(version actors.Version, from address.Address) MessageBuilder { + switch version { +{{range .versions}} + case actors.Version{{.}}: + return message{{.}}{{"{"}}{{if (ge . 2)}}message0{from}{{else}}from{{end}}} +{{end}} default: + panic(fmt.Sprintf("unsupported actors version: %d", version)) + } +} + +type MessageBuilder interface { + // Create a new multisig with the specified parameters. + Create(signers []address.Address, threshold uint64, + vestingStart, vestingDuration abi.ChainEpoch, + initialAmount abi.TokenAmount) (*types.Message, error) + + // Propose a transaction to the given multisig. + Propose(msig, target address.Address, amt abi.TokenAmount, + method abi.MethodNum, params []byte) (*types.Message, error) + + // Approve a multisig transaction. The "hash" is optional. + Approve(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error) + + // Cancel a multisig transaction. The "hash" is optional. 
+ Cancel(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error) +} + +// this type is the same between v0 and v2 +type ProposalHashData = msig{{.latestVersion}}.ProposalHashData +type ProposeReturn = msig{{.latestVersion}}.ProposeReturn +type ProposeParams = msig{{.latestVersion}}.ProposeParams +type ApproveReturn = msig{{.latestVersion}}.ApproveReturn + +func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { + params := msig{{.latestVersion}}.TxnIDParams{ID: msig{{.latestVersion}}.TxnID(id)} + if data != nil { + if data.Requester.Protocol() != address.ID { + return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester) + } + if data.Value.Sign() == -1 { + return nil, xerrors.Errorf("proposal value must be non-negative, was %s", data.Value) + } + if data.To == address.Undef { + return nil, xerrors.Errorf("proposed destination address must be set") + } + pser, err := data.Serialize() + if err != nil { + return nil, err + } + hash := blake2b.Sum256(pser) + params.ProposalHash = hash[:] + } + + return actors.SerializeParams(¶ms) +} diff --git a/chain/actors/builtin/multisig/diff.go b/chain/actors/builtin/multisig/diff.go new file mode 100644 index 00000000000..680d0870ab1 --- /dev/null +++ b/chain/actors/builtin/multisig/diff.go @@ -0,0 +1,134 @@ +package multisig + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +type PendingTransactionChanges struct { + Added []TransactionChange + Modified []TransactionModification + Removed []TransactionChange +} + +type TransactionChange struct { + TxID int64 + Tx Transaction +} + +type TransactionModification struct { + TxID int64 + From Transaction + To Transaction +} + +func DiffPendingTransactions(pre, cur State) (*PendingTransactionChanges, error) { + results := new(PendingTransactionChanges) + if 
changed, err := pre.PendingTxnChanged(cur); err != nil { + return nil, err + } else if !changed { // if nothing has changed then return an empty result and bail. + return results, nil + } + + pret, err := pre.transactions() + if err != nil { + return nil, err + } + + curt, err := cur.transactions() + if err != nil { + return nil, err + } + + if err := adt.DiffAdtMap(pret, curt, &transactionDiffer{results, pre, cur}); err != nil { + return nil, err + } + return results, nil +} + +type transactionDiffer struct { + Results *PendingTransactionChanges + pre, after State +} + +func (t *transactionDiffer) AsKey(key string) (abi.Keyer, error) { + txID, err := abi.ParseIntKey(key) + if err != nil { + return nil, err + } + return abi.IntKey(txID), nil +} + +func (t *transactionDiffer) Add(key string, val *cbg.Deferred) error { + txID, err := abi.ParseIntKey(key) + if err != nil { + return err + } + tx, err := t.after.decodeTransaction(val) + if err != nil { + return err + } + t.Results.Added = append(t.Results.Added, TransactionChange{ + TxID: txID, + Tx: tx, + }) + return nil +} + +func (t *transactionDiffer) Modify(key string, from, to *cbg.Deferred) error { + txID, err := abi.ParseIntKey(key) + if err != nil { + return err + } + + txFrom, err := t.pre.decodeTransaction(from) + if err != nil { + return err + } + + txTo, err := t.after.decodeTransaction(to) + if err != nil { + return err + } + + if approvalsChanged(txFrom.Approved, txTo.Approved) { + t.Results.Modified = append(t.Results.Modified, TransactionModification{ + TxID: txID, + From: txFrom, + To: txTo, + }) + } + + return nil +} + +func approvalsChanged(from, to []address.Address) bool { + if len(from) != len(to) { + return true + } + for idx := range from { + if from[idx] != to[idx] { + return true + } + } + return false +} + +func (t *transactionDiffer) Remove(key string, val *cbg.Deferred) error { + txID, err := abi.ParseIntKey(key) + if err != nil { + return err + } + tx, err := t.pre.decodeTransaction(val) + 
if err != nil { + return err + } + t.Results.Removed = append(t.Results.Removed, TransactionChange{ + TxID: txID, + Tx: tx, + }) + return nil +} diff --git a/chain/actors/builtin/multisig/message.go b/chain/actors/builtin/multisig/message.go deleted file mode 100644 index 3d2c66e6b02..00000000000 --- a/chain/actors/builtin/multisig/message.go +++ /dev/null @@ -1,74 +0,0 @@ -package multisig - -import ( - "fmt" - - "github.com/minio/blake2b-simd" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" - - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/types" -) - -var Methods = builtin2.MethodsMultisig - -func Message(version actors.Version, from address.Address) MessageBuilder { - switch version { - case actors.Version0: - return message0{from} - case actors.Version2: - return message2{message0{from}} - default: - panic(fmt.Sprintf("unsupported actors version: %d", version)) - } -} - -type MessageBuilder interface { - // Create a new multisig with the specified parameters. - Create(signers []address.Address, threshold uint64, - vestingStart, vestingDuration abi.ChainEpoch, - initialAmount abi.TokenAmount) (*types.Message, error) - - // Propose a transaction to the given multisig. - Propose(msig, target address.Address, amt abi.TokenAmount, - method abi.MethodNum, params []byte) (*types.Message, error) - - // Approve a multisig transaction. The "hash" is optional. - Approve(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error) - - // Cancel a multisig transaction. The "hash" is optional. 
- Cancel(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error) -} - -// this type is the same between v0 and v2 -type ProposalHashData = multisig2.ProposalHashData -type ProposeReturn = multisig2.ProposeReturn - -func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { - params := multisig2.TxnIDParams{ID: multisig2.TxnID(id)} - if data != nil { - if data.Requester.Protocol() != address.ID { - return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester) - } - if data.Value.Sign() == -1 { - return nil, xerrors.Errorf("proposal value must be non-negative, was %s", data.Value) - } - if data.To == address.Undef { - return nil, xerrors.Errorf("proposed destination address must be set") - } - pser, err := data.Serialize() - if err != nil { - return nil, err - } - hash := blake2b.Sum256(pser) - params.ProposalHash = hash[:] - } - - return actors.SerializeParams(¶ms) -} diff --git a/chain/actors/builtin/multisig/message.go.template b/chain/actors/builtin/multisig/message.go.template new file mode 100644 index 00000000000..6bff8983ab0 --- /dev/null +++ b/chain/actors/builtin/multisig/message.go.template @@ -0,0 +1,146 @@ +package multisig + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" + init{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/init" + multisig{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message{{.v}} struct{ {{if (ge .v 2)}}message0{{else}}from address.Address{{end}} } + +func (m message{{.v}}) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration 
abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, xerrors.Errorf("must provide source address") + } +{{if (le .v 1)}} + if unlockStart != 0 { + return nil, xerrors.Errorf("actors v0 does not support a non-zero vesting start time") + } +{{end}} + // Set up constructor parameters for multisig + msigParams := &multisig{{.v}}.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration,{{if (ge .v 2)}} + StartEpoch: unlockStart,{{end}} + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init{{.v}}.ExecParams{ + CodeCID: builtin{{.v}}.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtin{{.v}}.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} + +{{if (le .v 1)}} + +func (m message0) Propose(msig, to address.Address, amt abi.TokenAmount, + method abi.MethodNum, params []byte) (*types.Message, error) { + + if msig == address.Undef { + return nil, xerrors.Errorf("must provide a multisig address for proposal") + } + + if to == address.Undef { + return nil, xerrors.Errorf("must provide a target address for proposal") + } + + if amt.Sign() == -1 { + return nil, xerrors.Errorf("must provide a non-negative amount for proposed send") + } + + if m.from == address.Undef { + return nil, xerrors.Errorf("must provide source address") + } + + enc, actErr := 
actors.SerializeParams(&multisig0.ProposeParams{ + To: to, + Value: amt, + Method: method, + Params: params, + }) + if actErr != nil { + return nil, xerrors.Errorf("failed to serialize parameters: %w", actErr) + } + + return &types.Message{ + To: msig, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin0.MethodsMultisig.Propose, + Params: enc, + }, nil +} + +func (m message0) Approve(msig address.Address, txID uint64, hashData *ProposalHashData) (*types.Message, error) { + enc, err := txnParams(txID, hashData) + if err != nil { + return nil, err + } + + return &types.Message{ + To: msig, + From: m.from, + Value: types.NewInt(0), + Method: builtin0.MethodsMultisig.Approve, + Params: enc, + }, nil +} + +func (m message0) Cancel(msig address.Address, txID uint64, hashData *ProposalHashData) (*types.Message, error) { + enc, err := txnParams(txID, hashData) + if err != nil { + return nil, err + } + + return &types.Message{ + To: msig, + From: m.from, + Value: types.NewInt(0), + Method: builtin0.MethodsMultisig.Cancel, + Params: enc, + }, nil +} +{{end}} diff --git a/chain/actors/builtin/multisig/message3.go b/chain/actors/builtin/multisig/message3.go new file mode 100644 index 00000000000..f5f6d8cdfba --- /dev/null +++ b/chain/actors/builtin/multisig/message3.go @@ -0,0 +1,71 @@ +package multisig + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init" + multisig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message3 struct{ message0 } + +func (m message3) Create( + signers []address.Address, threshold uint64, + unlockStart, 
unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, xerrors.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig3.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init3.ExecParams{ + CodeCID: builtin3.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtin3.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/chain/actors/builtin/multisig/message4.go b/chain/actors/builtin/multisig/message4.go new file mode 100644 index 00000000000..90885aa0715 --- /dev/null +++ b/chain/actors/builtin/multisig/message4.go @@ -0,0 +1,71 @@ +package multisig + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + init4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/init" + multisig4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message4 struct{ 
message0 } + +func (m message4) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, xerrors.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig4.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init4.ExecParams{ + CodeCID: builtin4.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtin4.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/chain/actors/builtin/multisig/message5.go b/chain/actors/builtin/multisig/message5.go new file mode 100644 index 00000000000..9a8110f2cd5 --- /dev/null +++ b/chain/actors/builtin/multisig/message5.go @@ -0,0 +1,71 @@ +package multisig + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + init5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/init" + multisig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/actors" + init_ 
"github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message5 struct{ message0 } + +func (m message5) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, xerrors.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig5.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init5.ExecParams{ + CodeCID: builtin5.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtin5.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/chain/actors/builtin/multisig/multisig.go b/chain/actors/builtin/multisig/multisig.go new file mode 100644 index 00000000000..c950ced908e --- /dev/null +++ b/chain/actors/builtin/multisig/multisig.go @@ -0,0 +1,212 @@ +package multisig + +import ( + "fmt" + + "github.com/minio/blake2b-simd" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/ipfs/go-cid" + + msig0 
"github.com/filecoin-project/specs-actors/actors/builtin/multisig" + msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +) + +func init() { + + builtin.RegisterActorState(builtin0.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load0(store, root) + }) + + builtin.RegisterActorState(builtin2.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load2(store, root) + }) + + builtin.RegisterActorState(builtin3.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) + + builtin.RegisterActorState(builtin4.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load4(store, root) + }) + + builtin.RegisterActorState(builtin5.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) +} + +func Load(store adt.Store, act *types.Actor) (State, error) { + switch act.Code { + + case builtin0.MultisigActorCodeID: + return load0(store, act.Head) + + case builtin2.MultisigActorCodeID: + return load2(store, act.Head) + + case builtin3.MultisigActorCodeID: + return load3(store, act.Head) + + case builtin4.MultisigActorCodeID: + return load4(store, act.Head) + + case builtin5.MultisigActorCodeID: + return load5(store, act.Head) + + } + return nil, 
xerrors.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actors.Version, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + switch av { + + case actors.Version0: + return make0(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actors.Version2: + return make2(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actors.Version3: + return make3(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actors.Version4: + return make4(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actors.Version5: + return make5(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.MultisigActorCodeID, nil + + case actors.Version2: + return builtin2.MultisigActorCodeID, nil + + case actors.Version3: + return builtin3.MultisigActorCodeID, nil + + case actors.Version4: + return builtin4.MultisigActorCodeID, nil + + case actors.Version5: + return builtin5.MultisigActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + LockedBalance(epoch abi.ChainEpoch) (abi.TokenAmount, error) + StartEpoch() (abi.ChainEpoch, error) + UnlockDuration() (abi.ChainEpoch, error) + InitialBalance() (abi.TokenAmount, error) + Threshold() (uint64, error) + Signers() ([]address.Address, error) + + ForEachPendingTxn(func(id int64, txn Transaction) error) error + PendingTxnChanged(State) (bool, error) + + transactions() (adt.Map, error) + decodeTransaction(val *cbg.Deferred) (Transaction, error) + GetState() interface{} +} + +type Transaction = msig0.Transaction + +var Methods = 
builtin5.MethodsMultisig + +func Message(version actors.Version, from address.Address) MessageBuilder { + switch version { + + case actors.Version0: + return message0{from} + + case actors.Version2: + return message2{message0{from}} + + case actors.Version3: + return message3{message0{from}} + + case actors.Version4: + return message4{message0{from}} + + case actors.Version5: + return message5{message0{from}} + default: + panic(fmt.Sprintf("unsupported actors version: %d", version)) + } +} + +type MessageBuilder interface { + // Create a new multisig with the specified parameters. + Create(signers []address.Address, threshold uint64, + vestingStart, vestingDuration abi.ChainEpoch, + initialAmount abi.TokenAmount) (*types.Message, error) + + // Propose a transaction to the given multisig. + Propose(msig, target address.Address, amt abi.TokenAmount, + method abi.MethodNum, params []byte) (*types.Message, error) + + // Approve a multisig transaction. The "hash" is optional. + Approve(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error) + + // Cancel a multisig transaction. The "hash" is optional. 
+ Cancel(msig address.Address, txID uint64, hash *ProposalHashData) (*types.Message, error) +} + +// this type is the same between v0 and v2 +type ProposalHashData = msig5.ProposalHashData +type ProposeReturn = msig5.ProposeReturn +type ProposeParams = msig5.ProposeParams +type ApproveReturn = msig5.ApproveReturn + +func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { + params := msig5.TxnIDParams{ID: msig5.TxnID(id)} + if data != nil { + if data.Requester.Protocol() != address.ID { + return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester) + } + if data.Value.Sign() == -1 { + return nil, xerrors.Errorf("proposal value must be non-negative, was %s", data.Value) + } + if data.To == address.Undef { + return nil, xerrors.Errorf("proposed destination address must be set") + } + pser, err := data.Serialize() + if err != nil { + return nil, err + } + hash := blake2b.Sum256(pser) + params.ProposalHash = hash[:] + } + + return actors.SerializeParams(¶ms) +} diff --git a/chain/actors/builtin/multisig/state.go b/chain/actors/builtin/multisig/state.go deleted file mode 100644 index 89a7eedade7..00000000000 --- a/chain/actors/builtin/multisig/state.go +++ /dev/null @@ -1,52 +0,0 @@ -package multisig - -import ( - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/cbor" - "github.com/ipfs/go-cid" - - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - - "github.com/filecoin-project/lotus/chain/actors/adt" - "github.com/filecoin-project/lotus/chain/actors/builtin" - "github.com/filecoin-project/lotus/chain/types" -) - -func init() { - builtin.RegisterActorState(builtin0.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { - 
return load0(store, root) - }) - builtin.RegisterActorState(builtin2.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { - return load2(store, root) - }) -} - -func Load(store adt.Store, act *types.Actor) (State, error) { - switch act.Code { - case builtin0.MultisigActorCodeID: - return load0(store, act.Head) - case builtin2.MultisigActorCodeID: - return load2(store, act.Head) - } - return nil, xerrors.Errorf("unknown actor code %s", act.Code) -} - -type State interface { - cbor.Marshaler - - LockedBalance(epoch abi.ChainEpoch) (abi.TokenAmount, error) - StartEpoch() (abi.ChainEpoch, error) - UnlockDuration() (abi.ChainEpoch, error) - InitialBalance() (abi.TokenAmount, error) - Threshold() (uint64, error) - Signers() ([]address.Address, error) - - ForEachPendingTxn(func(id int64, txn Transaction) error) error -} - -type Transaction = msig0.Transaction diff --git a/chain/actors/builtin/multisig/state.go.template b/chain/actors/builtin/multisig/state.go.template new file mode 100644 index 00000000000..6c0130c0998 --- /dev/null +++ b/chain/actors/builtin/multisig/state.go.template @@ -0,0 +1,127 @@ +package multisig + +import ( + "bytes" + "encoding/binary" + + adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + +{{if (ge .v 3)}} + builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" +{{end}} + msig{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/multisig" +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func 
make{{.v}}(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state{{.v}}{store: store} + out.State = msig{{.v}}.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + {{if (le .v 2)}} + em, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + {{else}} + em, err := adt{{.v}}.StoreEmptyMap(store, builtin{{.v}}.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + {{end}} + return &out, nil +} + +type state{{.v}} struct { + msig{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state{{.v}}) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state{{.v}}) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state{{.v}}) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state{{.v}}) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state{{.v}}) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state{{.v}}) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt{{.v}}.AsMap(s.store, s.State.PendingTxns{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) + if err != nil { + return err + } + var out msig{{.v}}.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return xerrors.Errorf("invalid pending transaction key: %v", key) + } + return 
cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state{{.v}}) PendingTxnChanged(other State) (bool, error) { + other{{.v}}, ok := other.(*state{{.v}}) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other{{.v}}.PendingTxns), nil +} + +func (s *state{{.v}}) transactions() (adt.Map, error) { + return adt{{.v}}.AsMap(s.store, s.PendingTxns{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) +} + +func (s *state{{.v}}) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig{{.v}}.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return tx, nil +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/state0.go b/chain/actors/builtin/multisig/v0.go similarity index 56% rename from chain/actors/builtin/multisig/state0.go rename to chain/actors/builtin/multisig/v0.go index c934343e702..973ac920904 100644 --- a/chain/actors/builtin/multisig/state0.go +++ b/chain/actors/builtin/multisig/v0.go @@ -1,17 +1,20 @@ package multisig import ( + "bytes" "encoding/binary" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/actors/adt" msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" - adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" ) var _ State = (*state0)(nil) @@ -25,6 +28,25 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state0{store: store} + out.State = 
msig0.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + type state0 struct { msig0.State store adt.Store @@ -65,6 +87,31 @@ func (s *state0) ForEachPendingTxn(cb func(id int64, txn Transaction) error) err if n <= 0 { return xerrors.Errorf("invalid pending transaction key: %v", key) } - return cb(txid, (Transaction)(out)) + return cb(txid, (Transaction)(out)) //nolint:unconvert }) } + +func (s *state0) PendingTxnChanged(other State) (bool, error) { + other0, ok := other.(*state0) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other0.PendingTxns), nil +} + +func (s *state0) transactions() (adt.Map, error) { + return adt0.AsMap(s.store, s.PendingTxns) +} + +func (s *state0) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig0.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return tx, nil +} + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/state2.go b/chain/actors/builtin/multisig/v2.go similarity index 56% rename from chain/actors/builtin/multisig/state2.go rename to chain/actors/builtin/multisig/v2.go index a78b07d551f..5b830e69530 100644 --- a/chain/actors/builtin/multisig/state2.go +++ b/chain/actors/builtin/multisig/v2.go @@ -1,17 +1,20 @@ package multisig import ( + "bytes" "encoding/binary" + adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" 
"github.com/filecoin-project/lotus/chain/actors/adt" msig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" - adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" ) var _ State = (*state2)(nil) @@ -25,6 +28,25 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state2{store: store} + out.State = msig2.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + type state2 struct { msig2.State store adt.Store @@ -65,6 +87,31 @@ func (s *state2) ForEachPendingTxn(cb func(id int64, txn Transaction) error) err if n <= 0 { return xerrors.Errorf("invalid pending transaction key: %v", key) } - return cb(txid, (Transaction)(out)) + return cb(txid, (Transaction)(out)) //nolint:unconvert }) } + +func (s *state2) PendingTxnChanged(other State) (bool, error) { + other2, ok := other.(*state2) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other2.PendingTxns), nil +} + +func (s *state2) transactions() (adt.Map, error) { + return adt2.AsMap(s.store, s.PendingTxns) +} + +func (s *state2) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig2.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return tx, nil +} + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/v3.go b/chain/actors/builtin/multisig/v3.go new file mode 100644 index 
00000000000..c4a2791b705 --- /dev/null +++ b/chain/actors/builtin/multisig/v3.go @@ -0,0 +1,119 @@ +package multisig + +import ( + "bytes" + "encoding/binary" + + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + msig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state3{store: store} + out.State = msig3.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt3.StoreEmptyMap(store, builtin3.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state3 struct { + msig3.State + store adt.Store +} + +func (s *state3) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state3) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state3) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state3) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + 
+func (s *state3) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state3) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state3) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt3.AsMap(s.store, s.State.PendingTxns, builtin3.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig3.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return xerrors.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state3) PendingTxnChanged(other State) (bool, error) { + other3, ok := other.(*state3) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other3.PendingTxns), nil +} + +func (s *state3) transactions() (adt.Map, error) { + return adt3.AsMap(s.store, s.PendingTxns, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig3.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return tx, nil +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/v4.go b/chain/actors/builtin/multisig/v4.go new file mode 100644 index 00000000000..a35a890f870 --- /dev/null +++ b/chain/actors/builtin/multisig/v4.go @@ -0,0 +1,119 @@ +package multisig + +import ( + "bytes" + "encoding/binary" + + adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + 
msig4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/multisig" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state4{store: store} + out.State = msig4.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt4.StoreEmptyMap(store, builtin4.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state4 struct { + msig4.State + store adt.Store +} + +func (s *state4) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state4) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state4) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state4) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state4) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state4) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state4) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt4.AsMap(s.store, s.State.PendingTxns, builtin4.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig4.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return 
xerrors.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state4) PendingTxnChanged(other State) (bool, error) { + other4, ok := other.(*state4) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other4.PendingTxns), nil +} + +func (s *state4) transactions() (adt.Map, error) { + return adt4.AsMap(s.store, s.PendingTxns, builtin4.DefaultHamtBitwidth) +} + +func (s *state4) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig4.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return tx, nil +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/v5.go b/chain/actors/builtin/multisig/v5.go new file mode 100644 index 00000000000..4ad9aea941a --- /dev/null +++ b/chain/actors/builtin/multisig/v5.go @@ -0,0 +1,119 @@ +package multisig + +import ( + "bytes" + "encoding/binary" + + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state5{store: store} + out.State 
= msig5.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt5.StoreEmptyMap(store, builtin5.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state5 struct { + msig5.State + store adt.Store +} + +func (s *state5) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state5) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state5) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state5) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state5) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state5) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state5) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt5.AsMap(s.store, s.State.PendingTxns, builtin5.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig5.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return xerrors.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state5) PendingTxnChanged(other State) (bool, error) { + other5, ok := other.(*state5) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other5.PendingTxns), nil +} + +func (s *state5) transactions() (adt.Map, error) { + return adt5.AsMap(s.store, s.PendingTxns, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) decodeTransaction(val *cbg.Deferred) 
(Transaction, error) { + var tx msig5.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return tx, nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/paych/state.go b/chain/actors/builtin/paych/actor.go.template similarity index 55% rename from chain/actors/builtin/paych/state.go rename to chain/actors/builtin/paych/actor.go.template index 20c7a74b734..7699e76b631 100644 --- a/chain/actors/builtin/paych/state.go +++ b/chain/actors/builtin/paych/actor.go.template @@ -2,6 +2,7 @@ package paych import ( "encoding/base64" + "fmt" "golang.org/x/xerrors" @@ -12,35 +13,56 @@ import ( "github.com/ipfs/go-cid" ipldcbor "github.com/ipfs/go-ipld-cbor" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" +{{range .versions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" +{{end}} + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" ) func init() { - builtin.RegisterActorState(builtin0.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { - return load0(store, root) +{{range .versions}} + builtin.RegisterActorState(builtin{{.}}.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load{{.}}(store, root) }) - builtin.RegisterActorState(builtin2.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { - return load2(store, root) - }) -} +{{end}}} // Load returns an abstract copy of payment channel state, irregardless of actor version func Load(store adt.Store, act *types.Actor) (State, error) { switch 
act.Code { - case builtin0.PaymentChannelActorCodeID: - return load0(store, act.Head) - case builtin2.PaymentChannelActorCodeID: - return load2(store, act.Head) +{{range .versions}} + case builtin{{.}}.PaymentChannelActorCodeID: + return load{{.}}(store, act.Head) +{{end}} } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.PaymentChannelActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + // State is an abstract version of payment channel state that works across // versions type State interface { @@ -61,6 +83,8 @@ type State interface { // Iterate lane states ForEachLaneState(cb func(idx uint64, dl LaneState) error) error + + GetState() interface{} } // LaneState is an abstract copy of the state of a single lane @@ -86,3 +110,23 @@ func DecodeSignedVoucher(s string) (*SignedVoucher, error) { return &sv, nil } + +var Methods = builtin{{.latestVersion}}.MethodsPaych + +func Message(version actors.Version, from address.Address) MessageBuilder { + switch version { +{{range .versions}} + case actors.Version{{.}}: + return message{{.}}{from} +{{end}} + default: + panic(fmt.Sprintf("unsupported actors version: %d", version)) + } +} + +type MessageBuilder interface { + Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) + Update(paych address.Address, voucher *SignedVoucher, secret []byte) (*types.Message, error) + Settle(paych address.Address) (*types.Message, error) + Collect(paych address.Address) (*types.Message, error) +} diff --git a/chain/actors/builtin/paych/message.go 
b/chain/actors/builtin/paych/message.go deleted file mode 100644 index 5709d4b23d8..00000000000 --- a/chain/actors/builtin/paych/message.go +++ /dev/null @@ -1,32 +0,0 @@ -package paych - -import ( - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/types" - - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" -) - -var Methods = builtin2.MethodsPaych - -func Message(version actors.Version, from address.Address) MessageBuilder { - switch version { - case actors.Version0: - return message0{from} - case actors.Version2: - return message2{from} - default: - panic(fmt.Sprintf("unsupported actors version: %d", version)) - } -} - -type MessageBuilder interface { - Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) - Update(paych address.Address, voucher *SignedVoucher, secret []byte) (*types.Message, error) - Settle(paych address.Address) (*types.Message, error) - Collect(paych address.Address) (*types.Message, error) -} diff --git a/chain/actors/builtin/paych/message.go.template b/chain/actors/builtin/paych/message.go.template new file mode 100644 index 00000000000..4a5ea2331e5 --- /dev/null +++ b/chain/actors/builtin/paych/message.go.template @@ -0,0 +1,74 @@ +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" + init{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/init" + paych{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/paych" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message{{.v}} struct{ from address.Address } + +func (m 
message{{.v}}) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych{{.v}}.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init{{.v}}.ExecParams{ + CodeCID: builtin{{.v}}.PaymentChannelActorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin{{.v}}.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message{{.v}}) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych{{.v}}.UpdateChannelStateParams{ + Sv: *sv, + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin{{.v}}.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message{{.v}}) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin{{.v}}.MethodsPaych.Settle, + }, nil +} + +func (m message{{.v}}) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin{{.v}}.MethodsPaych.Collect, + }, nil +} diff --git a/chain/actors/builtin/paych/message3.go b/chain/actors/builtin/paych/message3.go new file mode 100644 index 00000000000..50503a1409a --- /dev/null +++ b/chain/actors/builtin/paych/message3.go @@ -0,0 +1,74 @@ +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init" + paych3 
"github.com/filecoin-project/specs-actors/v3/actors/builtin/paych" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message3 struct{ from address.Address } + +func (m message3) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych3.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init3.ExecParams{ + CodeCID: builtin3.PaymentChannelActorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin3.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message3) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych3.UpdateChannelStateParams{ + Sv: *sv, + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin3.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message3) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin3.MethodsPaych.Settle, + }, nil +} + +func (m message3) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin3.MethodsPaych.Collect, + }, nil +} diff --git a/chain/actors/builtin/paych/message4.go b/chain/actors/builtin/paych/message4.go new file mode 100644 index 00000000000..b2c6b612e38 --- /dev/null +++ b/chain/actors/builtin/paych/message4.go @@ -0,0 +1,74 @@ +package paych + +import ( + 
"github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + init4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/init" + paych4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/paych" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message4 struct{ from address.Address } + +func (m message4) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych4.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init4.ExecParams{ + CodeCID: builtin4.PaymentChannelActorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin4.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message4) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych4.UpdateChannelStateParams{ + Sv: *sv, + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin4.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message4) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin4.MethodsPaych.Settle, + }, nil +} + +func (m message4) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin4.MethodsPaych.Collect, + }, nil +} diff --git 
a/chain/actors/builtin/paych/message5.go b/chain/actors/builtin/paych/message5.go new file mode 100644 index 00000000000..37a2b6f04af --- /dev/null +++ b/chain/actors/builtin/paych/message5.go @@ -0,0 +1,74 @@ +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + init5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/init" + paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message5 struct{ from address.Address } + +func (m message5) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych5.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init5.ExecParams{ + CodeCID: builtin5.PaymentChannelActorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin5.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message5) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych5.UpdateChannelStateParams{ + Sv: *sv, + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin5.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message5) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin5.MethodsPaych.Settle, + }, nil +} + 
+func (m message5) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin5.MethodsPaych.Collect, + }, nil +} diff --git a/chain/actors/builtin/paych/mock/mock.go b/chain/actors/builtin/paych/mock/mock.go index 3b82511ffa0..1ecfa113070 100644 --- a/chain/actors/builtin/paych/mock/mock.go +++ b/chain/actors/builtin/paych/mock/mock.go @@ -17,6 +17,10 @@ type mockState struct { lanes map[uint64]paych.LaneState } +func (ms *mockState) GetState() interface{} { + panic("implement me") +} + type mockLaneState struct { redeemed big.Int nonce uint64 diff --git a/chain/actors/builtin/paych/paych.go b/chain/actors/builtin/paych/paych.go new file mode 100644 index 00000000000..d87f70f0c2a --- /dev/null +++ b/chain/actors/builtin/paych/paych.go @@ -0,0 +1,203 @@ +package paych + +import ( + "encoding/base64" + "fmt" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/ipfs/go-cid" + ipldcbor "github.com/ipfs/go-ipld-cbor" + + paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +) + +func init() { + + builtin.RegisterActorState(builtin0.PaymentChannelActorCodeID, func(store adt.Store, root 
cid.Cid) (cbor.Marshaler, error) { + return load0(store, root) + }) + + builtin.RegisterActorState(builtin2.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load2(store, root) + }) + + builtin.RegisterActorState(builtin3.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) + + builtin.RegisterActorState(builtin4.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load4(store, root) + }) + + builtin.RegisterActorState(builtin5.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) +} + +// Load returns an abstract copy of payment channel state, irregardless of actor version +func Load(store adt.Store, act *types.Actor) (State, error) { + switch act.Code { + + case builtin0.PaymentChannelActorCodeID: + return load0(store, act.Head) + + case builtin2.PaymentChannelActorCodeID: + return load2(store, act.Head) + + case builtin3.PaymentChannelActorCodeID: + return load3(store, act.Head) + + case builtin4.PaymentChannelActorCodeID: + return load4(store, act.Head) + + case builtin5.PaymentChannelActorCodeID: + return load5(store, act.Head) + + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { + + case actors.Version0: + return make0(store) + + case actors.Version2: + return make2(store) + + case actors.Version3: + return make3(store) + + case actors.Version4: + return make4(store) + + case actors.Version5: + return make5(store) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.PaymentChannelActorCodeID, nil + + case actors.Version2: + return builtin2.PaymentChannelActorCodeID, nil + + case actors.Version3: + 
return builtin3.PaymentChannelActorCodeID, nil + + case actors.Version4: + return builtin4.PaymentChannelActorCodeID, nil + + case actors.Version5: + return builtin5.PaymentChannelActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + +// State is an abstract version of payment channel state that works across +// versions +type State interface { + cbor.Marshaler + // Channel owner, who has funded the actor + From() (address.Address, error) + // Recipient of payouts from channel + To() (address.Address, error) + + // Height at which the channel can be `Collected` + SettlingAt() (abi.ChainEpoch, error) + + // Amount successfully redeemed through the payment channel, paid out on `Collect()` + ToSend() (abi.TokenAmount, error) + + // Get total number of lanes + LaneCount() (uint64, error) + + // Iterate lane states + ForEachLaneState(cb func(idx uint64, dl LaneState) error) error + + GetState() interface{} +} + +// LaneState is an abstract copy of the state of a single lane +type LaneState interface { + Redeemed() (big.Int, error) + Nonce() (uint64, error) +} + +type SignedVoucher = paych0.SignedVoucher +type ModVerifyParams = paych0.ModVerifyParams + +// DecodeSignedVoucher decodes base64 encoded signed voucher. 
+func DecodeSignedVoucher(s string) (*SignedVoucher, error) { + data, err := base64.RawURLEncoding.DecodeString(s) + if err != nil { + return nil, err + } + + var sv SignedVoucher + if err := ipldcbor.DecodeInto(data, &sv); err != nil { + return nil, err + } + + return &sv, nil +} + +var Methods = builtin5.MethodsPaych + +func Message(version actors.Version, from address.Address) MessageBuilder { + switch version { + + case actors.Version0: + return message0{from} + + case actors.Version2: + return message2{from} + + case actors.Version3: + return message3{from} + + case actors.Version4: + return message4{from} + + case actors.Version5: + return message5{from} + + default: + panic(fmt.Sprintf("unsupported actors version: %d", version)) + } +} + +type MessageBuilder interface { + Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) + Update(paych address.Address, voucher *SignedVoucher, secret []byte) (*types.Message, error) + Settle(paych address.Address) (*types.Message, error) + Collect(paych address.Address) (*types.Message, error) +} diff --git a/chain/actors/builtin/paych/state.go.template b/chain/actors/builtin/paych/state.go.template new file mode 100644 index 00000000000..3e41f5be5f6 --- /dev/null +++ b/chain/actors/builtin/paych/state.go.template @@ -0,0 +1,114 @@ +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + paych{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/paych" + adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, 
nil +} + +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + out.State = paych{{.v}}.State{} + return &out, nil +} + +type state{{.v}} struct { + paych{{.v}}.State + store adt.Store + lsAmt *adt{{.v}}.Array +} + +// Channel owner, who has funded the actor +func (s *state{{.v}}) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state{{.v}}) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state{{.v}}) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state{{.v}}) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state{{.v}}) getOrLoadLsAmt() (*adt{{.v}}.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt{{.v}}.AsArray(s.store, s.State.LaneStates{{if (ge .v 3)}}, paych{{.v}}.LaneStatesAmtBitwidth{{end}}) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state{{.v}}) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state{{.v}}) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych{{.v}}.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState{{.v}}{ls}) + }) +} + +type laneState{{.v}} struct { + paych{{.v}}.LaneState +} + +func (ls *laneState{{.v}}) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState{{.v}}) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/chain/actors/builtin/paych/state0.go b/chain/actors/builtin/paych/v0.go similarity index 92% rename from chain/actors/builtin/paych/state0.go rename to chain/actors/builtin/paych/v0.go index 8e0e3434e07..e9bc30e3d18 100644 --- a/chain/actors/builtin/paych/state0.go +++ b/chain/actors/builtin/paych/v0.go @@ -24,6 +24,12 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store) (State, error) { + out := state0{store: store} + out.State = paych0.State{} + return &out, nil +} + type state0 struct { paych0.State store adt.Store @@ -74,6 +80,10 @@ func (s *state0) LaneCount() (uint64, error) { return lsamt.Length(), nil } +func (s *state0) GetState() interface{} { + return &s.State +} + // Iterate lane states func (s *state0) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { // Get the lane state from the chain diff --git a/chain/actors/builtin/paych/state2.go b/chain/actors/builtin/paych/v2.go similarity index 92% rename from chain/actors/builtin/paych/state2.go rename to chain/actors/builtin/paych/v2.go index fbf4b9fde3b..400305e2fb0 100644 --- a/chain/actors/builtin/paych/state2.go +++ b/chain/actors/builtin/paych/v2.go @@ -24,6 +24,12 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store) (State, error) { + out := state2{store: store} + out.State = paych2.State{} + return &out, nil +} + type state2 struct { paych2.State store adt.Store @@ -74,6 +80,10 @@ func (s *state2) LaneCount() (uint64, error) { return lsamt.Length(), nil } +func (s *state2) 
GetState() interface{} { + return &s.State +} + // Iterate lane states func (s *state2) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { // Get the lane state from the chain diff --git a/chain/actors/builtin/paych/v3.go b/chain/actors/builtin/paych/v3.go new file mode 100644 index 00000000000..1d7c2f94b06 --- /dev/null +++ b/chain/actors/builtin/paych/v3.go @@ -0,0 +1,114 @@ +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + paych3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/paych" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store) (State, error) { + out := state3{store: store} + out.State = paych3.State{} + return &out, nil +} + +type state3 struct { + paych3.State + store adt.Store + lsAmt *adt3.Array +} + +// Channel owner, who has funded the actor +func (s *state3) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state3) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state3) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state3) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state3) getOrLoadLsAmt() (*adt3.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt3.AsArray(s.store, 
s.State.LaneStates, paych3.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state3) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state3) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state3) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. + var ls paych3.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState3{ls}) + }) +} + +type laneState3 struct { + paych3.LaneState +} + +func (ls *laneState3) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState3) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/chain/actors/builtin/paych/v4.go b/chain/actors/builtin/paych/v4.go new file mode 100644 index 00000000000..b7d1e52a5b8 --- /dev/null +++ b/chain/actors/builtin/paych/v4.go @@ -0,0 +1,114 @@ +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + paych4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/paych" + adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store 
adt.Store) (State, error) { + out := state4{store: store} + out.State = paych4.State{} + return &out, nil +} + +type state4 struct { + paych4.State + store adt.Store + lsAmt *adt4.Array +} + +// Channel owner, who has funded the actor +func (s *state4) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state4) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state4) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state4) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state4) getOrLoadLsAmt() (*adt4.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt4.AsArray(s.store, s.State.LaneStates, paych4.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state4) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state4) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state4) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych4.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState4{ls}) + }) +} + +type laneState4 struct { + paych4.LaneState +} + +func (ls *laneState4) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState4) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/chain/actors/builtin/paych/v5.go b/chain/actors/builtin/paych/v5.go new file mode 100644 index 00000000000..b331a1500bf --- /dev/null +++ b/chain/actors/builtin/paych/v5.go @@ -0,0 +1,114 @@ +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = paych5.State{} + return &out, nil +} + +type state5 struct { + paych5.State + store adt.Store + lsAmt *adt5.Array +} + +// Channel owner, who has funded the actor +func (s *state5) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state5) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state5) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state5) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state5) 
getOrLoadLsAmt() (*adt5.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt5.AsArray(s.store, s.State.LaneStates, paych5.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state5) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state5) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. + var ls paych5.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState5{ls}) + }) +} + +type laneState5 struct { + paych5.LaneState +} + +func (ls *laneState5) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState5) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/chain/actors/builtin/power/actor.go.template b/chain/actors/builtin/power/actor.go.template new file mode 100644 index 00000000000..fe11fc16069 --- /dev/null +++ b/chain/actors/builtin/power/actor.go.template @@ -0,0 +1,107 @@ +package power + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + + "github.com/filecoin-project/lotus/chain/actors/adt" + 
"github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +{{range .versions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" +{{end}} +) + +func init() { +{{range .versions}} + builtin.RegisterActorState(builtin{{.}}.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load{{.}}(store, root) + }) +{{end}}} + +var ( + Address = builtin{{.latestVersion}}.StoragePowerActorAddr + Methods = builtin{{.latestVersion}}.MethodsPower +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + switch act.Code { +{{range .versions}} + case builtin{{.}}.StoragePowerActorCodeID: + return load{{.}}(store, act.Head) +{{end}} + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.StoragePowerActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + TotalLocked() (abi.TokenAmount, error) + TotalPower() (Claim, error) + TotalCommitted() (Claim, error) + TotalPowerSmoothed() (builtin.FilterEstimate, error) + GetState() interface{} + + // MinerCounts returns the number of miners. Participating is the number + // with power above the minimum miner threshold. 
+ MinerCounts() (participating, total uint64, err error) + MinerPower(address.Address) (Claim, bool, error) + MinerNominalPowerMeetsConsensusMinimum(address.Address) (bool, error) + ListAllMiners() ([]address.Address, error) + ForEachClaim(func(miner address.Address, claim Claim) error) error + ClaimsChanged(State) (bool, error) + + // Testing or genesis setup only + SetTotalQualityAdjPower(abi.StoragePower) error + SetTotalRawBytePower(abi.StoragePower) error + SetThisEpochQualityAdjPower(abi.StoragePower) error + SetThisEpochRawBytePower(abi.StoragePower) error + + // Diff helpers. Used by Diff* functions internally. + claims() (adt.Map, error) + decodeClaim(*cbg.Deferred) (Claim, error) +} + +type Claim struct { + // Sum of raw byte power for a miner's sectors. + RawBytePower abi.StoragePower + + // Sum of quality adjusted power for a miner's sectors. + QualityAdjPower abi.StoragePower +} + +func AddClaims(a Claim, b Claim) Claim { + return Claim{ + RawBytePower: big.Add(a.RawBytePower, b.RawBytePower), + QualityAdjPower: big.Add(a.QualityAdjPower, b.QualityAdjPower), + } +} diff --git a/chain/actors/builtin/power/diff.go b/chain/actors/builtin/power/diff.go new file mode 100644 index 00000000000..3daa7056956 --- /dev/null +++ b/chain/actors/builtin/power/diff.go @@ -0,0 +1,117 @@ +package power + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +type ClaimChanges struct { + Added []ClaimInfo + Modified []ClaimModification + Removed []ClaimInfo +} + +type ClaimModification struct { + Miner address.Address + From Claim + To Claim +} + +type ClaimInfo struct { + Miner address.Address + Claim Claim +} + +func DiffClaims(pre, cur State) (*ClaimChanges, error) { + results := new(ClaimChanges) + + prec, err := pre.claims() + if err != nil { + return nil, err + } + + curc, err := cur.claims() + if err != nil { + 
return nil, err + } + + if err := adt.DiffAdtMap(prec, curc, &claimDiffer{results, pre, cur}); err != nil { + return nil, err + } + + return results, nil +} + +type claimDiffer struct { + Results *ClaimChanges + pre, after State +} + +func (c *claimDiffer) AsKey(key string) (abi.Keyer, error) { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return nil, err + } + return abi.AddrKey(addr), nil +} + +func (c *claimDiffer) Add(key string, val *cbg.Deferred) error { + ci, err := c.after.decodeClaim(val) + if err != nil { + return err + } + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + c.Results.Added = append(c.Results.Added, ClaimInfo{ + Miner: addr, + Claim: ci, + }) + return nil +} + +func (c *claimDiffer) Modify(key string, from, to *cbg.Deferred) error { + ciFrom, err := c.pre.decodeClaim(from) + if err != nil { + return err + } + + ciTo, err := c.after.decodeClaim(to) + if err != nil { + return err + } + + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + + if ciFrom != ciTo { + c.Results.Modified = append(c.Results.Modified, ClaimModification{ + Miner: addr, + From: ciFrom, + To: ciTo, + }) + } + return nil +} + +func (c *claimDiffer) Remove(key string, val *cbg.Deferred) error { + ci, err := c.pre.decodeClaim(val) + if err != nil { + return err + } + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + c.Results.Removed = append(c.Results.Removed, ClaimInfo{ + Miner: addr, + Claim: ci, + }) + return nil +} diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go index f941ce93e17..5b4aa1b04ff 100644 --- a/chain/actors/builtin/power/power.go +++ b/chain/actors/builtin/power/power.go @@ -3,7 +3,9 @@ package power import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors" "github.com/ipfs/go-cid" + cbg 
"github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" @@ -14,33 +16,111 @@ import ( "github.com/filecoin-project/lotus/chain/types" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" ) func init() { + builtin.RegisterActorState(builtin0.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load0(store, root) }) + builtin.RegisterActorState(builtin2.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + + builtin.RegisterActorState(builtin3.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) + + builtin.RegisterActorState(builtin4.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load4(store, root) + }) + + builtin.RegisterActorState(builtin5.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) } var ( - Address = builtin2.StoragePowerActorAddr - Methods = builtin2.MethodsPower + Address = builtin5.StoragePowerActorAddr + Methods = builtin5.MethodsPower ) -func Load(store adt.Store, act *types.Actor) (st State, err error) { +func Load(store adt.Store, act *types.Actor) (State, error) { switch act.Code { + case builtin0.StoragePowerActorCodeID: return load0(store, act.Head) + case builtin2.StoragePowerActorCodeID: return load2(store, act.Head) + + case builtin3.StoragePowerActorCodeID: + return load3(store, act.Head) + + case builtin4.StoragePowerActorCodeID: + return load4(store, act.Head) + + case 
builtin5.StoragePowerActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { + + case actors.Version0: + return make0(store) + + case actors.Version2: + return make2(store) + + case actors.Version3: + return make3(store) + + case actors.Version4: + return make4(store) + + case actors.Version5: + return make5(store) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.StoragePowerActorCodeID, nil + + case actors.Version2: + return builtin2.StoragePowerActorCodeID, nil + + case actors.Version3: + return builtin3.StoragePowerActorCodeID, nil + + case actors.Version4: + return builtin4.StoragePowerActorCodeID, nil + + case actors.Version5: + return builtin5.StoragePowerActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -48,6 +128,7 @@ type State interface { TotalPower() (Claim, error) TotalCommitted() (Claim, error) TotalPowerSmoothed() (builtin.FilterEstimate, error) + GetState() interface{} // MinerCounts returns the number of miners. Participating is the number // with power above the minimum miner threshold. @@ -56,6 +137,17 @@ type State interface { MinerNominalPowerMeetsConsensusMinimum(address.Address) (bool, error) ListAllMiners() ([]address.Address, error) ForEachClaim(func(miner address.Address, claim Claim) error) error + ClaimsChanged(State) (bool, error) + + // Testing or genesis setup only + SetTotalQualityAdjPower(abi.StoragePower) error + SetTotalRawBytePower(abi.StoragePower) error + SetThisEpochQualityAdjPower(abi.StoragePower) error + SetThisEpochRawBytePower(abi.StoragePower) error + + // Diff helpers. Used by Diff* functions internally. 
+ claims() (adt.Map, error) + decodeClaim(*cbg.Deferred) (Claim, error) } type Claim struct { diff --git a/chain/actors/builtin/power/state.go.template b/chain/actors/builtin/power/state.go.template new file mode 100644 index 00000000000..fcdc5c35046 --- /dev/null +++ b/chain/actors/builtin/power/state.go.template @@ -0,0 +1,201 @@ +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + +{{if (ge .v 3)}} + builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" +{{end}} + power{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/power" + adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + {{if (le .v 2)}} + em, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + emm, err := adt{{.v}}.MakeEmptyMultimap(store).Root() + if err != nil { + return nil, err + } + + out.State = *power{{.v}}.ConstructState(em, emm) + {{else}} + s, err := power{{.v}}.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + {{end}} + + return &out, nil +} + +type state{{.v}} struct { + power{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state{{.v}}) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: 
s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. +func (s *state{{.v}}) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state{{.v}}) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power{{.v}}.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state{{.v}}) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state{{.v}}) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FromV{{.v}}FilterEstimate({{if (le .v 1)}}*{{end}}s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state{{.v}}) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state{{.v}}) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state{{.v}}) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power{{.v}}.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + 
QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state{{.v}}) ClaimsChanged(other State) (bool, error) { + other{{.v}}, ok := other.(*state{{.v}}) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other{{.v}}.State.Claims), nil +} + +func (s *state{{.v}}) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state{{.v}}) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state{{.v}}) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state{{.v}}) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} + +func (s *state{{.v}}) claims() (adt.Map, error) { + return adt{{.v}}.AsMap(s.store, s.Claims{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) +} + +func (s *state{{.v}}) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power{{.v}}.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV{{.v}}Claim(ci), nil +} + +func fromV{{.v}}Claim(v{{.v}} power{{.v}}.Claim) Claim { + return Claim{ + RawBytePower: v{{.v}}.RawBytePower, + QualityAdjPower: v{{.v}}.QualityAdjPower, + } +} diff --git a/chain/actors/builtin/power/v0.go b/chain/actors/builtin/power/v0.go index 3f9a657777f..465d16c5c35 100644 --- a/chain/actors/builtin/power/v0.go +++ b/chain/actors/builtin/power/v0.go @@ -1,9 +1,12 @@ package power import ( + "bytes" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -23,6 +26,24 @@ func load0(store 
adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store) (State, error) { + out := state0{store: store} + + em, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + emm, err := adt0.MakeEmptyMultimap(store).Root() + if err != nil { + return nil, err + } + + out.State = *power0.ConstructState(em, emm) + + return &out, nil +} + type state0 struct { power0.State store adt.Store @@ -48,7 +69,7 @@ func (s *state0) TotalCommitted() (Claim, error) { } func (s *state0) MinerPower(addr address.Address) (Claim, bool, error) { - claims, err := adt0.AsMap(s.store, s.Claims) + claims, err := s.claims() if err != nil { return Claim{}, false, err } @@ -76,7 +97,7 @@ func (s *state0) MinerCounts() (uint64, uint64, error) { } func (s *state0) ListAllMiners() ([]address.Address, error) { - claims, err := adt0.AsMap(s.store, s.Claims) + claims, err := s.claims() if err != nil { return nil, err } @@ -98,7 +119,7 @@ func (s *state0) ListAllMiners() ([]address.Address, error) { } func (s *state0) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { - claims, err := adt0.AsMap(s.store, s.Claims) + claims, err := s.claims() if err != nil { return err } @@ -115,3 +136,55 @@ func (s *state0) ForEachClaim(cb func(miner address.Address, claim Claim) error) }) }) } + +func (s *state0) ClaimsChanged(other State) (bool, error) { + other0, ok := other.(*state0) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other0.State.Claims), nil +} + +func (s *state0) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state0) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state0) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state0) SetThisEpochRawBytePower(p 
abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state0) GetState() interface{} { + return &s.State +} + +func (s *state0) claims() (adt.Map, error) { + return adt0.AsMap(s.store, s.Claims) +} + +func (s *state0) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power0.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV0Claim(ci), nil +} + +func fromV0Claim(v0 power0.Claim) Claim { + return Claim{ + RawBytePower: v0.RawBytePower, + QualityAdjPower: v0.QualityAdjPower, + } +} diff --git a/chain/actors/builtin/power/v2.go b/chain/actors/builtin/power/v2.go index 0c15f066928..606534cef26 100644 --- a/chain/actors/builtin/power/v2.go +++ b/chain/actors/builtin/power/v2.go @@ -1,9 +1,12 @@ package power import ( + "bytes" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -23,6 +26,24 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store) (State, error) { + out := state2{store: store} + + em, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + emm, err := adt2.MakeEmptyMultimap(store).Root() + if err != nil { + return nil, err + } + + out.State = *power2.ConstructState(em, emm) + + return &out, nil +} + type state2 struct { power2.State store adt.Store @@ -48,7 +69,7 @@ func (s *state2) TotalCommitted() (Claim, error) { } func (s *state2) MinerPower(addr address.Address) (Claim, bool, error) { - claims, err := adt2.AsMap(s.store, s.Claims) + claims, err := s.claims() if err != nil { return Claim{}, false, err } @@ -76,7 +97,7 @@ func (s *state2) MinerCounts() (uint64, uint64, error) { } func (s *state2) ListAllMiners() ([]address.Address, error) { - claims, 
err := adt2.AsMap(s.store, s.Claims) + claims, err := s.claims() if err != nil { return nil, err } @@ -98,7 +119,7 @@ func (s *state2) ListAllMiners() ([]address.Address, error) { } func (s *state2) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { - claims, err := adt2.AsMap(s.store, s.Claims) + claims, err := s.claims() if err != nil { return err } @@ -115,3 +136,55 @@ func (s *state2) ForEachClaim(cb func(miner address.Address, claim Claim) error) }) }) } + +func (s *state2) ClaimsChanged(other State) (bool, error) { + other2, ok := other.(*state2) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other2.State.Claims), nil +} + +func (s *state2) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state2) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state2) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state2) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state2) GetState() interface{} { + return &s.State +} + +func (s *state2) claims() (adt.Map, error) { + return adt2.AsMap(s.store, s.Claims) +} + +func (s *state2) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power2.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV2Claim(ci), nil +} + +func fromV2Claim(v2 power2.Claim) Claim { + return Claim{ + RawBytePower: v2.RawBytePower, + QualityAdjPower: v2.QualityAdjPower, + } +} diff --git a/chain/actors/builtin/power/v3.go b/chain/actors/builtin/power/v3.go new file mode 100644 index 00000000000..3dec3c63ef6 --- /dev/null +++ b/chain/actors/builtin/power/v3.go @@ -0,0 +1,187 @@ +package power + +import ( + "bytes" + + 
"github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + power3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/power" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store) (State, error) { + out := state3{store: store} + + s, err := power3.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state3 struct { + power3.State + store adt.Store +} + +func (s *state3) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state3) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. 
+func (s *state3) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state3) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power3.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state3) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state3) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FromV3FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state3) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state3) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state3) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power3.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state3) ClaimsChanged(other State) (bool, error) { + other3, ok := other.(*state3) + if !ok { + // treat an upgrade as a change, 
always + return true, nil + } + return !s.State.Claims.Equals(other3.State.Claims), nil +} + +func (s *state3) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state3) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state3) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state3) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state3) GetState() interface{} { + return &s.State +} + +func (s *state3) claims() (adt.Map, error) { + return adt3.AsMap(s.store, s.Claims, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power3.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV3Claim(ci), nil +} + +func fromV3Claim(v3 power3.Claim) Claim { + return Claim{ + RawBytePower: v3.RawBytePower, + QualityAdjPower: v3.QualityAdjPower, + } +} diff --git a/chain/actors/builtin/power/v4.go b/chain/actors/builtin/power/v4.go new file mode 100644 index 00000000000..b73eedf5a82 --- /dev/null +++ b/chain/actors/builtin/power/v4.go @@ -0,0 +1,187 @@ +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + power4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/power" + adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: 
store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store) (State, error) { + out := state4{store: store} + + s, err := power4.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state4 struct { + power4.State + store adt.Store +} + +func (s *state4) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state4) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. +func (s *state4) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state4) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power4.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state4) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state4) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FromV4FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state4) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state4) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := 
address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state4) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power4.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state4) ClaimsChanged(other State) (bool, error) { + other4, ok := other.(*state4) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other4.State.Claims), nil +} + +func (s *state4) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state4) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state4) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state4) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state4) GetState() interface{} { + return &s.State +} + +func (s *state4) claims() (adt.Map, error) { + return adt4.AsMap(s.store, s.Claims, builtin4.DefaultHamtBitwidth) +} + +func (s *state4) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power4.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV4Claim(ci), nil +} + +func fromV4Claim(v4 power4.Claim) Claim { + return Claim{ + RawBytePower: v4.RawBytePower, + QualityAdjPower: v4.QualityAdjPower, + } +} diff --git a/chain/actors/builtin/power/v5.go b/chain/actors/builtin/power/v5.go new file mode 100644 index 
00000000000..84b23a5777f --- /dev/null +++ b/chain/actors/builtin/power/v5.go @@ -0,0 +1,187 @@ +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + power5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/power" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + + s, err := power5.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state5 struct { + power5.State + store adt.Store +} + +func (s *state5) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state5) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. 
+func (s *state5) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state5) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power5.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state5) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state5) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FromV5FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state5) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state5) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state5) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power5.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state5) ClaimsChanged(other State) (bool, error) { + other5, ok := other.(*state5) + if !ok { + // treat an upgrade as a change, 
always + return true, nil + } + return !s.State.Claims.Equals(other5.State.Claims), nil +} + +func (s *state5) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state5) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state5) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state5) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} + +func (s *state5) claims() (adt.Map, error) { + return adt5.AsMap(s.store, s.Claims, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power5.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV5Claim(ci), nil +} + +func fromV5Claim(v5 power5.Claim) Claim { + return Claim{ + RawBytePower: v5.RawBytePower, + QualityAdjPower: v5.QualityAdjPower, + } +} diff --git a/chain/actors/builtin/reward/actor.go.template b/chain/actors/builtin/reward/actor.go.template new file mode 100644 index 00000000000..89cdddaeceb --- /dev/null +++ b/chain/actors/builtin/reward/actor.go.template @@ -0,0 +1,83 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" + "github.com/ipfs/go-cid" + "github.com/filecoin-project/lotus/chain/actors" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/cbor" +{{range .versions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" +{{end}} + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +) + +func init() { +{{range 
.versions}} + builtin.RegisterActorState(builtin{{.}}.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load{{.}}(store, root) + }) +{{end}}} + +var ( + Address = builtin{{.latestVersion}}.RewardActorAddr + Methods = builtin{{.latestVersion}}.MethodsReward +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + switch act.Code { +{{range .versions}} + case builtin{{.}}.RewardActorCodeID: + return load{{.}}(store, act.Head) +{{end}} + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actors.Version, currRealizedPower abi.StoragePower) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store, currRealizedPower) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.RewardActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + +type State interface { + cbor.Marshaler + + ThisEpochBaselinePower() (abi.StoragePower, error) + ThisEpochReward() (abi.StoragePower, error) + ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) + + EffectiveBaselinePower() (abi.StoragePower, error) + EffectiveNetworkTime() (abi.ChainEpoch, error) + + TotalStoragePowerReward() (abi.TokenAmount, error) + + CumsumBaseline() (abi.StoragePower, error) + CumsumRealized() (abi.StoragePower, error) + + InitialPledgeForPower(abi.StoragePower, abi.TokenAmount, *builtin.FilterEstimate, abi.TokenAmount) (abi.TokenAmount, error) + PreCommitDepositForPower(builtin.FilterEstimate, abi.StoragePower) (abi.TokenAmount, error) + GetState() interface{} +} + +type AwardBlockRewardParams = reward0.AwardBlockRewardParams diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go index 952ca270b9b..ebec85517fb 
100644 --- a/chain/actors/builtin/reward/reward.go +++ b/chain/actors/builtin/reward/reward.go @@ -2,43 +2,123 @@ package reward import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors" reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" "github.com/ipfs/go-cid" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/cbor" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" ) func init() { + builtin.RegisterActorState(builtin0.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load0(store, root) }) + builtin.RegisterActorState(builtin2.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + + builtin.RegisterActorState(builtin3.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) + + builtin.RegisterActorState(builtin4.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load4(store, root) + }) + + builtin.RegisterActorState(builtin5.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) } var ( - Address = builtin2.RewardActorAddr - Methods = builtin2.MethodsReward + Address = builtin5.RewardActorAddr + Methods = builtin5.MethodsReward ) -func Load(store adt.Store, act *types.Actor) (st State, err error) { +func Load(store adt.Store, act *types.Actor) (State, error) { switch 
act.Code { + case builtin0.RewardActorCodeID: return load0(store, act.Head) + case builtin2.RewardActorCodeID: return load2(store, act.Head) + + case builtin3.RewardActorCodeID: + return load3(store, act.Head) + + case builtin4.RewardActorCodeID: + return load4(store, act.Head) + + case builtin5.RewardActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version, currRealizedPower abi.StoragePower) (State, error) { + switch av { + + case actors.Version0: + return make0(store, currRealizedPower) + + case actors.Version2: + return make2(store, currRealizedPower) + + case actors.Version3: + return make3(store, currRealizedPower) + + case actors.Version4: + return make4(store, currRealizedPower) + + case actors.Version5: + return make5(store, currRealizedPower) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.RewardActorCodeID, nil + + case actors.Version2: + return builtin2.RewardActorCodeID, nil + + case actors.Version3: + return builtin3.RewardActorCodeID, nil + + case actors.Version4: + return builtin4.RewardActorCodeID, nil + + case actors.Version5: + return builtin5.RewardActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -56,6 +136,7 @@ type State interface { InitialPledgeForPower(abi.StoragePower, abi.TokenAmount, *builtin.FilterEstimate, abi.TokenAmount) (abi.TokenAmount, error) PreCommitDepositForPower(builtin.FilterEstimate, abi.StoragePower) (abi.TokenAmount, error) + GetState() interface{} } type AwardBlockRewardParams = reward0.AwardBlockRewardParams diff --git a/chain/actors/builtin/reward/state.go.template b/chain/actors/builtin/reward/state.go.template new file mode 100644 index 00000000000..2bc271cbbfa --- /dev/null +++ 
b/chain/actors/builtin/reward/state.go.template @@ -0,0 +1,113 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + miner{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/miner" + reward{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/reward" + smoothing{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/smoothing" +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state{{.v}}{store: store} + out.State = *reward{{.v}}.ConstructState(currRealizedPower) + return &out, nil +} + +type state{{.v}} struct { + reward{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state{{.v}}) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { +{{if (ge .v 2)}} + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil +{{else}} + return builtin.FromV0FilterEstimate(*s.State.ThisEpochRewardSmoothed), nil +{{end}} +} + +func (s *state{{.v}}) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state{{.v}}) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.{{if (ge .v 2)}}TotalStoragePowerReward{{else}}TotalMined{{end}}, nil +} + +func (s *state{{.v}}) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, 
nil +} + +func (s *state{{.v}}) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state{{.v}}) CumsumBaseline() (reward{{.v}}.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state{{.v}}) CumsumRealized() (reward{{.v}}.Spacetime, error) { + return s.State.CumsumRealized, nil +} +{{if (ge .v 2)}} +func (s *state{{.v}}) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner{{.v}}.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing{{.v}}.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} +{{else}} +func (s *state0) InitialPledgeForPower(sectorWeight abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner0.InitialPledgeForPower( + sectorWeight, + s.State.ThisEpochBaselinePower, + networkTotalPledge, + s.State.ThisEpochRewardSmoothed, + &smoothing0.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply), nil +} +{{end}} +func (s *state{{.v}}) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner{{.v}}.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + {{if (le .v 0)}}&{{end}}smoothing{{.v}}.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/reward/v0.go b/chain/actors/builtin/reward/v0.go index 
6a6e6d12e9d..cd098c151e8 100644 --- a/chain/actors/builtin/reward/v0.go +++ b/chain/actors/builtin/reward/v0.go @@ -23,17 +23,25 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state0{store: store} + out.State = *reward0.ConstructState(currRealizedPower) + return &out, nil +} + type state0 struct { reward0.State store adt.Store } -func (s *state0) ThisEpochReward() (abi.StoragePower, error) { +func (s *state0) ThisEpochReward() (abi.TokenAmount, error) { return s.State.ThisEpochReward, nil } func (s *state0) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + return builtin.FromV0FilterEstimate(*s.State.ThisEpochRewardSmoothed), nil + } func (s *state0) ThisEpochBaselinePower() (abi.StoragePower, error) { @@ -52,11 +60,11 @@ func (s *state0) EffectiveNetworkTime() (abi.ChainEpoch, error) { return s.State.EffectiveNetworkTime, nil } -func (s *state0) CumsumBaseline() (abi.StoragePower, error) { +func (s *state0) CumsumBaseline() (reward0.Spacetime, error) { return s.State.CumsumBaseline, nil } -func (s *state0) CumsumRealized() (abi.StoragePower, error) { +func (s *state0) CumsumRealized() (reward0.Spacetime, error) { return s.State.CumsumRealized, nil } @@ -81,3 +89,7 @@ func (s *state0) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, }, sectorWeight), nil } + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/reward/v2.go b/chain/actors/builtin/reward/v2.go index b7cb4910278..08e9a7bc39a 100644 --- a/chain/actors/builtin/reward/v2.go +++ b/chain/actors/builtin/reward/v2.go @@ -23,20 +23,28 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state2{store: store} + out.State = *reward2.ConstructState(currRealizedPower) + return &out, nil +} + type state2 
struct { reward2.State store adt.Store } -func (s *state2) ThisEpochReward() (abi.StoragePower, error) { +func (s *state2) ThisEpochReward() (abi.TokenAmount, error) { return s.State.ThisEpochReward, nil } func (s *state2) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate{ PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, }, nil + } func (s *state2) ThisEpochBaselinePower() (abi.StoragePower, error) { @@ -55,11 +63,11 @@ func (s *state2) EffectiveNetworkTime() (abi.ChainEpoch, error) { return s.State.EffectiveNetworkTime, nil } -func (s *state2) CumsumBaseline() (abi.StoragePower, error) { +func (s *state2) CumsumBaseline() (reward2.Spacetime, error) { return s.State.CumsumBaseline, nil } -func (s *state2) CumsumRealized() (abi.StoragePower, error) { +func (s *state2) CumsumRealized() (reward2.Spacetime, error) { return s.State.CumsumRealized, nil } @@ -84,3 +92,7 @@ func (s *state2) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, }, sectorWeight), nil } + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/reward/v3.go b/chain/actors/builtin/reward/v3.go new file mode 100644 index 00000000000..fd9fa56e27e --- /dev/null +++ b/chain/actors/builtin/reward/v3.go @@ -0,0 +1,98 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + reward3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/reward" + smoothing3 "github.com/filecoin-project/specs-actors/v3/actors/util/smoothing" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := 
store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state3{store: store} + out.State = *reward3.ConstructState(currRealizedPower) + return &out, nil +} + +type state3 struct { + reward3.State + store adt.Store +} + +func (s *state3) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state3) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state3) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state3) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state3) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state3) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state3) CumsumBaseline() (reward3.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state3) CumsumRealized() (reward3.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state3) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner3.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing3.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state3) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, 
sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner3.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing3.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/reward/v4.go b/chain/actors/builtin/reward/v4.go new file mode 100644 index 00000000000..310ca04e8df --- /dev/null +++ b/chain/actors/builtin/reward/v4.go @@ -0,0 +1,98 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner" + reward4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/reward" + smoothing4 "github.com/filecoin-project/specs-actors/v4/actors/util/smoothing" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state4{store: store} + out.State = *reward4.ConstructState(currRealizedPower) + return &out, nil +} + +type state4 struct { + reward4.State + store adt.Store +} + +func (s *state4) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state4) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state4) ThisEpochBaselinePower() (abi.StoragePower, error) { + return 
s.State.ThisEpochBaselinePower, nil +} + +func (s *state4) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state4) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state4) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state4) CumsumBaseline() (reward4.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state4) CumsumRealized() (reward4.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state4) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner4.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing4.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state4) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner4.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing4.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/reward/v5.go b/chain/actors/builtin/reward/v5.go new file mode 100644 index 00000000000..7200f7d11af --- /dev/null +++ b/chain/actors/builtin/reward/v5.go @@ -0,0 +1,98 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + miner5 
"github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + reward5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/reward" + smoothing5 "github.com/filecoin-project/specs-actors/v5/actors/util/smoothing" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state5{store: store} + out.State = *reward5.ConstructState(currRealizedPower) + return &out, nil +} + +type state5 struct { + reward5.State + store adt.Store +} + +func (s *state5) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state5) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state5) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state5) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state5) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state5) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state5) CumsumBaseline() (reward5.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state5) CumsumRealized() (reward5.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state5) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return 
miner5.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing5.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state5) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner5.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing5.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/system/actor.go.template b/chain/actors/builtin/system/actor.go.template new file mode 100644 index 00000000000..9253199709c --- /dev/null +++ b/chain/actors/builtin/system/actor.go.template @@ -0,0 +1,41 @@ +package system + +import ( + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors" + "golang.org/x/xerrors" + "github.com/ipfs/go-cid" + +{{range .versions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" +{{end}} +) + +var ( + Address = builtin{{.latestVersion}}.SystemActorAddr +) + +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.SystemActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + +type State interface { + GetState() interface{} +} diff --git a/chain/actors/builtin/system/state.go.template b/chain/actors/builtin/system/state.go.template new file mode 
100644 index 00000000000..fa644f8c755 --- /dev/null +++ b/chain/actors/builtin/system/state.go.template @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + system{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/system" +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + out.State = system{{.v}}.State{} + return &out, nil +} + +type state{{.v}} struct { + system{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} \ No newline at end of file diff --git a/chain/actors/builtin/system/system.go b/chain/actors/builtin/system/system.go new file mode 100644 index 00000000000..289fb4d5de6 --- /dev/null +++ b/chain/actors/builtin/system/system.go @@ -0,0 +1,71 @@ +package system + +import ( + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" +) + +var ( + Address = builtin5.SystemActorAddr +) + +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { + + case actors.Version0: + return make0(store) + + case actors.Version2: + return make2(store) + + case actors.Version3: + return make3(store) + + case actors.Version4: + return make4(store) + + 
case actors.Version5: + return make5(store) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.SystemActorCodeID, nil + + case actors.Version2: + return builtin2.SystemActorCodeID, nil + + case actors.Version3: + return builtin3.SystemActorCodeID, nil + + case actors.Version4: + return builtin4.SystemActorCodeID, nil + + case actors.Version5: + return builtin5.SystemActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + +type State interface { + GetState() interface{} +} diff --git a/chain/actors/builtin/system/v0.go b/chain/actors/builtin/system/v0.go new file mode 100644 index 00000000000..64c6f53d3cf --- /dev/null +++ b/chain/actors/builtin/system/v0.go @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + system0 "github.com/filecoin-project/specs-actors/actors/builtin/system" +) + +var _ State = (*state0)(nil) + +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store) (State, error) { + out := state0{store: store} + out.State = system0.State{} + return &out, nil +} + +type state0 struct { + system0.State + store adt.Store +} + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/system/v2.go b/chain/actors/builtin/system/v2.go new file mode 100644 index 00000000000..eb540891cc3 --- /dev/null +++ b/chain/actors/builtin/system/v2.go @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + system2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/system" +) + +var _ State = (*state2)(nil) + +func load2(store 
adt.Store, root cid.Cid) (State, error) { + out := state2{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store) (State, error) { + out := state2{store: store} + out.State = system2.State{} + return &out, nil +} + +type state2 struct { + system2.State + store adt.Store +} + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/system/v3.go b/chain/actors/builtin/system/v3.go new file mode 100644 index 00000000000..5b04e189ee6 --- /dev/null +++ b/chain/actors/builtin/system/v3.go @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + system3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/system" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store) (State, error) { + out := state3{store: store} + out.State = system3.State{} + return &out, nil +} + +type state3 struct { + system3.State + store adt.Store +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/system/v4.go b/chain/actors/builtin/system/v4.go new file mode 100644 index 00000000000..b6c92497884 --- /dev/null +++ b/chain/actors/builtin/system/v4.go @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + system4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/system" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store) 
(State, error) { + out := state4{store: store} + out.State = system4.State{} + return &out, nil +} + +type state4 struct { + system4.State + store adt.Store +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/system/v5.go b/chain/actors/builtin/system/v5.go new file mode 100644 index 00000000000..77d2a8478be --- /dev/null +++ b/chain/actors/builtin/system/v5.go @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + system5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/system" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = system5.State{} + return &out, nil +} + +type state5 struct { + system5.State + store adt.Store +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/actor.go.template b/chain/actors/builtin/verifreg/actor.go.template new file mode 100644 index 00000000000..9ea8e155aec --- /dev/null +++ b/chain/actors/builtin/verifreg/actor.go.template @@ -0,0 +1,75 @@ +package verifreg + +import ( + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-state-types/cbor" +{{range .versions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" +{{end}} + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/types" +) + +func init() { +{{range .versions}} + 
builtin.RegisterActorState(builtin{{.}}.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load{{.}}(store, root) + }) +{{end}} +} + +var ( + Address = builtin{{.latestVersion}}.VerifiedRegistryActorAddr + Methods = builtin{{.latestVersion}}.MethodsVerifiedRegistry +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + switch act.Code { +{{range .versions}} + case builtin{{.}}.VerifiedRegistryActorCodeID: + return load{{.}}(store, act.Head) +{{end}} + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +func MakeState(store adt.Store, av actors.Version, rootKeyAddress address.Address) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store, rootKeyAddress) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.VerifiedRegistryActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + + +type State interface { + cbor.Marshaler + + RootKey() (address.Address, error) + VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error) + VerifierDataCap(address.Address) (bool, abi.StoragePower, error) + ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error + ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error + GetState() interface{} +} diff --git a/chain/actors/builtin/verifreg/state.go.template b/chain/actors/builtin/verifreg/state.go.template new file mode 100644 index 00000000000..b59cfb6289d --- /dev/null +++ b/chain/actors/builtin/verifreg/state.go.template @@ -0,0 +1,82 @@ +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + 
"github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + +{{if (ge .v 3)}} builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" +{{end}} verifreg{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/verifreg" + adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state{{.v}}{store: store} + {{if (le .v 2)}} + em, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *verifreg{{.v}}.ConstructState(em, rootKeyAddress) + {{else}} + s, err := verifreg{{.v}}.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + {{end}} + return &out, nil +} + +type state{{.v}} struct { + verifreg{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state{{.v}}) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version{{.v}}, s.verifiedClients, addr) +} + +func (s *state{{.v}}) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version{{.v}}, s.verifiers, addr) +} + +func (s *state{{.v}}) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version{{.v}}, s.verifiers, cb) +} + +func (s *state{{.v}}) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version{{.v}}, s.verifiedClients, cb) +} + +func (s 
*state{{.v}}) verifiedClients() (adt.Map, error) { + return adt{{.v}}.AsMap(s.store, s.VerifiedClients{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) +} + +func (s *state{{.v}}) verifiers() (adt.Map, error) { + return adt{{.v}}.AsMap(s.store, s.Verifiers{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} \ No newline at end of file diff --git a/chain/actors/builtin/verifreg/util.go b/chain/actors/builtin/verifreg/util.go index 4136c0c3077..16e50c50a77 100644 --- a/chain/actors/builtin/verifreg/util.go +++ b/chain/actors/builtin/verifreg/util.go @@ -6,16 +6,21 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" - "github.com/ipfs/go-cid" "golang.org/x/xerrors" ) -func getDataCap(store adt.Store, ver actors.Version, root cid.Cid, addr address.Address) (bool, abi.StoragePower, error) { +// taking this as a function instead of asking the caller to call it helps reduce some of the error +// checking boilerplate. +// +// "go made me do it" +type rootFunc func() (adt.Map, error) + +// Assumes that the bitwidth for v3 HAMTs is the DefaultHamtBitwidth +func getDataCap(store adt.Store, ver actors.Version, root rootFunc, addr address.Address) (bool, abi.StoragePower, error) { if addr.Protocol() != address.ID { return false, big.Zero(), xerrors.Errorf("can only look up ID addresses") } - - vh, err := adt.AsMap(store, root, ver) + vh, err := root() if err != nil { return false, big.Zero(), xerrors.Errorf("loading verifreg: %w", err) } @@ -30,8 +35,9 @@ func getDataCap(store adt.Store, ver actors.Version, root cid.Cid, addr address. 
return true, dcap, nil } -func forEachCap(store adt.Store, ver actors.Version, root cid.Cid, cb func(addr address.Address, dcap abi.StoragePower) error) error { - vh, err := adt.AsMap(store, root, ver) +// Assumes that the bitwidth for v3 HAMTs is the DefaultHamtBitwidth +func forEachCap(store adt.Store, ver actors.Version, root rootFunc, cb func(addr address.Address, dcap abi.StoragePower) error) error { + vh, err := root() if err != nil { return xerrors.Errorf("loading verified clients: %w", err) } diff --git a/chain/actors/builtin/verifreg/v0.go b/chain/actors/builtin/verifreg/v0.go index 64def470695..e70b0e3c92d 100644 --- a/chain/actors/builtin/verifreg/v0.go +++ b/chain/actors/builtin/verifreg/v0.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" ) var _ State = (*state0)(nil) @@ -22,6 +23,19 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state0{store: store} + + em, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *verifreg0.ConstructState(em, rootKeyAddress) + + return &out, nil +} + type state0 struct { verifreg0.State store adt.Store @@ -32,17 +46,29 @@ func (s *state0) RootKey() (address.Address, error) { } func (s *state0) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { - return getDataCap(s.store, actors.Version0, s.State.VerifiedClients, addr) + return getDataCap(s.store, actors.Version0, s.verifiedClients, addr) } func (s *state0) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { - return getDataCap(s.store, actors.Version0, s.State.Verifiers, addr) + return getDataCap(s.store, actors.Version0, s.verifiers, addr) } func (s *state0) ForEachVerifier(cb func(addr 
address.Address, dcap abi.StoragePower) error) error { - return forEachCap(s.store, actors.Version0, s.State.Verifiers, cb) + return forEachCap(s.store, actors.Version0, s.verifiers, cb) } func (s *state0) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { - return forEachCap(s.store, actors.Version0, s.State.VerifiedClients, cb) + return forEachCap(s.store, actors.Version0, s.verifiedClients, cb) +} + +func (s *state0) verifiedClients() (adt.Map, error) { + return adt0.AsMap(s.store, s.VerifiedClients) +} + +func (s *state0) verifiers() (adt.Map, error) { + return adt0.AsMap(s.store, s.Verifiers) +} + +func (s *state0) GetState() interface{} { + return &s.State } diff --git a/chain/actors/builtin/verifreg/v2.go b/chain/actors/builtin/verifreg/v2.go index 5ee3bad05d7..0bcbe02121d 100644 --- a/chain/actors/builtin/verifreg/v2.go +++ b/chain/actors/builtin/verifreg/v2.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg" + adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" ) var _ State = (*state2)(nil) @@ -22,6 +23,19 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state2{store: store} + + em, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *verifreg2.ConstructState(em, rootKeyAddress) + + return &out, nil +} + type state2 struct { verifreg2.State store adt.Store @@ -32,17 +46,29 @@ func (s *state2) RootKey() (address.Address, error) { } func (s *state2) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { - return getDataCap(s.store, actors.Version2, s.State.VerifiedClients, addr) + return getDataCap(s.store, actors.Version2, s.verifiedClients, addr) } func (s *state2) VerifierDataCap(addr address.Address) (bool, 
abi.StoragePower, error) { - return getDataCap(s.store, actors.Version2, s.State.Verifiers, addr) + return getDataCap(s.store, actors.Version2, s.verifiers, addr) } func (s *state2) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { - return forEachCap(s.store, actors.Version2, s.State.Verifiers, cb) + return forEachCap(s.store, actors.Version2, s.verifiers, cb) } func (s *state2) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { - return forEachCap(s.store, actors.Version2, s.State.VerifiedClients, cb) + return forEachCap(s.store, actors.Version2, s.verifiedClients, cb) +} + +func (s *state2) verifiedClients() (adt.Map, error) { + return adt2.AsMap(s.store, s.VerifiedClients) +} + +func (s *state2) verifiers() (adt.Map, error) { + return adt2.AsMap(s.store, s.Verifiers) +} + +func (s *state2) GetState() interface{} { + return &s.State } diff --git a/chain/actors/builtin/verifreg/v3.go b/chain/actors/builtin/verifreg/v3.go new file mode 100644 index 00000000000..32003ca3a30 --- /dev/null +++ b/chain/actors/builtin/verifreg/v3.go @@ -0,0 +1,75 @@ +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + verifreg3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/verifreg" + adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state3{store: store} + + s, err := 
verifreg3.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state3 struct { + verifreg3.State + store adt.Store +} + +func (s *state3) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state3) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version3, s.verifiedClients, addr) +} + +func (s *state3) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version3, s.verifiers, addr) +} + +func (s *state3) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version3, s.verifiers, cb) +} + +func (s *state3) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version3, s.verifiedClients, cb) +} + +func (s *state3) verifiedClients() (adt.Map, error) { + return adt3.AsMap(s.store, s.VerifiedClients, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) verifiers() (adt.Map, error) { + return adt3.AsMap(s.store, s.Verifiers, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/v4.go b/chain/actors/builtin/verifreg/v4.go new file mode 100644 index 00000000000..b752e747bb3 --- /dev/null +++ b/chain/actors/builtin/verifreg/v4.go @@ -0,0 +1,75 @@ +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg" + adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" +) + +var _ 
State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state4{store: store} + + s, err := verifreg4.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state4 struct { + verifreg4.State + store adt.Store +} + +func (s *state4) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state4) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version4, s.verifiedClients, addr) +} + +func (s *state4) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version4, s.verifiers, addr) +} + +func (s *state4) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version4, s.verifiers, cb) +} + +func (s *state4) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version4, s.verifiedClients, cb) +} + +func (s *state4) verifiedClients() (adt.Map, error) { + return adt4.AsMap(s.store, s.VerifiedClients, builtin4.DefaultHamtBitwidth) +} + +func (s *state4) verifiers() (adt.Map, error) { + return adt4.AsMap(s.store, s.Verifiers, builtin4.DefaultHamtBitwidth) +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/v5.go b/chain/actors/builtin/verifreg/v5.go new file mode 100644 index 00000000000..6fefd711540 --- /dev/null +++ b/chain/actors/builtin/verifreg/v5.go @@ -0,0 +1,75 @@ +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + verifreg5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/verifreg" + adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state5{store: store} + + s, err := verifreg5.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state5 struct { + verifreg5.State + store adt.Store +} + +func (s *state5) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state5) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version5, s.verifiedClients, addr) +} + +func (s *state5) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version5, s.verifiers, addr) +} + +func (s *state5) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version5, s.verifiers, cb) +} + +func (s *state5) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version5, s.verifiedClients, cb) +} + +func (s *state5) verifiedClients() (adt.Map, error) { + return adt5.AsMap(s.store, s.VerifiedClients, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) verifiers() (adt.Map, error) { + return adt5.AsMap(s.store, s.Verifiers, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) GetState() interface{} { + return 
&s.State +} diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index a4468d8a0d2..88104ad6955 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -1,44 +1,126 @@ package verifreg import ( - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" ) func init() { + builtin.RegisterActorState(builtin0.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load0(store, root) }) + builtin.RegisterActorState(builtin2.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load2(store, root) }) + + builtin.RegisterActorState(builtin3.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load3(store, root) + }) + + builtin.RegisterActorState(builtin4.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load4(store, root) + }) + + builtin.RegisterActorState(builtin5.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load5(store, root) + }) + } var ( - Address = 
builtin2.VerifiedRegistryActorAddr - Methods = builtin2.MethodsVerifiedRegistry + Address = builtin5.VerifiedRegistryActorAddr + Methods = builtin5.MethodsVerifiedRegistry ) func Load(store adt.Store, act *types.Actor) (State, error) { switch act.Code { + case builtin0.VerifiedRegistryActorCodeID: return load0(store, act.Head) + case builtin2.VerifiedRegistryActorCodeID: return load2(store, act.Head) + + case builtin3.VerifiedRegistryActorCodeID: + return load3(store, act.Head) + + case builtin4.VerifiedRegistryActorCodeID: + return load4(store, act.Head) + + case builtin5.VerifiedRegistryActorCodeID: + return load5(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version, rootKeyAddress address.Address) (State, error) { + switch av { + + case actors.Version0: + return make0(store, rootKeyAddress) + + case actors.Version2: + return make2(store, rootKeyAddress) + + case actors.Version3: + return make3(store, rootKeyAddress) + + case actors.Version4: + return make4(store, rootKeyAddress) + + case actors.Version5: + return make5(store, rootKeyAddress) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.VerifiedRegistryActorCodeID, nil + + case actors.Version2: + return builtin2.VerifiedRegistryActorCodeID, nil + + case actors.Version3: + return builtin3.VerifiedRegistryActorCodeID, nil + + case actors.Version4: + return builtin4.VerifiedRegistryActorCodeID, nil + + case actors.Version5: + return builtin5.VerifiedRegistryActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -47,4 +129,5 @@ type State interface { VerifierDataCap(address.Address) (bool, abi.StoragePower, error) ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error 
ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error + GetState() interface{} } diff --git a/chain/actors/params.go b/chain/actors/params.go index e14dcafc9ff..6dc0b1084db 100644 --- a/chain/actors/params.go +++ b/chain/actors/params.go @@ -3,6 +3,8 @@ package actors import ( "bytes" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/chain/actors/aerrors" cbg "github.com/whyrusleeping/cbor-gen" ) @@ -11,7 +13,7 @@ func SerializeParams(i cbg.CBORMarshaler) ([]byte, aerrors.ActorError) { buf := new(bytes.Buffer) if err := i.MarshalCBOR(buf); err != nil { // TODO: shouldnt this be a fatal error? - return nil, aerrors.Absorb(err, 1, "failed to encode parameter") + return nil, aerrors.Absorb(err, exitcode.ErrSerialization, "failed to encode parameter") } return buf.Bytes(), nil } diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index c1a971db51e..c06c85d380c 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -3,45 +3,105 @@ package policy import ( "sort" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/chain/actors" + market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" - paych2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/paych" verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg" + + builtin3 
"github.com/filecoin-project/specs-actors/v3/actors/builtin" + market3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/market" + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + verifreg3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/verifreg" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market" + miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner" + verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + verifreg5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/verifreg" + + paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych" ) const ( - ChainFinality = miner0.ChainFinality - SealRandomnessLookback = ChainFinality - PaychSettleDelay = paych2.SettleDelay + ChainFinality = miner5.ChainFinality + SealRandomnessLookback = ChainFinality + PaychSettleDelay = paych5.SettleDelay + MaxPreCommitRandomnessLookback = builtin5.EpochsInDay + SealRandomnessLookback ) // SetSupportedProofTypes sets supported proof types, across all actor versions. // This should only be used for testing. func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { - newTypes := make(map[abi.RegisteredSealProof]struct{}, len(types)) - for _, t := range types { - newTypes[t] = struct{}{} - } - // Set for all miner versions. 
- miner0.SupportedProofTypes = newTypes - miner2.SupportedProofTypes = newTypes + + miner0.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types)) + + miner2.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) + miner2.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + + miner3.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner3.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) + miner3.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + + miner4.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner4.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) + miner4.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + + miner5.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + + AddSupportedProofTypes(types...) } // AddSupportedProofTypes sets supported proof types, across all actor versions. // This should only be used for testing. func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { for _, t := range types { + if t >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 { + panic("must specify v1 proof types only") + } // Set for all miner versions. 
+ miner0.SupportedProofTypes[t] = struct{}{} - miner2.SupportedProofTypes[t] = struct{}{} + + miner2.PreCommitSealProofTypesV0[t] = struct{}{} + miner2.PreCommitSealProofTypesV7[t] = struct{}{} + miner2.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + miner2.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + + miner3.PreCommitSealProofTypesV0[t] = struct{}{} + miner3.PreCommitSealProofTypesV7[t] = struct{}{} + miner3.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + miner3.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + + miner4.PreCommitSealProofTypesV0[t] = struct{}{} + miner4.PreCommitSealProofTypesV7[t] = struct{}{} + miner4.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + miner4.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + + miner5.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + wpp, err := t.RegisteredWindowPoStProof() + if err != nil { + // Fine to panic, this is a test-only method + panic(err) + } + + miner5.WindowPoStProofTypes[wpp] = struct{}{} + } } @@ -49,66 +109,187 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { // actors versions. Use for testing. func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { // Set for all miner versions. + miner0.PreCommitChallengeDelay = delay + miner2.PreCommitChallengeDelay = delay + + miner3.PreCommitChallengeDelay = delay + + miner4.PreCommitChallengeDelay = delay + + miner5.PreCommitChallengeDelay = delay + } // TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. 
func GetPreCommitChallengeDelay() abi.ChainEpoch { - return miner0.PreCommitChallengeDelay + return miner5.PreCommitChallengeDelay } // SetConsensusMinerMinPower sets the minimum power of an individual miner must // meet for leader election, across all actor versions. This should only be used // for testing. func SetConsensusMinerMinPower(p abi.StoragePower) { + power0.ConsensusMinerMinPower = p + for _, policy := range builtin2.SealProofPolicies { policy.ConsensusMinerMinPower = p } + + for _, policy := range builtin3.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + + for _, policy := range builtin4.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + + for _, policy := range builtin5.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + } // SetMinVerifiedDealSize sets the minimum size of a verified deal. This should // only be used for testing. func SetMinVerifiedDealSize(size abi.StoragePower) { + verifreg0.MinVerifiedDealSize = size + verifreg2.MinVerifiedDealSize = size + + verifreg3.MinVerifiedDealSize = size + + verifreg4.MinVerifiedDealSize = size + + verifreg5.MinVerifiedDealSize = size + } func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch { switch ver { + case actors.Version0: + return miner0.MaxSealDuration[t] + case actors.Version2: + return miner2.MaxProveCommitDuration[t] + + case actors.Version3: + + return miner3.MaxProveCommitDuration[t] + + case actors.Version4: + + return miner4.MaxProveCommitDuration[t] + + case actors.Version5: + + return miner5.MaxProveCommitDuration[t] + default: panic("unsupported actors version") } } +// SetProviderCollateralSupplyTarget sets the percentage of normalized circulating +// supply that must be covered by provider collateral in a deal. This should +// only be used for testing. 
+func SetProviderCollateralSupplyTarget(num, denom big.Int) { + + market2.ProviderCollateralSupplyTarget = builtin2.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market3.ProviderCollateralSupplyTarget = builtin3.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market4.ProviderCollateralSupplyTarget = builtin4.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market5.ProviderCollateralSupplyTarget = builtin5.BigFrac{ + Numerator: num, + Denominator: denom, + } + +} + func DealProviderCollateralBounds( size abi.PaddedPieceSize, verified bool, rawBytePower, qaPower, baselinePower abi.StoragePower, circulatingFil abi.TokenAmount, nwVer network.Version, ) (min, max abi.TokenAmount) { switch actors.VersionForNetwork(nwVer) { + case actors.Version0: + return market0.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer) + case actors.Version2: + return market2.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + + case actors.Version3: + + return market3.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + + case actors.Version4: + + return market4.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + + case actors.Version5: + + return market5.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + default: - panic("unsupported network version") + panic("unsupported actors version") } } +func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) { + return market5.DealDurationBounds(pieceSize) +} + // Sets the challenge window and scales the proving period to match (such that // there are always 48 challenge windows in a proving period). 
func SetWPoStChallengeWindow(period abi.ChainEpoch) { + miner0.WPoStChallengeWindow = period miner0.WPoStProvingPeriod = period * abi.ChainEpoch(miner0.WPoStPeriodDeadlines) miner2.WPoStChallengeWindow = period miner2.WPoStProvingPeriod = period * abi.ChainEpoch(miner2.WPoStPeriodDeadlines) + + miner3.WPoStChallengeWindow = period + miner3.WPoStProvingPeriod = period * abi.ChainEpoch(miner3.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. + miner3.WPoStDisputeWindow = period * 30 + + miner4.WPoStChallengeWindow = period + miner4.WPoStProvingPeriod = period * abi.ChainEpoch(miner4.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. + miner4.WPoStDisputeWindow = period * 30 + + miner5.WPoStChallengeWindow = period + miner5.WPoStProvingPeriod = period * abi.ChainEpoch(miner5.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. + miner5.WPoStDisputeWindow = period * 30 + } func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { @@ -116,26 +297,27 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { return 10 } + // NOTE: if this ever changes, adjust it in a (*Miner).mineOne() logline as well return ChainFinality } func GetMaxSectorExpirationExtension() abi.ChainEpoch { - return miner0.MaxSectorExpirationExtension + return miner5.MaxSectorExpirationExtension } -// TODO: we'll probably need to abstract over this better in the future. 
-func GetMaxPoStPartitions(p abi.RegisteredPoStProof) (int, error) { - sectorsPerPart, err := builtin2.PoStProofWindowPoStPartitionSectors(p) +func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) { + sectorsPerPart, err := builtin5.PoStProofWindowPoStPartitionSectors(p) if err != nil { return 0, err } - return int(miner2.AddressedSectorsMax / sectorsPerPart), nil + maxSectors := uint64(GetAddressedSectorsMax(nv)) + return int(maxSectors / sectorsPerPart), nil } func GetDefaultSectorSize() abi.SectorSize { - // supported proof types are the same across versions. - szs := make([]abi.SectorSize, 0, len(miner2.SupportedProofTypes)) - for spt := range miner2.SupportedProofTypes { + // supported sector sizes are the same across versions. + szs := make([]abi.SectorSize, 0, len(miner5.PreCommitSealProofTypesV8)) + for spt := range miner5.PreCommitSealProofTypesV8 { ss, err := spt.SectorSize() if err != nil { panic(err) @@ -150,3 +332,95 @@ func GetDefaultSectorSize() abi.SectorSize { return szs[0] } + +func GetDefaultAggregationProof() abi.RegisteredAggregationProof { + return abi.RegisteredAggregationProof_SnarkPackV1 +} + +func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch { + if nwVer <= network.Version10 { + return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime + } + + return builtin5.SealProofPoliciesV11[proof].SectorMaxLifetime +} + +func GetAddressedSectorsMax(nwVer network.Version) int { + switch actors.VersionForNetwork(nwVer) { + + case actors.Version0: + return miner0.AddressedSectorsMax + + case actors.Version2: + return miner2.AddressedSectorsMax + + case actors.Version3: + return miner3.AddressedSectorsMax + + case actors.Version4: + return miner4.AddressedSectorsMax + + case actors.Version5: + return miner5.AddressedSectorsMax + + default: + panic("unsupported network version") + } +} + +func GetDeclarationsMax(nwVer network.Version) int { + switch 
actors.VersionForNetwork(nwVer) { + + case actors.Version0: + + // TODO: Should we instead panic here since the concept doesn't exist yet? + return miner0.AddressedPartitionsMax + + case actors.Version2: + + return miner2.DeclarationsMax + + case actors.Version3: + + return miner3.DeclarationsMax + + case actors.Version4: + + return miner4.DeclarationsMax + + case actors.Version5: + + return miner5.DeclarationsMax + + default: + panic("unsupported network version") + } +} + +func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount { + switch actors.VersionForNetwork(nwVer) { + + case actors.Version0: + + return big.Zero() + + case actors.Version2: + + return big.Zero() + + case actors.Version3: + + return big.Zero() + + case actors.Version4: + + return big.Zero() + + case actors.Version5: + + return miner5.AggregateNetworkFee(aggregateSize, baseFee) + + default: + panic("unsupported network version") + } +} diff --git a/chain/actors/policy/policy.go.template b/chain/actors/policy/policy.go.template new file mode 100644 index 00000000000..3257feffd41 --- /dev/null +++ b/chain/actors/policy/policy.go.template @@ -0,0 +1,279 @@ +package policy + +import ( + "sort" + + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/actors" + + {{range .versions}} + {{if (ge . 2)}} builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" {{end}} + market{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/market" + miner{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/miner" + verifreg{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/verifreg" + {{if (eq . 
0)}} power{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/power" {{end}} + {{end}} + + paych{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/paych" +) + +const ( + ChainFinality = miner{{.latestVersion}}.ChainFinality + SealRandomnessLookback = ChainFinality + PaychSettleDelay = paych{{.latestVersion}}.SettleDelay + MaxPreCommitRandomnessLookback = builtin{{.latestVersion}}.EpochsInDay + SealRandomnessLookback +) + +// SetSupportedProofTypes sets supported proof types, across all actor versions. +// This should only be used for testing. +func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { + {{range .versions}} + {{if (eq . 0)}} + miner{{.}}.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types)) + {{else if (le . 4)}} + miner{{.}}.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner{{.}}.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) + miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + {{else}} + miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + {{end}} + {{end}} + + AddSupportedProofTypes(types...) +} + +// AddSupportedProofTypes sets supported proof types, across all actor versions. +// This should only be used for testing. +func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { + for _, t := range types { + if t >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 { + panic("must specify v1 proof types only") + } + // Set for all miner versions. + + {{range .versions}} + {{if (eq . 0)}} + miner{{.}}.SupportedProofTypes[t] = struct{}{} + {{else if (le . 
4)}} + miner{{.}}.PreCommitSealProofTypesV0[t] = struct{}{} + miner{{.}}.PreCommitSealProofTypesV7[t] = struct{}{} + miner{{.}}.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + {{else}} + miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + wpp, err := t.RegisteredWindowPoStProof() + if err != nil { + // Fine to panic, this is a test-only method + panic(err) + } + + miner{{.}}.WindowPoStProofTypes[wpp] = struct{}{} + {{end}} + {{end}} + } +} + +// SetPreCommitChallengeDelay sets the pre-commit challenge delay across all +// actors versions. Use for testing. +func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { + // Set for all miner versions. + {{range .versions}} + miner{{.}}.PreCommitChallengeDelay = delay + {{end}} +} + +// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. +func GetPreCommitChallengeDelay() abi.ChainEpoch { + return miner{{.latestVersion}}.PreCommitChallengeDelay +} + +// SetConsensusMinerMinPower sets the minimum power of an individual miner must +// meet for leader election, across all actor versions. This should only be used +// for testing. +func SetConsensusMinerMinPower(p abi.StoragePower) { + {{range .versions}} + {{if (eq . 0)}} + power{{.}}.ConsensusMinerMinPower = p + {{else if (eq . 2)}} + for _, policy := range builtin{{.}}.SealProofPolicies { + policy.ConsensusMinerMinPower = p + } + {{else}} + for _, policy := range builtin{{.}}.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + {{end}} + {{end}} +} + +// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should +// only be used for testing. 
+func SetMinVerifiedDealSize(size abi.StoragePower) { + {{range .versions}} + verifreg{{.}}.MinVerifiedDealSize = size + {{end}} +} + +func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch { + switch ver { + {{range .versions}} + case actors.Version{{.}}: + {{if (eq . 0)}} + return miner{{.}}.MaxSealDuration[t] + {{else}} + return miner{{.}}.MaxProveCommitDuration[t] + {{end}} + {{end}} + default: + panic("unsupported actors version") + } +} + +// SetProviderCollateralSupplyTarget sets the percentage of normalized circulating +// supply that must be covered by provider collateral in a deal. This should +// only be used for testing. +func SetProviderCollateralSupplyTarget(num, denom big.Int) { +{{range .versions}} + {{if (ge . 2)}} + market{{.}}.ProviderCollateralSupplyTarget = builtin{{.}}.BigFrac{ + Numerator: num, + Denominator: denom, + } + {{end}} +{{end}} +} + +func DealProviderCollateralBounds( + size abi.PaddedPieceSize, verified bool, + rawBytePower, qaPower, baselinePower abi.StoragePower, + circulatingFil abi.TokenAmount, nwVer network.Version, +) (min, max abi.TokenAmount) { + switch actors.VersionForNetwork(nwVer) { + {{range .versions}} + case actors.Version{{.}}: + {{if (eq . 0)}} + return market{{.}}.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer) + {{else}} + return market{{.}}.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + {{end}} + {{end}} + default: + panic("unsupported actors version") + } +} + +func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) { + return market{{.latestVersion}}.DealDurationBounds(pieceSize) +} + +// Sets the challenge window and scales the proving period to match (such that +// there are always 48 challenge windows in a proving period). 
+func SetWPoStChallengeWindow(period abi.ChainEpoch) { + {{range .versions}} + miner{{.}}.WPoStChallengeWindow = period + miner{{.}}.WPoStProvingPeriod = period * abi.ChainEpoch(miner{{.}}.WPoStPeriodDeadlines) + {{if (ge . 3)}} + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. + miner{{.}}.WPoStDisputeWindow = period * 30 + {{end}} + {{end}} +} + +func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { + if nwVer <= network.Version3 { + return 10 + } + + // NOTE: if this ever changes, adjust it in a (*Miner).mineOne() logline as well + return ChainFinality +} + +func GetMaxSectorExpirationExtension() abi.ChainEpoch { + return miner{{.latestVersion}}.MaxSectorExpirationExtension +} + +func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) { + sectorsPerPart, err := builtin{{.latestVersion}}.PoStProofWindowPoStPartitionSectors(p) + if err != nil { + return 0, err + } + maxSectors := uint64(GetAddressedSectorsMax(nv)) + return int(maxSectors / sectorsPerPart), nil +} + +func GetDefaultSectorSize() abi.SectorSize { + // supported sector sizes are the same across versions. 
+ szs := make([]abi.SectorSize, 0, len(miner{{.latestVersion}}.PreCommitSealProofTypesV8)) + for spt := range miner{{.latestVersion}}.PreCommitSealProofTypesV8 { + ss, err := spt.SectorSize() + if err != nil { + panic(err) + } + + szs = append(szs, ss) + } + + sort.Slice(szs, func(i, j int) bool { + return szs[i] < szs[j] + }) + + return szs[0] +} + +func GetDefaultAggregationProof() abi.RegisteredAggregationProof { + return abi.RegisteredAggregationProof_SnarkPackV1 +} + +func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch { + if nwVer <= network.Version10 { + return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime + } + + return builtin{{.latestVersion}}.SealProofPoliciesV11[proof].SectorMaxLifetime +} + +func GetAddressedSectorsMax(nwVer network.Version) int { + switch actors.VersionForNetwork(nwVer) { + {{range .versions}} + case actors.Version{{.}}: + return miner{{.}}.AddressedSectorsMax + {{end}} + default: + panic("unsupported network version") + } +} + +func GetDeclarationsMax(nwVer network.Version) int { + switch actors.VersionForNetwork(nwVer) { + {{range .versions}} + case actors.Version{{.}}: + {{if (eq . 0)}} + // TODO: Should we instead panic here since the concept doesn't exist yet? + return miner{{.}}.AddressedPartitionsMax + {{else}} + return miner{{.}}.DeclarationsMax + {{end}} + {{end}} + default: + panic("unsupported network version") + } +} + +func AggregateNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount { + switch actors.VersionForNetwork(nwVer) { + {{range .versions}} + case actors.Version{{.}}: + {{if (le . 
4)}} + return big.Zero() + {{else}} + return miner{{.}}.AggregateNetworkFee(aggregateSize, baseFee) + {{end}} + {{end}} + default: + panic("unsupported network version") + } +} diff --git a/chain/actors/policy/policy_test.go b/chain/actors/policy/policy_test.go index af600cc75cd..f40250fba8e 100644 --- a/chain/actors/policy/policy_test.go +++ b/chain/actors/policy/policy_test.go @@ -6,6 +6,7 @@ import ( "github.com/stretchr/testify/require" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" @@ -44,7 +45,7 @@ func TestSupportedProofTypes(t *testing.T) { // Tests assumptions about policies being the same between actor versions. func TestAssumptions(t *testing.T) { - require.EqualValues(t, miner0.SupportedProofTypes, miner2.SupportedProofTypes) + require.EqualValues(t, miner0.SupportedProofTypes, miner2.PreCommitSealProofTypesV0) require.Equal(t, miner0.PreCommitChallengeDelay, miner2.PreCommitChallengeDelay) require.Equal(t, miner0.MaxSectorExpirationExtension, miner2.MaxSectorExpirationExtension) require.Equal(t, miner0.ChainFinality, miner2.ChainFinality) @@ -57,10 +58,10 @@ func TestAssumptions(t *testing.T) { } func TestPartitionSizes(t *testing.T) { - for p := range abi.PoStSealProofTypes { - sizeNew, err := builtin2.PoStProofWindowPoStPartitionSectors(p) + for _, p := range abi.SealProofInfos { + sizeNew, err := builtin2.PoStProofWindowPoStPartitionSectors(p.WindowPoStProof) require.NoError(t, err) - sizeOld, err := builtin0.PoStProofWindowPoStPartitionSectors(p) + sizeOld, err := builtin0.PoStProofWindowPoStPartitionSectors(p.WindowPoStProof) if err != nil { // new proof type. 
continue @@ -68,3 +69,12 @@ func TestPartitionSizes(t *testing.T) { require.Equal(t, sizeOld, sizeNew) } } + +func TestPoStSize(t *testing.T) { + v12PoStSize, err := GetMaxPoStPartitions(network.Version12, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1) + require.Equal(t, 4, v12PoStSize) + require.NoError(t, err) + v13PoStSize, err := GetMaxPoStPartitions(network.Version13, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1) + require.NoError(t, err) + require.Equal(t, 10, v13PoStSize) +} diff --git a/chain/actors/version.go b/chain/actors/version.go index fe16d521eb2..9710e62fa8f 100644 --- a/chain/actors/version.go +++ b/chain/actors/version.go @@ -8,9 +8,16 @@ import ( type Version int +var LatestVersion = 5 + +var Versions = []int{0, 2, 3, 4, LatestVersion} + const ( Version0 Version = 0 Version2 Version = 2 + Version3 Version = 3 + Version4 Version = 4 + Version5 Version = 5 ) // Converts a network version into an actors adt version. @@ -18,8 +25,14 @@ func VersionForNetwork(version network.Version) Version { switch version { case network.Version0, network.Version1, network.Version2, network.Version3: return Version0 - case network.Version4, network.Version5, network.Version6: + case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8, network.Version9: return Version2 + case network.Version10, network.Version11: + return Version3 + case network.Version12: + return Version4 + case network.Version13: + return Version5 default: panic(fmt.Sprintf("unsupported network version %d", version)) } diff --git a/chain/beacon/beacon.go b/chain/beacon/beacon.go index 9543bec54b3..220057282fd 100644 --- a/chain/beacon/beacon.go +++ b/chain/beacon/beacon.go @@ -4,7 +4,7 @@ import ( "context" "github.com/filecoin-project/go-state-types/abi" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/build" diff --git a/chain/beacon/drand/drand.go 
b/chain/beacon/drand/drand.go index 4abc12d2902..e7f673d7f66 100644 --- a/chain/beacon/drand/drand.go +++ b/chain/beacon/drand/drand.go @@ -3,7 +3,6 @@ package drand import ( "bytes" "context" - "sync" "time" dchain "github.com/drand/drand/chain" @@ -13,10 +12,11 @@ import ( gclient "github.com/drand/drand/lp2p/client" "github.com/drand/kyber" kzap "github.com/go-kit/kit/log/zap" + lru "github.com/hashicorp/golang-lru" "go.uber.org/zap/zapcore" "golang.org/x/xerrors" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/filecoin-project/go-state-types/abi" @@ -61,8 +61,7 @@ type DrandBeacon struct { filGenTime uint64 filRoundTime uint64 - cacheLk sync.Mutex - localCache map[uint64]types.BeaconEntry + localCache *lru.Cache } // DrandHTTPClient interface overrides the user agent used by drand @@ -111,9 +110,14 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes return nil, xerrors.Errorf("creating drand client") } + lc, err := lru.New(1024) + if err != nil { + return nil, err + } + db := &DrandBeacon{ client: client, - localCache: make(map[uint64]types.BeaconEntry), + localCache: lc, } db.pubkey = drandChain.PublicKey @@ -156,19 +160,16 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Re return out } func (db *DrandBeacon) cacheValue(e types.BeaconEntry) { - db.cacheLk.Lock() - defer db.cacheLk.Unlock() - db.localCache[e.Round] = e + db.localCache.Add(e.Round, e) } func (db *DrandBeacon) getCachedValue(round uint64) *types.BeaconEntry { - db.cacheLk.Lock() - defer db.cacheLk.Unlock() - v, ok := db.localCache[round] + v, ok := db.localCache.Get(round) if !ok { return nil } - return &v + e, _ := v.(types.BeaconEntry) + return &e } func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntry) error { @@ -177,6 +178,9 @@ func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntr return nil } 
if be := db.getCachedValue(curr.Round); be != nil { + if !bytes.Equal(curr.Data, be.Data) { + return xerrors.New("invalid beacon value, does not match cached good value") + } // return no error if the value is in the cache already return nil } diff --git a/chain/checkpoint.go b/chain/checkpoint.go index 8f99d73e43d..a3660a45ce4 100644 --- a/chain/checkpoint.go +++ b/chain/checkpoint.go @@ -1,81 +1,57 @@ package chain import ( - "encoding/json" + "context" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/ipfs/go-datastore" "golang.org/x/xerrors" ) -var CheckpointKey = datastore.NewKey("/chain/checks") - -func loadCheckpoint(ds dtypes.MetadataDS) (types.TipSetKey, error) { - haveChks, err := ds.Has(CheckpointKey) - if err != nil { - return types.EmptyTSK, err - } - - if !haveChks { - return types.EmptyTSK, nil +func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error { + if tsk == types.EmptyTSK { + return xerrors.Errorf("called with empty tsk") } - tskBytes, err := ds.Get(CheckpointKey) + ts, err := syncer.ChainStore().LoadTipSet(tsk) if err != nil { - return types.EmptyTSK, err + tss, err := syncer.Exchange.GetBlocks(ctx, tsk, 1) + if err != nil { + return xerrors.Errorf("failed to fetch tipset: %w", err) + } else if len(tss) != 1 { + return xerrors.Errorf("expected 1 tipset, got %d", len(tss)) + } + ts = tss[0] } - var tsk types.TipSetKey - err = json.Unmarshal(tskBytes, &tsk) - if err != nil { - return types.EmptyTSK, err + if err := syncer.switchChain(ctx, ts); err != nil { + return xerrors.Errorf("failed to switch chain when syncing checkpoint: %w", err) } - return tsk, err -} - -func (syncer *Syncer) SetCheckpoint(tsk types.TipSetKey) error { - if tsk == types.EmptyTSK { - return xerrors.Errorf("called with empty tsk") + if err := syncer.ChainStore().SetCheckpoint(ts); err != nil { + return xerrors.Errorf("failed to set the chain checkpoint: %w", err) } - 
syncer.checkptLk.Lock() - defer syncer.checkptLk.Unlock() - - ts, err := syncer.ChainStore().LoadTipSet(tsk) - if err != nil { - return xerrors.Errorf("cannot find tipset: %w", err) - } + return nil +} +func (syncer *Syncer) switchChain(ctx context.Context, ts *types.TipSet) error { hts := syncer.ChainStore().GetHeaviestTipSet() - anc, err := syncer.ChainStore().IsAncestorOf(ts, hts) - if err != nil { - return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err) + if hts.Equals(ts) { + return nil } - if !hts.Equals(ts) && !anc { - return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err) + if anc, err := syncer.store.IsAncestorOf(ts, hts); err == nil && anc { + return nil } - tskBytes, err := json.Marshal(tsk) - if err != nil { - return err + // Otherwise, sync the chain and set the head. + if err := syncer.collectChain(ctx, ts, hts, true); err != nil { + return xerrors.Errorf("failed to collect chain for checkpoint: %w", err) } - err = syncer.ds.Put(CheckpointKey, tskBytes) - if err != nil { - return err + if err := syncer.ChainStore().SetHead(ts); err != nil { + return xerrors.Errorf("failed to set the chain head: %w", err) } - - syncer.checkpt = tsk - return nil } - -func (syncer *Syncer) GetCheckpoint() types.TipSetKey { - syncer.checkptLk.Lock() - defer syncer.checkptLk.Unlock() - return syncer.checkpt -} diff --git a/chain/events/events.go b/chain/events/events.go index e35e91366c3..8511de9217b 100644 --- a/chain/events/events.go +++ b/chain/events/events.go @@ -20,8 +20,10 @@ import ( var log = logging.Logger("events") // HeightHandler `curH`-`ts.Height` = `confidence` -type HeightHandler func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error -type RevertHandler func(ctx context.Context, ts *types.TipSet) error +type ( + HeightHandler func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error + RevertHandler func(ctx context.Context, ts *types.TipSet) 
error +) type heightHandler struct { confidence int @@ -31,33 +33,33 @@ type heightHandler struct { revert RevertHandler } -type eventAPI interface { +type EventAPI interface { ChainNotify(context.Context) (<-chan []*api.HeadChange, error) ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) ChainHead(context.Context) (*types.TipSet, error) - StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) + StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) // optional / for CalledMsg } type Events struct { - api eventAPI + api EventAPI tsc *tipSetCache lk sync.Mutex - ready sync.WaitGroup + ready chan struct{} readyOnce sync.Once heightEvents *hcEvents -} -func NewEvents(ctx context.Context, api eventAPI) *Events { - gcConfidence := 2 * build.ForkLengthThreshold + observers []TipSetObserver +} +func NewEventsWithConfidence(ctx context.Context, api EventAPI, gcConfidence abi.ChainEpoch) *Events { tsc := newTSCache(gcConfidence, api) e := &Events{ @@ -75,20 +77,27 @@ func NewEvents(ctx context.Context, api eventAPI) *Events { htHeights: map[abi.ChainEpoch][]uint64{}, }, - hcEvents: newHCEvents(ctx, api, tsc, uint64(gcConfidence)), + hcEvents: newHCEvents(ctx, api, tsc, uint64(gcConfidence)), + ready: make(chan struct{}), + observers: []TipSetObserver{}, } - e.ready.Add(1) - go e.listenHeadChanges(ctx) - e.ready.Wait() - - // TODO: cleanup/gc goroutine + // Wait for the first tipset to be seen or bail if shutting down + select { + case <-e.ready: + case <-ctx.Done(): + } return e } +func NewEvents(ctx context.Context, api EventAPI) *Events { + gcConfidence := 2 * 
build.ForkLengthThreshold + return NewEventsWithConfidence(ctx, api, gcConfidence) +} + func (e *Events) listenHeadChanges(ctx context.Context) { for { if err := e.listenHeadChangesOnce(ctx); err != nil { @@ -96,11 +105,13 @@ func (e *Events) listenHeadChanges(ctx context.Context) { } else { log.Warn("listenHeadChanges quit") } - if ctx.Err() != nil { + select { + case <-build.Clock.After(time.Second): + case <-ctx.Done(): log.Warnf("not restarting listenHeadChanges: context error: %s", ctx.Err()) return } - build.Clock.Sleep(time.Second) + log.Info("restarting listenHeadChanges") } } @@ -111,13 +122,21 @@ func (e *Events) listenHeadChangesOnce(ctx context.Context) error { notifs, err := e.api.ChainNotify(ctx) if err != nil { - // TODO: retry + // Retry is handled by caller return xerrors.Errorf("listenHeadChanges ChainNotify call failed: %w", err) } - cur, ok := <-notifs // TODO: timeout? - if !ok { - return xerrors.Errorf("notification channel closed") + var cur []*api.HeadChange + var ok bool + + // Wait for first tipset or bail + select { + case cur, ok = <-notifs: + if !ok { + return xerrors.Errorf("notification channel closed") + } + case <-ctx.Done(): + return ctx.Err() } if len(cur) != 1 { @@ -129,13 +148,13 @@ func (e *Events) listenHeadChangesOnce(ctx context.Context) error { } if err := e.tsc.add(cur[0].Val); err != nil { - log.Warn("tsc.add: adding current tipset failed: %w", err) + log.Warnf("tsc.add: adding current tipset failed: %v", err) } e.readyOnce.Do(func() { e.lastTs = cur[0].Val - - e.ready.Done() + // Signal that we have seen first tipset + close(e.ready) }) for notif := range notifs { @@ -151,7 +170,7 @@ func (e *Events) listenHeadChangesOnce(ctx context.Context) error { } } - if err := e.headChange(rev, app); err != nil { + if err := e.headChange(ctx, rev, app); err != nil { log.Warnf("headChange failed: %s", err) } @@ -164,7 +183,7 @@ func (e *Events) listenHeadChangesOnce(ctx context.Context) error { return nil } -func (e *Events) 
headChange(rev, app []*types.TipSet) error { +func (e *Events) headChange(ctx context.Context, rev, app []*types.TipSet) error { if len(app) == 0 { return xerrors.New("events.headChange expected at least one applied tipset") } @@ -176,5 +195,39 @@ func (e *Events) headChange(rev, app []*types.TipSet) error { return err } + if err := e.observeChanges(ctx, rev, app); err != nil { + return err + } return e.processHeadChangeEvent(rev, app) } + +// A TipSetObserver receives notifications of tipsets +type TipSetObserver interface { + Apply(ctx context.Context, ts *types.TipSet) error + Revert(ctx context.Context, ts *types.TipSet) error +} + +// TODO: add a confidence level so we can have observers with difference levels of confidence +func (e *Events) Observe(obs TipSetObserver) error { + e.lk.Lock() + defer e.lk.Unlock() + e.observers = append(e.observers, obs) + return nil +} + +// observeChanges expects caller to hold e.lk +func (e *Events) observeChanges(ctx context.Context, rev, app []*types.TipSet) error { + for _, ts := range rev { + for _, o := range e.observers { + _ = o.Revert(ctx, ts) + } + } + + for _, ts := range app { + for _, o := range e.observers { + _ = o.Apply(ctx, ts) + } + } + + return nil +} diff --git a/chain/events/events_called.go b/chain/events/events_called.go index 7532060937d..1f0b80169e1 100644 --- a/chain/events/events_called.go +++ b/chain/events/events_called.go @@ -5,6 +5,11 @@ import ( "math" "sync" + "github.com/filecoin-project/lotus/api" + lru "github.com/hashicorp/golang-lru" + + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -66,7 +71,7 @@ type queuedEvent struct { // Manages chain head change events, which may be forward (new tipset added to // chain) or backward (chain branch discarded in favour of heavier branch) type hcEvents struct { - cs eventAPI + cs EventAPI tsc *tipSetCache ctx context.Context gcConfidence uint64 @@ 
-93,7 +98,7 @@ type hcEvents struct { watcherEvents } -func newHCEvents(ctx context.Context, cs eventAPI, tsc *tipSetCache, gcConfidence uint64) *hcEvents { +func newHCEvents(ctx context.Context, cs EventAPI, tsc *tipSetCache, gcConfidence uint64) *hcEvents { e := hcEvents{ ctx: ctx, cs: cs, @@ -142,8 +147,10 @@ func (e *hcEvents) processHeadChangeEvent(rev, app []*types.TipSet) error { // Queue up calls until there have been enough blocks to reach // confidence on the message calls - for tid, data := range newCalls { - e.queueForConfidence(tid, data, nil, ts) + for tid, calls := range newCalls { + for _, data := range calls { + e.queueForConfidence(tid, data, nil, ts) + } } for at := e.lastTs.Height(); at <= ts.Height(); at++ { @@ -353,14 +360,14 @@ type headChangeAPI interface { // watcherEvents watches for a state change type watcherEvents struct { ctx context.Context - cs eventAPI + cs EventAPI hcAPI headChangeAPI lk sync.RWMutex matchers map[triggerID]StateMatchFunc } -func newWatcherEvents(ctx context.Context, hcAPI headChangeAPI, cs eventAPI) watcherEvents { +func newWatcherEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) watcherEvents { return watcherEvents{ ctx: ctx, cs: cs, @@ -455,24 +462,30 @@ func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler, // messageEvents watches for message calls to actors type messageEvents struct { ctx context.Context - cs eventAPI + cs EventAPI hcAPI headChangeAPI lk sync.RWMutex - matchers map[triggerID][]MsgMatchFunc + matchers map[triggerID]MsgMatchFunc + + blockMsgLk sync.Mutex + blockMsgCache *lru.ARCCache } -func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs eventAPI) messageEvents { +func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) messageEvents { + blsMsgCache, _ := lru.NewARC(500) return messageEvents{ - ctx: ctx, - cs: cs, - hcAPI: hcAPI, - matchers: map[triggerID][]MsgMatchFunc{}, + ctx: ctx, + cs: cs, + hcAPI: hcAPI, + matchers: 
make(map[triggerID]MsgMatchFunc), + blockMsgLk: sync.Mutex{}, + blockMsgCache: blsMsgCache, } } // Check if there are any new actor calls -func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventData, error) { +func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID][]eventData, error) { pts, err := me.cs.ChainGetTipSet(me.ctx, ts.Parents()) // we actually care about messages in the parent tipset here if err != nil { log.Errorf("getting parent tipset in checkNewCalls: %s", err) @@ -482,32 +495,23 @@ func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventDat me.lk.RLock() defer me.lk.RUnlock() - res := make(map[triggerID]eventData) + // For each message in the tipset + res := make(map[triggerID][]eventData) me.messagesForTs(pts, func(msg *types.Message) { // TODO: provide receipts - for tid, matchFns := range me.matchers { - var matched bool - var once bool - for _, matchFn := range matchFns { - matchOne, ok, err := matchFn(msg) - if err != nil { - log.Errorf("event matcher failed: %s", err) - continue - } - matched = ok - once = matchOne - - if matched { - break - } + // Run each trigger's matcher against the message + for tid, matchFn := range me.matchers { + matched, err := matchFn(msg) + if err != nil { + log.Errorf("event matcher failed: %s", err) + continue } + // If there was a match, include the message in the results for the + // trigger if matched { - res[tid] = msg - if once { - break - } + res[tid] = append(res[tid], msg) } } }) @@ -520,14 +524,21 @@ func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Mes seen := map[cid.Cid]struct{}{} for _, tsb := range ts.Blocks() { - - msgs, err := me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid()) - if err != nil { - log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err) - // this is quite bad, but probably better than missing all the other updates - 
continue + me.blockMsgLk.Lock() + msgsI, ok := me.blockMsgCache.Get(tsb.Cid()) + var err error + if !ok { + msgsI, err = me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid()) + if err != nil { + log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err) + // this is quite bad, but probably better than missing all the other updates + me.blockMsgLk.Unlock() + continue + } + me.blockMsgCache.Add(tsb.Cid(), msgsI) } - + me.blockMsgLk.Unlock() + msgs := msgsI.(*api.BlockMessages) for _, m := range msgs.BlsMessages { _, ok := seen[m.Cid()] if ok { @@ -555,7 +566,7 @@ func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Mes // `curH`-`ts.Height` = `confidence` type MsgHandler func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) -type MsgMatchFunc func(msg *types.Message) (matchOnce bool, matched bool, err error) +type MsgMatchFunc func(msg *types.Message) (matched bool, err error) // Called registers a callback which is triggered when a specified method is // called on an actor, or a timeout is reached. 
@@ -592,12 +603,16 @@ func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHa panic("expected msg") } - rec, err := me.cs.StateGetReceipt(me.ctx, msg.Cid(), ts.Key()) + ml, err := me.cs.StateSearchMsg(me.ctx, ts.Key(), msg.Cid(), stmgr.LookbackNoLimit, true) if err != nil { return false, err } - return msgHnd(msg, rec, ts, height) + if ml == nil { + return msgHnd(msg, nil, ts, height) + } + + return msgHnd(msg, &ml.Receipt, ts, height) } id, err := me.hcAPI.onHeadChanged(check, hnd, rev, confidence, timeout) @@ -607,7 +622,7 @@ func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHa me.lk.Lock() defer me.lk.Unlock() - me.matchers[id] = append(me.matchers[id], mf) + me.matchers[id] = mf return nil } diff --git a/chain/events/events_height.go b/chain/events/events_height.go index c8dd905d9b1..1fcff9e68f1 100644 --- a/chain/events/events_height.go +++ b/chain/events/events_height.go @@ -153,6 +153,7 @@ func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence best, err := e.tsc.best() if err != nil { + e.lk.Unlock() return xerrors.Errorf("error getting best tipset: %w", err) } @@ -177,6 +178,7 @@ func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence e.lk.Lock() best, err = e.tsc.best() if err != nil { + e.lk.Unlock() return xerrors.Errorf("error getting best tipset: %w", err) } bestH = best.Height() diff --git a/chain/events/events_test.go b/chain/events/events_test.go index 0e4fd34b213..04f938055f1 100644 --- a/chain/events/events_test.go +++ b/chain/events/events_test.go @@ -6,6 +6,8 @@ import ( "sync" "testing" + "gotest.tools/assert" + "github.com/ipfs/go-cid" "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" @@ -44,25 +46,43 @@ type fakeCS struct { tipsets map[types.TipSetKey]*types.TipSet sub func(rev, app []*types.TipSet) + + callNumberLk sync.Mutex + callNumber map[string]int } func (fcs *fakeCS) ChainHead(ctx context.Context) 
(*types.TipSet, error) { + fcs.callNumberLk.Lock() + defer fcs.callNumberLk.Unlock() + fcs.callNumber["ChainHead"] = fcs.callNumber["ChainHead"] + 1 panic("implement me") } func (fcs *fakeCS) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { + fcs.callNumberLk.Lock() + defer fcs.callNumberLk.Unlock() + fcs.callNumber["ChainGetTipSet"] = fcs.callNumber["ChainGetTipSet"] + 1 return fcs.tipsets[key], nil } -func (fcs *fakeCS) StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) { +func (fcs *fakeCS) StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { + fcs.callNumberLk.Lock() + defer fcs.callNumberLk.Unlock() + fcs.callNumber["StateSearchMsg"] = fcs.callNumber["StateSearchMsg"] + 1 return nil, nil } func (fcs *fakeCS) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { + fcs.callNumberLk.Lock() + defer fcs.callNumberLk.Unlock() + fcs.callNumber["StateGetActor"] = fcs.callNumber["StateGetActor"] + 1 panic("Not Implemented") } func (fcs *fakeCS) ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) { + fcs.callNumberLk.Lock() + defer fcs.callNumberLk.Unlock() + fcs.callNumber["ChainGetTipSetByHeight"] = fcs.callNumber["ChainGetTipSetByHeight"] + 1 panic("Not Implemented") } @@ -113,6 +133,10 @@ func (fcs *fakeCS) makeTs(t *testing.T, parents []cid.Cid, h abi.ChainEpoch, msg } func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*api.HeadChange, error) { + fcs.callNumberLk.Lock() + defer fcs.callNumberLk.Unlock() + fcs.callNumber["ChainNotify"] = fcs.callNumber["ChainNotify"] + 1 + out := make(chan []*api.HeadChange, 1) best, err := fcs.tsc.best() if err != nil { @@ -143,6 +167,9 @@ func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*api.HeadChange, error } func (fcs *fakeCS) ChainGetBlockMessages(ctx 
context.Context, blk cid.Cid) (*api.BlockMessages, error) { + fcs.callNumberLk.Lock() + defer fcs.callNumberLk.Unlock() + fcs.callNumber["ChainGetBlockMessages"] = fcs.callNumber["ChainGetBlockMessages"] + 1 messages, ok := fcs.blkMsgs[blk] if !ok { return &api.BlockMessages{}, nil @@ -152,8 +179,8 @@ func (fcs *fakeCS) ChainGetBlockMessages(ctx context.Context, blk cid.Cid) (*api if !ok { return &api.BlockMessages{}, nil } - return &api.BlockMessages{BlsMessages: ms.bmsgs, SecpkMessages: ms.smsgs}, nil + return &api.BlockMessages{BlsMessages: ms.bmsgs, SecpkMessages: ms.smsgs}, nil } func (fcs *fakeCS) fakeMsgs(m fakeMsg) cid.Cid { @@ -229,13 +256,14 @@ func (fcs *fakeCS) notifDone() { fcs.sync.Unlock() } -var _ eventAPI = &fakeCS{} +var _ EventAPI = &fakeCS{} func TestAt(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -298,9 +326,10 @@ func TestAt(t *testing.T) { func TestAtDoubleTrigger(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -340,9 +369,10 @@ func TestAtDoubleTrigger(t *testing.T) { func TestAtNullTrigger(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -374,9 +404,10 @@ func TestAtNullTrigger(t *testing.T) { func TestAtNullConf(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), 
+ callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -413,9 +444,10 @@ func TestAtNullConf(t *testing.T) { func TestAtStart(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -447,9 +479,10 @@ func TestAtStart(t *testing.T) { func TestAtStartConfidence(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -477,9 +510,10 @@ func TestAtStartConfidence(t *testing.T) { func TestAtChained(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -511,9 +545,10 @@ func TestAtChained(t *testing.T) { func TestAtChainedConfidence(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -545,9 +580,10 @@ func TestAtChainedConfidence(t *testing.T) { func TestAtChainedConfidenceNull(t *testing.T) { fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + t: t, + h: 1, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -572,9 +608,9 @@ func TestAtChainedConfidenceNull(t *testing.T) { require.Equal(t, false, reverted) } -func matchAddrMethod(to 
address.Address, m abi.MethodNum) func(msg *types.Message) (matchOnce bool, matched bool, err error) { - return func(msg *types.Message) (matchOnce bool, matched bool, err error) { - return true, to == msg.To && m == msg.Method, nil +func matchAddrMethod(to address.Address, m abi.MethodNum) func(msg *types.Message) (matched bool, err error) { + return func(msg *types.Message) (matched bool, err error) { + return to == msg.To && m == msg.Method, nil } } @@ -583,9 +619,10 @@ func TestCalled(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -795,9 +832,10 @@ func TestCalledTimeout(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -835,9 +873,10 @@ func TestCalledTimeout(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + callNumber: map[string]int{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -869,9 +908,10 @@ func TestCalledOrder(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: 
map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -932,9 +972,10 @@ func TestCalledNull(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -997,9 +1038,10 @@ func TestRemoveTriggersOnMessage(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -1087,9 +1129,10 @@ func TestStateChanged(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -1175,9 +1218,10 @@ func TestStateChangedRevert(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -1253,9 +1297,10 @@ func TestStateChangedTimeout(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: 
newTSCache(2*build.ForkLengthThreshold, nil), + callNumber: map[string]int{}, } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -1293,9 +1338,10 @@ func TestStateChangedTimeout(t *testing.T) { t: t, h: 1, - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + callNumber: map[string]int{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), } require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) @@ -1323,3 +1369,84 @@ func TestStateChangedTimeout(t *testing.T) { fcs.advance(0, 5, nil) require.False(t, called) } + +func TestCalledMultiplePerEpoch(t *testing.T) { + fcs := &fakeCS{ + t: t, + h: 1, + + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + callNumber: map[string]int{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + } + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) + + events := NewEvents(context.Background(), fcs) + + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + at := 0 + + err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { + switch at { + case 0: + require.Equal(t, uint64(1), msg.Nonce) + require.Equal(t, abi.ChainEpoch(4), ts.Height()) + case 1: + require.Equal(t, uint64(2), msg.Nonce) + require.Equal(t, abi.ChainEpoch(4), ts.Height()) + default: + t.Fatal("apply should only get called twice, at: ", at) + } + at++ + return true, nil + }, func(_ context.Context, ts *types.TipSet) error { + switch at { + case 2: + require.Equal(t, abi.ChainEpoch(4), ts.Height()) + case 3: + require.Equal(t, abi.ChainEpoch(4), ts.Height()) + default: + t.Fatal("revert should only get called twice, at: ", at) + } + at++ + return nil + }, 3, 20, matchAddrMethod(t0123, 5)) + require.NoError(t, 
err) + + fcs.advance(0, 10, map[int]cid.Cid{ + 1: fcs.fakeMsgs(fakeMsg{ + bmsgs: []*types.Message{ + {To: t0123, From: t0123, Method: 5, Nonce: 1}, + {To: t0123, From: t0123, Method: 5, Nonce: 2}, + }, + }), + }) + + fcs.advance(9, 1, nil) +} + +func TestCachedSameBlock(t *testing.T) { + fcs := &fakeCS{ + t: t, + h: 1, + + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + callNumber: map[string]int{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + } + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) + + _ = NewEvents(context.Background(), fcs) + + fcs.advance(0, 10, map[int]cid.Cid{}) + assert.Assert(t, fcs.callNumber["ChainGetBlockMessages"] == 20, "expect call ChainGetBlockMessages %d but got ", 20, fcs.callNumber["ChainGetBlockMessages"]) + + fcs.advance(5, 10, map[int]cid.Cid{}) + assert.Assert(t, fcs.callNumber["ChainGetBlockMessages"] == 30, "expect call ChainGetBlockMessages %d but got ", 30, fcs.callNumber["ChainGetBlockMessages"]) +} diff --git a/chain/events/state/fastapi.go b/chain/events/state/fastapi.go new file mode 100644 index 00000000000..9375d9d7846 --- /dev/null +++ b/chain/events/state/fastapi.go @@ -0,0 +1,34 @@ +package state + +import ( + "context" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/lotus/chain/types" +) + +type FastChainApiAPI interface { + ChainAPI + + ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) +} + +type fastAPI struct { + FastChainApiAPI +} + +func WrapFastAPI(api FastChainApiAPI) ChainAPI { + return &fastAPI{ + api, + } +} + +func (a *fastAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { + ts, err := a.FastChainApiAPI.ChainGetTipSet(ctx, tsk) + if err != nil { + return nil, err + } + + return a.FastChainApiAPI.StateGetActor(ctx, actor, ts.Parents()) +} diff --git a/chain/events/state/mock/api.go b/chain/events/state/mock/api.go new file mode 100644 index 
00000000000..2ed48dc39c3 --- /dev/null +++ b/chain/events/state/mock/api.go @@ -0,0 +1,69 @@ +package test + +import ( + "context" + "sync" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" +) + +type MockAPI struct { + bs blockstore.Blockstore + + lk sync.Mutex + ts map[types.TipSetKey]*types.Actor + stateGetActorCalled int +} + +func NewMockAPI(bs blockstore.Blockstore) *MockAPI { + return &MockAPI{ + bs: bs, + ts: make(map[types.TipSetKey]*types.Actor), + } +} + +func (m *MockAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { + return m.bs.Has(c) +} + +func (m *MockAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { + blk, err := m.bs.Get(c) + if err != nil { + return nil, xerrors.Errorf("blockstore get: %w", err) + } + + return blk.RawData(), nil +} + +func (m *MockAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { + m.lk.Lock() + defer m.lk.Unlock() + + m.stateGetActorCalled++ + return m.ts[tsk], nil +} + +func (m *MockAPI) StateGetActorCallCount() int { + m.lk.Lock() + defer m.lk.Unlock() + + return m.stateGetActorCalled +} + +func (m *MockAPI) ResetCallCounts() { + m.lk.Lock() + defer m.lk.Unlock() + + m.stateGetActorCalled = 0 +} + +func (m *MockAPI) SetActor(tsk types.TipSetKey, act *types.Actor) { + m.lk.Lock() + defer m.lk.Unlock() + + m.ts[tsk] = act +} diff --git a/chain/events/state/mock/state.go b/chain/events/state/mock/state.go new file mode 100644 index 00000000000..bac06b59fcb --- /dev/null +++ b/chain/events/state/mock/state.go @@ -0,0 +1,32 @@ +package test + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + "github.com/filecoin-project/specs-actors/v2/actors/util/adt" + 
"github.com/stretchr/testify/require" +) + +func CreateEmptyMarketState(t *testing.T, store adt.Store) *market.State { + emptyArrayCid, err := adt.MakeEmptyArray(store).Root() + require.NoError(t, err) + emptyMap, err := adt.MakeEmptyMap(store).Root() + require.NoError(t, err) + return market.ConstructState(emptyArrayCid, emptyMap, emptyMap) +} + +func CreateDealAMT(ctx context.Context, t *testing.T, store adt.Store, deals map[abi.DealID]*market.DealState) cid.Cid { + root := adt.MakeEmptyArray(store) + for dealID, dealState := range deals { + err := root.Set(uint64(dealID), dealState) + require.NoError(t, err) + } + rootCid, err := root.Root() + require.NoError(t, err) + return rootCid +} diff --git a/chain/events/state/mock/tipset.go b/chain/events/state/mock/tipset.go new file mode 100644 index 00000000000..39d42d6e54d --- /dev/null +++ b/chain/events/state/mock/tipset.go @@ -0,0 +1,27 @@ +package test + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-cid" +) + +var dummyCid cid.Cid + +func init() { + dummyCid, _ = cid.Parse("bafkqaaa") +} + +func MockTipset(minerAddr address.Address, timestamp uint64) (*types.TipSet, error) { + return types.NewTipSet([]*types.BlockHeader{{ + Miner: minerAddr, + Height: 5, + ParentStateRoot: dummyCid, + Messages: dummyCid, + ParentMessageReceipts: dummyCid, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + Timestamp: timestamp, + }}) +} diff --git a/chain/events/state/predicates.go b/chain/events/state/predicates.go index 99b8480dc52..33f49628978 100644 --- a/chain/events/state/predicates.go +++ b/chain/events/state/predicates.go @@ -1,18 +1,17 @@ package state import ( - "bytes" "context" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/go-address" 
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" cbor "github.com/ipfs/go-ipld-cbor" - typegen "github.com/whyrusleeping/cbor-gen" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/adt" init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" "github.com/filecoin-project/lotus/chain/actors/builtin/market" @@ -25,7 +24,7 @@ type UserData interface{} // ChainAPI abstracts out calls made by this class to external APIs type ChainAPI interface { - apibstore.ChainIO + api.ChainIO StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) } @@ -38,7 +37,7 @@ type StatePredicates struct { func NewStatePredicates(api ChainAPI) *StatePredicates { return &StatePredicates{ api: api, - cst: cbor.NewCborStore(apibstore.NewAPIBlockstore(api)), + cst: cbor.NewCborStore(blockstore.NewAPIBlockstore(api)), } } @@ -419,179 +418,17 @@ type AddressPair struct { PK address.Address } -type InitActorAddressChanges struct { - Added []AddressPair - Modified []AddressChange - Removed []AddressPair -} - -type AddressChange struct { - From AddressPair - To AddressPair -} - type DiffInitActorStateFunc func(ctx context.Context, oldState init_.State, newState init_.State) (changed bool, user UserData, err error) -func (i *InitActorAddressChanges) AsKey(key string) (abi.Keyer, error) { - addr, err := address.NewFromBytes([]byte(key)) - if err != nil { - return nil, err - } - return abi.AddrKey(addr), nil -} - -func (i *InitActorAddressChanges) Add(key string, val *typegen.Deferred) error { - pkAddr, err := address.NewFromBytes([]byte(key)) - if err != nil { - return err - } - id := new(typegen.CborInt) - if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { - return err - } - idAddr, err := address.NewIDAddress(uint64(*id)) - if err != nil { - return err - } - i.Added = append(i.Added, 
AddressPair{ - ID: idAddr, - PK: pkAddr, - }) - return nil -} - -func (i *InitActorAddressChanges) Modify(key string, from, to *typegen.Deferred) error { - pkAddr, err := address.NewFromBytes([]byte(key)) - if err != nil { - return err - } - - fromID := new(typegen.CborInt) - if err := fromID.UnmarshalCBOR(bytes.NewReader(from.Raw)); err != nil { - return err - } - fromIDAddr, err := address.NewIDAddress(uint64(*fromID)) - if err != nil { - return err - } - - toID := new(typegen.CborInt) - if err := toID.UnmarshalCBOR(bytes.NewReader(to.Raw)); err != nil { - return err - } - toIDAddr, err := address.NewIDAddress(uint64(*toID)) - if err != nil { - return err - } - - i.Modified = append(i.Modified, AddressChange{ - From: AddressPair{ - ID: fromIDAddr, - PK: pkAddr, - }, - To: AddressPair{ - ID: toIDAddr, - PK: pkAddr, - }, - }) - return nil -} - -func (i *InitActorAddressChanges) Remove(key string, val *typegen.Deferred) error { - pkAddr, err := address.NewFromBytes([]byte(key)) - if err != nil { - return err - } - id := new(typegen.CborInt) - if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { - return err - } - idAddr, err := address.NewIDAddress(uint64(*id)) - if err != nil { - return err - } - i.Removed = append(i.Removed, AddressPair{ - ID: idAddr, - PK: pkAddr, - }) - return nil -} - func (sp *StatePredicates) OnAddressMapChange() DiffInitActorStateFunc { return func(ctx context.Context, oldState, newState init_.State) (changed bool, user UserData, err error) { - addressChanges := &InitActorAddressChanges{ - Added: []AddressPair{}, - Modified: []AddressChange{}, - Removed: []AddressPair{}, - } - - err = oldState.ForEachActor(func(oldId abi.ActorID, oldAddress address.Address) error { - oldIdAddress, err := address.NewIDAddress(uint64(oldId)) - if err != nil { - return err - } - - newIdAddress, found, err := newState.ResolveAddress(oldAddress) - if err != nil { - return err - } - - if !found { - addressChanges.Removed = 
append(addressChanges.Removed, AddressPair{ - ID: oldIdAddress, - PK: oldAddress, - }) - } - - if oldIdAddress != newIdAddress { - addressChanges.Modified = append(addressChanges.Modified, AddressChange{ - From: AddressPair{ - ID: oldIdAddress, - PK: oldAddress, - }, - To: AddressPair{ - ID: newIdAddress, - PK: oldAddress, - }, - }) - } - - return nil - }) - + addressChanges, err := init_.DiffAddressMap(oldState, newState) if err != nil { return false, nil, err } - - err = newState.ForEachActor(func(newId abi.ActorID, newAddress address.Address) error { - newIdAddress, err := address.NewIDAddress(uint64(newId)) - if err != nil { - return err - } - - _, found, err := newState.ResolveAddress(newAddress) - if err != nil { - return err - } - - if !found { - addressChanges.Added = append(addressChanges.Added, AddressPair{ - ID: newIdAddress, - PK: newAddress, - }) - } - - return nil - }) - - if err != nil { - return false, nil, err - } - - if len(addressChanges.Added)+len(addressChanges.Removed)+len(addressChanges.Modified) == 0 { + if len(addressChanges.Added)+len(addressChanges.Modified)+len(addressChanges.Removed) == 0 { return false, nil, nil } - return true, addressChanges, nil } } diff --git a/chain/events/state/predicates_test.go b/chain/events/state/predicates_test.go index 9b393f6e4ba..8af3bb6a0b9 100644 --- a/chain/events/state/predicates_test.go +++ b/chain/events/state/predicates_test.go @@ -4,30 +4,28 @@ import ( "context" "testing" + test "github.com/filecoin-project/lotus/chain/events/state/mock" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/go-bitfield" - "github.com/stretchr/testify/require" - "golang.org/x/xerrors" - "github.com/ipfs/go-cid" cbornode "github.com/ipfs/go-ipld-cbor" + "github.com/stretchr/testify/require" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - 
"github.com/filecoin-project/go-state-types/crypto" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" tutils "github.com/filecoin-project/specs-actors/v2/support/testing" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/types" - bstore "github.com/filecoin-project/lotus/lib/blockstore" ) var dummyCid cid.Cid @@ -36,42 +34,9 @@ func init() { dummyCid, _ = cid.Parse("bafkqaaa") } -type mockAPI struct { - ts map[types.TipSetKey]*types.Actor - bs bstore.Blockstore -} - -func newMockAPI(bs bstore.Blockstore) *mockAPI { - return &mockAPI{ - bs: bs, - ts: make(map[types.TipSetKey]*types.Actor), - } -} - -func (m mockAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { - return m.bs.Has(c) -} - -func (m mockAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { - blk, err := m.bs.Get(c) - if err != nil { - return nil, xerrors.Errorf("blockstore get: %w", err) - } - - return blk.RawData(), nil -} - -func (m mockAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { - return m.ts[tsk], nil -} - -func (m mockAPI) setActor(tsk types.TipSetKey, act *types.Actor) { - m.ts[tsk] = act -} - func TestMarketPredicates(t *testing.T) { ctx := context.Background() - bs := bstore.NewTemporarySync() + bs := bstore.NewMemorySync() store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) oldDeal1 := &market2.DealState{ @@ -177,14 +142,14 @@ func TestMarketPredicates(t *testing.T) { minerAddr, err := address.NewFromString("t00") require.NoError(t, err) - oldState, err := mockTipset(minerAddr, 1) + oldState, err := test.MockTipset(minerAddr, 1) require.NoError(t, err) - 
newState, err := mockTipset(minerAddr, 2) + newState, err := test.MockTipset(minerAddr, 2) require.NoError(t, err) - api := newMockAPI(bs) - api.setActor(oldState.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: oldStateC}) - api.setActor(newState.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: newStateC}) + api := test.NewMockAPI(bs) + api.SetActor(oldState.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: oldStateC}) + api.SetActor(newState.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: newStateC}) t.Run("deal ID predicate", func(t *testing.T) { preds := NewStatePredicates(api) @@ -239,7 +204,7 @@ func TestMarketPredicates(t *testing.T) { t.Fatal("No state change so this should not be called") return false, nil, nil }) - marketState0 := createEmptyMarketState(t, store) + marketState0 := test.CreateEmptyMarketState(t, store) marketCid, err := store.Put(ctx, marketState0) require.NoError(t, err) marketState, err := market.Load(store, &types.Actor{ @@ -352,7 +317,7 @@ func TestMarketPredicates(t *testing.T) { t.Fatal("No state change so this should not be called") return false, nil, nil }) - marketState0 := createEmptyMarketState(t, store) + marketState0 := test.CreateEmptyMarketState(t, store) marketCid, err := store.Put(ctx, marketState0) require.NoError(t, err) marketState, err := market.Load(store, &types.Actor{ @@ -369,7 +334,7 @@ func TestMarketPredicates(t *testing.T) { func TestMinerSectorChange(t *testing.T) { ctx := context.Background() - bs := bstore.NewTemporarySync() + bs := bstore.NewMemorySync() store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) nextID := uint64(0) @@ -394,14 +359,14 @@ func TestMinerSectorChange(t *testing.T) { newMinerC := createMinerState(ctx, t, store, owner, worker, []miner.SectorOnChainInfo{si1Ext, si2, si3}) minerAddr := nextIDAddrF() - oldState, err := mockTipset(minerAddr, 1) + oldState, err := test.MockTipset(minerAddr, 1) require.NoError(t, err) 
- newState, err := mockTipset(minerAddr, 2) + newState, err := test.MockTipset(minerAddr, 2) require.NoError(t, err) - api := newMockAPI(bs) - api.setActor(oldState.Key(), &types.Actor{Head: oldMinerC, Code: builtin2.StorageMinerActorCodeID}) - api.setActor(newState.Key(), &types.Actor{Head: newMinerC, Code: builtin2.StorageMinerActorCodeID}) + api := test.NewMockAPI(bs) + api.SetActor(oldState.Key(), &types.Actor{Head: oldMinerC, Code: builtin2.StorageMinerActorCodeID}) + api.SetActor(newState.Key(), &types.Actor{Head: newMinerC, Code: builtin2.StorageMinerActorCodeID}) preds := NewStatePredicates(api) @@ -449,29 +414,16 @@ func TestMinerSectorChange(t *testing.T) { require.Equal(t, si1Ext, sectorChanges.Extended[0].From) } -func mockTipset(minerAddr address.Address, timestamp uint64) (*types.TipSet, error) { - return types.NewTipSet([]*types.BlockHeader{{ - Miner: minerAddr, - Height: 5, - ParentStateRoot: dummyCid, - Messages: dummyCid, - ParentMessageReceipts: dummyCid, - BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, - BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, - Timestamp: timestamp, - }}) -} - type balance struct { available abi.TokenAmount locked abi.TokenAmount } func createMarketState(ctx context.Context, t *testing.T, store adt2.Store, deals map[abi.DealID]*market2.DealState, props map[abi.DealID]*market2.DealProposal, balances map[address.Address]balance) cid.Cid { - dealRootCid := createDealAMT(ctx, t, store, deals) + dealRootCid := test.CreateDealAMT(ctx, t, store, deals) propRootCid := createProposalAMT(ctx, t, store, props) balancesCids := createBalanceTable(ctx, t, store, balances) - state := createEmptyMarketState(t, store) + state := test.CreateEmptyMarketState(t, store) state.States = dealRootCid state.Proposals = propRootCid state.EscrowTable = balancesCids[0] @@ -482,25 +434,6 @@ func createMarketState(ctx context.Context, t *testing.T, store adt2.Store, deal return stateC } -func createEmptyMarketState(t *testing.T, 
store adt2.Store) *market2.State { - emptyArrayCid, err := adt2.MakeEmptyArray(store).Root() - require.NoError(t, err) - emptyMap, err := adt2.MakeEmptyMap(store).Root() - require.NoError(t, err) - return market2.ConstructState(emptyArrayCid, emptyMap, emptyMap) -} - -func createDealAMT(ctx context.Context, t *testing.T, store adt2.Store, deals map[abi.DealID]*market2.DealState) cid.Cid { - root := adt2.MakeEmptyArray(store) - for dealID, dealState := range deals { - err := root.Set(uint64(dealID), dealState) - require.NoError(t, err) - } - rootCid, err := root.Root() - require.NoError(t, err) - return rootCid -} - func createProposalAMT(ctx context.Context, t *testing.T, store adt2.Store, props map[abi.DealID]*market2.DealProposal) cid.Cid { root := adt2.MakeEmptyArray(store) for dealID, prop := range props { diff --git a/chain/events/utils.go b/chain/events/utils.go index e50dbc6feeb..91ea0cd7a07 100644 --- a/chain/events/utils.go +++ b/chain/events/utils.go @@ -3,6 +3,8 @@ package events import ( "context" + "github.com/filecoin-project/lotus/chain/stmgr" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/types" @@ -22,23 +24,27 @@ func (me *messageEvents) CheckMsg(ctx context.Context, smsg types.ChainMsg, hnd return false, true, nil } - rec, err := me.cs.StateGetReceipt(ctx, smsg.VMMessage().Cid(), ts.Key()) + ml, err := me.cs.StateSearchMsg(me.ctx, ts.Key(), msg.Cid(), stmgr.LookbackNoLimit, true) if err != nil { return false, true, xerrors.Errorf("getting receipt in CheckMsg: %w", err) } - more, err = hnd(msg, rec, ts, ts.Height()) + if ml == nil { + more, err = hnd(msg, nil, ts, ts.Height()) + } else { + more, err = hnd(msg, &ml.Receipt, ts, ts.Height()) + } return true, more, err } } func (me *messageEvents) MatchMsg(inmsg *types.Message) MsgMatchFunc { - return func(msg *types.Message) (matchOnce bool, matched bool, err error) { + return func(msg *types.Message) (matched bool, err error) { if msg.From == inmsg.From && msg.Nonce == 
inmsg.Nonce && !inmsg.Equals(msg) { - return true, false, xerrors.Errorf("matching msg %s from %s, nonce %d: got duplicate origin/nonce msg %d", inmsg.Cid(), inmsg.From, inmsg.Nonce, msg.Nonce) + return false, xerrors.Errorf("matching msg %s from %s, nonce %d: got duplicate origin/nonce msg %d", inmsg.Cid(), inmsg.From, inmsg.Nonce, msg.Nonce) } - return true, inmsg.Equals(msg), nil + return inmsg.Equals(msg), nil } } diff --git a/chain/exchange/cbor_gen.go b/chain/exchange/cbor_gen.go index 29b2580813a..da5f7cbe25b 100644 --- a/chain/exchange/cbor_gen.go +++ b/chain/exchange/cbor_gen.go @@ -5,6 +5,7 @@ package exchange import ( "fmt" "io" + "sort" types "github.com/filecoin-project/lotus/chain/types" cid "github.com/ipfs/go-cid" @@ -13,6 +14,8 @@ import ( ) var _ = xerrors.Errorf +var _ = cid.Undef +var _ = sort.Sort var lengthBufRequest = []byte{131} diff --git a/chain/exchange/client.go b/chain/exchange/client.go index cb030bcf785..fa9ed2974cd 100644 --- a/chain/exchange/client.go +++ b/chain/exchange/client.go @@ -7,7 +7,6 @@ import ( "math/rand" "time" - "github.com/libp2p/go-libp2p-core/helpers" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" @@ -412,11 +411,7 @@ func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *Reque return nil, xerrors.Errorf("failed to open stream to peer: %w", err) } - defer func() { - // Note: this will become just stream.Close once we've completed the go-libp2p migration to - // go-libp2p-core 0.7.0 - go helpers.FullClose(stream) //nolint:errcheck - }() + defer stream.Close() //nolint:errcheck // Write request. 
_ = stream.SetWriteDeadline(time.Now().Add(WriteReqDeadline)) diff --git a/chain/exchange/peer_tracker.go b/chain/exchange/peer_tracker.go index 902baadcee0..835a5b8a479 100644 --- a/chain/exchange/peer_tracker.go +++ b/chain/exchange/peer_tracker.go @@ -38,20 +38,26 @@ func newPeerTracker(lc fx.Lifecycle, h host.Host, pmgr *peermgr.PeerMgr) *bsPeer pmgr: pmgr, } - sub, err := h.EventBus().Subscribe(new(peermgr.NewFilPeer)) + evtSub, err := h.EventBus().Subscribe(new(peermgr.FilPeerEvt)) if err != nil { panic(err) } go func() { - for newPeer := range sub.Out() { - bsPt.addPeer(newPeer.(peermgr.NewFilPeer).Id) + for evt := range evtSub.Out() { + pEvt := evt.(peermgr.FilPeerEvt) + switch pEvt.Type { + case peermgr.AddFilPeerEvt: + bsPt.addPeer(pEvt.ID) + case peermgr.RemoveFilPeerEvt: + bsPt.removePeer(pEvt.ID) + } } }() lc.Append(fx.Hook{ OnStop: func(ctx context.Context) error { - return sub.Close() + return evtSub.Close() }, }) diff --git a/chain/exchange/protocol.go b/chain/exchange/protocol.go index 2114793359f..d0977e54c79 100644 --- a/chain/exchange/protocol.go +++ b/chain/exchange/protocol.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/types" diff --git a/chain/exchange/server.go b/chain/exchange/server.go index dcdb5b3a5bf..7c1624e579a 100644 --- a/chain/exchange/server.go +++ b/chain/exchange/server.go @@ -15,7 +15,6 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/helpers" inet "github.com/libp2p/go-libp2p-core/network" ) @@ -40,16 +39,14 @@ func (s *server) HandleStream(stream inet.Stream) { ctx, span := trace.StartSpan(context.Background(), "chainxchg.HandleStream") defer span.End() - // Note: this will become just stream.Close once we've completed the go-libp2p migration to - // go-libp2p-core 
0.7.0 - defer helpers.FullClose(stream) //nolint:errcheck + defer stream.Close() //nolint:errcheck var req Request if err := cborutil.ReadCborRPC(bufio.NewReader(stream), &req); err != nil { log.Warnf("failed to read block sync request: %s", err) return } - log.Infow("block sync request", + log.Debugw("block sync request", "start", req.Head, "len", req.Length) resp, err := s.processRequest(ctx, &req) @@ -59,7 +56,11 @@ func (s *server) HandleStream(stream inet.Stream) { } _ = stream.SetDeadline(time.Now().Add(WriteResDeadline)) - if err := cborutil.WriteCborRPC(stream, resp); err != nil { + buffered := bufio.NewWriter(stream) + if err = cborutil.WriteCborRPC(buffered, resp); err == nil { + err = buffered.Flush() + } + if err != nil { _ = stream.SetDeadline(time.Time{}) log.Warnw("failed to write back response for handle stream", "err", err, "peer", stream.Conn().RemotePeer()) diff --git a/chain/gen/gen.go b/chain/gen/gen.go index d56f285a019..424ee6edcb6 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -4,16 +4,18 @@ import ( "bytes" "context" "fmt" + "io" "io/ioutil" "sync/atomic" "time" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" "github.com/google/uuid" - block "github.com/ipfs/go-block-format" "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" offline "github.com/ipfs/go-ipfs-exchange-offline" @@ -24,9 +26,10 @@ import ( "go.opencensus.io/trace" "golang.org/x/xerrors" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/policy" 
"github.com/filecoin-project/lotus/chain/beacon" @@ -40,7 +43,6 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/genesis" "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/node/repo" ) @@ -50,7 +52,7 @@ const msgsPerBlock = 20 //nolint:deadcode,varcheck var log = logging.Logger("gen") -var ValidWpostForTesting = []proof2.PoStProof{{ +var ValidWpostForTesting = []proof5.PoStProof{{ ProofBytes: []byte("valid proof"), }} @@ -74,9 +76,10 @@ type ChainGen struct { w *wallet.LocalWallet - eppProvs map[address.Address]WinningPoStProver - Miners []address.Address - receivers []address.Address + eppProvs map[address.Address]WinningPoStProver + Miners []address.Address + receivers []address.Address + // a SecP address banker address.Address bankerNonce uint64 @@ -84,19 +87,6 @@ type ChainGen struct { lr repo.LockedRepo } -type mybs struct { - blockstore.Blockstore -} - -func (m mybs) Get(c cid.Cid) (block.Block, error) { - b, err := m.Blockstore.Get(c) - if err != nil { - return nil, err - } - - return b, nil -} - var rootkeyMultisig = genesis.MultisigMeta{ Signers: []address.Address{remAccTestKey}, Threshold: 1, @@ -122,7 +112,7 @@ var DefaultRemainderAccountActor = genesis.Actor{ Meta: remAccMeta.ActorMeta(), } -func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { +func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeSchedule) (*ChainGen, error) { j := journal.NilJournal() // TODO: we really shouldn't modify a global variable here. 
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) @@ -133,17 +123,23 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { return nil, xerrors.Errorf("taking mem-repo lock failed: %w", err) } - ds, err := lr.Datastore("/metadata") + ds, err := lr.Datastore(context.TODO(), "/metadata") if err != nil { return nil, xerrors.Errorf("failed to get metadata datastore: %w", err) } - bds, err := lr.Datastore("/chain") + bs, err := lr.Blockstore(context.TODO(), repo.UniversalBlockstore) if err != nil { - return nil, xerrors.Errorf("failed to get blocks datastore: %w", err) + return nil, err } - bs := mybs{blockstore.NewBlockstore(bds)} + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() ks, err := lr.KeyStore() if err != nil { @@ -204,6 +200,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { sys := vm.Syscalls(&genFakeVerifier{}) tpl := genesis.Template{ + NetworkVersion: network.Version0, Accounts: []genesis.Actor{ { Type: genesis.TAccount, @@ -236,7 +233,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { return nil, xerrors.Errorf("make genesis block failed: %w", err) } - cs := store.NewChainStore(bs, ds, sys, j) + cs := store.NewChainStore(bs, bs, ds, sys, j) genfb := &types.FullBlock{Header: genb.Genesis} gents := store.NewFullTipSet([]*types.FullBlock{genfb}) @@ -250,7 +247,10 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { mgen[genesis2.MinerAddress(uint64(i))] = &wppProvider{} } - sm := stmgr.NewStateManager(cs) + sm, err := stmgr.NewStateManagerWithUpgradeSchedule(cs, us) + if err != nil { + return nil, xerrors.Errorf("initing stmgr: %w", err) + } miners := []address.Address{maddr1, maddr2} @@ -288,6 +288,14 @@ func NewGenerator() (*ChainGen, error) { return NewGeneratorWithSectors(1) } +func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { + return 
NewGeneratorWithSectorsAndUpgradeSchedule(numSectors, stmgr.DefaultUpgradeSchedule()) +} + +func NewGeneratorWithUpgradeSchedule(us stmgr.UpgradeSchedule) (*ChainGen, error) { + return NewGeneratorWithSectorsAndUpgradeSchedule(1, us) +} + func (cg *ChainGen) StateManager() *stmgr.StateManager { return cg.sm } @@ -338,14 +346,8 @@ func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m add return nil, nil, nil, xerrors.Errorf("get miner base info: %w", err) } - prev := mbi.PrevBeaconEntry - - entries, err := beacon.BeaconEntriesForBlock(ctx, cg.beacon, round, pts.Height(), prev) - if err != nil { - return nil, nil, nil, xerrors.Errorf("get beacon entries for block: %w", err) - } - - rbase := prev + entries := mbi.BeaconEntries + rbase := mbi.PrevBeaconEntry if len(entries) > 0 { rbase = entries[len(entries)-1] } @@ -396,7 +398,7 @@ type MinedTipSet struct { } func (cg *ChainGen) NextTipSet() (*MinedTipSet, error) { - mts, err := cg.NextTipSetFromMiners(cg.CurTipset.TipSet(), cg.Miners) + mts, err := cg.NextTipSetFromMiners(cg.CurTipset.TipSet(), cg.Miners, 0) if err != nil { return nil, err } @@ -409,7 +411,7 @@ func (cg *ChainGen) SetWinningPoStProver(m address.Address, wpp WinningPoStProve cg.eppProvs[m] = wpp } -func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address) (*MinedTipSet, error) { +func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address, nulls abi.ChainEpoch) (*MinedTipSet, error) { ms, err := cg.GetMessages(cg) if err != nil { return nil, xerrors.Errorf("get random messages: %w", err) @@ -420,21 +422,23 @@ func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Ad msgs[i] = ms } - fts, err := cg.NextTipSetFromMinersWithMessages(base, miners, msgs) + fts, err := cg.NextTipSetFromMinersWithMessagesAndNulls(base, miners, msgs, nulls) if err != nil { return nil, err } + cg.CurTipset = fts + return &MinedTipSet{ TipSet: fts, Messages: ms, }, nil } -func 
(cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage) (*store.FullTipSet, error) { +func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch) (*store.FullTipSet, error) { var blks []*types.FullBlock - for round := base.Height() + 1; len(blks) == 0; round++ { + for round := base.Height() + nulls + 1; len(blks) == 0; round++ { for mi, m := range miners { bvals, et, ticket, err := cg.nextBlockProof(context.TODO(), base, m, round) if err != nil { @@ -462,12 +466,19 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners } } - return store.NewFullTipSet(blks), nil + fts := store.NewFullTipSet(blks) + if err := cg.cs.PutTipSet(context.TODO(), fts.TipSet()); err != nil { + return nil, err + } + + cg.CurTipset = fts + + return fts, nil } func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket, eticket *types.ElectionProof, bvals []types.BeaconEntry, height abi.ChainEpoch, - wpost []proof2.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) { + wpost []proof5.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) { var ts uint64 if cg.Timestamper != nil { @@ -581,7 +592,11 @@ func (mca mca) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipS return nil, xerrors.Errorf("loading tipset key: %w", err) } - return mca.sm.ChainStore().GetChainRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy) + if randEpoch > build.UpgradeHyperdriveHeight { + return mca.sm.ChainStore().GetChainRandomnessLookingForward(ctx, pts.Cids(), personalization, randEpoch, entropy) + } + + return mca.sm.ChainStore().GetChainRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy) } func (mca mca) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization 
crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { @@ -590,7 +605,11 @@ func (mca mca) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSe return nil, xerrors.Errorf("loading tipset key: %w", err) } - return mca.sm.ChainStore().GetBeaconRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy) + if randEpoch > build.UpgradeHyperdriveHeight { + return mca.sm.ChainStore().GetBeaconRandomnessLookingForward(ctx, pts.Cids(), personalization, randEpoch, entropy) + } + + return mca.sm.ChainStore().GetBeaconRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy) } func (mca mca) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) { @@ -605,7 +624,7 @@ func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*cr type WinningPoStProver interface { GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error) - ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) + ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) } type wppProvider struct{} @@ -614,7 +633,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom return []uint64{0}, nil } -func (wpp *wppProvider) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) { +func (wpp *wppProvider) ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) { return ValidWpostForTesting, nil } @@ -681,15 +700,19 @@ type genFakeVerifier struct{} var _ ffiwrapper.Verifier = (*genFakeVerifier)(nil) -func (m genFakeVerifier) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { return true, nil } -func (m genFakeVerifier) VerifyWinningPoSt(ctx 
context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + panic("not supported") +} + +func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { panic("not supported") } -func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { panic("not supported") } diff --git a/chain/gen/genesis/f00_system.go b/chain/gen/genesis/f00_system.go new file mode 100644 index 00000000000..4fde2710745 --- /dev/null +++ b/chain/gen/genesis/f00_system.go @@ -0,0 +1,42 @@ +package genesis + +import ( + "context" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/system" + + cbor "github.com/ipfs/go-ipld-cbor" + + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/types" +) + +func SetupSystemActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) { + + cst := cbor.NewCborStore(bs) + st, err := system.MakeState(adt.WrapStore(ctx, cst), av) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, st.GetState()) + if err != nil { + return nil, err + } + + actcid, err := system.GetActorCodeID(av) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil +} diff --git a/chain/gen/genesis/t01_init.go b/chain/gen/genesis/f01_init.go similarity index 61% rename from chain/gen/genesis/t01_init.go rename to chain/gen/genesis/f01_init.go index 667079a6db6..61ec917036a 100644 --- 
a/chain/gen/genesis/t01_init.go +++ b/chain/gen/genesis/f01_init.go @@ -5,33 +5,44 @@ import ( "encoding/json" "fmt" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/util/adt" - init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" cbor "github.com/ipfs/go-ipld-cbor" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/genesis" - bstore "github.com/filecoin-project/lotus/lib/blockstore" ) -func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesis.Actor, rootVerifier genesis.Actor) (int64, *types.Actor, map[address.Address]address.Address, error) { +func SetupInitActor(ctx context.Context, bs bstore.Blockstore, netname string, initialActors []genesis.Actor, rootVerifier genesis.Actor, remainder genesis.Actor, av actors.Version) (int64, *types.Actor, map[address.Address]address.Address, error) { if len(initialActors) > MaxAccounts { return 0, nil, nil, xerrors.New("too many initial actors") } - var ias init_.State - ias.NextID = MinerStart - ias.NetworkName = netname + cst := cbor.NewCborStore(bs) + ist, err := init_.MakeState(adt.WrapStore(ctx, cst), av, netname) + if err != nil { + return 0, nil, nil, err + } + + if err = ist.SetNextID(MinerStart); err != nil { + return 0, nil, nil, err + } - store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs)) - amap := adt.MakeEmptyMap(store) + amap, err := ist.AddressMap() + if err != nil { + return 0, nil, nil, err + } keyToId := map[address.Address]address.Address{} counter := int64(AccountStart) @@ -90,19 +101,10 
@@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi } } - if rootVerifier.Type == genesis.TAccount { - var ainfo genesis.AccountMeta - if err := json.Unmarshal(rootVerifier.Meta, &ainfo); err != nil { - return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) - } - value := cbg.CborInt(80) - if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil { - return 0, nil, nil, err - } - } else if rootVerifier.Type == genesis.TMultisig { + setupMsig := func(meta json.RawMessage) error { var ainfo genesis.MultisigMeta - if err := json.Unmarshal(rootVerifier.Meta, &ainfo); err != nil { - return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) + if err := json.Unmarshal(meta, &ainfo); err != nil { + return xerrors.Errorf("unmarshaling account meta: %w", err) } for _, e := range ainfo.Signers { if _, ok := keyToId[e]; ok { @@ -112,32 +114,77 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi value := cbg.CborInt(counter) if err := amap.Put(abi.AddrKey(e), &value); err != nil { - return 0, nil, nil, err + return err } counter = counter + 1 var err error keyToId[e], err = address.NewIDAddress(uint64(value)) if err != nil { - return 0, nil, nil, err + return err } } + + return nil + } + + if rootVerifier.Type == genesis.TAccount { + var ainfo genesis.AccountMeta + if err := json.Unmarshal(rootVerifier.Meta, &ainfo); err != nil { + return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) + } + value := cbg.CborInt(80) + if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil { + return 0, nil, nil, err + } + } else if rootVerifier.Type == genesis.TMultisig { + err := setupMsig(rootVerifier.Meta) + if err != nil { + return 0, nil, nil, xerrors.Errorf("setting up root verifier msig: %w", err) + } + } + + if remainder.Type == genesis.TAccount { + var ainfo genesis.AccountMeta + if err := json.Unmarshal(remainder.Meta, &ainfo); err != nil { + return 0, 
nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) + } + + // TODO: Use builtin.ReserveAddress... + value := cbg.CborInt(90) + if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil { + return 0, nil, nil, err + } + } else if remainder.Type == genesis.TMultisig { + err := setupMsig(remainder.Meta) + if err != nil { + return 0, nil, nil, xerrors.Errorf("setting up remainder msig: %w", err) + } } amapaddr, err := amap.Root() if err != nil { return 0, nil, nil, err } - ias.AddressMap = amapaddr - statecid, err := store.Put(store.Context(), &ias) + if err = ist.SetAddressMap(amapaddr); err != nil { + return 0, nil, nil, err + } + + statecid, err := cst.Put(ctx, ist.GetState()) + if err != nil { + return 0, nil, nil, err + } + + actcid, err := init_.GetActorCodeID(av) if err != nil { return 0, nil, nil, err } act := &types.Actor{ - Code: builtin.InitActorCodeID, - Head: statecid, + Code: actcid, + Head: statecid, + Balance: big.Zero(), } return counter, act, keyToId, nil diff --git a/chain/gen/genesis/f02_reward.go b/chain/gen/genesis/f02_reward.go new file mode 100644 index 00000000000..c8f479722f1 --- /dev/null +++ b/chain/gen/genesis/f02_reward.go @@ -0,0 +1,43 @@ +package genesis + +import ( + "context" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + + "github.com/filecoin-project/go-state-types/big" + + cbor "github.com/ipfs/go-ipld-cbor" + + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" +) + +func SetupRewardActor(ctx context.Context, bs bstore.Blockstore, qaPower big.Int, av actors.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + rst, err := reward.MakeState(adt.WrapStore(ctx, cst), av, qaPower) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, rst.GetState()) + if err != nil 
{ + return nil, err + } + + actcid, err := reward.GetActorCodeID(av) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actcid, + Balance: types.BigInt{Int: build.InitialRewardBalance}, + Head: statecid, + } + + return act, nil +} diff --git a/chain/gen/genesis/f03_cron.go b/chain/gen/genesis/f03_cron.go new file mode 100644 index 00000000000..c9dd0d34117 --- /dev/null +++ b/chain/gen/genesis/f03_cron.go @@ -0,0 +1,41 @@ +package genesis + +import ( + "context" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/cron" + + cbor "github.com/ipfs/go-ipld-cbor" + + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/types" +) + +func SetupCronActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + st, err := cron.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, st.GetState()) + if err != nil { + return nil, err + } + + actcid, err := cron.GetActorCodeID(av) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil +} diff --git a/chain/gen/genesis/f04_power.go b/chain/gen/genesis/f04_power.go new file mode 100644 index 00000000000..b5e08cebe5a --- /dev/null +++ b/chain/gen/genesis/f04_power.go @@ -0,0 +1,43 @@ +package genesis + +import ( + "context" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/specs-actors/actors/util/adt" + + cbor "github.com/ipfs/go-ipld-cbor" + + bstore "github.com/filecoin-project/lotus/blockstore" + 
"github.com/filecoin-project/lotus/chain/types" +) + +func SetupStoragePowerActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) { + + cst := cbor.NewCborStore(bs) + pst, err := power.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, pst.GetState()) + if err != nil { + return nil, err + } + + actcid, err := power.GetActorCodeID(av) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil +} diff --git a/chain/gen/genesis/f05_market.go b/chain/gen/genesis/f05_market.go new file mode 100644 index 00000000000..ac32294c9f9 --- /dev/null +++ b/chain/gen/genesis/f05_market.go @@ -0,0 +1,41 @@ +package genesis + +import ( + "context" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + + cbor "github.com/ipfs/go-ipld-cbor" + + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/types" +) + +func SetupStorageMarketActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + mst, err := market.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, mst.GetState()) + if err != nil { + return nil, err + } + + actcid, err := market.GetActorCodeID(av) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil +} diff --git a/chain/gen/genesis/f06_vreg.go b/chain/gen/genesis/f06_vreg.go new file mode 100644 index 00000000000..e61c951f50c --- /dev/null +++ b/chain/gen/genesis/f06_vreg.go @@ -0,0 +1,56 @@ +package genesis + +import ( + 
"context" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + + "github.com/filecoin-project/lotus/chain/actors" + + "github.com/filecoin-project/go-address" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/specs-actors/actors/util/adt" + + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/types" +) + +var RootVerifierID address.Address + +func init() { + + idk, err := address.NewFromString("t080") + if err != nil { + panic(err) + } + + RootVerifierID = idk +} + +func SetupVerifiedRegistryActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + vst, err := verifreg.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av, RootVerifierID) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, vst.GetState()) + if err != nil { + return nil, err + } + + actcid, err := verifreg.GetActorCodeID(av) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil +} diff --git a/chain/gen/genesis/genesis.go b/chain/gen/genesis/genesis.go index 6a1090784d2..6dec3fea6d4 100644 --- a/chain/gen/genesis/genesis.go +++ b/chain/gen/genesis/genesis.go @@ -6,7 +6,34 @@ import ( "encoding/json" "fmt" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/account" + + "github.com/filecoin-project/lotus/chain/actors" + + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + + 
"github.com/filecoin-project/lotus/chain/actors/builtin/market" + + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + + "github.com/filecoin-project/lotus/chain/actors/builtin/cron" + + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + + "github.com/filecoin-project/lotus/chain/actors/builtin/system" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/journal" "github.com/ipfs/go-cid" @@ -20,19 +47,14 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - account0 "github.com/filecoin-project/specs-actors/actors/builtin/account" - multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" - verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/genesis" - bstore "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/sigs" ) @@ -117,94 +139,92 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge return nil, nil, xerrors.Errorf("putting empty object: %w", err) } - state, err := state.NewStateTree(cst, types.StateTreeVersion0) + sv, err := state.VersionForNetwork(template.NetworkVersion) + if err != nil { + return nil, nil, xerrors.Errorf("getting state tree version: %w", err) + } + + state, err := state.NewStateTree(cst, sv) if err != nil { return nil, nil, 
xerrors.Errorf("making new state tree: %w", err) } + av := actors.VersionForNetwork(template.NetworkVersion) + // Create system actor - sysact, err := SetupSystemActor(bs) + sysact, err := SetupSystemActor(ctx, bs, av) if err != nil { - return nil, nil, xerrors.Errorf("setup init actor: %w", err) + return nil, nil, xerrors.Errorf("setup system actor: %w", err) } - if err := state.SetActor(builtin0.SystemActorAddr, sysact); err != nil { - return nil, nil, xerrors.Errorf("set init actor: %w", err) + if err := state.SetActor(system.Address, sysact); err != nil { + return nil, nil, xerrors.Errorf("set system actor: %w", err) } // Create init actor - idStart, initact, keyIDs, err := SetupInitActor(bs, template.NetworkName, template.Accounts, template.VerifregRootKey) + idStart, initact, keyIDs, err := SetupInitActor(ctx, bs, template.NetworkName, template.Accounts, template.VerifregRootKey, template.RemainderAccount, av) if err != nil { return nil, nil, xerrors.Errorf("setup init actor: %w", err) } - if err := state.SetActor(builtin0.InitActorAddr, initact); err != nil { + if err := state.SetActor(init_.Address, initact); err != nil { return nil, nil, xerrors.Errorf("set init actor: %w", err) } // Setup reward - // RewardActor's state is overrwritten by SetupStorageMiners - rewact, err := SetupRewardActor(bs, big.Zero()) + // RewardActor's state is overwritten by SetupStorageMiners, but needs to exist for miner creation messages + rewact, err := SetupRewardActor(ctx, bs, big.Zero(), av) if err != nil { - return nil, nil, xerrors.Errorf("setup init actor: %w", err) + return nil, nil, xerrors.Errorf("setup reward actor: %w", err) } - err = state.SetActor(builtin0.RewardActorAddr, rewact) + err = state.SetActor(reward.Address, rewact) if err != nil { - return nil, nil, xerrors.Errorf("set network account actor: %w", err) + return nil, nil, xerrors.Errorf("set reward actor: %w", err) } // Setup cron - cronact, err := SetupCronActor(bs) + cronact, err := SetupCronActor(ctx, 
bs, av) if err != nil { return nil, nil, xerrors.Errorf("setup cron actor: %w", err) } - if err := state.SetActor(builtin0.CronActorAddr, cronact); err != nil { + if err := state.SetActor(cron.Address, cronact); err != nil { return nil, nil, xerrors.Errorf("set cron actor: %w", err) } // Create empty power actor - spact, err := SetupStoragePowerActor(bs) + spact, err := SetupStoragePowerActor(ctx, bs, av) if err != nil { - return nil, nil, xerrors.Errorf("setup storage market actor: %w", err) + return nil, nil, xerrors.Errorf("setup storage power actor: %w", err) } - if err := state.SetActor(builtin0.StoragePowerActorAddr, spact); err != nil { - return nil, nil, xerrors.Errorf("set storage market actor: %w", err) + if err := state.SetActor(power.Address, spact); err != nil { + return nil, nil, xerrors.Errorf("set storage power actor: %w", err) } // Create empty market actor - marketact, err := SetupStorageMarketActor(bs) + marketact, err := SetupStorageMarketActor(ctx, bs, av) if err != nil { return nil, nil, xerrors.Errorf("setup storage market actor: %w", err) } - if err := state.SetActor(builtin0.StorageMarketActorAddr, marketact); err != nil { - return nil, nil, xerrors.Errorf("set market actor: %w", err) + if err := state.SetActor(market.Address, marketact); err != nil { + return nil, nil, xerrors.Errorf("set storage market actor: %w", err) } // Create verified registry - verifact, err := SetupVerifiedRegistryActor(bs) + verifact, err := SetupVerifiedRegistryActor(ctx, bs, av) if err != nil { - return nil, nil, xerrors.Errorf("setup storage market actor: %w", err) + return nil, nil, xerrors.Errorf("setup verified registry market actor: %w", err) } - if err := state.SetActor(builtin0.VerifiedRegistryActorAddr, verifact); err != nil { - return nil, nil, xerrors.Errorf("set market actor: %w", err) + if err := state.SetActor(verifreg.Address, verifact); err != nil { + return nil, nil, xerrors.Errorf("set verified registry actor: %w", err) } - burntRoot, err := 
cst.Put(ctx, &account0.State{ - Address: builtin0.BurntFundsActorAddr, - }) + bact, err := makeAccountActor(ctx, cst, av, builtin.BurntFundsActorAddr, big.Zero()) if err != nil { - return nil, nil, xerrors.Errorf("failed to setup burnt funds actor state: %w", err) + return nil, nil, xerrors.Errorf("setup burnt funds actor state: %w", err) } - - // Setup burnt-funds - err = state.SetActor(builtin0.BurntFundsActorAddr, &types.Actor{ - Code: builtin0.AccountActorCodeID, - Balance: types.NewInt(0), - Head: burntRoot, - }) - if err != nil { - return nil, nil, xerrors.Errorf("set burnt funds account actor: %w", err) + if err := state.SetActor(builtin.BurntFundsActorAddr, bact); err != nil { + return nil, nil, xerrors.Errorf("set burnt funds actor: %w", err) } // Create accounts @@ -212,7 +232,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge switch info.Type { case genesis.TAccount: - if err := createAccountActor(ctx, cst, state, info, keyIDs); err != nil { + if err := createAccountActor(ctx, cst, state, info, keyIDs, av); err != nil { return nil, nil, xerrors.Errorf("failed to create account actor: %w", err) } @@ -224,7 +244,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge } idStart++ - if err := createMultisigAccount(ctx, bs, cst, state, ida, info, keyIDs); err != nil { + if err := createMultisigAccount(ctx, cst, state, ida, info, keyIDs, av); err != nil { return nil, nil, err } default: @@ -233,13 +253,31 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge } - vregroot, err := address.NewIDAddress(80) - if err != nil { - return nil, nil, err - } + switch template.VerifregRootKey.Type { + case genesis.TAccount: + var ainfo genesis.AccountMeta + if err := json.Unmarshal(template.VerifregRootKey.Meta, &ainfo); err != nil { + return nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) + } - if err = createMultisigAccount(ctx, bs, cst, state, vregroot, 
template.VerifregRootKey, keyIDs); err != nil { - return nil, nil, xerrors.Errorf("failed to set up verified registry signer: %w", err) + _, ok := keyIDs[ainfo.Owner] + if ok { + return nil, nil, fmt.Errorf("rootkey account has already been declared, cannot be assigned 80: %s", ainfo.Owner) + } + + vact, err := makeAccountActor(ctx, cst, av, ainfo.Owner, template.VerifregRootKey.Balance) + if err != nil { + return nil, nil, xerrors.Errorf("setup verifreg rootkey account state: %w", err) + } + if err = state.SetActor(builtin.RootVerifierAddress, vact); err != nil { + return nil, nil, xerrors.Errorf("set verifreg rootkey account actor: %w", err) + } + case genesis.TMultisig: + if err = createMultisigAccount(ctx, cst, state, builtin.RootVerifierAddress, template.VerifregRootKey, keyIDs, av); err != nil { + return nil, nil, xerrors.Errorf("failed to set up verified registry signer: %w", err) + } + default: + return nil, nil, xerrors.Errorf("unknown account type for verifreg rootkey: %w", err) } // Setup the first verifier as ID-address 81 @@ -264,27 +302,21 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge return nil, nil, err } - verifierState, err := cst.Put(ctx, &account0.State{Address: verifierAd}) + verifierAct, err := makeAccountActor(ctx, cst, av, verifierAd, big.Zero()) if err != nil { - return nil, nil, err + return nil, nil, xerrors.Errorf("setup first verifier state: %w", err) } - err = state.SetActor(verifierId, &types.Actor{ - Code: builtin0.AccountActorCodeID, - Balance: types.NewInt(0), - Head: verifierState, - }) - if err != nil { - return nil, nil, xerrors.Errorf("setting account from actmap: %w", err) + if err = state.SetActor(verifierId, verifierAct); err != nil { + return nil, nil, xerrors.Errorf("set first verifier actor: %w", err) } totalFilAllocated := big.Zero() - // flush as ForEach works on the HAMT - if _, err := state.Flush(ctx); err != nil { - return nil, nil, err - } err = state.ForEach(func(addr 
address.Address, act *types.Actor) error { + if act.Balance.Nil() { + panic(fmt.Sprintf("actor %s (%s) has nil balance", addr, builtin.ActorNameByCode(act.Code))) + } totalFilAllocated = big.Add(totalFilAllocated, act.Balance) return nil }) @@ -300,19 +332,67 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge template.RemainderAccount.Balance = remainingFil - if err := createMultisigAccount(ctx, bs, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs); err != nil { - return nil, nil, xerrors.Errorf("failed to set up remainder account: %w", err) + switch template.RemainderAccount.Type { + case genesis.TAccount: + var ainfo genesis.AccountMeta + if err := json.Unmarshal(template.RemainderAccount.Meta, &ainfo); err != nil { + return nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) + } + + _, ok := keyIDs[ainfo.Owner] + if ok { + return nil, nil, fmt.Errorf("remainder account has already been declared, cannot be assigned 90: %s", ainfo.Owner) + } + + keyIDs[ainfo.Owner] = builtin.ReserveAddress + err = createAccountActor(ctx, cst, state, template.RemainderAccount, keyIDs, av) + if err != nil { + return nil, nil, xerrors.Errorf("creating remainder acct: %w", err) + } + + case genesis.TMultisig: + if err = createMultisigAccount(ctx, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs, av); err != nil { + return nil, nil, xerrors.Errorf("failed to set up remainder: %w", err) + } + default: + return nil, nil, xerrors.Errorf("unknown account type for remainder: %w", err) } return state, keyIDs, nil } -func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, info genesis.Actor, keyIDs map[address.Address]address.Address) error { +func makeAccountActor(ctx context.Context, cst cbor.IpldStore, av actors.Version, addr address.Address, bal types.BigInt) (*types.Actor, error) { + ast, err := account.MakeState(adt.WrapStore(ctx, cst), av, addr) + if err != nil { + 
return nil, err + } + + statecid, err := cst.Put(ctx, ast.GetState()) + if err != nil { + return nil, err + } + + actcid, err := account.GetActorCodeID(av) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: bal, + } + + return act, nil +} + +func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, info genesis.Actor, keyIDs map[address.Address]address.Address, av actors.Version) error { var ainfo genesis.AccountMeta if err := json.Unmarshal(info.Meta, &ainfo); err != nil { return xerrors.Errorf("unmarshaling account meta: %w", err) } - st, err := cst.Put(ctx, &account0.State{Address: ainfo.Owner}) + + aa, err := makeAccountActor(ctx, cst, av, ainfo.Owner, info.Balance) if err != nil { return err } @@ -322,18 +402,14 @@ func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.St return fmt.Errorf("no registered ID for account actor: %s", ainfo.Owner) } - err = state.SetActor(ida, &types.Actor{ - Code: builtin0.AccountActorCodeID, - Balance: info.Balance, - Head: st, - }) + err = state.SetActor(ida, aa) if err != nil { return xerrors.Errorf("setting account from actmap: %w", err) } return nil } -func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.IpldStore, state *state.StateTree, ida address.Address, info genesis.Actor, keyIDs map[address.Address]address.Address) error { +func createMultisigAccount(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, ida address.Address, info genesis.Actor, keyIDs map[address.Address]address.Address, av actors.Version) error { if info.Type != genesis.TMultisig { return fmt.Errorf("can only call createMultisigAccount with multisig Actor info") } @@ -341,10 +417,6 @@ func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.I if err := json.Unmarshal(info.Meta, &ainfo); err != nil { return xerrors.Errorf("unmarshaling account meta: %w", err) } - pending, err := 
adt0.MakeEmptyMap(adt0.WrapStore(ctx, cst)).Root() - if err != nil { - return xerrors.Errorf("failed to create empty map: %v", err) - } var signers []address.Address @@ -361,44 +433,45 @@ func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.I continue } - st, err := cst.Put(ctx, &account0.State{Address: e}) + aa, err := makeAccountActor(ctx, cst, av, e, big.Zero()) if err != nil { return err } - err = state.SetActor(idAddress, &types.Actor{ - Code: builtin0.AccountActorCodeID, - Balance: types.NewInt(0), - Head: st, - }) - if err != nil { + + if err = state.SetActor(idAddress, aa); err != nil { return xerrors.Errorf("setting account from actmap: %w", err) } signers = append(signers, idAddress) } - st, err := cst.Put(ctx, &multisig0.State{ - Signers: signers, - NumApprovalsThreshold: uint64(ainfo.Threshold), - StartEpoch: abi.ChainEpoch(ainfo.VestingStart), - UnlockDuration: abi.ChainEpoch(ainfo.VestingDuration), - PendingTxns: pending, - InitialBalance: info.Balance, - }) + mst, err := multisig.MakeState(adt.WrapStore(ctx, cst), av, signers, uint64(ainfo.Threshold), abi.ChainEpoch(ainfo.VestingStart), abi.ChainEpoch(ainfo.VestingDuration), info.Balance) if err != nil { return err } + + statecid, err := cst.Put(ctx, mst.GetState()) + if err != nil { + return err + } + + actcid, err := multisig.GetActorCodeID(av) + if err != nil { + return err + } + err = state.SetActor(ida, &types.Actor{ - Code: builtin0.MultisigActorCodeID, + Code: actcid, Balance: info.Balance, - Head: st, + Head: statecid, }) if err != nil { return xerrors.Errorf("setting account from actmap: %w", err) } + return nil } -func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot cid.Cid, template genesis.Template, keyIDs map[address.Address]address.Address) (cid.Cid, error) { +func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot cid.Cid, template genesis.Template, keyIDs map[address.Address]address.Address, nv network.Version) 
(cid.Cid, error) { verifNeeds := make(map[address.Address]abi.PaddedPieceSize) var sum abi.PaddedPieceSize @@ -406,11 +479,13 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci StateBase: stateroot, Epoch: 0, Rand: &fakeRand{}, - Bstore: cs.Blockstore(), + Bstore: cs.StateBlockstore(), Syscalls: mkFakedSigSyscalls(cs.VMSys()), CircSupplyCalc: nil, - NtwkVersion: genesisNetworkVersion, - BaseFee: types.NewInt(0), + NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version { + return nv + }, + BaseFee: types.NewInt(0), } vm, err := vm.NewVM(ctx, &vmopt) if err != nil { @@ -439,7 +514,8 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci return cid.Undef, err } - _, err = doExecValue(ctx, vm, builtin0.VerifiedRegistryActorAddr, verifregRoot, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifier, mustEnc(&verifreg0.AddVerifierParams{ + // Note: This is brittle, if the methodNum / param changes, it could break things + _, err = doExecValue(ctx, vm, verifreg.Address, verifregRoot, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifier, mustEnc(&verifreg0.AddVerifierParams{ Address: verifier, Allowance: abi.NewStoragePower(int64(sum)), // eh, close enough @@ -450,7 +526,8 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci } for c, amt := range verifNeeds { - _, err := doExecValue(ctx, vm, builtin0.VerifiedRegistryActorAddr, verifier, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifiedClient, mustEnc(&verifreg0.AddVerifiedClientParams{ + // Note: This is brittle, if the methodNum / param changes, it could break things + _, err := doExecValue(ctx, vm, verifreg.Address, verifier, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifiedClient, mustEnc(&verifreg0.AddVerifiedClientParams{ Address: c, Allowance: abi.NewStoragePower(int64(amt)), })) @@ -482,20 +559,20 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs 
bstore.Blocksto } // temp chainstore - cs := store.NewChainStore(bs, datastore.NewMapDatastore(), sys, j) + cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), sys, j) // Verify PreSealed Data - stateroot, err = VerifyPreSealedData(ctx, cs, stateroot, template, keyIDs) + stateroot, err = VerifyPreSealedData(ctx, cs, stateroot, template, keyIDs, template.NetworkVersion) if err != nil { return nil, xerrors.Errorf("failed to verify presealed data: %w", err) } - stateroot, err = SetupStorageMiners(ctx, cs, stateroot, template.Miners) + stateroot, err = SetupStorageMiners(ctx, cs, stateroot, template.Miners, template.NetworkVersion) if err != nil { return nil, xerrors.Errorf("setup miners failed: %w", err) } - store := adt0.WrapStore(ctx, cbor.NewCborStore(bs)) + store := adt.WrapStore(ctx, cbor.NewCborStore(bs)) emptyroot, err := adt0.MakeEmptyArray(store).Root() if err != nil { return nil, xerrors.Errorf("amt build failed: %w", err) @@ -544,7 +621,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto } b := &types.BlockHeader{ - Miner: builtin0.SystemActorAddr, + Miner: system.Address, Ticket: genesisticket, Parents: []cid.Cid{filecoinGenesisCid}, Height: 0, diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index be83a871134..e6f17d6779a 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -6,6 +6,22 @@ import ( "fmt" "math/rand" + power4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/power" + + reward4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/reward" + + market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market" + + "github.com/filecoin-project/lotus/chain/actors" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors/policy" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + "github.com/filecoin-project/go-state-types/network" + market0 
"github.com/filecoin-project/specs-actors/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/power" @@ -23,13 +39,11 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" - runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" @@ -48,7 +62,7 @@ func MinerAddress(genesisIndex uint64) address.Address { } type fakedSigSyscalls struct { - runtime2.Syscalls + runtime5.Syscalls } func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error { @@ -56,14 +70,19 @@ func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer } func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder { - return func(ctx context.Context, rt *vm.Runtime) runtime2.Syscalls { + return func(ctx context.Context, rt *vm.Runtime) runtime5.Syscalls { return &fakedSigSyscalls{ base(ctx, rt), } } } -func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid, miners []genesis.Miner) (cid.Cid, error) { +// Note: Much of this is brittle, if the methodNum / param / return changes, it will break things +func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid, miners []genesis.Miner, nv network.Version) (cid.Cid, error) { + + cst := cbor.NewCborStore(cs.StateBlockstore()) + av := actors.VersionForNetwork(nv) + csc 
:= func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) { return big.Zero(), nil } @@ -72,11 +91,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid StateBase: sroot, Epoch: 0, Rand: &fakeRand{}, - Bstore: cs.Blockstore(), + Bstore: cs.StateBlockstore(), Syscalls: mkFakedSigSyscalls(cs.VMSys()), CircSupplyCalc: csc, - NtwkVersion: genesisNetworkVersion, - BaseFee: types.NewInt(0), + NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version { + return nv + }, + BaseFee: types.NewInt(0), } vm, err := vm.NewVM(ctx, vmopt) @@ -96,12 +117,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid dealIDs []abi.DealID }, len(miners)) + maxPeriods := policy.GetMaxSectorExpirationExtension() / miner.WPoStProvingPeriod for i, m := range miners { // Create miner through power actor i := i m := m - spt, err := ffiwrapper.SealProofTypeFromSectorSize(m.SectorSize) + spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, nv) if err != nil { return cid.Undef, err } @@ -115,7 +137,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid } params := mustEnc(constructorParams) - rval, err := doExecValue(ctx, vm, power.Address, m.Owner, m.PowerBalance, builtin0.MethodsPower.CreateMiner, params) + rval, err := doExecValue(ctx, vm, power.Address, m.Owner, m.PowerBalance, power.Methods.CreateMiner, params) if err != nil { return cid.Undef, xerrors.Errorf("failed to create genesis miner: %w", err) } @@ -131,23 +153,34 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid } minerInfos[i].maddr = ma.IDAddress - // TODO: ActorUpgrade - err = vm.MutateState(ctx, minerInfos[i].maddr, func(cst cbor.IpldStore, st *miner0.State) error { - maxPeriods := miner0.MaxSectorExpirationExtension / miner0.WPoStProvingPeriod - minerInfos[i].presealExp = (maxPeriods-1)*miner0.WPoStProvingPeriod + st.ProvingPeriodStart - 1 + _, err = 
vm.Flush(ctx) + if err != nil { + return cid.Undef, xerrors.Errorf("flushing vm: %w", err) + } - return nil - }) + mact, err := vm.StateTree().GetActor(minerInfos[i].maddr) if err != nil { - return cid.Undef, xerrors.Errorf("mutating state: %w", err) + return cid.Undef, xerrors.Errorf("getting newly created miner actor: %w", err) } + + mst, err := miner.Load(adt.WrapStore(ctx, cst), mact) + if err != nil { + return cid.Undef, xerrors.Errorf("getting newly created miner state: %w", err) + } + + pps, err := mst.GetProvingPeriodStart() + if err != nil { + return cid.Undef, xerrors.Errorf("getting newly created miner proving period start: %w", err) + } + + minerInfos[i].presealExp = (maxPeriods-1)*miner0.WPoStProvingPeriod + pps - 1 } // Add market funds if m.MarketBalance.GreaterThan(big.Zero()) { params := mustEnc(&minerInfos[i].maddr) - _, err := doExecValue(ctx, vm, market.Address, m.Worker, m.MarketBalance, builtin0.MethodsMarket.AddBalance, params) + _, err := doExecValue(ctx, vm, market.Address, m.Worker, m.MarketBalance, market.Methods.AddBalance, params) if err != nil { return cid.Undef, xerrors.Errorf("failed to create genesis miner (add balance): %w", err) } @@ -205,35 +238,66 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid for pi := range m.Sectors { rawPow = types.BigAdd(rawPow, types.NewInt(uint64(m.SectorSize))) - dweight, err := dealWeight(ctx, vm, minerInfos[i].maddr, []abi.DealID{minerInfos[i].dealIDs[pi]}, 0, minerInfos[i].presealExp) + dweight, vdweight, err := dealWeight(ctx, vm, minerInfos[i].maddr, []abi.DealID{minerInfos[i].dealIDs[pi]}, 0, minerInfos[i].presealExp, av) if err != nil { return cid.Undef, xerrors.Errorf("getting deal weight: %w", err) } - sectorWeight := miner0.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight.DealWeight, dweight.VerifiedDealWeight) + sectorWeight := builtin.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight, vdweight) qaPow = types.BigAdd(qaPow, 
sectorWeight) } } - err = vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error { - st.TotalQualityAdjPower = qaPow - st.TotalRawBytePower = rawPow + _, err = vm.Flush(ctx) + if err != nil { + return cid.Undef, xerrors.Errorf("flushing vm: %w", err) + } + + pact, err := vm.StateTree().GetActor(power.Address) + if err != nil { + return cid.Undef, xerrors.Errorf("getting power actor: %w", err) + } + + pst, err := power.Load(adt.WrapStore(ctx, cst), pact) + if err != nil { + return cid.Undef, xerrors.Errorf("getting power state: %w", err) + } + + if err = pst.SetTotalQualityAdjPower(qaPow); err != nil { + return cid.Undef, xerrors.Errorf("setting TotalQualityAdjPower in power state: %w", err) + } + + if err = pst.SetTotalRawBytePower(rawPow); err != nil { + return cid.Undef, xerrors.Errorf("setting TotalRawBytePower in power state: %w", err) + } + + if err = pst.SetThisEpochQualityAdjPower(qaPow); err != nil { + return cid.Undef, xerrors.Errorf("setting ThisEpochQualityAdjPower in power state: %w", err) + } + + if err = pst.SetThisEpochRawBytePower(rawPow); err != nil { + return cid.Undef, xerrors.Errorf("setting ThisEpochRawBytePower in power state: %w", err) + } - st.ThisEpochQualityAdjPower = qaPow - st.ThisEpochRawBytePower = rawPow - return nil - }) + pcid, err := cst.Put(ctx, pst.GetState()) if err != nil { - return cid.Undef, xerrors.Errorf("mutating state: %w", err) + return cid.Undef, xerrors.Errorf("putting power state: %w", err) + } + + pact.Head = pcid + + if err = vm.StateTree().SetActor(power.Address, pact); err != nil { + return cid.Undef, xerrors.Errorf("setting power state: %w", err) } - err = vm.MutateState(ctx, reward.Address, func(sct cbor.IpldStore, st *reward0.State) error { - *st = *reward0.ConstructState(qaPow) - return nil - }) + rewact, err := SetupRewardActor(ctx, cs.StateBlockstore(), big.Zero(), actors.VersionForNetwork(nv)) if err != nil { - return cid.Undef, xerrors.Errorf("mutating state: %w", err) + return 
cid.Undef, xerrors.Errorf("setup reward actor: %w", err) + } + + if err = vm.StateTree().SetActor(reward.Address, rewact); err != nil { + return cid.Undef, xerrors.Errorf("set reward actor: %w", err) } } @@ -250,24 +314,55 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid Expiration: minerInfos[i].presealExp, // TODO: Allow setting externally! } - dweight, err := dealWeight(ctx, vm, minerInfos[i].maddr, params.DealIDs, 0, minerInfos[i].presealExp) + dweight, vdweight, err := dealWeight(ctx, vm, minerInfos[i].maddr, params.DealIDs, 0, minerInfos[i].presealExp, av) if err != nil { return cid.Undef, xerrors.Errorf("getting deal weight: %w", err) } - sectorWeight := miner0.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight.DealWeight, dweight.VerifiedDealWeight) + sectorWeight := builtin.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight, vdweight) // we've added fake power for this sector above, remove it now - err = vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error { - st.TotalQualityAdjPower = types.BigSub(st.TotalQualityAdjPower, sectorWeight) //nolint:scopelint - st.TotalRawBytePower = types.BigSub(st.TotalRawBytePower, types.NewInt(uint64(m.SectorSize))) - return nil - }) + + _, err = vm.Flush(ctx) + if err != nil { + return cid.Undef, xerrors.Errorf("flushing vm: %w", err) + } + + pact, err := vm.StateTree().GetActor(power.Address) if err != nil { - return cid.Undef, xerrors.Errorf("removing fake power: %w", err) + return cid.Undef, xerrors.Errorf("getting power actor: %w", err) + } + + pst, err := power.Load(adt.WrapStore(ctx, cst), pact) + if err != nil { + return cid.Undef, xerrors.Errorf("getting power state: %w", err) + } + + pc, err := pst.TotalPower() + if err != nil { + return cid.Undef, xerrors.Errorf("getting total power: %w", err) + } + + if err = pst.SetTotalRawBytePower(types.BigSub(pc.RawBytePower, types.NewInt(uint64(m.SectorSize)))); err != nil { + 
return cid.Undef, xerrors.Errorf("setting TotalRawBytePower in power state: %w", err) + } + + if err = pst.SetTotalQualityAdjPower(types.BigSub(pc.QualityAdjPower, sectorWeight)); err != nil { + return cid.Undef, xerrors.Errorf("setting TotalQualityAdjPower in power state: %w", err) + } + + pcid, err := cst.Put(ctx, pst.GetState()) + if err != nil { + return cid.Undef, xerrors.Errorf("putting power state: %w", err) + } + + pact.Head = pcid + + if err = vm.StateTree().SetActor(power.Address, pact); err != nil { + return cid.Undef, xerrors.Errorf("setting power state: %w", err) } - epochReward, err := currentEpochBlockReward(ctx, vm, minerInfos[i].maddr) + baselinePower, rewardSmoothed, err := currentEpochBlockReward(ctx, vm, minerInfos[i].maddr, av) if err != nil { return cid.Undef, xerrors.Errorf("getting current epoch reward: %w", err) } @@ -277,13 +372,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid return cid.Undef, xerrors.Errorf("getting current total power: %w", err) } - pcd := miner0.PreCommitDepositForPower(epochReward.ThisEpochRewardSmoothed, tpow.QualityAdjPowerSmoothed, sectorWeight) + pcd := miner0.PreCommitDepositForPower(&rewardSmoothed, tpow.QualityAdjPowerSmoothed, sectorWeight) pledge := miner0.InitialPledgeForPower( sectorWeight, - epochReward.ThisEpochBaselinePower, + baselinePower, tpow.PledgeCollateral, - epochReward.ThisEpochRewardSmoothed, + &rewardSmoothed, tpow.QualityAdjPowerSmoothed, circSupply(ctx, vm, minerInfos[i].maddr), ) @@ -291,7 +386,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid pledge = big.Add(pcd, pledge) fmt.Println(types.FIL(pledge)) - _, err = doExecValue(ctx, vm, minerInfos[i].maddr, m.Worker, pledge, builtin0.MethodsMiner.PreCommitSector, mustEnc(params)) + _, err = doExecValue(ctx, vm, minerInfos[i].maddr, m.Worker, pledge, miner.Methods.PreCommitSector, mustEnc(params)) if err != nil { return cid.Undef, xerrors.Errorf("failed to confirm 
presealed sectors: %w", err) } @@ -301,28 +396,84 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid Sectors: []abi.SectorNumber{preseal.SectorID}, } - _, err = doExecValue(ctx, vm, minerInfos[i].maddr, power.Address, big.Zero(), builtin0.MethodsMiner.ConfirmSectorProofsValid, mustEnc(confirmParams)) + _, err = doExecValue(ctx, vm, minerInfos[i].maddr, power.Address, big.Zero(), miner.Methods.ConfirmSectorProofsValid, mustEnc(confirmParams)) if err != nil { return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err) } + + if av > actors.Version2 { + // post v2, we need to explicitly Claim this power since ConfirmSectorProofsValid doesn't do it anymore + claimParams := &power4.UpdateClaimedPowerParams{ + RawByteDelta: types.NewInt(uint64(m.SectorSize)), + QualityAdjustedDelta: sectorWeight, + } + + _, err = doExecValue(ctx, vm, power.Address, minerInfos[i].maddr, big.Zero(), power.Methods.UpdateClaimedPower, mustEnc(claimParams)) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err) + } + + _, err = vm.Flush(ctx) + if err != nil { + return cid.Undef, xerrors.Errorf("flushing vm: %w", err) + } + + mact, err := vm.StateTree().GetActor(minerInfos[i].maddr) + if err != nil { + return cid.Undef, xerrors.Errorf("getting miner actor: %w", err) + } + + mst, err := miner.Load(adt.WrapStore(ctx, cst), mact) + if err != nil { + return cid.Undef, xerrors.Errorf("getting miner state: %w", err) + } + + if err = mst.EraseAllUnproven(); err != nil { + return cid.Undef, xerrors.Errorf("failed to erase unproven sectors: %w", err) + } + + mcid, err := cst.Put(ctx, mst.GetState()) + if err != nil { + return cid.Undef, xerrors.Errorf("putting miner state: %w", err) + } + + mact.Head = mcid + + if err = vm.StateTree().SetActor(minerInfos[i].maddr, mact); err != nil { + return cid.Undef, xerrors.Errorf("setting miner state: %w", err) + } + } } } } // Sanity-check total network power - err 
= vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error { - if !st.TotalRawBytePower.Equals(rawPow) { - return xerrors.Errorf("st.TotalRawBytePower doesn't match previously calculated rawPow") - } + _, err = vm.Flush(ctx) + if err != nil { + return cid.Undef, xerrors.Errorf("flushing vm: %w", err) + } - if !st.TotalQualityAdjPower.Equals(qaPow) { - return xerrors.Errorf("st.TotalQualityAdjPower doesn't match previously calculated qaPow") - } + pact, err := vm.StateTree().GetActor(power.Address) + if err != nil { + return cid.Undef, xerrors.Errorf("getting power actor: %w", err) + } - return nil - }) + pst, err := power.Load(adt.WrapStore(ctx, cst), pact) if err != nil { - return cid.Undef, xerrors.Errorf("mutating state: %w", err) + return cid.Undef, xerrors.Errorf("getting power state: %w", err) + } + + pc, err := pst.TotalPower() + if err != nil { + return cid.Undef, xerrors.Errorf("getting total power: %w", err) + } + + if !pc.RawBytePower.Equals(rawPow) { + return cid.Undef, xerrors.Errorf("TotalRawBytePower (%s) doesn't match previously calculated rawPow (%s)", pc.RawBytePower, rawPow) + } + + if !pc.QualityAdjPower.Equals(qaPow) { + return cid.Undef, xerrors.Errorf("QualityAdjPower (%s) doesn't match previously calculated qaPow (%s)", pc.QualityAdjPower, qaPow) } // TODO: Should we re-ConstructState for the reward actor using rawPow as currRealizedPower here? 
@@ -337,13 +488,25 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid // TODO: copied from actors test harness, deduplicate or remove from here type fakeRand struct{} -func (fr *fakeRand) GetChainRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (fr *fakeRand) GetChainRandomnessLookingForward(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint + return out, nil +} + +func (fr *fakeRand) GetChainRandomnessLookingBack(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { out := make([]byte, 32) _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint return out, nil } -func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (fr *fakeRand) GetBeaconRandomnessLookingForward(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint + return out, nil +} + +func (fr *fakeRand) GetBeaconRandomnessLookingBack(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { out := make([]byte, 32) _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint return out, nil @@ -362,43 +525,79 @@ func currentTotalPower(ctx context.Context, vm *vm.VM, maddr address.Address) (* return &pwr, nil } -func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch) 
(market0.VerifyDealsForActivationReturn, error) { - params := &market.VerifyDealsForActivationParams{ - DealIDs: dealIDs, - SectorStart: sectorStart, - SectorExpiry: sectorExpiry, +func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch, av actors.Version) (abi.DealWeight, abi.DealWeight, error) { + // TODO: This hack should move to market actor wrapper + if av <= actors.Version2 { + params := &market0.VerifyDealsForActivationParams{ + DealIDs: dealIDs, + SectorStart: sectorStart, + SectorExpiry: sectorExpiry, + } + + var dealWeights market0.VerifyDealsForActivationReturn + ret, err := doExecValue(ctx, vm, + market.Address, + maddr, + abi.NewTokenAmount(0), + builtin0.MethodsMarket.VerifyDealsForActivation, + mustEnc(params), + ) + if err != nil { + return big.Zero(), big.Zero(), err + } + if err := dealWeights.UnmarshalCBOR(bytes.NewReader(ret)); err != nil { + return big.Zero(), big.Zero(), err + } + + return dealWeights.DealWeight, dealWeights.VerifiedDealWeight, nil } + params := &market4.VerifyDealsForActivationParams{Sectors: []market4.SectorDeals{{ + SectorExpiry: sectorExpiry, + DealIDs: dealIDs, + }}} - var dealWeights market0.VerifyDealsForActivationReturn + var dealWeights market4.VerifyDealsForActivationReturn ret, err := doExecValue(ctx, vm, market.Address, maddr, abi.NewTokenAmount(0), - builtin0.MethodsMarket.VerifyDealsForActivation, + market.Methods.VerifyDealsForActivation, mustEnc(params), ) if err != nil { - return market0.VerifyDealsForActivationReturn{}, err + return big.Zero(), big.Zero(), err } if err := dealWeights.UnmarshalCBOR(bytes.NewReader(ret)); err != nil { - return market0.VerifyDealsForActivationReturn{}, err + return big.Zero(), big.Zero(), err } - return dealWeights, nil + return dealWeights.Sectors[0].DealWeight, dealWeights.Sectors[0].VerifiedDealWeight, nil } -func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address) 
(*reward0.ThisEpochRewardReturn, error) { - rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), builtin0.MethodsReward.ThisEpochReward, nil) +func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address, av actors.Version) (abi.StoragePower, builtin.FilterEstimate, error) { + rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), reward.Methods.ThisEpochReward, nil) if err != nil { - return nil, err + return big.Zero(), builtin.FilterEstimate{}, err } - var epochReward reward0.ThisEpochRewardReturn + // TODO: This hack should move to reward actor wrapper + if av <= actors.Version2 { + var epochReward reward0.ThisEpochRewardReturn + + if err := epochReward.UnmarshalCBOR(bytes.NewReader(rwret)); err != nil { + return big.Zero(), builtin.FilterEstimate{}, err + } + + return epochReward.ThisEpochBaselinePower, *epochReward.ThisEpochRewardSmoothed, nil + } + + var epochReward reward4.ThisEpochRewardReturn + if err := epochReward.UnmarshalCBOR(bytes.NewReader(rwret)); err != nil { - return nil, err + return big.Zero(), builtin.FilterEstimate{}, err } - return &epochReward, nil + return epochReward.ThisEpochBaselinePower, builtin.FilterEstimate(epochReward.ThisEpochRewardSmoothed), nil } func circSupply(ctx context.Context, vmi *vm.VM, maddr address.Address) abi.TokenAmount { diff --git a/chain/gen/genesis/t00_system.go b/chain/gen/genesis/t00_system.go deleted file mode 100644 index 6e6cc976aba..00000000000 --- a/chain/gen/genesis/t00_system.go +++ /dev/null @@ -1,31 +0,0 @@ -package genesis - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/builtin/system" - - "github.com/filecoin-project/specs-actors/actors/builtin" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/lotus/chain/types" - bstore "github.com/filecoin-project/lotus/lib/blockstore" -) - -func SetupSystemActor(bs bstore.Blockstore) (*types.Actor, error) { - var st system.State - - cst := cbor.NewCborStore(bs) 
- - statecid, err := cst.Put(context.TODO(), &st) - if err != nil { - return nil, err - } - - act := &types.Actor{ - Code: builtin.SystemActorCodeID, - Head: statecid, - } - - return act, nil -} diff --git a/chain/gen/genesis/t02_reward.go b/chain/gen/genesis/t02_reward.go deleted file mode 100644 index 92531051b14..00000000000 --- a/chain/gen/genesis/t02_reward.go +++ /dev/null @@ -1,32 +0,0 @@ -package genesis - -import ( - "context" - - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/specs-actors/actors/builtin" - reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" - bstore "github.com/filecoin-project/lotus/lib/blockstore" -) - -func SetupRewardActor(bs bstore.Blockstore, qaPower big.Int) (*types.Actor, error) { - cst := cbor.NewCborStore(bs) - - st := reward0.ConstructState(qaPower) - - hcid, err := cst.Put(context.TODO(), st) - if err != nil { - return nil, err - } - - return &types.Actor{ - Code: builtin.RewardActorCodeID, - Balance: types.BigInt{Int: build.InitialRewardBalance}, - Head: hcid, - }, nil -} diff --git a/chain/gen/genesis/t03_cron.go b/chain/gen/genesis/t03_cron.go deleted file mode 100644 index cf2c0d7a741..00000000000 --- a/chain/gen/genesis/t03_cron.go +++ /dev/null @@ -1,29 +0,0 @@ -package genesis - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/cron" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/lotus/chain/types" - bstore "github.com/filecoin-project/lotus/lib/blockstore" -) - -func SetupCronActor(bs bstore.Blockstore) (*types.Actor, error) { - cst := cbor.NewCborStore(bs) - cas := cron.ConstructState(cron.BuiltInEntries()) - - stcid, err := cst.Put(context.TODO(), cas) - if err != nil { - return nil, err - } - - return &types.Actor{ 
- Code: builtin.CronActorCodeID, - Head: stcid, - Nonce: 0, - Balance: types.NewInt(0), - }, nil -} diff --git a/chain/gen/genesis/t04_power.go b/chain/gen/genesis/t04_power.go deleted file mode 100644 index 2f1303ba46c..00000000000 --- a/chain/gen/genesis/t04_power.go +++ /dev/null @@ -1,46 +0,0 @@ -package genesis - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/util/adt" - - power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/lotus/chain/types" - bstore "github.com/filecoin-project/lotus/lib/blockstore" -) - -func SetupStoragePowerActor(bs bstore.Blockstore) (*types.Actor, error) { - store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs)) - emptyMap, err := adt.MakeEmptyMap(store).Root() - if err != nil { - return nil, err - } - - multiMap, err := adt.AsMultimap(store, emptyMap) - if err != nil { - return nil, err - } - - emptyMultiMap, err := multiMap.Root() - if err != nil { - return nil, err - } - - sms := power0.ConstructState(emptyMap, emptyMultiMap) - - stcid, err := store.Put(store.Context(), sms) - if err != nil { - return nil, err - } - - return &types.Actor{ - Code: builtin.StoragePowerActorCodeID, - Head: stcid, - Nonce: 0, - Balance: types.NewInt(0), - }, nil -} diff --git a/chain/gen/genesis/t05_market.go b/chain/gen/genesis/t05_market.go deleted file mode 100644 index 615e8370ba5..00000000000 --- a/chain/gen/genesis/t05_market.go +++ /dev/null @@ -1,41 +0,0 @@ -package genesis - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/util/adt" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/lotus/chain/types" - bstore "github.com/filecoin-project/lotus/lib/blockstore" -) - -func 
SetupStorageMarketActor(bs bstore.Blockstore) (*types.Actor, error) { - store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs)) - - a, err := adt.MakeEmptyArray(store).Root() - if err != nil { - return nil, err - } - h, err := adt.MakeEmptyMap(store).Root() - if err != nil { - return nil, err - } - - sms := market.ConstructState(a, h, h) - - stcid, err := store.Put(store.Context(), sms) - if err != nil { - return nil, err - } - - act := &types.Actor{ - Code: builtin.StorageMarketActorCodeID, - Head: stcid, - Balance: types.NewInt(0), - } - - return act, nil -} diff --git a/chain/gen/genesis/t06_vreg.go b/chain/gen/genesis/t06_vreg.go deleted file mode 100644 index 1709b205f1f..00000000000 --- a/chain/gen/genesis/t06_vreg.go +++ /dev/null @@ -1,51 +0,0 @@ -package genesis - -import ( - "context" - - "github.com/filecoin-project/go-address" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/specs-actors/actors/builtin" - verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - "github.com/filecoin-project/specs-actors/actors/util/adt" - - "github.com/filecoin-project/lotus/chain/types" - bstore "github.com/filecoin-project/lotus/lib/blockstore" -) - -var RootVerifierID address.Address - -func init() { - - idk, err := address.NewFromString("t080") - if err != nil { - panic(err) - } - - RootVerifierID = idk -} - -func SetupVerifiedRegistryActor(bs bstore.Blockstore) (*types.Actor, error) { - store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs)) - - h, err := adt.MakeEmptyMap(store).Root() - if err != nil { - return nil, err - } - - sms := verifreg0.ConstructState(h, RootVerifierID) - - stcid, err := store.Put(store.Context(), sms) - if err != nil { - return nil, err - } - - act := &types.Actor{ - Code: builtin.VerifiedRegistryActorCodeID, - Head: stcid, - Balance: types.NewInt(0), - } - - return act, nil -} diff --git a/chain/gen/genesis/util.go b/chain/gen/genesis/util.go index 54cc30cc168..67a4e9579a7 100644 --- 
a/chain/gen/genesis/util.go +++ b/chain/gen/genesis/util.go @@ -3,9 +3,6 @@ package genesis import ( "context" - "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" cbg "github.com/whyrusleeping/cbor-gen" @@ -49,29 +46,3 @@ func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value return ret.Return, nil } - -// TODO: Get from build -// TODO: make a list/schedule of these. -var GenesisNetworkVersion = func() network.Version { - // returns the version _before_ the first upgrade. - if build.UpgradeBreezeHeight >= 0 { - return network.Version0 - } - if build.UpgradeSmokeHeight >= 0 { - return network.Version1 - } - if build.UpgradeIgnitionHeight >= 0 { - return network.Version2 - } - if build.UpgradeActorsV2Height >= 0 { - return network.Version3 - } - if build.UpgradeLiftoffHeight >= 0 { - return network.Version3 - } - return build.ActorUpgradeNetworkVersion - 1 // genesis requires actors v0. 
-}() - -func genesisNetworkVersion(context.Context, abi.ChainEpoch) network.Version { // TODO: Get from build/ - return GenesisNetworkVersion // TODO: Get from build/ -} // TODO: Get from build/ diff --git a/chain/gen/mining.go b/chain/gen/mining.go index cca4b61699a..1400c12c51f 100644 --- a/chain/gen/mining.go +++ b/chain/gen/mining.go @@ -9,13 +9,13 @@ import ( cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" + ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/sigs/bls" ) -func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletAPI, bt *api.BlockTemplate) (*types.FullBlock, error) { +func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) { pts, err := sm.ChainStore().LoadTipSet(bt.Parents) if err != nil { @@ -79,7 +79,7 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletA } } - store := sm.ChainStore().Store(ctx) + store := sm.ChainStore().ActorStore(ctx) blsmsgroot, err := toArray(store, blsMsgCids) if err != nil { return nil, xerrors.Errorf("building bls amt: %w", err) @@ -140,35 +140,29 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletA } func aggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) { - sigsS := make([][]byte, len(sigs)) + sigsS := make([]ffi.Signature, len(sigs)) for i := 0; i < len(sigs); i++ { - sigsS[i] = sigs[i].Data + copy(sigsS[i][:], sigs[i].Data[:ffi.SignatureBytes]) } - aggregator := new(bls.AggregateSignature).AggregateCompressed(sigsS) - if aggregator == nil { + aggSig := ffi.Aggregate(sigsS) + if aggSig == nil { if len(sigs) > 0 { return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs)) } + zeroSig := ffi.CreateZeroSignature() + // Note: for 
blst this condition should not happen - nil should not // be returned return &crypto.Signature{ Type: crypto.SigTypeBLS, - Data: new(bls.Signature).Compress(), - }, nil - } - aggSigAff := aggregator.ToAffine() - if aggSigAff == nil { - return &crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: new(bls.Signature).Compress(), + Data: zeroSig[:], }, nil } - aggSig := aggSigAff.Compress() return &crypto.Signature{ Type: crypto.SigTypeBLS, - Data: aggSig, + Data: aggSig[:], }, nil } diff --git a/chain/gen/slashfilter/slashfilter.go b/chain/gen/slashfilter/slashfilter.go index ee04351566d..5edcd5439df 100644 --- a/chain/gen/slashfilter/slashfilter.go +++ b/chain/gen/slashfilter/slashfilter.go @@ -3,6 +3,8 @@ package slashfilter import ( "fmt" + "github.com/filecoin-project/lotus/build" + "golang.org/x/xerrors" "github.com/ipfs/go-cid" @@ -26,6 +28,10 @@ func New(dstore ds.Batching) *SlashFilter { } func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error { + if build.IsNearUpgrade(bh.Height, build.UpgradeOrangeHeight) { + return nil + } + epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height)) { // double-fork mining (2 blocks at one epoch) diff --git a/chain/market/cbor_gen.go b/chain/market/cbor_gen.go new file mode 100644 index 00000000000..7d9e55b3619 --- /dev/null +++ b/chain/market/cbor_gen.go @@ -0,0 +1,116 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package market + +import ( + "fmt" + "io" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = sort.Sort + +var lengthBufFundedAddressState = []byte{131} + +func (t *FundedAddressState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufFundedAddressState); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Addr (address.Address) (struct) + if err := t.Addr.MarshalCBOR(w); err != nil { + return err + } + + // t.AmtReserved (big.Int) (struct) + if err := t.AmtReserved.MarshalCBOR(w); err != nil { + return err + } + + // t.MsgCid (cid.Cid) (struct) + + if t.MsgCid == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.MsgCid); err != nil { + return xerrors.Errorf("failed to write cid field t.MsgCid: %w", err) + } + } + + return nil +} + +func (t *FundedAddressState) UnmarshalCBOR(r io.Reader) error { + *t = FundedAddressState{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Addr (address.Address) (struct) + + { + + if err := t.Addr.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Addr: %w", err) + } + + } + // t.AmtReserved (big.Int) (struct) + + { + + if err := t.AmtReserved.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.AmtReserved: %w", err) + } + + } + // t.MsgCid (cid.Cid) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, 
err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.MsgCid: %w", err) + } + + t.MsgCid = &c + } + + } + return nil +} diff --git a/chain/market/fundmanager.go b/chain/market/fundmanager.go new file mode 100644 index 00000000000..5becfdfa717 --- /dev/null +++ b/chain/market/fundmanager.go @@ -0,0 +1,726 @@ +package market + +import ( + "context" + "fmt" + "sync" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/impl/full" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + "go.uber.org/fx" + "golang.org/x/xerrors" +) + +var log = logging.Logger("market_adapter") + +// API is the fx dependencies need to run a fund manager +type FundManagerAPI struct { + fx.In + + full.StateAPI + full.MpoolAPI +} + +// fundManagerAPI is the specific methods called by the FundManager +// (used by the tests) +type fundManagerAPI interface { + MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error) + StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) +} + +// FundManager keeps track of funds in a set of addresses +type FundManager struct { + ctx context.Context + shutdown context.CancelFunc + api fundManagerAPI + str *Store + + lk sync.Mutex + fundedAddrs map[address.Address]*fundedAddress +} + +func NewFundManager(lc fx.Lifecycle, api FundManagerAPI, ds dtypes.MetadataDS) *FundManager { 
+ fm := newFundManager(&api, ds) + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + return fm.Start() + }, + OnStop: func(ctx context.Context) error { + fm.Stop() + return nil + }, + }) + return fm +} + +// newFundManager is used by the tests +func newFundManager(api fundManagerAPI, ds datastore.Batching) *FundManager { + ctx, cancel := context.WithCancel(context.Background()) + return &FundManager{ + ctx: ctx, + shutdown: cancel, + api: api, + str: newStore(ds), + fundedAddrs: make(map[address.Address]*fundedAddress), + } +} + +func (fm *FundManager) Stop() { + fm.shutdown() +} + +func (fm *FundManager) Start() error { + fm.lk.Lock() + defer fm.lk.Unlock() + + // TODO: + // To save memory: + // - in State() only load addresses with in-progress messages + // - load the others just-in-time from getFundedAddress + // - delete(fm.fundedAddrs, addr) when the queue has been processed + return fm.str.forEach(func(state *FundedAddressState) { + fa := newFundedAddress(fm, state.Addr) + fa.state = state + fm.fundedAddrs[fa.state.Addr] = fa + fa.start() + }) +} + +// Creates a fundedAddress if it doesn't already exist, and returns it +func (fm *FundManager) getFundedAddress(addr address.Address) *fundedAddress { + fm.lk.Lock() + defer fm.lk.Unlock() + + fa, ok := fm.fundedAddrs[addr] + if !ok { + fa = newFundedAddress(fm, addr) + fm.fundedAddrs[addr] = fa + } + return fa +} + +// Reserve adds amt to `reserved`. If there are not enough available funds for +// the address, submits a message on chain to top up available funds. +// Returns the cid of the message that was submitted on chain, or cid.Undef if +// the required funds were already available. +func (fm *FundManager) Reserve(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return fm.getFundedAddress(addr).reserve(ctx, wallet, amt) +} + +// Subtract from `reserved`. 
+func (fm *FundManager) Release(addr address.Address, amt abi.TokenAmount) error { + return fm.getFundedAddress(addr).release(amt) +} + +// Withdraw unreserved funds. Only succeeds if there are enough unreserved +// funds for the address. +// Returns the cid of the message that was submitted on chain. +func (fm *FundManager) Withdraw(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return fm.getFundedAddress(addr).withdraw(ctx, wallet, amt) +} + +// GetReserved returns the amount that is currently reserved for the address +func (fm *FundManager) GetReserved(addr address.Address) abi.TokenAmount { + return fm.getFundedAddress(addr).getReserved() +} + +// FundedAddressState keeps track of the state of an address with funds in the +// datastore +type FundedAddressState struct { + Addr address.Address + // AmtReserved is the amount that must be kept in the address (cannot be + // withdrawn) + AmtReserved abi.TokenAmount + // MsgCid is the cid of an in-progress on-chain message + MsgCid *cid.Cid +} + +// fundedAddress keeps track of the state and request queues for a +// particular address +type fundedAddress struct { + ctx context.Context + env *fundManagerEnvironment + str *Store + + lk sync.RWMutex + state *FundedAddressState + + // Note: These request queues are ephemeral, they are not saved to store + reservations []*fundRequest + releases []*fundRequest + withdrawals []*fundRequest + + // Used by the tests + onProcessStartListener func() bool +} + +func newFundedAddress(fm *FundManager, addr address.Address) *fundedAddress { + return &fundedAddress{ + ctx: fm.ctx, + env: &fundManagerEnvironment{api: fm.api}, + str: fm.str, + state: &FundedAddressState{ + Addr: addr, + AmtReserved: abi.NewTokenAmount(0), + }, + } +} + +// If there is an in-progress on-chain message, don't submit any more messages +// on chain until it completes +func (a *fundedAddress) start() { + a.lk.Lock() + defer a.lk.Unlock() + + if a.state.MsgCid != 
nil { + a.debugf("restart: wait for %s", a.state.MsgCid) + a.startWaitForResults(*a.state.MsgCid) + } +} + +func (a *fundedAddress) getReserved() abi.TokenAmount { + a.lk.RLock() + defer a.lk.RUnlock() + + return a.state.AmtReserved +} + +func (a *fundedAddress) reserve(ctx context.Context, wallet address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return a.requestAndWait(ctx, wallet, amt, &a.reservations) +} + +func (a *fundedAddress) release(amt abi.TokenAmount) error { + _, err := a.requestAndWait(context.Background(), address.Undef, amt, &a.releases) + return err +} + +func (a *fundedAddress) withdraw(ctx context.Context, wallet address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return a.requestAndWait(ctx, wallet, amt, &a.withdrawals) +} + +func (a *fundedAddress) requestAndWait(ctx context.Context, wallet address.Address, amt abi.TokenAmount, reqs *[]*fundRequest) (cid.Cid, error) { + // Create a request and add it to the request queue + req := newFundRequest(ctx, wallet, amt) + + a.lk.Lock() + *reqs = append(*reqs, req) + a.lk.Unlock() + + // Process the queue + go a.process() + + // Wait for the results + select { + case <-ctx.Done(): + return cid.Undef, ctx.Err() + case r := <-req.Result: + return r.msgCid, r.err + } +} + +// Used by the tests +func (a *fundedAddress) onProcessStart(fn func() bool) { + a.lk.Lock() + defer a.lk.Unlock() + + a.onProcessStartListener = fn +} + +// Process queued requests +func (a *fundedAddress) process() { + a.lk.Lock() + defer a.lk.Unlock() + + // Used by the tests + if a.onProcessStartListener != nil { + done := a.onProcessStartListener() + if !done { + return + } + a.onProcessStartListener = nil + } + + // Check if we're still waiting for the response to a message + if a.state.MsgCid != nil { + return + } + + // Check if there's anything to do + haveReservations := len(a.reservations) > 0 || len(a.releases) > 0 + haveWithdrawals := len(a.withdrawals) > 0 + if !haveReservations && !haveWithdrawals { + return 
+ } + + // Process reservations / releases + if haveReservations { + res, err := a.processReservations(a.reservations, a.releases) + if err == nil { + a.applyStateChange(res.msgCid, res.amtReserved) + } + a.reservations = filterOutProcessedReqs(a.reservations) + a.releases = filterOutProcessedReqs(a.releases) + } + + // If there was no message sent on chain by adding reservations, and all + // reservations have completed processing, process withdrawals + if haveWithdrawals && a.state.MsgCid == nil && len(a.reservations) == 0 { + withdrawalCid, err := a.processWithdrawals(a.withdrawals) + if err == nil && withdrawalCid != cid.Undef { + a.applyStateChange(&withdrawalCid, types.EmptyInt) + } + a.withdrawals = filterOutProcessedReqs(a.withdrawals) + } + + // If a message was sent on-chain + if a.state.MsgCid != nil { + // Start waiting for results of message (async) + a.startWaitForResults(*a.state.MsgCid) + } + + // Process any remaining queued requests + go a.process() +} + +// Filter out completed requests +func filterOutProcessedReqs(reqs []*fundRequest) []*fundRequest { + filtered := make([]*fundRequest, 0, len(reqs)) + for _, req := range reqs { + if !req.Completed() { + filtered = append(filtered, req) + } + } + return filtered +} + +// Apply the results of processing queues and save to the datastore +func (a *fundedAddress) applyStateChange(msgCid *cid.Cid, amtReserved abi.TokenAmount) { + a.state.MsgCid = msgCid + if !amtReserved.Nil() { + a.state.AmtReserved = amtReserved + } + a.saveState() +} + +// Clear the pending message cid so that a new message can be sent +func (a *fundedAddress) clearWaitState() { + a.state.MsgCid = nil + a.saveState() +} + +// Save state to datastore +func (a *fundedAddress) saveState() { + // Not much we can do if saving to the datastore fails, just log + err := a.str.save(a.state) + if err != nil { + log.Errorf("saving state to store for addr %s: %v", a.state.Addr, err) + } +} + +// The result of processing the reservation / 
release queues +type processResult struct { + // Requests that completed without adding funds + covered []*fundRequest + // Requests that added funds + added []*fundRequest + + // The new reserved amount + amtReserved abi.TokenAmount + // The message cid, if a message was submitted on-chain + msgCid *cid.Cid +} + +// process reservations and releases, and return the resulting changes to state +func (a *fundedAddress) processReservations(reservations []*fundRequest, releases []*fundRequest) (pr *processResult, prerr error) { + // When the function returns + defer func() { + // If there's an error, mark all requests as errored + if prerr != nil { + for _, req := range append(reservations, releases...) { + req.Complete(cid.Undef, prerr) + } + return + } + + // Complete all release requests + for _, req := range releases { + req.Complete(cid.Undef, nil) + } + + // Complete all requests that were covered by released amounts + for _, req := range pr.covered { + req.Complete(cid.Undef, nil) + } + + // If a message was sent + if pr.msgCid != nil { + // Complete all add funds requests + for _, req := range pr.added { + req.Complete(*pr.msgCid, nil) + } + } + }() + + // Split reservations into those that are covered by released amounts, + // and those to add to the reserved amount. + // Note that we process requests from the same wallet in batches. So some + // requests may not be included in covered if they don't match the first + // covered request's wallet. These will be processed on a subsequent + // invocation of processReservations. 
+ toCancel, toAdd, reservedDelta := splitReservations(reservations, releases) + + // Apply the reserved delta to the reserved amount + reserved := types.BigAdd(a.state.AmtReserved, reservedDelta) + if reserved.LessThan(abi.NewTokenAmount(0)) { + reserved = abi.NewTokenAmount(0) + } + res := &processResult{ + amtReserved: reserved, + covered: toCancel, + } + + // Work out the amount to add to the balance + amtToAdd := abi.NewTokenAmount(0) + if len(toAdd) > 0 && reserved.GreaterThan(abi.NewTokenAmount(0)) { + // Get available funds for address + avail, err := a.env.AvailableFunds(a.ctx, a.state.Addr) + if err != nil { + return res, err + } + + // amount to add = new reserved amount - available + amtToAdd = types.BigSub(reserved, avail) + a.debugf("reserved %d - avail %d = to add %d", reserved, avail, amtToAdd) + } + + // If there's nothing to add to the balance, bail out + if amtToAdd.LessThanEqual(abi.NewTokenAmount(0)) { + res.covered = append(res.covered, toAdd...) + return res, nil + } + + // Add funds to address + a.debugf("add funds %d", amtToAdd) + addFundsCid, err := a.env.AddFunds(a.ctx, toAdd[0].Wallet, a.state.Addr, amtToAdd) + if err != nil { + return res, err + } + + // Mark reservation requests as complete + res.added = toAdd + + // Save the message CID to state + res.msgCid = &addFundsCid + return res, nil +} + +// Split reservations into those that are under the total release amount +// (covered) and those that exceed it (to add). +// Note that we process requests from the same wallet in batches. So some +// requests may not be included in covered if they don't match the first +// covered request's wallet. 
+func splitReservations(reservations []*fundRequest, releases []*fundRequest) ([]*fundRequest, []*fundRequest, abi.TokenAmount) { + toCancel := make([]*fundRequest, 0, len(reservations)) + toAdd := make([]*fundRequest, 0, len(reservations)) + toAddAmt := abi.NewTokenAmount(0) + + // Sum release amounts + releaseAmt := abi.NewTokenAmount(0) + for _, req := range releases { + releaseAmt = types.BigAdd(releaseAmt, req.Amount()) + } + + // We only want to combine requests that come from the same wallet + batchWallet := address.Undef + for _, req := range reservations { + amt := req.Amount() + + // If the amount to add to the reserve is cancelled out by a release + if amt.LessThanEqual(releaseAmt) { + // Cancel the request and update the release total + releaseAmt = types.BigSub(releaseAmt, amt) + toCancel = append(toCancel, req) + continue + } + + // The amount to add is greater that the release total so we want + // to send an add funds request + + // The first time the wallet will be undefined + if batchWallet == address.Undef { + batchWallet = req.Wallet + } + // If this request's wallet is the same as the batch wallet, + // the requests will be combined + if batchWallet == req.Wallet { + delta := types.BigSub(amt, releaseAmt) + toAddAmt = types.BigAdd(toAddAmt, delta) + releaseAmt = abi.NewTokenAmount(0) + toAdd = append(toAdd, req) + } + } + + // The change in the reserved amount is "amount to add" - "amount to release" + reservedDelta := types.BigSub(toAddAmt, releaseAmt) + + return toCancel, toAdd, reservedDelta +} + +// process withdrawal queue +func (a *fundedAddress) processWithdrawals(withdrawals []*fundRequest) (msgCid cid.Cid, prerr error) { + // If there's an error, mark all withdrawal requests as errored + defer func() { + if prerr != nil { + for _, req := range withdrawals { + req.Complete(cid.Undef, prerr) + } + } + }() + + // Get the net available balance + avail, err := a.env.AvailableFunds(a.ctx, a.state.Addr) + if err != nil { + return cid.Undef, 
err + } + + netAvail := types.BigSub(avail, a.state.AmtReserved) + + // Fit as many withdrawals as possible into the available balance, and fail + // the rest + withdrawalAmt := abi.NewTokenAmount(0) + allowedAmt := abi.NewTokenAmount(0) + allowed := make([]*fundRequest, 0, len(withdrawals)) + var batchWallet address.Address + for _, req := range withdrawals { + amt := req.Amount() + if amt.IsZero() { + // If the context for the request was cancelled, bail out + req.Complete(cid.Undef, err) + continue + } + + // If the amount would exceed the available amount, complete the + // request with an error + newWithdrawalAmt := types.BigAdd(withdrawalAmt, amt) + if newWithdrawalAmt.GreaterThan(netAvail) { + msg := fmt.Sprintf("insufficient funds for withdrawal of %s: ", types.FIL(amt)) + msg += fmt.Sprintf("net available (%s) = available (%s) - reserved (%s)", + types.FIL(types.BigSub(netAvail, withdrawalAmt)), types.FIL(avail), types.FIL(a.state.AmtReserved)) + if !withdrawalAmt.IsZero() { + msg += fmt.Sprintf(" - queued withdrawals (%s)", types.FIL(withdrawalAmt)) + } + err := xerrors.Errorf(msg) + a.debugf("%s", err) + req.Complete(cid.Undef, err) + continue + } + + // If this is the first allowed withdrawal request in this batch, save + // its wallet address + if batchWallet == address.Undef { + batchWallet = req.Wallet + } + // If the request wallet doesn't match the batch wallet, bail out + // (the withdrawal will be processed after the current batch has + // completed) + if req.Wallet != batchWallet { + continue + } + + // Include this withdrawal request in the batch + withdrawalAmt = newWithdrawalAmt + a.debugf("withdraw %d", amt) + allowed = append(allowed, req) + allowedAmt = types.BigAdd(allowedAmt, amt) + } + + // Check if there is anything to withdraw. 
+ // Note that if the context for a request is cancelled, + // req.Amount() returns zero + if allowedAmt.Equals(abi.NewTokenAmount(0)) { + // Mark allowed requests as complete + for _, req := range allowed { + req.Complete(cid.Undef, nil) + } + return cid.Undef, nil + } + + // Withdraw funds + a.debugf("withdraw funds %d", allowedAmt) + withdrawFundsCid, err := a.env.WithdrawFunds(a.ctx, allowed[0].Wallet, a.state.Addr, allowedAmt) + if err != nil { + return cid.Undef, err + } + + // Mark allowed requests as complete + for _, req := range allowed { + req.Complete(withdrawFundsCid, nil) + } + + // Save the message CID to state + return withdrawFundsCid, nil +} + +// asynchonously wait for results of message +func (a *fundedAddress) startWaitForResults(msgCid cid.Cid) { + go func() { + err := a.env.WaitMsg(a.ctx, msgCid) + if err != nil { + // We don't really care about the results here, we're just waiting + // so as to only process one on-chain message at a time + log.Errorf("waiting for results of message %s for addr %s: %v", msgCid, a.state.Addr, err) + } + + a.lk.Lock() + a.debugf("complete wait") + a.clearWaitState() + a.lk.Unlock() + + a.process() + }() +} + +func (a *fundedAddress) debugf(args ...interface{}) { + fmtStr := args[0].(string) + args = args[1:] + log.Debugf(a.state.Addr.String()+": "+fmtStr, args...) 
+} + +// The result of a fund request +type reqResult struct { + msgCid cid.Cid + err error +} + +// A request to change funds +type fundRequest struct { + ctx context.Context + amt abi.TokenAmount + completed chan struct{} + Wallet address.Address + Result chan reqResult +} + +func newFundRequest(ctx context.Context, wallet address.Address, amt abi.TokenAmount) *fundRequest { + return &fundRequest{ + ctx: ctx, + amt: amt, + Wallet: wallet, + Result: make(chan reqResult), + completed: make(chan struct{}), + } +} + +// Amount returns zero if the context has expired +func (frp *fundRequest) Amount() abi.TokenAmount { + if frp.ctx.Err() != nil { + return abi.NewTokenAmount(0) + } + return frp.amt +} + +// Complete is called with the message CID when the funds request has been +// started or with the error if there was an error +func (frp *fundRequest) Complete(msgCid cid.Cid, err error) { + select { + case <-frp.completed: + case <-frp.ctx.Done(): + case frp.Result <- reqResult{msgCid: msgCid, err: err}: + } + close(frp.completed) +} + +// Completed indicates if Complete has already been called +func (frp *fundRequest) Completed() bool { + select { + case <-frp.completed: + return true + default: + return false + } +} + +// fundManagerEnvironment simplifies some API calls +type fundManagerEnvironment struct { + api fundManagerAPI +} + +func (env *fundManagerEnvironment) AvailableFunds(ctx context.Context, addr address.Address) (abi.TokenAmount, error) { + bal, err := env.api.StateMarketBalance(ctx, addr, types.EmptyTSK) + if err != nil { + return abi.NewTokenAmount(0), err + } + + return types.BigSub(bal.Escrow, bal.Locked), nil +} + +func (env *fundManagerEnvironment) AddFunds( + ctx context.Context, + wallet address.Address, + addr address.Address, + amt abi.TokenAmount, +) (cid.Cid, error) { + params, err := actors.SerializeParams(&addr) + if err != nil { + return cid.Undef, err + } + + smsg, aerr := env.api.MpoolPushMessage(ctx, &types.Message{ + To: 
market.Address, + From: wallet, + Value: amt, + Method: market.Methods.AddBalance, + Params: params, + }, nil) + + if aerr != nil { + return cid.Undef, aerr + } + + return smsg.Cid(), nil +} + +func (env *fundManagerEnvironment) WithdrawFunds( + ctx context.Context, + wallet address.Address, + addr address.Address, + amt abi.TokenAmount, +) (cid.Cid, error) { + params, err := actors.SerializeParams(&market.WithdrawBalanceParams{ + ProviderOrClientAddress: addr, + Amount: amt, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("serializing params: %w", err) + } + + smsg, aerr := env.api.MpoolPushMessage(ctx, &types.Message{ + To: market.Address, + From: wallet, + Value: types.NewInt(0), + Method: market.Methods.WithdrawBalance, + Params: params, + }, nil) + + if aerr != nil { + return cid.Undef, aerr + } + + return smsg.Cid(), nil +} + +func (env *fundManagerEnvironment) WaitMsg(ctx context.Context, c cid.Cid) error { + _, err := env.api.StateWaitMsg(ctx, c, build.MessageConfidence, api.LookbackNoLimit, true) + return err +} diff --git a/chain/market/fundmanager_test.go b/chain/market/fundmanager_test.go new file mode 100644 index 00000000000..12530434311 --- /dev/null +++ b/chain/market/fundmanager_test.go @@ -0,0 +1,820 @@ +package market + +import ( + "bytes" + "context" + "sync" + "testing" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + tutils "github.com/filecoin-project/specs-actors/v2/support/testing" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/require" +) + +// TestFundManagerBasic verifies that the basic fund manager operations work +func TestFundManagerBasic(t *testing.T) { + s := setup(t) + 
defer s.fm.Stop() + + // Reserve 10 + // balance: 0 -> 10 + // reserved: 0 -> 10 + amt := abi.NewTokenAmount(10) + sentinel, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + msg := s.mockApi.getSentMessage(sentinel) + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt) + + s.mockApi.completeMsg(sentinel) + + // Reserve 7 + // balance: 10 -> 17 + // reserved: 10 -> 17 + amt = abi.NewTokenAmount(7) + sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + msg = s.mockApi.getSentMessage(sentinel) + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt) + + s.mockApi.completeMsg(sentinel) + + // Release 5 + // balance: 17 + // reserved: 17 -> 12 + amt = abi.NewTokenAmount(5) + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) + + // Withdraw 2 + // balance: 17 -> 15 + // reserved: 12 + amt = abi.NewTokenAmount(2) + sentinel, err = s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + msg = s.mockApi.getSentMessage(sentinel) + checkWithdrawMessageFields(t, msg, s.walletAddr, s.acctAddr, amt) + + s.mockApi.completeMsg(sentinel) + + // Reserve 3 + // balance: 15 + // reserved: 12 -> 15 + // Note: reserved (15) is <= balance (15) so should not send on-chain + // message + msgCount := s.mockApi.messageCount() + amt = abi.NewTokenAmount(3) + sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + require.Equal(t, msgCount, s.mockApi.messageCount()) + require.Equal(t, sentinel, cid.Undef) + + // Reserve 1 + // balance: 15 -> 16 + // reserved: 15 -> 16 + // Note: reserved (16) is above balance (15) so *should* send on-chain + // message to top up balance + amt = abi.NewTokenAmount(1) + topUp := abi.NewTokenAmount(1) + sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + s.mockApi.completeMsg(sentinel) + msg = s.mockApi.getSentMessage(sentinel) + 
checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, topUp) + + // Withdraw 1 + // balance: 16 + // reserved: 16 + // Note: Expect failure because there is no available balance to withdraw: + // balance - reserved = 16 - 16 = 0 + amt = abi.NewTokenAmount(1) + sentinel, err = s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt) + require.Error(t, err) +} + +// TestFundManagerParallel verifies that operations can be run in parallel +func TestFundManagerParallel(t *testing.T) { + s := setup(t) + defer s.fm.Stop() + + // Reserve 10 + amt := abi.NewTokenAmount(10) + sentinelReserve10, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + // Wait until all the subsequent requests are queued up + queueReady := make(chan struct{}) + fa := s.fm.getFundedAddress(s.acctAddr) + fa.onProcessStart(func() bool { + if len(fa.withdrawals) == 1 && len(fa.reservations) == 2 && len(fa.releases) == 1 { + close(queueReady) + return true + } + return false + }) + + // Withdraw 5 (should not run until after reserves / releases) + withdrawReady := make(chan error) + go func() { + amt = abi.NewTokenAmount(5) + _, err := s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt) + withdrawReady <- err + }() + + reserveSentinels := make(chan cid.Cid) + + // Reserve 3 + go func() { + amt := abi.NewTokenAmount(3) + sentinelReserve3, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + reserveSentinels <- sentinelReserve3 + }() + + // Reserve 5 + go func() { + amt := abi.NewTokenAmount(5) + sentinelReserve5, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + reserveSentinels <- sentinelReserve5 + }() + + // Release 2 + go func() { + amt := abi.NewTokenAmount(2) + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) + }() + + // Everything is queued up + <-queueReady + + // Complete the "Reserve 10" message + s.mockApi.completeMsg(sentinelReserve10) + msg := 
s.mockApi.getSentMessage(sentinelReserve10) + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, abi.NewTokenAmount(10)) + + // The other requests should now be combined and be submitted on-chain as + // a single message + rs1 := <-reserveSentinels + rs2 := <-reserveSentinels + require.Equal(t, rs1, rs2) + + // Withdraw should not have been called yet, because reserve / release + // requests run first + select { + case <-withdrawReady: + require.Fail(t, "Withdraw should run after reserve / release") + default: + } + + // Complete the message + s.mockApi.completeMsg(rs1) + msg = s.mockApi.getSentMessage(rs1) + + // "Reserve 3" +3 + // "Reserve 5" +5 + // "Release 2" -2 + // Result: 6 + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, abi.NewTokenAmount(6)) + + // Expect withdraw to fail because not enough available funds + err = <-withdrawReady + require.Error(t, err) +} + +// TestFundManagerReserveByWallet verifies that reserve requests are grouped by wallet +func TestFundManagerReserveByWallet(t *testing.T) { + s := setup(t) + defer s.fm.Stop() + + walletAddrA, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1) + require.NoError(t, err) + walletAddrB, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1) + require.NoError(t, err) + + // Wait until all the reservation requests are queued up + walletAQueuedUp := make(chan struct{}) + queueReady := make(chan struct{}) + fa := s.fm.getFundedAddress(s.acctAddr) + fa.onProcessStart(func() bool { + if len(fa.reservations) == 1 { + close(walletAQueuedUp) + } + if len(fa.reservations) == 3 { + close(queueReady) + return true + } + return false + }) + + type reserveResult struct { + ws cid.Cid + err error + } + results := make(chan *reserveResult) + + amtA1 := abi.NewTokenAmount(1) + go func() { + // Wallet A: Reserve 1 + sentinelA1, err := s.fm.Reserve(s.ctx, walletAddrA, s.acctAddr, amtA1) + results <- &reserveResult{ + ws: sentinelA1, + err: err, + } + }() + + amtB1 := 
abi.NewTokenAmount(2) + amtB2 := abi.NewTokenAmount(3) + go func() { + // Wait for reservation for wallet A to be queued up + <-walletAQueuedUp + + // Wallet B: Reserve 2 + go func() { + sentinelB1, err := s.fm.Reserve(s.ctx, walletAddrB, s.acctAddr, amtB1) + results <- &reserveResult{ + ws: sentinelB1, + err: err, + } + }() + + // Wallet B: Reserve 3 + sentinelB2, err := s.fm.Reserve(s.ctx, walletAddrB, s.acctAddr, amtB2) + results <- &reserveResult{ + ws: sentinelB2, + err: err, + } + }() + + // All reservation requests are queued up + <-queueReady + + resA := <-results + sentinelA1 := resA.ws + + // Should send to wallet A + msg := s.mockApi.getSentMessage(sentinelA1) + checkAddMessageFields(t, msg, walletAddrA, s.acctAddr, amtA1) + + // Complete wallet A message + s.mockApi.completeMsg(sentinelA1) + + resB1 := <-results + resB2 := <-results + require.NoError(t, resB1.err) + require.NoError(t, resB2.err) + sentinelB1 := resB1.ws + sentinelB2 := resB2.ws + + // Should send different message to wallet B + require.NotEqual(t, sentinelA1, sentinelB1) + // Should be single message combining amount 1 and 2 + require.Equal(t, sentinelB1, sentinelB2) + msg = s.mockApi.getSentMessage(sentinelB1) + checkAddMessageFields(t, msg, walletAddrB, s.acctAddr, types.BigAdd(amtB1, amtB2)) +} + +// TestFundManagerWithdrawal verifies that as many withdraw operations as +// possible are processed +func TestFundManagerWithdrawalLimit(t *testing.T) { + s := setup(t) + defer s.fm.Stop() + + // Reserve 10 + amt := abi.NewTokenAmount(10) + sentinelReserve10, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + // Complete the "Reserve 10" message + s.mockApi.completeMsg(sentinelReserve10) + + // Release 10 + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) + + // Queue up withdraw requests + queueReady := make(chan struct{}) + fa := s.fm.getFundedAddress(s.acctAddr) + withdrawalReqTotal := 3 + withdrawalReqEnqueued := 0 + withdrawalReqQueue 
:= make(chan func(), withdrawalReqTotal) + fa.onProcessStart(func() bool { + // If a new withdrawal request was enqueued + if len(fa.withdrawals) > withdrawalReqEnqueued { + withdrawalReqEnqueued++ + + // Pop the next request and run it + select { + case fn := <-withdrawalReqQueue: + go fn() + default: + } + } + // Once all the requests have arrived, we're ready to process the queue + if withdrawalReqEnqueued == withdrawalReqTotal { + close(queueReady) + return true + } + return false + }) + + type withdrawResult struct { + reqIndex int + ws cid.Cid + err error + } + withdrawRes := make(chan *withdrawResult) + + // Queue up three "Withdraw 5" requests + enqueuedCount := 0 + for i := 0; i < withdrawalReqTotal; i++ { + withdrawalReqQueue <- func() { + idx := enqueuedCount + enqueuedCount++ + + amt := abi.NewTokenAmount(5) + ws, err := s.fm.Withdraw(s.ctx, s.walletAddr, s.acctAddr, amt) + withdrawRes <- &withdrawResult{reqIndex: idx, ws: ws, err: err} + } + } + // Start the first request + fn := <-withdrawalReqQueue + go fn() + + // All withdrawal requests are queued up and ready to be processed + <-queueReady + + // Organize results in request order + results := make([]*withdrawResult, withdrawalReqTotal) + for i := 0; i < 3; i++ { + res := <-withdrawRes + results[res.reqIndex] = res + } + + // Available 10 + // Withdraw 5 + // Expect Success + require.NoError(t, results[0].err) + // Available 5 + // Withdraw 5 + // Expect Success + require.NoError(t, results[1].err) + // Available 0 + // Withdraw 5 + // Expect FAIL + require.Error(t, results[2].err) + + // Expect withdrawal requests that fit under reserved amount to be combined + // into a single message on-chain + require.Equal(t, results[0].ws, results[1].ws) +} + +// TestFundManagerWithdrawByWallet verifies that withdraw requests are grouped by wallet +func TestFundManagerWithdrawByWallet(t *testing.T) { + s := setup(t) + defer s.fm.Stop() + + walletAddrA, err := s.wllt.WalletNew(context.Background(), 
types.KTSecp256k1) + require.NoError(t, err) + walletAddrB, err := s.wllt.WalletNew(context.Background(), types.KTSecp256k1) + require.NoError(t, err) + + // Reserve 10 + reserveAmt := abi.NewTokenAmount(10) + sentinelReserve, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, reserveAmt) + require.NoError(t, err) + s.mockApi.completeMsg(sentinelReserve) + + time.Sleep(10 * time.Millisecond) + + // Release 10 + err = s.fm.Release(s.acctAddr, reserveAmt) + require.NoError(t, err) + + type withdrawResult struct { + ws cid.Cid + err error + } + results := make(chan *withdrawResult) + + // Wait until withdrawals are queued up + walletAQueuedUp := make(chan struct{}) + queueReady := make(chan struct{}) + withdrawalCount := 0 + fa := s.fm.getFundedAddress(s.acctAddr) + fa.onProcessStart(func() bool { + if len(fa.withdrawals) == withdrawalCount { + return false + } + withdrawalCount = len(fa.withdrawals) + + if withdrawalCount == 1 { + close(walletAQueuedUp) + } else if withdrawalCount == 3 { + close(queueReady) + return true + } + return false + }) + + amtA1 := abi.NewTokenAmount(1) + go func() { + // Wallet A: Withdraw 1 + sentinelA1, err := s.fm.Withdraw(s.ctx, walletAddrA, s.acctAddr, amtA1) + results <- &withdrawResult{ + ws: sentinelA1, + err: err, + } + }() + + amtB1 := abi.NewTokenAmount(2) + amtB2 := abi.NewTokenAmount(3) + go func() { + // Wait until withdraw for wallet A is queued up + <-walletAQueuedUp + + // Wallet B: Withdraw 2 + go func() { + sentinelB1, err := s.fm.Withdraw(s.ctx, walletAddrB, s.acctAddr, amtB1) + results <- &withdrawResult{ + ws: sentinelB1, + err: err, + } + }() + + // Wallet B: Withdraw 3 + sentinelB2, err := s.fm.Withdraw(s.ctx, walletAddrB, s.acctAddr, amtB2) + results <- &withdrawResult{ + ws: sentinelB2, + err: err, + } + }() + + // Withdrawals are queued up + <-queueReady + + // Should withdraw from wallet A first + resA1 := <-results + sentinelA1 := resA1.ws + msg := s.mockApi.getSentMessage(sentinelA1) + 
checkWithdrawMessageFields(t, msg, walletAddrA, s.acctAddr, amtA1) + + // Complete wallet A message + s.mockApi.completeMsg(sentinelA1) + + resB1 := <-results + resB2 := <-results + require.NoError(t, resB1.err) + require.NoError(t, resB2.err) + sentinelB1 := resB1.ws + sentinelB2 := resB2.ws + + // Should send different message for wallet B from wallet A + require.NotEqual(t, sentinelA1, sentinelB1) + // Should be single message combining amount 1 and 2 + require.Equal(t, sentinelB1, sentinelB2) + msg = s.mockApi.getSentMessage(sentinelB1) + checkWithdrawMessageFields(t, msg, walletAddrB, s.acctAddr, types.BigAdd(amtB1, amtB2)) +} + +// TestFundManagerRestart verifies that waiting for incomplete requests resumes +// on restart +func TestFundManagerRestart(t *testing.T) { + s := setup(t) + defer s.fm.Stop() + + acctAddr2 := tutils.NewActorAddr(t, "addr2") + + // Address 1: Reserve 10 + amt := abi.NewTokenAmount(10) + sentinelAddr1, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + + msg := s.mockApi.getSentMessage(sentinelAddr1) + checkAddMessageFields(t, msg, s.walletAddr, s.acctAddr, amt) + + // Address 2: Reserve 7 + amt2 := abi.NewTokenAmount(7) + sentinelAddr2Res7, err := s.fm.Reserve(s.ctx, s.walletAddr, acctAddr2, amt2) + require.NoError(t, err) + + msg2 := s.mockApi.getSentMessage(sentinelAddr2Res7) + checkAddMessageFields(t, msg2, s.walletAddr, acctAddr2, amt2) + + // Complete "Address 1: Reserve 10" + s.mockApi.completeMsg(sentinelAddr1) + + // Give the completed state a moment to be stored before restart + time.Sleep(time.Millisecond * 10) + + // Restart + mockApiAfter := s.mockApi + fmAfter := newFundManager(mockApiAfter, s.ds) + err = fmAfter.Start() + require.NoError(t, err) + + amt3 := abi.NewTokenAmount(9) + reserveSentinel := make(chan cid.Cid) + go func() { + // Address 2: Reserve 9 + sentinel3, err := fmAfter.Reserve(s.ctx, s.walletAddr, acctAddr2, amt3) + require.NoError(t, err) + reserveSentinel <- sentinel3 
+ }() + + // Expect no message to be sent, because still waiting for previous + // message "Address 2: Reserve 7" to complete on-chain + select { + case <-reserveSentinel: + require.Fail(t, "Expected no message to be sent") + case <-time.After(10 * time.Millisecond): + } + + // Complete "Address 2: Reserve 7" + mockApiAfter.completeMsg(sentinelAddr2Res7) + + // Expect waiting message to now be sent + sentinel3 := <-reserveSentinel + msg3 := mockApiAfter.getSentMessage(sentinel3) + checkAddMessageFields(t, msg3, s.walletAddr, acctAddr2, amt3) +} + +// TestFundManagerReleaseAfterPublish verifies that release is successful in +// the following scenario: +// 1. Deal A adds 5 to addr1: reserved 0 -> 5 available 0 -> 5 +// 2. Deal B adds 7 to addr1: reserved 5 -> 12 available 5 -> 12 +// 3. Deal B completes, reducing addr1 by 7: reserved 12 available 12 -> 5 +// 4. Deal A releases 5 from addr1: reserved 12 -> 7 available 5 +func TestFundManagerReleaseAfterPublish(t *testing.T) { + s := setup(t) + defer s.fm.Stop() + + // Deal A: Reserve 5 + // balance: 0 -> 5 + // reserved: 0 -> 5 + amt := abi.NewTokenAmount(5) + sentinel, err := s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + s.mockApi.completeMsg(sentinel) + + // Deal B: Reserve 7 + // balance: 5 -> 12 + // reserved: 5 -> 12 + amt = abi.NewTokenAmount(7) + sentinel, err = s.fm.Reserve(s.ctx, s.walletAddr, s.acctAddr, amt) + require.NoError(t, err) + s.mockApi.completeMsg(sentinel) + + // Deal B: Publish (removes Deal B amount from balance) + // balance: 12 -> 5 + // reserved: 12 + amt = abi.NewTokenAmount(7) + s.mockApi.publish(s.acctAddr, amt) + + // Deal A: Release 5 + // balance: 5 + // reserved: 12 -> 7 + amt = abi.NewTokenAmount(5) + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) + + // Deal B: Release 7 + // balance: 5 + // reserved: 12 -> 7 + amt = abi.NewTokenAmount(5) + err = s.fm.Release(s.acctAddr, amt) + require.NoError(t, err) +} + +type scaffold struct { + ctx 
context.Context + ds *ds_sync.MutexDatastore + wllt *wallet.LocalWallet + walletAddr address.Address + acctAddr address.Address + mockApi *mockFundManagerAPI + fm *FundManager +} + +func setup(t *testing.T) *scaffold { + ctx := context.Background() + + wllt, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + walletAddr, err := wllt.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + acctAddr := tutils.NewActorAddr(t, "addr") + + mockApi := newMockFundManagerAPI(walletAddr) + dstore := ds_sync.MutexWrap(ds.NewMapDatastore()) + fm := newFundManager(mockApi, dstore) + return &scaffold{ + ctx: ctx, + ds: dstore, + wllt: wllt, + walletAddr: walletAddr, + acctAddr: acctAddr, + mockApi: mockApi, + fm: fm, + } +} + +func checkAddMessageFields(t *testing.T, msg *types.Message, from address.Address, to address.Address, amt abi.TokenAmount) { + require.Equal(t, from, msg.From) + require.Equal(t, market.Address, msg.To) + require.Equal(t, amt, msg.Value) + + var paramsTo address.Address + err := paramsTo.UnmarshalCBOR(bytes.NewReader(msg.Params)) + require.NoError(t, err) + require.Equal(t, to, paramsTo) +} + +func checkWithdrawMessageFields(t *testing.T, msg *types.Message, from address.Address, addr address.Address, amt abi.TokenAmount) { + require.Equal(t, from, msg.From) + require.Equal(t, market.Address, msg.To) + require.Equal(t, abi.NewTokenAmount(0), msg.Value) + + var params market.WithdrawBalanceParams + err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)) + require.NoError(t, err) + require.Equal(t, addr, params.ProviderOrClientAddress) + require.Equal(t, amt, params.Amount) +} + +type sentMsg struct { + msg *types.SignedMessage + ready chan struct{} +} + +type mockFundManagerAPI struct { + wallet address.Address + + lk sync.Mutex + escrow map[address.Address]abi.TokenAmount + sentMsgs map[cid.Cid]*sentMsg + completedMsgs map[cid.Cid]struct{} + waitingFor map[cid.Cid]chan struct{} +} 
+ +func newMockFundManagerAPI(wallet address.Address) *mockFundManagerAPI { + return &mockFundManagerAPI{ + wallet: wallet, + escrow: make(map[address.Address]abi.TokenAmount), + sentMsgs: make(map[cid.Cid]*sentMsg), + completedMsgs: make(map[cid.Cid]struct{}), + waitingFor: make(map[cid.Cid]chan struct{}), + } +} + +func (mapi *mockFundManagerAPI) MpoolPushMessage(ctx context.Context, message *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + smsg := &types.SignedMessage{Message: *message} + mapi.sentMsgs[smsg.Cid()] = &sentMsg{msg: smsg, ready: make(chan struct{})} + + return smsg, nil +} + +func (mapi *mockFundManagerAPI) getSentMessage(c cid.Cid) *types.Message { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + for i := 0; i < 1000; i++ { + if pending, ok := mapi.sentMsgs[c]; ok { + return &pending.msg.Message + } + time.Sleep(time.Millisecond) + } + panic("expected message to be sent") +} + +func (mapi *mockFundManagerAPI) messageCount() int { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + return len(mapi.sentMsgs) +} + +func (mapi *mockFundManagerAPI) completeMsg(msgCid cid.Cid) { + mapi.lk.Lock() + + pmsg, ok := mapi.sentMsgs[msgCid] + if ok { + if pmsg.msg.Message.Method == market.Methods.AddBalance { + var escrowAcct address.Address + err := escrowAcct.UnmarshalCBOR(bytes.NewReader(pmsg.msg.Message.Params)) + if err != nil { + panic(err) + } + + escrow := mapi.getEscrow(escrowAcct) + before := escrow + escrow = types.BigAdd(escrow, pmsg.msg.Message.Value) + mapi.escrow[escrowAcct] = escrow + log.Debugf("%s: escrow %d -> %d", escrowAcct, before, escrow) + } else { + var params market.WithdrawBalanceParams + err := params.UnmarshalCBOR(bytes.NewReader(pmsg.msg.Message.Params)) + if err != nil { + panic(err) + } + escrowAcct := params.ProviderOrClientAddress + + escrow := mapi.getEscrow(escrowAcct) + before := escrow + escrow = types.BigSub(escrow, params.Amount) + mapi.escrow[escrowAcct] = 
escrow + log.Debugf("%s: escrow %d -> %d", escrowAcct, before, escrow) + } + } + + mapi.completedMsgs[msgCid] = struct{}{} + + ready, ok := mapi.waitingFor[msgCid] + + mapi.lk.Unlock() + + if ok { + close(ready) + } +} + +func (mapi *mockFundManagerAPI) StateMarketBalance(ctx context.Context, a address.Address, key types.TipSetKey) (api.MarketBalance, error) { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + return api.MarketBalance{ + Locked: abi.NewTokenAmount(0), + Escrow: mapi.getEscrow(a), + }, nil +} + +func (mapi *mockFundManagerAPI) getEscrow(a address.Address) abi.TokenAmount { + escrow := mapi.escrow[a] + if escrow.Nil() { + return abi.NewTokenAmount(0) + } + return escrow +} + +func (mapi *mockFundManagerAPI) publish(addr address.Address, amt abi.TokenAmount) { + mapi.lk.Lock() + defer mapi.lk.Unlock() + + escrow := mapi.escrow[addr] + if escrow.Nil() { + return + } + escrow = types.BigSub(escrow, amt) + if escrow.LessThan(abi.NewTokenAmount(0)) { + escrow = abi.NewTokenAmount(0) + } + mapi.escrow[addr] = escrow +} + +func (mapi *mockFundManagerAPI) StateWaitMsg(ctx context.Context, c cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { + res := &api.MsgLookup{ + Message: c, + Receipt: types.MessageReceipt{ + ExitCode: 0, + Return: nil, + }, + } + ready := make(chan struct{}) + + mapi.lk.Lock() + _, ok := mapi.completedMsgs[c] + if !ok { + mapi.waitingFor[c] = ready + } + mapi.lk.Unlock() + + if !ok { + select { + case <-ctx.Done(): + case <-ready: + } + } + return res, nil +} diff --git a/chain/market/fundmgr.go b/chain/market/fundmgr.go deleted file mode 100644 index 50467a6e153..00000000000 --- a/chain/market/fundmgr.go +++ /dev/null @@ -1,166 +0,0 @@ -package market - -import ( - "context" - "sync" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" - 
"go.uber.org/fx" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/events/state" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/impl/full" -) - -var log = logging.Logger("market_adapter") - -// API is the dependencies need to run a fund manager -type API struct { - fx.In - - full.ChainAPI - full.StateAPI - full.MpoolAPI -} - -// FundMgr monitors available balances and adds funds when EnsureAvailable is called -type FundMgr struct { - api fundMgrAPI - - lk sync.RWMutex - available map[address.Address]types.BigInt -} - -// StartFundManager creates a new fund manager and sets up event hooks to manage state changes -func StartFundManager(lc fx.Lifecycle, api API) *FundMgr { - fm := newFundMgr(&api) - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - ev := events.NewEvents(ctx, &api) - preds := state.NewStatePredicates(&api) - dealDiffFn := preds.OnStorageMarketActorChanged(preds.OnBalanceChanged(preds.AvailableBalanceChangedForAddresses(fm.getAddresses))) - match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { - return dealDiffFn(ctx, oldTs.Key(), newTs.Key()) - } - return ev.StateChanged(fm.checkFunc, fm.stateChanged, fm.revert, 0, events.NoTimeout, match) - }, - }) - return fm -} - -type fundMgrAPI interface { - StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) - MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error) - StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) -} - -func newFundMgr(api fundMgrAPI) *FundMgr { - return &FundMgr{ - api: api, - available: map[address.Address]types.BigInt{}, - } -} - -// checkFunc tells the events api to simply 
proceed (we always want to watch) -func (fm *FundMgr) checkFunc(ts *types.TipSet) (done bool, more bool, err error) { - return false, true, nil -} - -// revert handles reverts to balances -func (fm *FundMgr) revert(ctx context.Context, ts *types.TipSet) error { - // TODO: Is it ok to just ignore this? - log.Warn("balance change reverted; TODO: actually handle this!") - return nil -} - -// stateChanged handles balance changes monitored on the chain from one tipset to the next -func (fm *FundMgr) stateChanged(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) { - changedBalances, ok := states.(state.ChangedBalances) - if !ok { - panic("Expected state.ChangedBalances") - } - // overwrite our in memory cache with new values from chain (chain is canonical) - fm.lk.Lock() - for addr, balanceChange := range changedBalances { - if fm.available[addr].Int != nil { - log.Infof("State balance change recorded, prev: %s, new: %s", fm.available[addr].String(), balanceChange.To.String()) - } - - fm.available[addr] = balanceChange.To - } - fm.lk.Unlock() - return true, nil -} - -func (fm *FundMgr) getAddresses() []address.Address { - fm.lk.RLock() - defer fm.lk.RUnlock() - addrs := make([]address.Address, 0, len(fm.available)) - for addr := range fm.available { - addrs = append(addrs, addr) - } - return addrs -} - -// EnsureAvailable looks at the available balance in escrow for a given -// address, and if less than the passed in amount, adds the difference -func (fm *FundMgr) EnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) { - idAddr, err := fm.api.StateLookupID(ctx, addr, types.EmptyTSK) - if err != nil { - return cid.Undef, err - } - fm.lk.Lock() - defer fm.lk.Unlock() - - bal, err := fm.api.StateMarketBalance(ctx, addr, types.EmptyTSK) - if err != nil { - return cid.Undef, err - } - - stateAvail := types.BigSub(bal.Escrow, bal.Locked) - - avail, ok := fm.available[idAddr] 
- if !ok { - avail = stateAvail - } - - toAdd := types.BigSub(amt, avail) - if toAdd.LessThan(types.NewInt(0)) { - toAdd = types.NewInt(0) - } - fm.available[idAddr] = big.Add(avail, toAdd) - - log.Infof("Funds operation w/ Expected Balance: %s, In State: %s, Requested: %s, Adding: %s", avail.String(), stateAvail.String(), amt.String(), toAdd.String()) - - if toAdd.LessThanEqual(big.Zero()) { - return cid.Undef, nil - } - - params, err := actors.SerializeParams(&addr) - if err != nil { - fm.available[idAddr] = avail - return cid.Undef, err - } - - smsg, err := fm.api.MpoolPushMessage(ctx, &types.Message{ - To: market.Address, - From: wallet, - Value: toAdd, - Method: market.Methods.AddBalance, - Params: params, - }, nil) - if err != nil { - fm.available[idAddr] = avail - return cid.Undef, err - } - - return smsg.Cid(), nil -} diff --git a/chain/market/fundmgr_test.go b/chain/market/fundmgr_test.go deleted file mode 100644 index 88ca2e16fa4..00000000000 --- a/chain/market/fundmgr_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package market - -import ( - "context" - "errors" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - - tutils "github.com/filecoin-project/specs-actors/v2/support/testing" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/types" -) - -type fakeAPI struct { - returnedBalance api.MarketBalance - returnedBalanceErr error - signature crypto.Signature - receivedMessage *types.Message - pushMessageErr error - lookupIDErr error -} - -func (fapi *fakeAPI) StateLookupID(_ context.Context, addr address.Address, _ types.TipSetKey) (address.Address, error) { - return addr, fapi.lookupIDErr -} -func (fapi *fakeAPI) 
StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) { - return fapi.returnedBalance, fapi.returnedBalanceErr -} - -func (fapi *fakeAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) { - fapi.receivedMessage = msg - return &types.SignedMessage{ - Message: *msg, - Signature: fapi.signature, - }, fapi.pushMessageErr -} - -func addFundsMsg(toAdd abi.TokenAmount, addr address.Address, wallet address.Address) *types.Message { - params, _ := actors.SerializeParams(&addr) - return &types.Message{ - To: market.Address, - From: wallet, - Value: toAdd, - Method: market.Methods.AddBalance, - Params: params, - } -} - -type expectedResult struct { - addAmt abi.TokenAmount - shouldAdd bool - err error - cachedAvailable abi.TokenAmount -} - -func TestAddFunds(t *testing.T) { - ctx := context.Background() - testCases := map[string]struct { - returnedBalanceErr error - returnedBalance api.MarketBalance - addAmounts []abi.TokenAmount - pushMessageErr error - expectedResults []expectedResult - lookupIDErr error - }{ - "succeeds, trivial case": { - returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(0), Locked: abi.NewTokenAmount(0)}, - addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, - expectedResults: []expectedResult{ - { - addAmt: abi.NewTokenAmount(100), - shouldAdd: true, - err: nil, - }, - }, - }, - "succeeds, money already present": { - returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(150), Locked: abi.NewTokenAmount(50)}, - addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, - expectedResults: []expectedResult{ - { - shouldAdd: false, - err: nil, - cachedAvailable: abi.NewTokenAmount(100), - }, - }, - }, - "succeeds, multiple adds": { - returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(150), Locked: abi.NewTokenAmount(50)}, - addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100), abi.NewTokenAmount(200), 
abi.NewTokenAmount(250), abi.NewTokenAmount(250)}, - expectedResults: []expectedResult{ - { - shouldAdd: false, - err: nil, - }, - { - addAmt: abi.NewTokenAmount(100), - shouldAdd: true, - err: nil, - cachedAvailable: abi.NewTokenAmount(200), - }, - { - addAmt: abi.NewTokenAmount(50), - shouldAdd: true, - err: nil, - cachedAvailable: abi.NewTokenAmount(250), - }, - { - shouldAdd: false, - err: nil, - cachedAvailable: abi.NewTokenAmount(250), - }, - }, - }, - "error on market balance": { - returnedBalanceErr: errors.New("something went wrong"), - addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, - expectedResults: []expectedResult{ - { - err: errors.New("something went wrong"), - }, - }, - }, - "error on push message": { - returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(0), Locked: abi.NewTokenAmount(0)}, - pushMessageErr: errors.New("something went wrong"), - addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, - expectedResults: []expectedResult{ - { - err: errors.New("something went wrong"), - cachedAvailable: abi.NewTokenAmount(0), - }, - }, - }, - "error looking up address": { - lookupIDErr: errors.New("something went wrong"), - addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, - expectedResults: []expectedResult{ - { - err: errors.New("something went wrong"), - }, - }, - }, - } - - for testCase, data := range testCases { - //nolint:scopelint - t.Run(testCase, func(t *testing.T) { - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - sig := make([]byte, 100) - _, err := rand.Read(sig) - require.NoError(t, err) - fapi := &fakeAPI{ - returnedBalance: data.returnedBalance, - returnedBalanceErr: data.returnedBalanceErr, - signature: crypto.Signature{ - Type: crypto.SigTypeUnknown, - Data: sig, - }, - pushMessageErr: data.pushMessageErr, - lookupIDErr: data.lookupIDErr, - } - fundMgr := newFundMgr(fapi) - addr := tutils.NewIDAddr(t, uint64(rand.Uint32())) - wallet := tutils.NewIDAddr(t, uint64(rand.Uint32())) - for 
i, amount := range data.addAmounts { - fapi.receivedMessage = nil - _, err := fundMgr.EnsureAvailable(ctx, addr, wallet, amount) - expected := data.expectedResults[i] - if expected.err == nil { - require.NoError(t, err) - if expected.shouldAdd { - expectedMessage := addFundsMsg(expected.addAmt, addr, wallet) - require.Equal(t, expectedMessage, fapi.receivedMessage) - } else { - require.Nil(t, fapi.receivedMessage) - } - } else { - require.EqualError(t, err, expected.err.Error()) - } - - if !expected.cachedAvailable.Nil() { - require.Equal(t, expected.cachedAvailable, fundMgr.available[addr]) - } - } - }) - } -} diff --git a/chain/market/store.go b/chain/market/store.go new file mode 100644 index 00000000000..e0d0e10be38 --- /dev/null +++ b/chain/market/store.go @@ -0,0 +1,90 @@ +package market + +import ( + "bytes" + + cborrpc "github.com/filecoin-project/go-cbor-util" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + dsq "github.com/ipfs/go-datastore/query" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +const dsKeyAddr = "Addr" + +type Store struct { + ds datastore.Batching +} + +func newStore(ds dtypes.MetadataDS) *Store { + ds = namespace.Wrap(ds, datastore.NewKey("/fundmgr/")) + return &Store{ + ds: ds, + } +} + +// save the state to the datastore +func (ps *Store) save(state *FundedAddressState) error { + k := dskeyForAddr(state.Addr) + + b, err := cborrpc.Dump(state) + if err != nil { + return err + } + + return ps.ds.Put(k, b) +} + +// get the state for the given address +func (ps *Store) get(addr address.Address) (*FundedAddressState, error) { + k := dskeyForAddr(addr) + + data, err := ps.ds.Get(k) + if err != nil { + return nil, err + } + + var state FundedAddressState + err = cborrpc.ReadCborRPC(bytes.NewReader(data), &state) + if err != nil { + return nil, err + } + return &state, nil +} + +// forEach calls iter with each address in the datastore +func (ps *Store) 
forEach(iter func(*FundedAddressState)) error { + res, err := ps.ds.Query(dsq.Query{Prefix: dsKeyAddr}) + if err != nil { + return err + } + defer res.Close() //nolint:errcheck + + for { + res, ok := res.NextSync() + if !ok { + break + } + + if res.Error != nil { + return err + } + + var stored FundedAddressState + if err := stored.UnmarshalCBOR(bytes.NewReader(res.Value)); err != nil { + return err + } + + iter(&stored) + } + + return nil +} + +// The datastore key used to identify the address state +func dskeyForAddr(addr address.Address) datastore.Key { + return datastore.KeyWithNamespaces([]string{dsKeyAddr, addr.String()}) +} diff --git a/chain/messagepool/check.go b/chain/messagepool/check.go new file mode 100644 index 00000000000..11203e7dffd --- /dev/null +++ b/chain/messagepool/check.go @@ -0,0 +1,431 @@ +package messagepool + +import ( + "context" + "fmt" + stdbig "math/big" + "sort" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" +) + +var baseFeeUpperBoundFactor = types.NewInt(10) + +// CheckMessages performs a set of logic checks for a list of messages, prior to submitting it to the mpool +func (mp *MessagePool) CheckMessages(ctx context.Context, protos []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) { + flex := make([]bool, len(protos)) + msgs := make([]*types.Message, len(protos)) + for i, p := range protos { + flex[i] = !p.ValidNonce + msgs[i] = &p.Message + } + return mp.checkMessages(ctx, msgs, false, flex) +} + +// CheckPendingMessages performs a set of logical sets for all messages pending from a given actor +func (mp *MessagePool) CheckPendingMessages(ctx context.Context, from address.Address) ([][]api.MessageCheckStatus, error) { + var msgs []*types.Message + mp.lk.Lock() + 
mset, ok := mp.pending[from] + if ok { + for _, sm := range mset.msgs { + msgs = append(msgs, &sm.Message) + } + } + mp.lk.Unlock() + + if len(msgs) == 0 { + return nil, nil + } + + sort.Slice(msgs, func(i, j int) bool { + return msgs[i].Nonce < msgs[j].Nonce + }) + + return mp.checkMessages(ctx, msgs, true, nil) +} + +// CheckReplaceMessages performs a set of logical checks for related messages while performing a +// replacement. +func (mp *MessagePool) CheckReplaceMessages(ctx context.Context, replace []*types.Message) ([][]api.MessageCheckStatus, error) { + msgMap := make(map[address.Address]map[uint64]*types.Message) + count := 0 + + mp.lk.Lock() + for _, m := range replace { + mmap, ok := msgMap[m.From] + if !ok { + mmap = make(map[uint64]*types.Message) + msgMap[m.From] = mmap + mset, ok := mp.pending[m.From] + if ok { + count += len(mset.msgs) + for _, sm := range mset.msgs { + mmap[sm.Message.Nonce] = &sm.Message + } + } else { + count++ + } + } + mmap[m.Nonce] = m + } + mp.lk.Unlock() + + msgs := make([]*types.Message, 0, count) + start := 0 + for _, mmap := range msgMap { + end := start + len(mmap) + + for _, m := range mmap { + msgs = append(msgs, m) + } + + sort.Slice(msgs[start:end], func(i, j int) bool { + return msgs[start+i].Nonce < msgs[start+j].Nonce + }) + + start = end + } + + return mp.checkMessages(ctx, msgs, true, nil) +} + +// flexibleNonces should be either nil or of len(msgs), it signifies that message at given index +// has non-determied nonce at this point +func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message, interned bool, flexibleNonces []bool) (result [][]api.MessageCheckStatus, err error) { + if mp.api.IsLite() { + return nil, nil + } + mp.curTsLk.Lock() + curTs := mp.curTs + mp.curTsLk.Unlock() + + epoch := curTs.Height() + + var baseFee big.Int + if len(curTs.Blocks()) > 0 { + baseFee = curTs.Blocks()[0].ParentBaseFee + } else { + baseFee, err = mp.api.ChainComputeBaseFee(context.Background(), curTs) + 
if err != nil { + return nil, xerrors.Errorf("error computing basefee: %w", err) + } + } + + baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor) + baseFeeUpperBound := types.BigMul(baseFee, baseFeeUpperBoundFactor) + + type actorState struct { + nextNonce uint64 + requiredFunds *stdbig.Int + } + + state := make(map[address.Address]*actorState) + balances := make(map[address.Address]big.Int) + + result = make([][]api.MessageCheckStatus, len(msgs)) + + for i, m := range msgs { + // pre-check: actor nonce + check := api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageGetStateNonce, + }, + } + + st, ok := state[m.From] + if !ok { + mp.lk.Lock() + mset, ok := mp.pending[m.From] + if ok && !interned { + st = &actorState{nextNonce: mset.nextNonce, requiredFunds: mset.requiredFunds} + for _, m := range mset.msgs { + st.requiredFunds = new(stdbig.Int).Add(st.requiredFunds, m.Message.Value.Int) + } + state[m.From] = st + mp.lk.Unlock() + + check.OK = true + check.Hint = map[string]interface{}{ + "nonce": st.nextNonce, + } + } else { + mp.lk.Unlock() + + stateNonce, err := mp.getStateNonce(ctx, m.From, curTs) + if err != nil { + check.OK = false + check.Err = fmt.Sprintf("error retrieving state nonce: %s", err.Error()) + } else { + check.OK = true + check.Hint = map[string]interface{}{ + "nonce": stateNonce, + } + } + + st = &actorState{nextNonce: stateNonce, requiredFunds: new(stdbig.Int)} + state[m.From] = st + } + } else { + check.OK = true + } + + result[i] = append(result[i], check) + if !check.OK { + continue + } + + // pre-check: actor balance + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageGetStateBalance, + }, + } + + balance, ok := balances[m.From] + if !ok { + balance, err = mp.getStateBalance(ctx, m.From, curTs) + if err != nil { + check.OK = false + check.Err = fmt.Sprintf("error retrieving state balance: %s", err) + } else { + 
check.OK = true + check.Hint = map[string]interface{}{ + "balance": balance, + } + } + + balances[m.From] = balance + } else { + check.OK = true + check.Hint = map[string]interface{}{ + "balance": balance, + } + } + + result[i] = append(result[i], check) + if !check.OK { + continue + } + + // 1. Serialization + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageSerialize, + }, + } + + bytes, err := m.Serialize() + if err != nil { + check.OK = false + check.Err = err.Error() + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 2. Message size + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageSize, + }, + } + + if len(bytes) > MaxMessageSize-128 { // 128 bytes to account for signature size + check.OK = false + check.Err = "message too big" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 3. Syntactic validation + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageValidity, + }, + } + + if err := m.ValidForBlockInclusion(0, build.NewestNetworkVersion); err != nil { + check.OK = false + check.Err = fmt.Sprintf("syntactically invalid message: %s", err.Error()) + } else { + check.OK = true + } + + result[i] = append(result[i], check) + if !check.OK { + // skip remaining checks if it is a syntatically invalid message + continue + } + + // gas checks + + // 4. Min Gas + minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength()) + + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageMinGas, + Hint: map[string]interface{}{ + "minGas": minGas, + }, + }, + } + + if m.GasLimit < minGas.Total() { + check.OK = false + check.Err = "GasLimit less than epoch minimum gas" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 5. 
Min Base Fee + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageMinBaseFee, + }, + } + + if m.GasFeeCap.LessThan(minimumBaseFee) { + check.OK = false + check.Err = "GasFeeCap less than minimum base fee" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + if !check.OK { + goto checkState + } + + // 6. Base Fee + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageBaseFee, + Hint: map[string]interface{}{ + "baseFee": baseFee, + }, + }, + } + + if m.GasFeeCap.LessThan(baseFee) { + check.OK = false + check.Err = "GasFeeCap less than current base fee" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 7. Base Fee lower bound + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageBaseFeeLowerBound, + Hint: map[string]interface{}{ + "baseFeeLowerBound": baseFeeLowerBound, + "baseFee": baseFee, + }, + }, + } + + if m.GasFeeCap.LessThan(baseFeeLowerBound) { + check.OK = false + check.Err = "GasFeeCap less than base fee lower bound for inclusion in next 20 epochs" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 8. Base Fee upper bound + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageBaseFeeUpperBound, + Hint: map[string]interface{}{ + "baseFeeUpperBound": baseFeeUpperBound, + "baseFee": baseFee, + }, + }, + } + + if m.GasFeeCap.LessThan(baseFeeUpperBound) { + check.OK = true // on purpose, the checks is more of a warning + check.Err = "GasFeeCap less than base fee upper bound for inclusion in next 20 epochs" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // stateful checks + checkState: + // 9. 
Message Nonce + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageNonce, + Hint: map[string]interface{}{ + "nextNonce": st.nextNonce, + }, + }, + } + + if (flexibleNonces == nil || !flexibleNonces[i]) && st.nextNonce != m.Nonce { + check.OK = false + check.Err = fmt.Sprintf("message nonce doesn't match next nonce (%d)", st.nextNonce) + } else { + check.OK = true + st.nextNonce++ + } + + result[i] = append(result[i], check) + + // check required funds -vs- balance + st.requiredFunds = new(stdbig.Int).Add(st.requiredFunds, m.RequiredFunds().Int) + st.requiredFunds.Add(st.requiredFunds, m.Value.Int) + + // 10. Balance + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageBalance, + Hint: map[string]interface{}{ + "requiredFunds": big.Int{Int: stdbig.NewInt(0).Set(st.requiredFunds)}, + }, + }, + } + + if balance.Int.Cmp(st.requiredFunds) < 0 { + check.OK = false + check.Err = "insufficient balance" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + } + + return result, nil +} diff --git a/chain/messagepool/config.go b/chain/messagepool/config.go index f8f0ee98583..a511f84b7f4 100644 --- a/chain/messagepool/config.go +++ b/chain/messagepool/config.go @@ -48,9 +48,13 @@ func saveConfig(cfg *types.MpoolConfig, ds dtypes.MetadataDS) error { } func (mp *MessagePool) GetConfig() *types.MpoolConfig { - mp.cfgLk.Lock() - defer mp.cfgLk.Unlock() - return mp.cfg.Clone() + return mp.getConfig().Clone() +} + +func (mp *MessagePool) getConfig() *types.MpoolConfig { + mp.cfgLk.RLock() + defer mp.cfgLk.RUnlock() + return mp.cfg } func validateConfg(cfg *types.MpoolConfig) error { diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 79ab572ba91..f6c8e3ac998 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -34,6 +34,7 @@ import ( "github.com/filecoin-project/lotus/chain/vm" 
"github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/lib/sigs" + "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/raulk/clock" @@ -59,7 +60,7 @@ var MaxUntrustedActorPendingMessages = 10 var MaxNonceGap = uint64(4) -var DefaultMaxFee = abi.TokenAmount(types.MustParseFIL("0.007")) +const MaxMessageSize = 64 << 10 // 64KiB var ( ErrMessageTooBig = errors.New("message too big") @@ -128,14 +129,18 @@ type MessagePool struct { republished map[cid.Cid]struct{} + // do NOT access this map directly, use isLocal, setLocal, and forEachLocal respectively localAddrs map[address.Address]struct{} + // do NOT access this map directly, use getPendingMset, setPendingMset, deletePendingMset, forEachPending, and clearPending respectively pending map[address.Address]*msgSet + keyCache map[address.Address]address.Address + curTsLk sync.Mutex // DO NOT LOCK INSIDE lk curTs *types.TipSet - cfgLk sync.Mutex + cfgLk sync.RWMutex cfg *types.MpoolConfig api Provider @@ -183,9 +188,18 @@ func ComputeMinRBF(curPrem abi.TokenAmount) abi.TokenAmount { return types.BigAdd(minPrice, types.NewInt(1)) } -func CapGasFee(msg *types.Message, maxFee abi.TokenAmount) { - if maxFee.Equals(big.Zero()) { - maxFee = DefaultMaxFee +func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSepc *api.MessageSendSpec) { + var maxFee abi.TokenAmount + if sendSepc != nil { + maxFee = sendSepc.MaxFee + } + if maxFee.Int == nil || maxFee.Equals(big.Zero()) { + mf, err := mff() + if err != nil { + log.Errorf("failed to get default max gas fee: %+v", err) + mf = big.Zero() + } + maxFee = mf } gl := types.NewInt(uint64(msg.GasLimit)) @@ -236,10 +250,13 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted // check if RBF passes minPrice := ComputeMinRBF(exms.Message.GasPremium) if types.BigCmp(m.Message.GasPremium, minPrice) >= 0 { - log.Infow("add with RBF", "oldpremium", 
exms.Message.GasPremium, + log.Debugw("add with RBF", "oldpremium", exms.Message.GasPremium, "newpremium", m.Message.GasPremium, "addr", m.Message.From, "nonce", m.Message.Nonce) } else { - log.Info("add with duplicate nonce") + log.Debugf("add with duplicate nonce. message from %s with nonce %d already in mpool,"+ + " increase GasPremium to %s from %s to trigger replace by fee: %s", + m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium, + ErrRBFTooLowPremium) return false, xerrors.Errorf("message from %s with nonce %d already in mpool,"+ " increase GasPremium to %s from %s to trigger replace by fee: %w", m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium, @@ -260,7 +277,7 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted } if strict && nonceGap { - log.Warnf("adding nonce-gapped message from %s (nonce: %d, nextNonce: %d)", + log.Debugf("adding nonce-gapped message from %s (nonce: %d, nextNonce: %d)", m.Message.From, m.Message.Nonce, nextNonce) } @@ -319,6 +336,20 @@ func (ms *msgSet) getRequiredFunds(nonce uint64) types.BigInt { return types.BigInt{Int: requiredFunds} } +func (ms *msgSet) toSlice() []*types.SignedMessage { + set := make([]*types.SignedMessage, 0, len(ms.msgs)) + + for _, m := range ms.msgs { + set = append(set, m) + } + + sort.Slice(set, func(i, j int) bool { + return set[i].Message.Nonce < set[j].Message.Nonce + }) + + return set +} + func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) { cache, _ := lru.New2Q(build.BlsSignatureCacheSize) verifcache, _ := lru.New2Q(build.VerifSigCacheSize) @@ -340,6 +371,7 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ repubTrigger: make(chan struct{}, 1), localAddrs: make(map[address.Address]struct{}), pending: make(map[address.Address]*msgSet), + keyCache: make(map[address.Address]address.Address), minGasPrice: types.NewInt(0), pruneTrigger: make(chan 
struct{}, 1), pruneCooldown: make(chan struct{}, 1), @@ -361,9 +393,11 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ // enable initial prunes mp.pruneCooldown <- struct{}{} + ctx, cancel := context.WithCancel(context.TODO()) + // load the current tipset and subscribe to head changes _before_ loading local messages mp.curTs = api.SubscribeHeadChanges(func(rev, app []*types.TipSet) error { - err := mp.HeadChange(rev, app) + err := mp.HeadChange(ctx, rev, app) if err != nil { log.Errorf("mpool head notif handler error: %+v", err) } @@ -374,7 +408,8 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ mp.lk.Lock() go func() { - err := mp.loadLocal() + defer cancel() + err := mp.loadLocal(ctx) mp.lk.Unlock() mp.curTsLk.Unlock() @@ -385,12 +420,127 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ log.Info("mpool ready") - mp.runLoop() + mp.runLoop(ctx) }() return mp, nil } +func (mp *MessagePool) ForEachPendingMessage(f func(cid.Cid) error) error { + mp.lk.Lock() + defer mp.lk.Unlock() + + for _, mset := range mp.pending { + for _, m := range mset.msgs { + err := f(m.Cid()) + if err != nil { + return err + } + + err = f(m.Message.Cid()) + if err != nil { + return err + } + } + } + + return nil +} + +func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (address.Address, error) { + // check the cache + a, f := mp.keyCache[addr] + if f { + return a, nil + } + + // resolve the address + ka, err := mp.api.StateAccountKeyAtFinality(ctx, addr, mp.curTs) + if err != nil { + return address.Undef, err + } + + // place both entries in the cache (may both be key addresses, which is fine) + mp.keyCache[addr] = ka + mp.keyCache[ka] = ka + + return ka, nil +} + +func (mp *MessagePool) getPendingMset(ctx context.Context, addr address.Address) (*msgSet, bool, error) { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return nil, false, err + } + + 
ms, f := mp.pending[ra] + + return ms, f, nil +} + +func (mp *MessagePool) setPendingMset(ctx context.Context, addr address.Address, ms *msgSet) error { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return err + } + + mp.pending[ra] = ms + + return nil +} + +// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have +func (mp *MessagePool) forEachPending(f func(address.Address, *msgSet)) { + for la, ms := range mp.pending { + f(la, ms) + } +} + +func (mp *MessagePool) deletePendingMset(ctx context.Context, addr address.Address) error { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return err + } + + delete(mp.pending, ra) + + return nil +} + +// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have +func (mp *MessagePool) clearPending() { + mp.pending = make(map[address.Address]*msgSet) +} + +func (mp *MessagePool) isLocal(ctx context.Context, addr address.Address) (bool, error) { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return false, err + } + + _, f := mp.localAddrs[ra] + + return f, nil +} + +func (mp *MessagePool) setLocal(ctx context.Context, addr address.Address) error { + ra, err := mp.resolveToKey(ctx, addr) + if err != nil { + return err + } + + mp.localAddrs[ra] = struct{}{} + + return nil +} + +// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have +func (mp *MessagePool) forEachLocal(ctx context.Context, f func(context.Context, address.Address)) { + for la := range mp.localAddrs { + f(ctx, la) + } +} + func (mp *MessagePool) Close() error { close(mp.closer) return nil @@ -408,15 +558,15 @@ func (mp *MessagePool) Prune() { mp.pruneTrigger <- struct{}{} } -func (mp *MessagePool) runLoop() { +func (mp *MessagePool) runLoop(ctx context.Context) { for { select { case <-mp.repubTk.C: - if err := mp.republishPendingMessages(); err != nil { + if err := 
mp.republishPendingMessages(ctx); err != nil { log.Errorf("error while republishing messages: %s", err) } case <-mp.repubTrigger: - if err := mp.republishPendingMessages(); err != nil { + if err := mp.republishPendingMessages(ctx); err != nil { log.Errorf("error while republishing messages: %s", err) } @@ -432,8 +582,10 @@ func (mp *MessagePool) runLoop() { } } -func (mp *MessagePool) addLocal(m *types.SignedMessage) error { - mp.localAddrs[m.Message.From] = struct{}{} +func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) error { + if err := mp.setLocal(ctx, m.Message.From); err != nil { + return err + } msgb, err := m.Serialize() if err != nil { @@ -447,7 +599,7 @@ func (mp *MessagePool) addLocal(m *types.SignedMessage) error { return nil } -// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusio +// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusion // and whether the message has enough funds to be included in the next 20 blocks. // If the message is not valid for block inclusion, it returns an error. 
// For local messages, if the message can be included in the next 20 blocks, it returns true to @@ -461,11 +613,11 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T epoch := curTs.Height() minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength()) - if err := m.VMMessage().ValidForBlockInclusion(minGas.Total()); err != nil { + if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil { return false, xerrors.Errorf("message will not be included in a block: %w", err) } - // this checks if the GasFeeCap is suffisciently high for inclusion in the next 20 blocks + // this checks if the GasFeeCap is sufficiently high for inclusion in the next 20 blocks // if the GasFeeCap is too low, we soft reject the message (Ignore in pubsub) and rely // on republish to push it through later, if the baseFee has fallen. // this is a defensive check that stops minimum baseFee spam attacks from overloading validation @@ -500,7 +652,10 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T return publish, nil } -func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) { +func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) { + done := metrics.Timer(ctx, metrics.MpoolPushDuration) + defer done() + err := mp.checkMessage(m) if err != nil { return cid.Undef, err @@ -513,7 +668,7 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) { }() mp.curTsLk.Lock() - publish, err := mp.addTs(m, mp.curTs, true, false) + publish, err := mp.addTs(ctx, m, mp.curTs, true, false) if err != nil { mp.curTsLk.Unlock() return cid.Undef, err @@ -537,12 +692,12 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) { func (mp *MessagePool) checkMessage(m *types.SignedMessage) error { // big messages are bad, anti DOS - if m.Size() > 32*1024 { + if m.Size() > MaxMessageSize { return xerrors.Errorf("mpool message too 
large (%dB): %w", m.Size(), ErrMessageTooBig) } // Perform syntactic validation, minGas=0 as we check the actual mingas before we add it - if err := m.Message.ValidForBlockInclusion(0); err != nil { + if err := m.Message.ValidForBlockInclusion(0, build.NewestNetworkVersion); err != nil { return xerrors.Errorf("message not valid for block inclusion: %w", err) } @@ -566,7 +721,10 @@ func (mp *MessagePool) checkMessage(m *types.SignedMessage) error { return nil } -func (mp *MessagePool) Add(m *types.SignedMessage) error { +func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error { + done := metrics.Timer(ctx, metrics.MpoolAddDuration) + defer done() + err := mp.checkMessage(m) if err != nil { return err @@ -581,7 +739,7 @@ func (mp *MessagePool) Add(m *types.SignedMessage) error { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() - _, err = mp.addTs(m, mp.curTs, false, false) + _, err = mp.addTs(ctx, m, mp.curTs, false, false) return err } @@ -621,8 +779,8 @@ func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error { return nil } -func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet) error { - balance, err := mp.getStateBalance(m.Message.From, curTs) +func (mp *MessagePool) checkBalance(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet) error { + balance, err := mp.getStateBalance(ctx, m.Message.From, curTs) if err != nil { return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrSoftValidationFailure) } @@ -635,7 +793,12 @@ func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet) // add Value for soft failure check //requiredFunds = types.BigAdd(requiredFunds, m.Message.Value) - mset, ok := mp.pending[m.Message.From] + mset, ok, err := mp.getPendingMset(ctx, m.Message.From) + if err != nil { + log.Debugf("mpoolcheckbalance failed to get pending mset: %s", err) + return err + } + if ok { requiredFunds = types.BigAdd(requiredFunds, 
mset.getRequiredFunds(m.Message.Nonce)) } @@ -649,8 +812,11 @@ func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet) return nil } -func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local, untrusted bool) (bool, error) { - snonce, err := mp.getStateNonce(m.Message.From, curTs) +func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet, local, untrusted bool) (bool, error) { + done := metrics.Timer(ctx, metrics.MpoolAddTsDuration) + defer done() + + snonce, err := mp.getStateNonce(ctx, m.Message.From, curTs) if err != nil { return false, xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure) } @@ -667,17 +833,17 @@ func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local, return false, err } - if err := mp.checkBalance(m, curTs); err != nil { + if err := mp.checkBalance(ctx, m, curTs); err != nil { return false, err } - err = mp.addLocked(m, !local, untrusted) + err = mp.addLocked(ctx, m, !local, untrusted) if err != nil { return false, err } if local { - err = mp.addLocal(m) + err = mp.addLocal(ctx, m) if err != nil { return false, xerrors.Errorf("error persisting local message: %w", err) } @@ -686,7 +852,7 @@ func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local, return publish, nil } -func (mp *MessagePool) addLoaded(m *types.SignedMessage) error { +func (mp *MessagePool) addLoaded(ctx context.Context, m *types.SignedMessage) error { err := mp.checkMessage(m) if err != nil { return err @@ -698,7 +864,7 @@ func (mp *MessagePool) addLoaded(m *types.SignedMessage) error { return xerrors.Errorf("current tipset not loaded") } - snonce, err := mp.getStateNonce(m.Message.From, curTs) + snonce, err := mp.getStateNonce(ctx, m.Message.From, curTs) if err != nil { return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure) } @@ -712,21 +878,21 @@ func (mp 
*MessagePool) addLoaded(m *types.SignedMessage) error { return err } - if err := mp.checkBalance(m, curTs); err != nil { + if err := mp.checkBalance(ctx, m, curTs); err != nil { return err } - return mp.addLocked(m, false, false) + return mp.addLocked(ctx, m, false, false) } -func (mp *MessagePool) addSkipChecks(m *types.SignedMessage) error { +func (mp *MessagePool) addSkipChecks(ctx context.Context, m *types.SignedMessage) error { mp.lk.Lock() defer mp.lk.Unlock() - return mp.addLocked(m, false, false) + return mp.addLocked(ctx, m, false, false) } -func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool) error { +func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, strict, untrusted bool) error { log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce) if m.Signature.Type == crypto.SigTypeBLS { mp.blsSigCache.Add(m.Cid(), m.Signature) @@ -742,15 +908,23 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool) return err } - mset, ok := mp.pending[m.Message.From] + // Note: If performance becomes an issue, making this getOrCreatePendingMset will save some work + mset, ok, err := mp.getPendingMset(ctx, m.Message.From) + if err != nil { + log.Debug(err) + return err + } + if !ok { - nonce, err := mp.getStateNonce(m.Message.From, mp.curTs) + nonce, err := mp.getStateNonce(ctx, m.Message.From, mp.curTs) if err != nil { return xerrors.Errorf("failed to get initial actor nonce: %w", err) } mset = newMsgSet(nonce) - mp.pending[m.Message.From] = mset + if err = mp.setPendingMset(ctx, m.Message.From, mset); err != nil { + return xerrors.Errorf("failed to set pending mset: %w", err) + } } incr, err := mset.add(m, mp, strict, untrusted) @@ -761,7 +935,7 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool) if incr { mp.currentSize++ - if mp.currentSize > mp.cfg.SizeLimitHigh { + if mp.currentSize > mp.getConfig().SizeLimitHigh { // send signal to prune messages if 
it hasnt already been sent select { case mp.pruneTrigger <- struct{}{}: @@ -785,23 +959,35 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool) return nil } -func (mp *MessagePool) GetNonce(addr address.Address) (uint64, error) { +func (mp *MessagePool) GetNonce(ctx context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() mp.lk.Lock() defer mp.lk.Unlock() - return mp.getNonceLocked(addr, mp.curTs) + return mp.getNonceLocked(ctx, addr, mp.curTs) +} + +// GetActor should not be used. It is only here to satisfy interface mess caused by lite node handling +func (mp *MessagePool) GetActor(_ context.Context, addr address.Address, _ types.TipSetKey) (*types.Actor, error) { + mp.curTsLk.Lock() + defer mp.curTsLk.Unlock() + return mp.api.GetActorAfter(addr, mp.curTs) } -func (mp *MessagePool) getNonceLocked(addr address.Address, curTs *types.TipSet) (uint64, error) { - stateNonce, err := mp.getStateNonce(addr, curTs) // sanity check +func (mp *MessagePool) getNonceLocked(ctx context.Context, addr address.Address, curTs *types.TipSet) (uint64, error) { + stateNonce, err := mp.getStateNonce(ctx, addr, curTs) // sanity check if err != nil { return 0, err } - mset, ok := mp.pending[addr] + mset, ok, err := mp.getPendingMset(ctx, addr) + if err != nil { + log.Debugf("mpoolgetnonce failed to get mset: %s", err) + return 0, err + } + if ok { if stateNonce > mset.nextNonce { log.Errorf("state nonce was larger than mset.nextNonce (%d > %d)", stateNonce, mset.nextNonce) @@ -815,8 +1001,11 @@ func (mp *MessagePool) getNonceLocked(addr address.Address, curTs *types.TipSet) return stateNonce, nil } -func (mp *MessagePool) getStateNonce(addr address.Address, curTs *types.TipSet) (uint64, error) { - act, err := mp.api.GetActorAfter(addr, curTs) +func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address, ts *types.TipSet) (uint64, error) { + done := metrics.Timer(ctx, 
metrics.MpoolGetNonceDuration) + defer done() + + act, err := mp.api.GetActorAfter(addr, ts) if err != nil { return 0, err } @@ -824,7 +1013,10 @@ func (mp *MessagePool) getStateNonce(addr address.Address, curTs *types.TipSet) return act.Nonce, nil } -func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (types.BigInt, error) { +func (mp *MessagePool) getStateBalance(ctx context.Context, addr address.Address, ts *types.TipSet) (types.BigInt, error) { + done := metrics.Timer(ctx, metrics.MpoolGetBalanceDuration) + defer done() + act, err := mp.api.GetActorAfter(addr, ts) if err != nil { return types.EmptyInt, err @@ -838,7 +1030,7 @@ func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) ( // - strict checks are enabled // - extra strict add checks are used when adding the messages to the msgSet // that means: no nonce gaps, at most 10 pending messages for the actor -func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) { +func (mp *MessagePool) PushUntrusted(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) { err := mp.checkMessage(m) if err != nil { return cid.Undef, err @@ -851,7 +1043,7 @@ func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) { }() mp.curTsLk.Lock() - publish, err := mp.addTs(m, mp.curTs, false, true) + publish, err := mp.addTs(ctx, m, mp.curTs, true, true) if err != nil { mp.curTsLk.Unlock() return cid.Undef, err @@ -873,15 +1065,20 @@ func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) { return m.Cid(), nil } -func (mp *MessagePool) Remove(from address.Address, nonce uint64, applied bool) { +func (mp *MessagePool) Remove(ctx context.Context, from address.Address, nonce uint64, applied bool) { mp.lk.Lock() defer mp.lk.Unlock() - mp.remove(from, nonce, applied) + mp.remove(ctx, from, nonce, applied) } -func (mp *MessagePool) remove(from address.Address, nonce uint64, applied bool) { - mset, ok := mp.pending[from] 
+func (mp *MessagePool) remove(ctx context.Context, from address.Address, nonce uint64, applied bool) { + mset, ok, err := mp.getPendingMset(ctx, from) + if err != nil { + log.Debugf("mpoolremove failed to get mset: %s", err) + return + } + if !ok { return } @@ -906,58 +1103,57 @@ func (mp *MessagePool) remove(from address.Address, nonce uint64, applied bool) mset.rm(nonce, applied) if len(mset.msgs) == 0 { - delete(mp.pending, from) + if err = mp.deletePendingMset(ctx, from); err != nil { + log.Debugf("mpoolremove failed to delete mset: %s", err) + return + } } } -func (mp *MessagePool) Pending() ([]*types.SignedMessage, *types.TipSet) { +func (mp *MessagePool) Pending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() mp.lk.Lock() defer mp.lk.Unlock() - return mp.allPending() + return mp.allPending(ctx) } -func (mp *MessagePool) allPending() ([]*types.SignedMessage, *types.TipSet) { +func (mp *MessagePool) allPending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) { out := make([]*types.SignedMessage, 0) - for a := range mp.pending { - out = append(out, mp.pendingFor(a)...) - } + + mp.forEachPending(func(a address.Address, mset *msgSet) { + out = append(out, mset.toSlice()...) 
+ }) return out, mp.curTs } -func (mp *MessagePool) PendingFor(a address.Address) ([]*types.SignedMessage, *types.TipSet) { +func (mp *MessagePool) PendingFor(ctx context.Context, a address.Address) ([]*types.SignedMessage, *types.TipSet) { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() mp.lk.Lock() defer mp.lk.Unlock() - return mp.pendingFor(a), mp.curTs + return mp.pendingFor(ctx, a), mp.curTs } -func (mp *MessagePool) pendingFor(a address.Address) []*types.SignedMessage { - mset := mp.pending[a] - if mset == nil || len(mset.msgs) == 0 { +func (mp *MessagePool) pendingFor(ctx context.Context, a address.Address) []*types.SignedMessage { + mset, ok, err := mp.getPendingMset(ctx, a) + if err != nil { + log.Debugf("mpoolpendingfor failed to get mset: %s", err) return nil } - set := make([]*types.SignedMessage, 0, len(mset.msgs)) - - for _, m := range mset.msgs { - set = append(set, m) + if mset == nil || !ok || len(mset.msgs) == 0 { + return nil } - sort.Slice(set, func(i, j int) bool { - return set[i].Message.Nonce < set[j].Message.Nonce - }) - - return set + return mset.toSlice() } -func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) error { +func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, apply []*types.TipSet) error { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() @@ -974,7 +1170,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) rm := func(from address.Address, nonce uint64) { s, ok := rmsgs[from] if !ok { - mp.Remove(from, nonce, true) + mp.Remove(ctx, from, nonce, true) return } @@ -983,7 +1179,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) return } - mp.Remove(from, nonce, true) + mp.Remove(ctx, from, nonce, true) } maybeRepub := func(cid cid.Cid) { @@ -1054,7 +1250,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) for _, s := range rmsgs { for _, msg := range s { - if err := mp.addSkipChecks(msg); 
err != nil { + if err := mp.addSkipChecks(ctx, msg); err != nil { log.Errorf("Failed to readd message from reorg to mpool: %s", err) } } @@ -1062,7 +1258,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) if len(revert) > 0 && futureDebug { mp.lk.Lock() - msgs, ts := mp.allPending() + msgs, ts := mp.allPending(ctx) mp.lk.Unlock() buckets := map[address.Address]*statBucket{} @@ -1215,7 +1411,7 @@ func (mp *MessagePool) MessagesForBlocks(blks []*types.BlockHeader) ([]*types.Si if smsg != nil { out = append(out, smsg) } else { - log.Warnf("could not recover signature for bls message %s", msg.Cid()) + log.Debugf("could not recover signature for bls message %s", msg.Cid()) } } } @@ -1269,7 +1465,7 @@ func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, err return out, nil } -func (mp *MessagePool) loadLocal() error { +func (mp *MessagePool) loadLocal(ctx context.Context) error { res, err := mp.localMsgs.Query(query.Query{}) if err != nil { return xerrors.Errorf("query local messages: %w", err) @@ -1285,7 +1481,7 @@ func (mp *MessagePool) loadLocal() error { return xerrors.Errorf("unmarshaling local message: %w", err) } - if err := mp.addLoaded(&sm); err != nil { + if err := mp.addLoaded(ctx, &sm); err != nil { if xerrors.Is(err, ErrNonceTooLow) { continue // todo: drop the message from local cache (if above certain confidence threshold) } @@ -1293,47 +1489,61 @@ func (mp *MessagePool) loadLocal() error { log.Errorf("adding local message: %+v", err) } - mp.localAddrs[sm.Message.From] = struct{}{} + if err = mp.setLocal(ctx, sm.Message.From); err != nil { + log.Debugf("mpoolloadLocal errored: %s", err) + return err + } } return nil } -func (mp *MessagePool) Clear(local bool) { +func (mp *MessagePool) Clear(ctx context.Context, local bool) { mp.lk.Lock() defer mp.lk.Unlock() // remove everything if local is true, including removing local messages from // the datastore if local { - for a := range mp.localAddrs { - 
mset, ok := mp.pending[a] - if !ok { - continue + mp.forEachLocal(ctx, func(ctx context.Context, la address.Address) { + mset, ok, err := mp.getPendingMset(ctx, la) + if err != nil { + log.Warnf("errored while getting pending mset: %w", err) + return } - for _, m := range mset.msgs { - err := mp.localMsgs.Delete(datastore.NewKey(string(m.Cid().Bytes()))) - if err != nil { - log.Warnf("error deleting local message: %s", err) + if ok { + for _, m := range mset.msgs { + err := mp.localMsgs.Delete(datastore.NewKey(string(m.Cid().Bytes()))) + if err != nil { + log.Warnf("error deleting local message: %s", err) + } } } - } + }) - mp.pending = make(map[address.Address]*msgSet) + mp.clearPending() mp.republished = nil return } - // remove everything except the local messages - for a := range mp.pending { - _, isLocal := mp.localAddrs[a] + mp.forEachPending(func(a address.Address, ms *msgSet) { + isLocal, err := mp.isLocal(ctx, a) + if err != nil { + log.Warnf("errored while determining isLocal: %w", err) + return + } + if isLocal { - continue + return } - delete(mp.pending, a) - } + + if err = mp.deletePendingMset(ctx, a); err != nil { + log.Warnf("errored while deleting mset: %w", err) + return + } + }) } func getBaseFeeLowerBound(baseFee, factor types.BigInt) types.BigInt { diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go index e31df936c8f..2ea8fdec054 100644 --- a/chain/messagepool/messagepool_test.go +++ b/chain/messagepool/messagepool_test.go @@ -14,12 +14,14 @@ import ( builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/messagepool/gasguess" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/mock" "github.com/filecoin-project/lotus/chain/wallet" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" + 
"github.com/stretchr/testify/assert" ) func init() { @@ -104,6 +106,10 @@ func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) { return cid.Undef, nil } +func (tma *testMpoolAPI) IsLite() bool { + return false +} + func (tma *testMpoolAPI) PubSubPublish(string, []byte) error { tma.published++ return nil @@ -150,7 +156,7 @@ func (tma *testMpoolAPI) GetActorAfter(addr address.Address, ts *types.TipSet) ( }, nil } -func (tma *testMpoolAPI) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { +func (tma *testMpoolAPI) StateAccountKeyAtFinality(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { if addr.Protocol() != address.BLS && addr.Protocol() != address.SECP256K1 { return address.Undef, fmt.Errorf("given address was not a key addr") } @@ -199,7 +205,7 @@ func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipS func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) { t.Helper() - n, err := mp.GetNonce(addr) + n, err := mp.GetNonce(context.TODO(), addr, types.EmptyTSK) if err != nil { t.Fatal(err) } @@ -211,7 +217,7 @@ func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64 func mustAdd(t *testing.T, mp *MessagePool, msg *types.SignedMessage) { t.Helper() - if err := mp.Add(msg); err != nil { + if err := mp.Add(context.TODO(), msg); err != nil { t.Fatal(err) } } @@ -257,6 +263,72 @@ func TestMessagePool(t *testing.T) { assertNonce(t, mp, sender, 2) } +func TestCheckMessageBig(t *testing.T) { + tma := newTestMpoolAPI() + + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + assert.NoError(t, err) + + from, err := w.WalletNew(context.Background(), types.KTBLS) + assert.NoError(t, err) + + tma.setBalance(from, 1000e9) + + ds := datastore.NewMapDatastore() + + mp, err := New(tma, ds, "mptest", nil) + assert.NoError(t, err) + + to := mock.Address(1001) + + { + msg := &types.Message{ + To: to, 
+ From: from, + Value: types.NewInt(1), + Nonce: 0, + GasLimit: 50000000, + GasFeeCap: types.NewInt(100), + GasPremium: types.NewInt(1), + Params: make([]byte, 41<<10), // 41KiB payload + } + + sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{}) + if err != nil { + panic(err) + } + sm := &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } + mustAdd(t, mp, sm) + } + + { + msg := &types.Message{ + To: to, + From: from, + Value: types.NewInt(1), + Nonce: 0, + GasLimit: 50000000, + GasFeeCap: types.NewInt(100), + GasPremium: types.NewInt(1), + Params: make([]byte, 64<<10), // 64KiB payload + } + + sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{}) + if err != nil { + panic(err) + } + sm := &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } + err = mp.Add(context.TODO(), sm) + assert.ErrorIs(t, err, ErrMessageTooBig) + } +} + func TestMessagePoolMessagesInEachBlock(t *testing.T) { tma := newTestMpoolAPI() @@ -293,9 +365,9 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) { tma.applyBlock(t, a) tsa := mock.TipSet(a) - _, _ = mp.Pending() + _, _ = mp.Pending(context.TODO()) - selm, _ := mp.SelectMessages(tsa, 1) + selm, _ := mp.SelectMessages(context.Background(), tsa, 1) if len(selm) == 0 { t.Fatal("should have returned the rest of the messages") } @@ -355,7 +427,7 @@ func TestRevertMessages(t *testing.T) { assertNonce(t, mp, sender, 4) - p, _ := mp.Pending() + p, _ := mp.Pending(context.TODO()) fmt.Printf("%+v\n", p) if len(p) != 3 { t.Fatal("expected three messages in mempool") @@ -396,14 +468,14 @@ func TestPruningSimple(t *testing.T) { for i := 0; i < 5; i++ { smsg := mock.MkMessage(sender, target, uint64(i), w) - if err := mp.Add(smsg); err != nil { + if err := mp.Add(context.TODO(), smsg); err != nil { t.Fatal(err) } } for i := 10; i < 50; i++ { smsg := mock.MkMessage(sender, target, uint64(i), w) - if err := mp.Add(smsg); err != nil { + if err := mp.Add(context.TODO(), smsg); 
err != nil { t.Fatal(err) } } @@ -413,7 +485,7 @@ func TestPruningSimple(t *testing.T) { mp.Prune() - msgs, _ := mp.Pending() + msgs, _ := mp.Pending(context.TODO()) if len(msgs) != 5 { t.Fatal("expected only 5 messages in pool, got: ", len(msgs)) } @@ -455,7 +527,7 @@ func TestLoadLocal(t *testing.T) { msgs := make(map[cid.Cid]struct{}) for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) - cid, err := mp.Push(m) + cid, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) } @@ -471,7 +543,7 @@ func TestLoadLocal(t *testing.T) { t.Fatal(err) } - pmsgs, _ := mp.Pending() + pmsgs, _ := mp.Pending(context.TODO()) if len(msgs) != len(pmsgs) { t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs)) } @@ -526,7 +598,7 @@ func TestClearAll(t *testing.T) { gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) - _, err := mp.Push(m) + _, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) } @@ -537,9 +609,9 @@ func TestClearAll(t *testing.T) { mustAdd(t, mp, m) } - mp.Clear(true) + mp.Clear(context.Background(), true) - pending, _ := mp.Pending() + pending, _ := mp.Pending(context.TODO()) if len(pending) > 0 { t.Fatalf("cleared the mpool, but got %d pending messages", len(pending)) } @@ -581,7 +653,7 @@ func TestClearNonLocal(t *testing.T) { gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) - _, err := mp.Push(m) + _, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) } @@ -592,9 +664,9 @@ func TestClearNonLocal(t *testing.T) { mustAdd(t, mp, m) } - mp.Clear(false) + mp.Clear(context.Background(), false) - pending, _ := mp.Pending() + pending, _ := mp.Pending(context.TODO()) if len(pending) != 10 { t.Fatalf("expected 10 pending 
messages, but got %d instead", len(pending)) } @@ -651,7 +723,7 @@ func TestUpdates(t *testing.T) { for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) - _, err := mp.Push(m) + _, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) } diff --git a/chain/messagepool/provider.go b/chain/messagepool/provider.go index 347e90044d5..0f904c52c49 100644 --- a/chain/messagepool/provider.go +++ b/chain/messagepool/provider.go @@ -2,39 +2,67 @@ package messagepool import ( "context" + "time" "github.com/ipfs/go-cid" pubsub "github.com/libp2p/go-libp2p-pubsub" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/messagesigner" "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" ) +var ( + HeadChangeCoalesceMinDelay = 2 * time.Second + HeadChangeCoalesceMaxDelay = 6 * time.Second + HeadChangeCoalesceMergeInterval = time.Second +) + type Provider interface { SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet PutMessage(m types.ChainMsg) (cid.Cid, error) PubSubPublish(string, []byte) error GetActorAfter(address.Address, *types.TipSet) (*types.Actor, error) - StateAccountKey(context.Context, address.Address, *types.TipSet) (address.Address, error) + StateAccountKeyAtFinality(context.Context, address.Address, *types.TipSet) (address.Address, error) MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) + IsLite() bool } type mpoolProvider struct { sm *stmgr.StateManager ps *pubsub.PubSub + + lite messagesigner.MpoolNonceAPI } +var _ Provider = (*mpoolProvider)(nil) + func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider { 
return &mpoolProvider{sm: sm, ps: ps} } +func NewProviderLite(sm *stmgr.StateManager, ps *pubsub.PubSub, noncer messagesigner.MpoolNonceAPI) Provider { + return &mpoolProvider{sm: sm, ps: ps, lite: noncer} +} + +func (mpp *mpoolProvider) IsLite() bool { + return mpp.lite != nil +} + func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet { - mpp.sm.ChainStore().SubscribeHeadChanges(cb) + mpp.sm.ChainStore().SubscribeHeadChanges( + store.WrapHeadChangeCoalescer( + cb, + HeadChangeCoalesceMinDelay, + HeadChangeCoalesceMaxDelay, + HeadChangeCoalesceMergeInterval, + )) return mpp.sm.ChainStore().GetHeaviestTipSet() } @@ -47,6 +75,19 @@ func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error { } func (mpp *mpoolProvider) GetActorAfter(addr address.Address, ts *types.TipSet) (*types.Actor, error) { + if mpp.IsLite() { + n, err := mpp.lite.GetNonce(context.TODO(), addr, ts.Key()) + if err != nil { + return nil, xerrors.Errorf("getting nonce over lite: %w", err) + } + a, err := mpp.lite.GetActor(context.TODO(), addr, ts.Key()) + if err != nil { + return nil, xerrors.Errorf("getting actor over lite: %w", err) + } + a.Nonce = n + return a, nil + } + stcid, _, err := mpp.sm.TipSetState(context.TODO(), ts) if err != nil { return nil, xerrors.Errorf("computing tipset state for GetActor: %w", err) @@ -58,8 +99,8 @@ func (mpp *mpoolProvider) GetActorAfter(addr address.Address, ts *types.TipSet) return st.GetActor(addr) } -func (mpp *mpoolProvider) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { - return mpp.sm.ResolveToKeyAddress(ctx, addr, ts) +func (mpp *mpoolProvider) StateAccountKeyAtFinality(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { + return mpp.sm.ResolveToKeyAddressAtFinality(ctx, addr, ts) } func (mpp *mpoolProvider) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { diff --git 
a/chain/messagepool/pruning.go b/chain/messagepool/pruning.go index d0e53795ad6..c10239b8e4a 100644 --- a/chain/messagepool/pruning.go +++ b/chain/messagepool/pruning.go @@ -19,7 +19,8 @@ func (mp *MessagePool) pruneExcessMessages() error { mp.lk.Lock() defer mp.lk.Unlock() - if mp.currentSize < mp.cfg.SizeLimitHigh { + mpCfg := mp.getConfig() + if mp.currentSize < mpCfg.SizeLimitHigh { return nil } @@ -27,7 +28,7 @@ func (mp *MessagePool) pruneExcessMessages() error { case <-mp.pruneCooldown: err := mp.pruneMessages(context.TODO(), ts) go func() { - time.Sleep(mp.cfg.PruneCooldown) + time.Sleep(mpCfg.PruneCooldown) mp.pruneCooldown <- struct{}{} }() return err @@ -53,15 +54,21 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro // protected actors -- not pruned protected := make(map[address.Address]struct{}) + mpCfg := mp.getConfig() // we never prune priority addresses - for _, actor := range mp.cfg.PriorityAddrs { - protected[actor] = struct{}{} + for _, actor := range mpCfg.PriorityAddrs { + pk, err := mp.resolveToKey(ctx, actor) + if err != nil { + log.Debugf("pruneMessages failed to resolve priority address: %s", err) + } + + protected[pk] = struct{}{} } // we also never prune locally published messages - for actor := range mp.localAddrs { + mp.forEachLocal(ctx, func(ctx context.Context, actor address.Address) { protected[actor] = struct{}{} - } + }) // Collect all messages to track which ones to remove and create chains for block inclusion pruneMsgs := make(map[cid.Cid]*types.SignedMessage, mp.currentSize) @@ -90,7 +97,7 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro }) // Keep messages (remove them from pruneMsgs) from chains while we are under the low water mark - loWaterMark := mp.cfg.SizeLimitLow + loWaterMark := mpCfg.SizeLimitLow keepLoop: for _, chain := range chains { for _, m := range chain.msgs { @@ -106,7 +113,7 @@ keepLoop: // and remove all messages that are still in pruneMsgs 
after processing the chains log.Infof("Pruning %d messages", len(pruneMsgs)) for _, m := range pruneMsgs { - mp.remove(m.Message.From, m.Message.Nonce, false) + mp.remove(ctx, m.Message.From, m.Message.Nonce, false) } return nil diff --git a/chain/messagepool/repub.go b/chain/messagepool/repub.go index cdd169e1d38..4323bdee197 100644 --- a/chain/messagepool/repub.go +++ b/chain/messagepool/repub.go @@ -18,7 +18,7 @@ const repubMsgLimit = 30 var RepublishBatchDelay = 100 * time.Millisecond -func (mp *MessagePool) republishPendingMessages() error { +func (mp *MessagePool) republishPendingMessages(ctx context.Context) error { mp.curTsLk.Lock() ts := mp.curTs @@ -32,13 +32,18 @@ func (mp *MessagePool) republishPendingMessages() error { pending := make(map[address.Address]map[uint64]*types.SignedMessage) mp.lk.Lock() mp.republished = nil // clear this to avoid races triggering an early republish - for actor := range mp.localAddrs { - mset, ok := mp.pending[actor] + mp.forEachLocal(ctx, func(ctx context.Context, actor address.Address) { + mset, ok, err := mp.getPendingMset(ctx, actor) + if err != nil { + log.Debugf("failed to get mset: %w", err) + return + } + if !ok { - continue + return } if len(mset.msgs) == 0 { - continue + return } // we need to copy this while holding the lock to avoid races with concurrent modification pend := make(map[uint64]*types.SignedMessage, len(mset.msgs)) @@ -46,7 +51,8 @@ func (mp *MessagePool) republishPendingMessages() error { pend[nonce] = m } pending[actor] = pend - } + }) + mp.lk.Unlock() mp.curTsLk.Unlock() @@ -100,7 +106,7 @@ loop: // check the baseFee lower bound -- only republish messages that can be included in the chain // within the next 20 blocks. 
for _, m := range chain.msgs { - if !allowNegativeChains(ts.Height()) && m.Message.GasFeeCap.LessThan(baseFeeLowerBound) { + if m.Message.GasFeeCap.LessThan(baseFeeLowerBound) { chain.Invalidate() continue loop } @@ -115,7 +121,7 @@ loop: // we can't fit the current chain but there is gas to spare // trim it and push it down - chain.Trim(gasLimit, mp, baseFee, true) + chain.Trim(gasLimit, mp, baseFee) for j := i; j < len(chains)-1; j++ { if chains[j].Before(chains[j+1]) { break diff --git a/chain/messagepool/repub_test.go b/chain/messagepool/repub_test.go index 8da64f97493..580231f7af5 100644 --- a/chain/messagepool/repub_test.go +++ b/chain/messagepool/repub_test.go @@ -56,7 +56,7 @@ func TestRepubMessages(t *testing.T) { for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) - _, err := mp.Push(m) + _, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) } diff --git a/chain/messagepool/selection.go b/chain/messagepool/selection.go index 4ade92a799c..611ab8e5fcd 100644 --- a/chain/messagepool/selection.go +++ b/chain/messagepool/selection.go @@ -10,7 +10,6 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" tbig "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/build" @@ -21,11 +20,7 @@ import ( var bigBlockGasLimit = big.NewInt(build.BlockGasLimit) -// this is *temporary* mutilation until we have implemented uncapped miner penalties -- it will go -// away in the next fork. 
-func allowNegativeChains(epoch abi.ChainEpoch) bool { - return epoch < build.UpgradeBreezeHeight+5 -} +var MaxBlockMessages = 16000 const MaxBlocks = 15 @@ -43,7 +38,7 @@ type msgChain struct { prev *msgChain } -func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { +func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) (msgs []*types.SignedMessage, err error) { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() @@ -54,13 +49,23 @@ func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) ([]*types.Si // than any other block, then we don't bother with optimal selection because the // first block will always have higher effective performance if tq > 0.84 { - return mp.selectMessagesGreedy(mp.curTs, ts) + msgs, err = mp.selectMessagesGreedy(ctx, mp.curTs, ts) + } else { + msgs, err = mp.selectMessagesOptimal(ctx, mp.curTs, ts, tq) + } + + if err != nil { + return nil, err + } + + if len(msgs) > MaxBlockMessages { + msgs = msgs[:MaxBlockMessages] } - return mp.selectMessagesOptimal(mp.curTs, ts, tq) + return msgs, nil } -func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { +func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { start := time.Now() baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) @@ -86,7 +91,7 @@ func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64 // 0b. Select all priority messages that fit in the block minGas := int64(gasguess.MinGas) - result, gasLimit := mp.selectPriorityMessages(pending, baseFee, ts) + result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts) // have we filled the block? 
if gasLimit < minGas { @@ -109,7 +114,7 @@ func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64 return chains[i].Before(chains[j]) }) - if !allowNegativeChains(curTs.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 { + if len(chains) != 0 && chains[0].gasPerf < 0 { log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf) return result, nil } @@ -162,7 +167,7 @@ func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64 last := len(chains) for i, chain := range chains { // did we run out of performing chains? - if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 { + if chain.gasPerf < 0 { break } @@ -228,7 +233,7 @@ tailLoop: for gasLimit >= minGas && last < len(chains) { // trim if necessary if chains[last].gasLimit > gasLimit { - chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(curTs.Height())) + chains[last].Trim(gasLimit, mp, baseFee) } // push down if it hasn't been invalidated @@ -254,7 +259,7 @@ tailLoop: } // if gasPerf < 0 we have no more profitable chains - if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 { + if chain.gasPerf < 0 { break tailLoop } @@ -295,7 +300,7 @@ tailLoop: } // dependencies fit, just trim it - chain.Trim(gasLimit-depGasLimit, mp, baseFee, allowNegativeChains(curTs.Height())) + chain.Trim(gasLimit-depGasLimit, mp, baseFee) last += i continue tailLoop } @@ -328,7 +333,7 @@ tailLoop: } // is it negative? - if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 { + if chain.gasPerf < 0 { continue } @@ -350,7 +355,7 @@ tailLoop: // do they fit as is? 
if it doesn't, trim to make it fit if possible if chainGasLimit > gasLimit { - chain.Trim(gasLimit-depGasLimit, mp, baseFee, allowNegativeChains(curTs.Height())) + chain.Trim(gasLimit-depGasLimit, mp, baseFee) if !chain.valid { continue @@ -384,7 +389,7 @@ tailLoop: return result, nil } -func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.SignedMessage, error) { +func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *types.TipSet) ([]*types.SignedMessage, error) { start := time.Now() baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) @@ -410,7 +415,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S // 0b. Select all priority messages that fit in the block minGas := int64(gasguess.MinGas) - result, gasLimit := mp.selectPriorityMessages(pending, baseFee, ts) + result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts) // have we filled the block? if gasLimit < minGas { @@ -433,7 +438,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S return chains[i].Before(chains[j]) }) - if !allowNegativeChains(curTs.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 { + if len(chains) != 0 && chains[0].gasPerf < 0 { log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf) return result, nil } @@ -444,7 +449,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S last := len(chains) for i, chain := range chains { // did we run out of performing chains? 
- if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 { + if chain.gasPerf < 0 { break } @@ -473,7 +478,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S tailLoop: for gasLimit >= minGas && last < len(chains) { // trim - chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(curTs.Height())) + chains[last].Trim(gasLimit, mp, baseFee) // push down if it hasn't been invalidated if chains[last].valid { @@ -493,7 +498,7 @@ tailLoop: } // if gasPerf < 0 we have no more profitable chains - if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 { + if chain.gasPerf < 0 { break tailLoop } @@ -520,26 +525,32 @@ tailLoop: return result, nil } -func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) { +func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) { start := time.Now() defer func() { if dt := time.Since(start); dt > time.Millisecond { log.Infow("select priority messages done", "took", dt) } }() - - result := make([]*types.SignedMessage, 0, mp.cfg.SizeLimitLow) + mpCfg := mp.getConfig() + result := make([]*types.SignedMessage, 0, mpCfg.SizeLimitLow) gasLimit := int64(build.BlockGasLimit) minGas := int64(gasguess.MinGas) // 1. 
Get priority actor chains var chains []*msgChain - priority := mp.cfg.PriorityAddrs + priority := mpCfg.PriorityAddrs for _, actor := range priority { - mset, ok := pending[actor] + pk, err := mp.resolveToKey(ctx, actor) + if err != nil { + log.Debugf("mpooladdlocal failed to resolve sender: %s", err) + return nil, gasLimit + } + + mset, ok := pending[pk] if ok { // remove actor from pending set as we are already processed these messages - delete(pending, actor) + delete(pending, pk) // create chains for the priority actor next := mp.createMessageChains(actor, mset, baseFee, ts) chains = append(chains, next...) @@ -555,7 +566,7 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui return chains[i].Before(chains[j]) }) - if !allowNegativeChains(ts.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 { + if len(chains) != 0 && chains[0].gasPerf < 0 { log.Warnw("all priority messages in mpool have negative gas performance", "bestGasPerf", chains[0].gasPerf) return nil, gasLimit } @@ -563,7 +574,7 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui // 3. 
Merge chains until the block limit, as long as they have non-negative gas performance last := len(chains) for i, chain := range chains { - if !allowNegativeChains(ts.Height()) && chain.gasPerf < 0 { + if chain.gasPerf < 0 { break } @@ -581,7 +592,7 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui tailLoop: for gasLimit >= minGas && last < len(chains) { // trim, discarding negative performing messages - chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(ts.Height())) + chains[last].Trim(gasLimit, mp, baseFee) // push down if it hasn't been invalidated if chains[last].valid { @@ -601,7 +612,7 @@ tailLoop: } // if gasPerf < 0 we have no more profitable chains - if !allowNegativeChains(ts.Height()) && chain.gasPerf < 0 { + if chain.gasPerf < 0 { break tailLoop } @@ -641,8 +652,7 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address. inSync = true } - // first add our current pending messages - for a, mset := range mp.pending { + mp.forEachPending(func(a address.Address, mset *msgSet) { if inSync { // no need to copy the map result[a] = mset.msgs @@ -655,7 +665,7 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address. 
result[a] = msetCopy } - } + }) // we are in sync, that's the happy path if inSync { @@ -677,6 +687,10 @@ func (*MessagePool) getGasReward(msg *types.SignedMessage, baseFee types.BigInt) } gasReward := tbig.Mul(maxPremium, types.NewInt(uint64(msg.Message.GasLimit))) + if gasReward.Sign() == -1 { + // penalty multiplier + gasReward = tbig.Mul(gasReward, types.NewInt(3)) + } return gasReward.Int } @@ -710,7 +724,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6 // the balance a, err := mp.api.GetActorAfter(actor, ts) if err != nil { - log.Errorf("failed to load actor state, not building chain for %s: %w", actor, err) + log.Errorf("failed to load actor state, not building chain for %s: %v", actor, err) return nil } @@ -749,14 +763,11 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6 if balance.Cmp(required) < 0 { break } + balance = new(big.Int).Sub(balance, required) value := m.Message.Value.Int - if balance.Cmp(value) >= 0 { - // Note: we only account for the value if the balance doesn't drop below 0 - // otherwise the message will fail and the miner can reap the gas rewards - balance = new(big.Int).Sub(balance, value) - } + balance = new(big.Int).Sub(balance, value) gasReward := mp.getGasReward(m, baseFee) rewards = append(rewards, gasReward) @@ -859,9 +870,9 @@ func (mc *msgChain) Before(other *msgChain) bool { (mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0) } -func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt, allowNegative bool) { +func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt) { i := len(mc.msgs) - 1 - for i >= 0 && (mc.gasLimit > gasLimit || (!allowNegative && mc.gasPerf < 0)) { + for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0) { gasReward := mp.getGasReward(mc.msgs[i], baseFee) mc.gasReward = new(big.Int).Sub(mc.gasReward, gasReward) mc.gasLimit -= mc.msgs[i].Message.GasLimit diff --git 
a/chain/messagepool/selection_test.go b/chain/messagepool/selection_test.go index 08cf286c8d8..4634732298f 100644 --- a/chain/messagepool/selection_test.go +++ b/chain/messagepool/selection_test.go @@ -16,7 +16,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -427,7 +427,7 @@ func TestBasicMessageSelection(t *testing.T) { mustAdd(t, mp, m) } - msgs, err := mp.SelectMessages(ts, 1.0) + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) if err != nil { t.Fatal(err) } @@ -464,7 +464,7 @@ func TestBasicMessageSelection(t *testing.T) { tma.applyBlock(t, block2) // we should have no pending messages in the mpool - pend, _ := mp.Pending() + pend, _ := mp.Pending(context.TODO()) if len(pend) != 0 { t.Fatalf("expected no pending messages, but got %d", len(pend)) } @@ -495,7 +495,7 @@ func TestBasicMessageSelection(t *testing.T) { tma.setStateNonce(a1, 10) tma.setStateNonce(a2, 10) - msgs, err = mp.SelectMessages(ts3, 1.0) + msgs, err = mp.SelectMessages(context.Background(), ts3, 1.0) if err != nil { t.Fatal(err) } @@ -569,7 +569,7 @@ func TestMessageSelectionTrimming(t *testing.T) { mustAdd(t, mp, m) } - msgs, err := mp.SelectMessages(ts, 1.0) + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) if err != nil { t.Fatal(err) } @@ -633,7 +633,7 @@ func TestPriorityMessageSelection(t *testing.T) { mustAdd(t, mp, m) } - msgs, err := mp.SelectMessages(ts, 1.0) + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) if err != nil { t.Fatal(err) } @@ -712,7 +712,7 @@ func TestPriorityMessageSelection2(t *testing.T) { mustAdd(t, mp, m) } - msgs, err := mp.SelectMessages(ts, 1.0) + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) if err != nil { t.Fatal(err) } @@ -736,8 +736,6 @@ func TestPriorityMessageSelection2(t *testing.T) { } 
func TestPriorityMessageSelection3(t *testing.T) { - t.Skip("reenable after removing allow negative") - mp, tma := makeTestMpool() // the actors @@ -784,7 +782,7 @@ func TestPriorityMessageSelection3(t *testing.T) { } // test greedy selection - msgs, err := mp.SelectMessages(ts, 1.0) + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) if err != nil { t.Fatal(err) } @@ -807,7 +805,7 @@ func TestPriorityMessageSelection3(t *testing.T) { } // test optimal selection - msgs, err = mp.SelectMessages(ts, 0.1) + msgs, err = mp.SelectMessages(context.Background(), ts, 0.1) if err != nil { t.Fatal(err) } @@ -874,7 +872,7 @@ func TestOptimalMessageSelection1(t *testing.T) { mustAdd(t, mp, m) } - msgs, err := mp.SelectMessages(ts, 0.25) + msgs, err := mp.SelectMessages(context.Background(), ts, 0.25) if err != nil { t.Fatal(err) } @@ -943,7 +941,7 @@ func TestOptimalMessageSelection2(t *testing.T) { mustAdd(t, mp, m) } - msgs, err := mp.SelectMessages(ts, 0.1) + msgs, err := mp.SelectMessages(context.Background(), ts, 0.1) if err != nil { t.Fatal(err) } @@ -1022,7 +1020,7 @@ func TestOptimalMessageSelection3(t *testing.T) { } } - msgs, err := mp.SelectMessages(ts, 0.1) + msgs, err := mp.SelectMessages(context.Background(), ts, 0.1) if err != nil { t.Fatal(err) } @@ -1110,7 +1108,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu logging.SetLogLevel("messagepool", "error") // 1. 
greedy selection - greedyMsgs, err := mp.selectMessagesGreedy(ts, ts) + greedyMsgs, err := mp.selectMessagesGreedy(context.Background(), ts, ts) if err != nil { t.Fatal(err) } @@ -1139,7 +1137,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu var bestMsgs []*types.SignedMessage for j := 0; j < nMiners; j++ { tq := rng.Float64() - msgs, err := mp.SelectMessages(ts, tq) + msgs, err := mp.SelectMessages(context.Background(), ts, tq) if err != nil { t.Fatal(err) } @@ -1241,6 +1239,9 @@ func TestCompetitiveMessageSelectionExp(t *testing.T) { } func TestCompetitiveMessageSelectionZipf(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } var capacityBoost, rewardBoost, tqReward float64 seeds := []int64{1947, 1976, 2020, 2100, 10000, 143324, 432432, 131, 32, 45} for _, seed := range seeds { @@ -1268,9 +1269,9 @@ func TestGasReward(t *testing.T) { GasReward int64 }{ {Premium: 100, FeeCap: 200, BaseFee: 100, GasReward: 100}, - {Premium: 100, FeeCap: 200, BaseFee: 210, GasReward: -10}, + {Premium: 100, FeeCap: 200, BaseFee: 210, GasReward: -10 * 3}, {Premium: 200, FeeCap: 250, BaseFee: 210, GasReward: 40}, - {Premium: 200, FeeCap: 250, BaseFee: 2000, GasReward: -1750}, + {Premium: 200, FeeCap: 250, BaseFee: 2000, GasReward: -1750 * 3}, } mp := new(MessagePool) @@ -1332,7 +1333,7 @@ readLoop: } actorMap := make(map[address.Address]address.Address) - actorWallets := make(map[address.Address]api.WalletAPI) + actorWallets := make(map[address.Address]api.Wallet) for _, m := range msgs { baseNonce := baseNonces[m.Message.From] @@ -1395,7 +1396,7 @@ readLoop: minGasLimit := int64(0.9 * float64(build.BlockGasLimit)) // greedy first - selected, err := mp.SelectMessages(ts, 1.0) + selected, err := mp.SelectMessages(context.Background(), ts, 1.0) if err != nil { t.Fatal(err) } @@ -1409,7 +1410,7 @@ readLoop: } // high quality ticket - selected, err = mp.SelectMessages(ts, .8) + selected, err = 
mp.SelectMessages(context.Background(), ts, .8) if err != nil { t.Fatal(err) } @@ -1423,7 +1424,7 @@ readLoop: } // mid quality ticket - selected, err = mp.SelectMessages(ts, .4) + selected, err = mp.SelectMessages(context.Background(), ts, .4) if err != nil { t.Fatal(err) } @@ -1437,7 +1438,7 @@ readLoop: } // low quality ticket - selected, err = mp.SelectMessages(ts, .1) + selected, err = mp.SelectMessages(context.Background(), ts, .1) if err != nil { t.Fatal(err) } @@ -1451,7 +1452,7 @@ readLoop: } // very low quality ticket - selected, err = mp.SelectMessages(ts, .01) + selected, err = mp.SelectMessages(context.Background(), ts, .01) if err != nil { t.Fatal(err) } diff --git a/chain/messagesigner/messagesigner.go b/chain/messagesigner/messagesigner.go index 9b8f86b6459..063d1aa7d1a 100644 --- a/chain/messagesigner/messagesigner.go +++ b/chain/messagesigner/messagesigner.go @@ -23,19 +23,20 @@ const dsKeyActorNonce = "ActorNextNonce" var log = logging.Logger("messagesigner") type MpoolNonceAPI interface { - GetNonce(address.Address) (uint64, error) + GetNonce(context.Context, address.Address, types.TipSetKey) (uint64, error) + GetActor(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) } // MessageSigner keeps track of nonces per address, and increments the nonce // when signing a message type MessageSigner struct { - wallet api.WalletAPI + wallet api.Wallet lk sync.Mutex mpool MpoolNonceAPI ds datastore.Batching } -func NewMessageSigner(wallet api.WalletAPI, mpool MpoolNonceAPI, ds dtypes.MetadataDS) *MessageSigner { +func NewMessageSigner(wallet api.Wallet, mpool MpoolNonceAPI, ds dtypes.MetadataDS) *MessageSigner { ds = namespace.Wrap(ds, datastore.NewKey("/message-signer/")) return &MessageSigner{ wallet: wallet, @@ -51,7 +52,7 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb defer ms.lk.Unlock() // Get the next message nonce - nonce, err := ms.nextNonce(msg.From) + nonce, err := ms.nextNonce(ctx, 
msg.From) if err != nil { return nil, xerrors.Errorf("failed to create nonce: %w", err) } @@ -92,12 +93,12 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb // nextNonce gets the next nonce for the given address. // If there is no nonce in the datastore, gets the nonce from the message pool. -func (ms *MessageSigner) nextNonce(addr address.Address) (uint64, error) { +func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (uint64, error) { // Nonces used to be created by the mempool and we need to support nodes // that have mempool nonces, so first check the mempool for a nonce for // this address. Note that the mempool returns the actor state's nonce // by default. - nonce, err := ms.mpool.GetNonce(addr) + nonce, err := ms.mpool.GetNonce(ctx, addr, types.EmptyTSK) if err != nil { return 0, xerrors.Errorf("failed to get nonce from mempool: %w", err) } diff --git a/chain/messagesigner/messagesigner_test.go b/chain/messagesigner/messagesigner_test.go index 5eebd36da5e..20d9af38bb1 100644 --- a/chain/messagesigner/messagesigner_test.go +++ b/chain/messagesigner/messagesigner_test.go @@ -24,6 +24,8 @@ type mockMpool struct { nonces map[address.Address]uint64 } +var _ MpoolNonceAPI = (*mockMpool)(nil) + func newMockMpool() *mockMpool { return &mockMpool{nonces: make(map[address.Address]uint64)} } @@ -35,12 +37,15 @@ func (mp *mockMpool) setNonce(addr address.Address, nonce uint64) { mp.nonces[addr] = nonce } -func (mp *mockMpool) GetNonce(addr address.Address) (uint64, error) { +func (mp *mockMpool) GetNonce(_ context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) { mp.lk.RLock() defer mp.lk.RUnlock() return mp.nonces[addr], nil } +func (mp *mockMpool) GetActor(_ context.Context, addr address.Address, _ types.TipSetKey) (*types.Actor, error) { + panic("don't use it") +} func TestMessageSignerSignMessage(t *testing.T) { ctx := context.Background() diff --git a/chain/state/statetree.go 
b/chain/state/statetree.go index 7fa55b31c8d..8705aeff81b 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -14,12 +14,17 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/chain/actors" init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" cbg "github.com/whyrusleeping/cbor-gen" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" + + states0 "github.com/filecoin-project/specs-actors/actors/states" + states2 "github.com/filecoin-project/specs-actors/v2/actors/states" + states3 "github.com/filecoin-project/specs-actors/v3/actors/states" + states4 "github.com/filecoin-project/specs-actors/v4/actors/states" + states5 "github.com/filecoin-project/specs-actors/v5/actors/states" ) var log = logging.Logger("statetree") @@ -137,21 +142,20 @@ func (ss *stateSnaps) deleteActor(addr address.Address) { // VersionForNetwork returns the state tree version for the given network // version. 
-func VersionForNetwork(ver network.Version) types.StateTreeVersion { - if actors.VersionForNetwork(ver) == actors.Version0 { - return types.StateTreeVersion0 - } - return types.StateTreeVersion1 -} - -func adtForSTVersion(ver types.StateTreeVersion) actors.Version { +func VersionForNetwork(ver network.Version) (types.StateTreeVersion, error) { switch ver { - case types.StateTreeVersion0: - return actors.Version0 - case types.StateTreeVersion1: - return actors.Version2 + case network.Version0, network.Version1, network.Version2, network.Version3: + return types.StateTreeVersion0, nil + case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8, network.Version9: + return types.StateTreeVersion1, nil + case network.Version10, network.Version11: + return types.StateTreeVersion2, nil + case network.Version12: + return types.StateTreeVersion3, nil + case network.Version13: + return types.StateTreeVersion4, nil default: - panic("unhandled state tree version") + panic(fmt.Sprintf("unsupported network version %d", ver)) } } @@ -160,7 +164,7 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e switch ver { case types.StateTreeVersion0: // info is undefined - case types.StateTreeVersion1: + case types.StateTreeVersion1, types.StateTreeVersion2, types.StateTreeVersion3, types.StateTreeVersion4: var err error info, err = cst.Put(context.TODO(), new(types.StateInfo0)) if err != nil { @@ -169,13 +173,46 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e default: return nil, xerrors.Errorf("unsupported state tree version: %d", ver) } - root, err := adt.NewMap(adt.WrapStore(context.TODO(), cst), adtForSTVersion(ver)) - if err != nil { - return nil, err + + store := adt.WrapStore(context.TODO(), cst) + var hamt adt.Map + switch ver { + case types.StateTreeVersion0: + tree, err := states0.NewTree(store) + if err != nil { + return nil, xerrors.Errorf("failed to create state tree: %w", 
err) + } + hamt = tree.Map + case types.StateTreeVersion1: + tree, err := states2.NewTree(store) + if err != nil { + return nil, xerrors.Errorf("failed to create state tree: %w", err) + } + hamt = tree.Map + case types.StateTreeVersion2: + tree, err := states3.NewTree(store) + if err != nil { + return nil, xerrors.Errorf("failed to create state tree: %w", err) + } + hamt = tree.Map + case types.StateTreeVersion3: + tree, err := states4.NewTree(store) + if err != nil { + return nil, xerrors.Errorf("failed to create state tree: %w", err) + } + hamt = tree.Map + case types.StateTreeVersion4: + tree, err := states5.NewTree(store) + if err != nil { + return nil, xerrors.Errorf("failed to create state tree: %w", err) + } + hamt = tree.Map + default: + return nil, xerrors.Errorf("unsupported state tree version: %d", ver) } s := &StateTree{ - root: root, + root: hamt, info: info, version: ver, Store: cst, @@ -194,30 +231,61 @@ func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) { root.Version = types.StateTreeVersion0 } + store := adt.WrapStore(context.TODO(), cst) + + var ( + hamt adt.Map + err error + ) switch root.Version { - case types.StateTreeVersion0, types.StateTreeVersion1: - // Load the actual state-tree HAMT. 
- nd, err := adt.AsMap( - adt.WrapStore(context.TODO(), cst), root.Actors, - adtForSTVersion(root.Version), - ) - if err != nil { - log.Errorf("loading hamt node %s failed: %s", c, err) - return nil, err + case types.StateTreeVersion0: + var tree *states0.Tree + tree, err = states0.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map } - - s := &StateTree{ - root: nd, - info: root.Info, - version: root.Version, - Store: cst, - snaps: newStateSnaps(), + case types.StateTreeVersion1: + var tree *states2.Tree + tree, err = states2.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map + } + case types.StateTreeVersion2: + var tree *states3.Tree + tree, err = states3.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map + } + case types.StateTreeVersion3: + var tree *states4.Tree + tree, err = states4.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map + } + case types.StateTreeVersion4: + var tree *states5.Tree + tree, err = states5.LoadTree(store, root.Actors) + if tree != nil { + hamt = tree.Map } - s.lookupIDFun = s.lookupIDinternal - return s, nil default: return nil, xerrors.Errorf("unsupported state tree version: %d", root.Version) } + if err != nil { + log.Errorf("failed to load state tree: %s", err) + return nil, xerrors.Errorf("failed to load state tree: %w", err) + } + + s := &StateTree{ + root: hamt, + info: root.Info, + version: root.Version, + Store: cst, + snaps: newStateSnaps(), + } + s.lookupIDFun = s.lookupIDinternal + + return s, nil } func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error { @@ -436,6 +504,26 @@ func (st *StateTree) MutateActor(addr address.Address, f func(*types.Actor) erro } func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error { + // Walk through layers, if any. 
+ seen := make(map[address.Address]struct{}) + for i := len(st.snaps.layers) - 1; i >= 0; i-- { + for addr, op := range st.snaps.layers[i].actors { + if _, ok := seen[addr]; ok { + continue + } + seen[addr] = struct{}{} + if op.Delete { + continue + } + act := op.Act // copy + if err := f(addr, &act); err != nil { + return err + } + } + + } + + // Now walk through the saved actors. var act types.Actor return st.root.ForEach(&act, func(k string) error { act := act // copy @@ -444,6 +532,12 @@ func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error return xerrors.Errorf("invalid address (%x) found in state tree key: %w", []byte(k), err) } + // no need to record anything here, there are no duplicates in the actors HAMT + // iself. + if _, ok := seen[addr]; ok { + return nil + } + return f(addr, &act) }) } @@ -453,7 +547,7 @@ func (st *StateTree) Version() types.StateTreeVersion { return st.version } -func Diff(oldTree, newTree *StateTree) (map[string]types.Actor, error) { +func Diff(ctx context.Context, oldTree, newTree *StateTree) (map[string]types.Actor, error) { out := map[string]types.Actor{} var ( @@ -461,33 +555,38 @@ func Diff(oldTree, newTree *StateTree) (map[string]types.Actor, error) { buf = bytes.NewReader(nil) ) if err := newTree.root.ForEach(&ncval, func(k string) error { - var act types.Actor - - addr, err := address.NewFromBytes([]byte(k)) - if err != nil { - return xerrors.Errorf("address in state tree was not valid: %w", err) - } + select { + case <-ctx.Done(): + return ctx.Err() + default: + var act types.Actor + + addr, err := address.NewFromBytes([]byte(k)) + if err != nil { + return xerrors.Errorf("address in state tree was not valid: %w", err) + } - found, err := oldTree.root.Get(abi.AddrKey(addr), &ocval) - if err != nil { - return err - } + found, err := oldTree.root.Get(abi.AddrKey(addr), &ocval) + if err != nil { + return err + } - if found && bytes.Equal(ocval.Raw, ncval.Raw) { - return nil // not changed - } + if 
found && bytes.Equal(ocval.Raw, ncval.Raw) { + return nil // not changed + } - buf.Reset(ncval.Raw) - err = act.UnmarshalCBOR(buf) - buf.Reset(nil) + buf.Reset(ncval.Raw) + err = act.UnmarshalCBOR(buf) + buf.Reset(nil) - if err != nil { - return err - } + if err != nil { + return err + } - out[addr.String()] = act + out[addr.String()] = act - return nil + return nil + } }); err != nil { return nil, err } diff --git a/chain/state/statetree_test.go b/chain/state/statetree_test.go index 91674337b88..9177af31219 100644 --- a/chain/state/statetree_test.go +++ b/chain/state/statetree_test.go @@ -5,11 +5,12 @@ import ( "fmt" "testing" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" address "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/network" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" "github.com/filecoin-project/lotus/build" @@ -45,7 +46,12 @@ func BenchmarkStateTreeSet(b *testing.B) { func BenchmarkStateTreeSetFlush(b *testing.B) { cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion)) + sv, err := VersionForNetwork(build.NewestNetworkVersion) + if err != nil { + b.Fatal(err) + } + + st, err := NewStateTree(cst, sv) if err != nil { b.Fatal(err) } @@ -75,7 +81,12 @@ func BenchmarkStateTreeSetFlush(b *testing.B) { func TestResolveCache(t *testing.T) { cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion)) + sv, err := VersionForNetwork(build.NewestNetworkVersion) + if err != nil { + t.Fatal(err) + } + + st, err := NewStateTree(cst, sv) if err != nil { t.Fatal(err) } @@ -172,7 +183,12 @@ func TestResolveCache(t *testing.T) { func BenchmarkStateTree10kGetActor(b *testing.B) { cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion)) + sv, err := 
VersionForNetwork(build.NewestNetworkVersion) + if err != nil { + b.Fatal(err) + } + + st, err := NewStateTree(cst, sv) if err != nil { b.Fatal(err) } @@ -214,7 +230,12 @@ func BenchmarkStateTree10kGetActor(b *testing.B) { func TestSetCache(t *testing.T) { cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion)) + sv, err := VersionForNetwork(build.NewestNetworkVersion) + if err != nil { + t.Fatal(err) + } + + st, err := NewStateTree(cst, sv) if err != nil { t.Fatal(err) } @@ -251,7 +272,13 @@ func TestSetCache(t *testing.T) { func TestSnapshots(t *testing.T) { ctx := context.Background() cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion)) + + sv, err := VersionForNetwork(build.NewestNetworkVersion) + if err != nil { + t.Fatal(err) + } + + st, err := NewStateTree(cst, sv) if err != nil { t.Fatal(err) } @@ -334,8 +361,15 @@ func assertNotHas(t *testing.T, st *StateTree, addr address.Address) { func TestStateTreeConsistency(t *testing.T) { cst := cbor.NewMemCborStore() + // TODO: ActorUpgrade: this test tests pre actors v2 - st, err := NewStateTree(cst, VersionForNetwork(network.Version3)) + + sv, err := VersionForNetwork(network.Version3) + if err != nil { + t.Fatal(err) + } + + st, err := NewStateTree(cst, sv) if err != nil { t.Fatal(err) } diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go index bb0f0e5ecd1..dc6da0f9c19 100644 --- a/chain/stmgr/call.go +++ b/chain/stmgr/call.go @@ -39,27 +39,31 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. } bstate := ts.ParentState() - bheight := ts.Height() + pts, err := sm.cs.LoadTipSet(ts.Parents()) + if err != nil { + return nil, xerrors.Errorf("failed to load parent tipset: %w", err) + } + pheight := pts.Height() // If we have to run an expensive migration, and we're not at genesis, // return an error because the migration will take too long. 
// // We allow this at height 0 for at-genesis migrations (for testing). - if bheight-1 > 0 && sm.hasExpensiveFork(ctx, bheight-1) { + if pheight > 0 && sm.hasExpensiveFork(ctx, pheight) { return nil, ErrExpensiveFork } // Run the (not expensive) migration. - bstate, err := sm.handleStateForks(ctx, bstate, bheight-1, nil, ts) + bstate, err = sm.handleStateForks(ctx, bstate, pheight, nil, ts) if err != nil { return nil, fmt.Errorf("failed to handle fork: %w", err) } vmopt := &vm.VMOpts{ StateBase: bstate, - Epoch: bheight, + Epoch: pheight + 1, Rand: store.NewChainRand(sm.cs, ts.Cids()), - Bstore: sm.cs.Blockstore(), + Bstore: sm.cs.StateBlockstore(), Syscalls: sm.cs.VMSys(), CircSupplyCalc: sm.GetVMCirculatingSupply, NtwkVersion: sm.GetNtwkVersion, @@ -174,7 +178,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri StateBase: state, Epoch: ts.Height() + 1, Rand: r, - Bstore: sm.cs.Blockstore(), + Bstore: sm.cs.StateBlockstore(), Syscalls: sm.cs.VMSys(), CircSupplyCalc: sm.GetVMCirculatingSupply, NtwkVersion: sm.GetNtwkVersion, @@ -244,24 +248,18 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri var errHaltExecution = fmt.Errorf("halt") func (sm *StateManager) Replay(ctx context.Context, ts *types.TipSet, mcid cid.Cid) (*types.Message, *vm.ApplyRet, error) { - var outm *types.Message - var outr *vm.ApplyRet - - _, _, err := sm.computeTipSetState(ctx, ts, func(c cid.Cid, m *types.Message, ret *vm.ApplyRet) error { - if c == mcid { - outm = m - outr = ret - return errHaltExecution - } - return nil - }) - if err != nil && err != errHaltExecution { + var finder messageFinder + // message to find + finder.mcid = mcid + + _, _, err := sm.computeTipSetState(ctx, ts, &finder) + if err != nil && !xerrors.Is(err, errHaltExecution) { return nil, nil, xerrors.Errorf("unexpected error during execution: %w", err) } - if outr == nil { + if finder.outr == nil { return nil, nil, xerrors.Errorf("given message not 
found in tipset") } - return outm, outr, nil + return finder.outm, finder.outr, nil } diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index e089a108467..bb87da44cf4 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -4,57 +4,127 @@ import ( "bytes" "context" "encoding/binary" - "math" + "runtime" + "sort" + "sync" + "time" - "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/specs-actors/v5/actors/migration/nv13" + + "github.com/filecoin-project/go-state-types/rt" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/network" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "golang.org/x/xerrors" - - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" - multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" - power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" - adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" - - "github.com/filecoin-project/specs-actors/actors/migration/nv3" - m2 "github.com/filecoin-project/specs-actors/v2/actors/migration" - + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - bstore "github.com/filecoin-project/lotus/lib/blockstore" - "github.com/filecoin-project/lotus/lib/bufbstore" + builtin0 
"github.com/filecoin-project/specs-actors/actors/builtin" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + "github.com/filecoin-project/specs-actors/actors/migration/nv3" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/specs-actors/v2/actors/migration/nv4" + "github.com/filecoin-project/specs-actors/v2/actors/migration/nv7" + "github.com/filecoin-project/specs-actors/v3/actors/migration/nv10" + "github.com/filecoin-project/specs-actors/v4/actors/migration/nv12" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" ) -// UpgradeFunc is a migration function run at every upgrade. +// MigrationCache can be used to cache information used by a migration. This is primarily useful to +// "pre-compute" some migration state ahead of time, and make it accessible in the migration itself. +type MigrationCache interface { + Write(key string, value cid.Cid) error + Read(key string) (bool, cid.Cid, error) + Load(key string, loadFunc func() (cid.Cid, error)) (cid.Cid, error) +} + +// MigrationFunc is a migration function run at every upgrade. // +// - The cache is a per-upgrade cache, pre-populated by pre-migrations. // - The oldState is the state produced by the upgrade epoch. // - The returned newState is the new state that will be used by the next epoch. // - The height is the upgrade epoch height (already executed). // - The tipset is the tipset for the last non-null block before the upgrade. Do // not assume that ts.Height() is the upgrade height. 
-type UpgradeFunc func(ctx context.Context, sm *StateManager, cb ExecCallback, oldState cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (newState cid.Cid, err error) +type MigrationFunc func( + ctx context.Context, + sm *StateManager, cache MigrationCache, + cb ExecMonitor, oldState cid.Cid, + height abi.ChainEpoch, ts *types.TipSet, +) (newState cid.Cid, err error) + +// PreMigrationFunc is a function run _before_ a network upgrade to pre-compute part of the network +// upgrade and speed it up. +type PreMigrationFunc func( + ctx context.Context, + sm *StateManager, cache MigrationCache, + oldState cid.Cid, + height abi.ChainEpoch, ts *types.TipSet, +) error + +// PreMigration describes a pre-migration step to prepare for a network state upgrade. Pre-migrations +// are optimizations, are not guaranteed to run, and may be canceled and/or run multiple times. +type PreMigration struct { + // PreMigration is the pre-migration function to run at the specified time. This function is + // run asynchronously and must abort promptly when canceled. + PreMigration PreMigrationFunc + + // StartWithin specifies that this pre-migration should be started at most StartWithin + // epochs before the upgrade. + StartWithin abi.ChainEpoch + + // DontStartWithin specifies that this pre-migration should not be started DontStartWithin + // epochs before the final upgrade epoch. + // + // This should be set such that the pre-migration is likely to complete before StopWithin. + DontStartWithin abi.ChainEpoch + + // StopWithin specifies that this pre-migration should be stopped StopWithin epochs of the + // final upgrade epoch. + StopWithin abi.ChainEpoch +} type Upgrade struct { Height abi.ChainEpoch Network network.Version Expensive bool - Migration UpgradeFunc + Migration MigrationFunc + + // PreMigrations specifies a set of pre-migration functions to run at the indicated epochs. 
+ // These functions should fill the given cache with information that can speed up the + // eventual full migration at the upgrade epoch. + PreMigrations []PreMigration } type UpgradeSchedule []Upgrade +type migrationLogger struct{} + +func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{}) { + switch level { + case rt.DEBUG: + log.Debugf(msg, args...) + case rt.INFO: + log.Infof(msg, args...) + case rt.WARN: + log.Warnf(msg, args...) + case rt.ERROR: + log.Errorf(msg, args...) + } +} + func DefaultUpgradeSchedule() UpgradeSchedule { var us UpgradeSchedule @@ -75,13 +145,14 @@ func DefaultUpgradeSchedule() UpgradeSchedule { Network: network.Version3, Migration: UpgradeRefuel, }, { - Height: build.UpgradeActorsV2Height, + Height: build.UpgradeAssemblyHeight, Network: network.Version4, Expensive: true, Migration: UpgradeActorsV2, }, { - Height: build.UpgradeTapeHeight, - Network: network.Version5, + Height: build.UpgradeTapeHeight, + Network: network.Version5, + Migration: nil, }, { Height: build.UpgradeLiftoffHeight, Network: network.Version5, @@ -90,31 +161,70 @@ func DefaultUpgradeSchedule() UpgradeSchedule { Height: build.UpgradeKumquatHeight, Network: network.Version6, Migration: nil, - }} - - if build.UpgradeActorsV2Height == math.MaxInt64 { // disable actors upgrade - updates = []Upgrade{{ - Height: build.UpgradeBreezeHeight, - Network: network.Version1, - Migration: UpgradeFaucetBurnRecovery, - }, { - Height: build.UpgradeSmokeHeight, - Network: network.Version2, - Migration: nil, + }, { + Height: build.UpgradeCalicoHeight, + Network: network.Version7, + Migration: UpgradeCalico, + }, { + Height: build.UpgradePersianHeight, + Network: network.Version8, + Migration: nil, + }, { + Height: build.UpgradeOrangeHeight, + Network: network.Version9, + Migration: nil, + }, { + Height: build.UpgradeTrustHeight, + Network: network.Version10, + Migration: UpgradeActorsV3, + PreMigrations: []PreMigration{{ + PreMigration: PreUpgradeActorsV3, + 
StartWithin: 120, + DontStartWithin: 60, + StopWithin: 35, }, { - Height: build.UpgradeIgnitionHeight, - Network: network.Version3, - Migration: UpgradeIgnition, + PreMigration: PreUpgradeActorsV3, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true, + }, { + Height: build.UpgradeNorwegianHeight, + Network: network.Version11, + Migration: nil, + }, { + Height: build.UpgradeTurboHeight, + Network: network.Version12, + Migration: UpgradeActorsV4, + PreMigrations: []PreMigration{{ + PreMigration: PreUpgradeActorsV4, + StartWithin: 120, + DontStartWithin: 60, + StopWithin: 35, }, { - Height: build.UpgradeRefuelHeight, - Network: network.Version3, - Migration: UpgradeRefuel, + PreMigration: PreUpgradeActorsV4, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true, + }, { + Height: build.UpgradeHyperdriveHeight, + Network: network.Version13, + Migration: UpgradeActorsV5, + PreMigrations: []PreMigration{{ + PreMigration: PreUpgradeActorsV5, + StartWithin: 120, + DontStartWithin: 60, + StopWithin: 35, }, { - Height: build.UpgradeLiftoffHeight, - Network: network.Version3, - Migration: UpgradeLiftoff, - }} - } + PreMigration: PreUpgradeActorsV5, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true}} for _, u := range updates { if u.Height < 0 { @@ -127,14 +237,43 @@ func DefaultUpgradeSchedule() UpgradeSchedule { } func (us UpgradeSchedule) Validate() error { - // Make sure we're not trying to upgrade to version 0. + // Make sure each upgrade is valid. 
for _, u := range us { if u.Network <= 0 { return xerrors.Errorf("cannot upgrade to version <= 0: %d", u.Network) } + + for _, m := range u.PreMigrations { + if m.StartWithin <= 0 { + return xerrors.Errorf("pre-migration must specify a positive start-within epoch") + } + + if m.DontStartWithin < 0 || m.StopWithin < 0 { + return xerrors.Errorf("pre-migration must specify non-negative epochs") + } + + if m.StartWithin <= m.StopWithin { + return xerrors.Errorf("pre-migration start-within must come before stop-within") + } + + // If we have a dont-start-within. + if m.DontStartWithin != 0 { + if m.DontStartWithin < m.StopWithin { + return xerrors.Errorf("pre-migration dont-start-within must come before stop-within") + } + if m.StartWithin <= m.DontStartWithin { + return xerrors.Errorf("pre-migration start-within must come after dont-start-within") + } + } + } + if !sort.SliceIsSorted(u.PreMigrations, func(i, j int) bool { + return u.PreMigrations[i].StartWithin > u.PreMigrations[j].StartWithin //nolint:scopelint,gosec + }) { + return xerrors.Errorf("pre-migrations must be sorted by start epoch") + } } - // Make sure all the upgrades make sense. + // Make sure the upgrade order makes sense. 
for i := 1; i < len(us); i++ { prev := &us[i-1] curr := &us[i] @@ -153,15 +292,31 @@ func (us UpgradeSchedule) Validate() error { return nil } -func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecCallback, ts *types.TipSet) (cid.Cid, error) { +func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) { retCid := root var err error - f, ok := sm.stateMigrations[height] - if ok { - retCid, err = f(ctx, sm, cb, root, height, ts) + u := sm.stateMigrations[height] + if u != nil && u.upgrade != nil { + startTime := time.Now() + log.Warnw("STARTING migration", "height", height, "from", root) + // Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. We may + // have to migrate multiple times. + tmpCache := u.cache.Clone() + retCid, err = u.upgrade(ctx, sm, tmpCache, cb, root, height, ts) if err != nil { + log.Errorw("FAILED migration", "height", height, "from", root, "error", err) return cid.Undef, err } + // Yes, we update the cache, even for the final upgrade epoch. Why? Reverts. This + // can save us a _lot_ of time because very few actors will have changed if we + // do a small revert then need to re-run the migration. + u.cache.Update(tmpCache) + log.Warnw("COMPLETED migration", + "height", height, + "from", root, + "to", retCid, + "duration", time.Since(startTime), + ) } return retCid, nil @@ -172,6 +327,109 @@ func (sm *StateManager) hasExpensiveFork(ctx context.Context, height abi.ChainEp return ok } +func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv10.MemMigrationCache, ts *types.TipSet) { + height := ts.Height() + parent := ts.ParentState() + + startTime := time.Now() + + log.Warn("STARTING pre-migration") + // Clone the cache so we don't actually _update_ it + // till we're done. 
Otherwise, if we fail, the next + // migration to use the cache may assume that + // certain blocks exist, even if they don't. + tmpCache := cache.Clone() + err := fn(ctx, sm, tmpCache, parent, height, ts) + if err != nil { + log.Errorw("FAILED pre-migration", "error", err) + return + } + // Finally, if everything worked, update the cache. + cache.Update(tmpCache) + log.Warnw("COMPLETED pre-migration", "duration", time.Since(startTime)) +} + +func (sm *StateManager) preMigrationWorker(ctx context.Context) { + defer close(sm.shutdown) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + type op struct { + after abi.ChainEpoch + notAfter abi.ChainEpoch + run func(ts *types.TipSet) + } + + var wg sync.WaitGroup + defer wg.Wait() + + // Turn each pre-migration into an operation in a schedule. + var schedule []op + for upgradeEpoch, migration := range sm.stateMigrations { + cache := migration.cache + for _, prem := range migration.preMigrations { + preCtx, preCancel := context.WithCancel(ctx) + migrationFunc := prem.PreMigration + + afterEpoch := upgradeEpoch - prem.StartWithin + notAfterEpoch := upgradeEpoch - prem.DontStartWithin + stopEpoch := upgradeEpoch - prem.StopWithin + // We can't start after we stop. + if notAfterEpoch > stopEpoch { + notAfterEpoch = stopEpoch - 1 + } + + // Add an op to start a pre-migration. + schedule = append(schedule, op{ + after: afterEpoch, + notAfter: notAfterEpoch, + + // TODO: are these values correct? + run: func(ts *types.TipSet) { + wg.Add(1) + go func() { + defer wg.Done() + runPreMigration(preCtx, sm, migrationFunc, cache, ts) + }() + }, + }) + + // Add an op to cancel the pre-migration if it's still running. + schedule = append(schedule, op{ + after: stopEpoch, + notAfter: -1, + run: func(ts *types.TipSet) { preCancel() }, + }) + } + } + + // Then sort by epoch. 
+ sort.Slice(schedule, func(i, j int) bool { + return schedule[i].after < schedule[j].after + }) + + // Finally, when the head changes, see if there's anything we need to do. + // + // We're intentionally ignoring reorgs as they don't matter for our purposes. + for change := range sm.cs.SubHeadChanges(ctx) { + for _, head := range change { + for len(schedule) > 0 { + op := &schedule[0] + if head.Val.Height() < op.after { + break + } + + // If we haven't passed the pre-migration height... + if op.notAfter < 0 || head.Val.Height() < op.notAfter { + op.run(head.Val) + } + schedule = schedule[1:] + } + } + } +} + func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmount, cb func(trace types.ExecutionTrace)) error { fromAct, err := tree.GetActor(from) if err != nil { @@ -201,20 +459,9 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo if cb != nil { // record the transfer in execution traces - fakeMsg := &types.Message{ - From: from, - To: to, - Value: amt, - } - fakeRct := &types.MessageReceipt{ - ExitCode: 0, - Return: nil, - GasUsed: 0, - } - cb(types.ExecutionTrace{ - Msg: fakeMsg, - MsgRct: fakeRct, + Msg: makeFakeMsg(from, to, amt, 0), + MsgRct: makeFakeRct(), Error: "", Duration: 0, GasCharges: nil, @@ -225,7 +472,7 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo return nil } -func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, em ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { // Some initial parameters FundsForMiners := types.FromFil(1_000_000) LookbackEpoch := abi.ChainEpoch(32000) @@ -295,7 +542,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal } case builtin0.StorageMinerActorCodeID: var st 
miner0.State - if err := sm.ChainStore().Store(ctx).Get(ctx, act.Head, &st); err != nil { + if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil { return xerrors.Errorf("failed to load miner state: %w", err) } @@ -339,7 +586,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal return cid.Undef, xerrors.Errorf("failed to load power actor: %w", err) } - cst := cbor.NewCborStore(sm.ChainStore().Blockstore()) + cst := cbor.NewCborStore(sm.ChainStore().StateBlockstore()) if err := cst.Get(ctx, powAct.Head, &ps); err != nil { return cid.Undef, xerrors.Errorf("failed to get power actor state: %w", err) } @@ -373,7 +620,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal } case builtin0.StorageMinerActorCodeID: var st miner0.State - if err := sm.ChainStore().Store(ctx).Get(ctx, act.Head, &st); err != nil { + if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil { return xerrors.Errorf("failed to load miner state: %w", err) } @@ -382,7 +629,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal return xerrors.Errorf("failed to get miner info: %w", err) } - sectorsArr, err := adt0.AsArray(sm.ChainStore().Store(ctx), st.Sectors) + sectorsArr, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), st.Sectors) if err != nil { return xerrors.Errorf("failed to load sectors array: %w", err) } @@ -402,11 +649,11 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal lbact, err := lbtree.GetActor(addr) if err == nil { var lbst miner0.State - if err := sm.ChainStore().Store(ctx).Get(ctx, lbact.Head, &lbst); err != nil { + if err := sm.ChainStore().ActorStore(ctx).Get(ctx, lbact.Head, &lbst); err != nil { return xerrors.Errorf("failed to load miner state: %w", err) } - lbsectors, err := adt0.AsArray(sm.ChainStore().Store(ctx), lbst.Sectors) + lbsectors, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), 
lbst.Sectors) if err != nil { return xerrors.Errorf("failed to load lb sectors array: %w", err) } @@ -475,27 +722,17 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal return cid.Undef, xerrors.Errorf("resultant state tree account balance was not correct: %s", total) } - if cb != nil { + if em != nil { // record the transfer in execution traces - fakeMsg := &types.Message{ - From: builtin.SystemActorAddr, - To: builtin.SystemActorAddr, - Value: big.Zero(), - Nonce: uint64(epoch), - } - fakeRct := &types.MessageReceipt{ - ExitCode: 0, - Return: nil, - GasUsed: 0, - } + fakeMsg := makeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch)) - if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ - MessageReceipt: *fakeRct, + if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ + MessageReceipt: *makeFakeRct(), ActorErr: nil, ExecutionTrace: types.ExecutionTrace{ Msg: fakeMsg, - MsgRct: fakeRct, + MsgRct: makeFakeRct(), Error: "", Duration: 0, GasCharges: nil, @@ -503,7 +740,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal }, Duration: 0, GasCosts: nil, - }); err != nil { + }, false); err != nil { return cid.Undef, xerrors.Errorf("recording transfers: %w", err) } } @@ -511,8 +748,8 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal return tree.Flush(ctx) } -func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - store := sm.cs.Store(ctx) +func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + store := sm.cs.ActorStore(ctx) if build.UpgradeLiftoffHeight <= epoch { return cid.Undef, xerrors.Errorf("liftoff height must be beyond ignition height") @@ -548,12 +785,12 @@ func UpgradeIgnition(ctx context.Context, sm 
*StateManager, cb ExecCallback, roo return cid.Undef, xerrors.Errorf("resetting genesis msig start epochs: %w", err) } - err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch) + err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch, ts) if err != nil { return cid.Undef, xerrors.Errorf("splitting first msig: %w", err) } - err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch) + err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch, ts) if err != nil { return cid.Undef, xerrors.Errorf("splitting second msig: %w", err) } @@ -566,9 +803,9 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, roo return tree.Flush(ctx) } -func UpgradeRefuel(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - store := sm.cs.Store(ctx) + store := sm.cs.ActorStore(ctx) tree, err := sm.StateTree(root) if err != nil { return cid.Undef, xerrors.Errorf("getting state tree: %w", err) @@ -592,8 +829,8 @@ func UpgradeRefuel(ctx context.Context, sm *StateManager, cb ExecCallback, root return tree.Flush(ctx) } -func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - buf := bufbstore.NewTieredBstore(sm.cs.Blockstore(), bstore.NewTemporarySync()) +func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) store := store.ActorStore(ctx, buf) info, err := store.Put(ctx, new(types.StateInfo0)) @@ -601,7 +838,7 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, roo 
return cid.Undef, xerrors.Errorf("failed to create new state info for actors v2: %w", err) } - newHamtRoot, err := m2.MigrateStateTree(ctx, store, root, epoch, m2.DefaultConfig()) + newHamtRoot, err := nv4.MigrateStateTree(ctx, store, root, epoch, nv4.DefaultConfig()) if err != nil { return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err) } @@ -638,13 +875,13 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, roo return newRoot, nil } -func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { tree, err := sm.StateTree(root) if err != nil { return cid.Undef, xerrors.Errorf("getting state tree: %w", err) } - err = setNetworkName(ctx, sm.cs.Store(ctx), tree, "mainnet") + err = setNetworkName(ctx, sm.cs.ActorStore(ctx), tree, "mainnet") if err != nil { return cid.Undef, xerrors.Errorf("setting network name: %w", err) } @@ -652,6 +889,386 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root return tree.Flush(ctx) } +func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + if build.BuildType != build.BuildMainnet { + return root, nil + } + + store := sm.cs.ActorStore(ctx) + var stateRoot types.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion1 { + return cid.Undef, xerrors.Errorf( + "expected state root version 1 for calico upgrade, got %d", + stateRoot.Version, + ) + } + + newHamtRoot, err := nv7.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, nv7.DefaultConfig()) + if err != nil { + 
return cid.Undef, xerrors.Errorf("running nv7 migration: %w", err) + } + + newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: stateRoot.Version, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // perform some basic sanity checks to make sure everything still works. + if newSm, err := state.LoadStateTree(store, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err) + } else if newRoot2, err := newSm.Flush(ctx); err != nil { + return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err) + } else if newRoot2 != newRoot { + return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2) + } else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil { + return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err) + } + + return newRoot, nil +} + +func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Address, em ExecMonitor, epoch abi.ChainEpoch, ts *types.TipSet) error { + a, err := tree.GetActor(addr) + if xerrors.Is(err, types.ErrActorNotFound) { + return types.ErrActorNotFound + } else if err != nil { + return xerrors.Errorf("failed to get actor to delete: %w", err) + } + + var trace types.ExecutionTrace + if err := doTransfer(tree, addr, builtin.BurntFundsActorAddr, a.Balance, func(t types.ExecutionTrace) { + trace = t + }); err != nil { + return xerrors.Errorf("transferring terminated actor's balance: %w", err) + } + + if em != nil { + // record the transfer in execution traces + + fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch)) + + if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ + MessageReceipt: *makeFakeRct(), + ActorErr: nil, + ExecutionTrace: trace, + Duration: 0, + GasCosts: nil, + }, false); err != nil { + return xerrors.Errorf("recording 
transfers: %w", err) + } + } + + err = tree.DeleteActor(addr) + if err != nil { + return xerrors.Errorf("deleting actor from tree: %w", err) + } + + ia, err := tree.GetActor(init_.Address) + if err != nil { + return xerrors.Errorf("loading init actor: %w", err) + } + + ias, err := init_.Load(&state.AdtStore{IpldStore: tree.Store}, ia) + if err != nil { + return xerrors.Errorf("loading init actor state: %w", err) + } + + if err := ias.Remove(addr); err != nil { + return xerrors.Errorf("deleting entry from address map: %w", err) + } + + nih, err := tree.Store.Put(ctx, ias) + if err != nil { + return xerrors.Errorf("writing new init actor state: %w", err) + } + + ia.Head = nih + + return tree.SetActor(init_.Address, ia) +} + +func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. + workerCount := runtime.NumCPU() - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv10.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + newRoot, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors v3 state: %w", err) + } + + tree, err := sm.StateTree(newRoot) + if err != nil { + return cid.Undef, xerrors.Errorf("getting state tree: %w", err) + } + + if build.BuildType == build.BuildMainnet { + err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts) + if err != nil && !xerrors.Is(err, types.ErrActorNotFound) { + return cid.Undef, xerrors.Errorf("deleting zero bls actor: %w", err) + } + + newRoot, err = tree.Flush(ctx) + if err != nil { + return cid.Undef, xerrors.Errorf("flushing state tree: %w", err) + } + } + + return newRoot, nil +} + +func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch 
abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. + workerCount := runtime.NumCPU() + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + config := nv10.Config{MaxWorkers: uint(workerCount)} + _, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config) + return err +} + +func upgradeActorsV3Common( + ctx context.Context, sm *StateManager, cache MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config nv10.Config, +) (cid.Cid, error) { + buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) + store := store.ActorStore(ctx, buf) + + // Load the state root. + var stateRoot types.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion1 { + return cid.Undef, xerrors.Errorf( + "expected state root version 1 for actors v3 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv10.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v3: %w", err) + } + + // Persist the result. + newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion2, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // Persist the new tree. 
+ + { + from := buf + to := buf.Read() + + if err := vm.Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) + } + } + + return newRoot, nil +} + +func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. + workerCount := runtime.NumCPU() - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv12.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + + newRoot, err := upgradeActorsV4Common(ctx, sm, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors v4 state: %w", err) + } + + return newRoot, nil +} + +func PreUpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. + workerCount := runtime.NumCPU() + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + config := nv12.Config{MaxWorkers: uint(workerCount)} + _, err := upgradeActorsV4Common(ctx, sm, cache, root, epoch, ts, config) + return err +} + +func upgradeActorsV4Common( + ctx context.Context, sm *StateManager, cache MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config nv12.Config, +) (cid.Cid, error) { + buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) + store := store.ActorStore(ctx, buf) + + // Load the state root. 
+ var stateRoot types.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion2 { + return cid.Undef, xerrors.Errorf( + "expected state root version 2 for actors v4 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv12.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v4: %w", err) + } + + // Persist the result. + newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion3, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // Persist the new tree. + + { + from := buf + to := buf.Read() + + if err := vm.Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) + } + } + + return newRoot, nil +} + +func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. + workerCount := runtime.NumCPU() - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv13.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + + newRoot, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors v5 state: %w", err) + } + + return newRoot, nil +} + +func PreUpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. 
+ workerCount := runtime.NumCPU() + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + config := nv13.Config{MaxWorkers: uint(workerCount)} + _, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config) + return err +} + +func upgradeActorsV5Common( + ctx context.Context, sm *StateManager, cache MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config nv13.Config, +) (cid.Cid, error) { + buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) + store := store.ActorStore(ctx, buf) + + // Load the state root. + var stateRoot types.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion3 { + return cid.Undef, xerrors.Errorf( + "expected state root version 3 for actors v5 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv13.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err) + } + + // Persist the result. + newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion4, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // Persist the new tree. 
+ + { + from := buf + to := buf.Read() + + if err := vm.Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) + } + } + + return newRoot, nil +} + func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error { ia, err := tree.GetActor(builtin0.InitActorAddr) if err != nil { @@ -679,7 +1296,7 @@ func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, return nil } -func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch) error { +func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error { if portions < 1 { return xerrors.Errorf("cannot split into 0 portions") } @@ -776,27 +1393,17 @@ func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Ad i++ } - if cb != nil { + if em != nil { // record the transfer in execution traces - fakeMsg := &types.Message{ - From: builtin.SystemActorAddr, - To: addr, - Value: big.Zero(), - Nonce: uint64(epoch), - } - fakeRct := &types.MessageReceipt{ - ExitCode: 0, - Return: nil, - GasUsed: 0, - } + fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch)) - if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ - MessageReceipt: *fakeRct, + if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ + MessageReceipt: *makeFakeRct(), ActorErr: nil, ExecutionTrace: types.ExecutionTrace{ Msg: fakeMsg, - MsgRct: fakeRct, + MsgRct: makeFakeRct(), Error: "", Duration: 0, GasCharges: nil, @@ -804,7 +1411,7 @@ func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Ad }, Duration: 0, GasCosts: nil, - }); err != nil { + }, false); err != nil { return xerrors.Errorf("recording transfers: %w", err) } } @@ -846,7 
+1453,7 @@ func resetGenesisMsigs0(ctx context.Context, sm *StateManager, store adt0.Store, return xerrors.Errorf("getting genesis tipset: %w", err) } - cst := cbor.NewCborStore(sm.cs.Blockstore()) + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) genesisTree, err := state.LoadStateTree(cst, gts.ParentState()) if err != nil { return xerrors.Errorf("loading state tree: %w", err) @@ -915,3 +1522,20 @@ func resetMultisigVesting0(ctx context.Context, store adt0.Store, tree *state.St return nil } + +func makeFakeMsg(from address.Address, to address.Address, amt abi.TokenAmount, nonce uint64) *types.Message { + return &types.Message{ + From: from, + To: to, + Value: amt, + Nonce: nonce, + } +} + +func makeFakeRct() *types.MessageReceipt { + return &types.MessageReceipt{ + ExitCode: 0, + Return: nil, + GasUsed: 0, + } +} diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go index a2b7a179fba..6d22a294d71 100644 --- a/chain/stmgr/forks_test.go +++ b/chain/stmgr/forks_test.go @@ -4,11 +4,12 @@ import ( "context" "fmt" "io" + "sync" "testing" "github.com/ipfs/go-cid" ipldcbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "github.com/stretchr/testify/require" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" @@ -122,9 +123,9 @@ func TestForkHeightTriggers(t *testing.T) { cg.ChainStore(), UpgradeSchedule{{ Network: 1, Height: testForkHeight, - Migration: func(ctx context.Context, sm *StateManager, cb ExecCallback, + Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { - cst := ipldcbor.NewCborStore(sm.ChainStore().Blockstore()) + cst := ipldcbor.NewCborStore(sm.ChainStore().StateBlockstore()) st, err := sm.StateTree(root) if err != nil { @@ -252,7 +253,7 @@ func TestForkRefuseCall(t *testing.T) { Network: 1, Expensive: true, Height: testForkHeight, - Migration: func(ctx 
context.Context, sm *StateManager, cb ExecCallback, + Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { return root, nil }}}) @@ -296,24 +297,191 @@ func TestForkRefuseCall(t *testing.T) { t.Fatal(err) } + pts, err := cg.ChainStore().LoadTipSet(ts.TipSet.TipSet().Parents()) + require.NoError(t, err) + parentHeight := pts.Height() + currentHeight := ts.TipSet.TipSet().Height() + + // CallWithGas calls _at_ the current tipset. ret, err := sm.CallWithGas(ctx, m, nil, ts.TipSet.TipSet()) - switch ts.TipSet.TipSet().Height() { - case testForkHeight, testForkHeight + 1: + if parentHeight <= testForkHeight && currentHeight >= testForkHeight { // If I had a fork, or I _will_ have a fork, it should fail. require.Equal(t, ErrExpensiveFork, err) - default: + } else { require.NoError(t, err) require.True(t, ret.MsgRct.ExitCode.IsSuccess()) } - // Call just runs on the parent state for a tipset, so we only - // expect an error at the fork height. + + // Call always applies the message to the "next block" after the tipset's parent state. 
ret, err = sm.Call(ctx, m, ts.TipSet.TipSet()) - switch ts.TipSet.TipSet().Height() { - case testForkHeight + 1: + if parentHeight == testForkHeight { require.Equal(t, ErrExpensiveFork, err) - default: + } else { require.NoError(t, err) require.True(t, ret.MsgRct.ExitCode.IsSuccess()) } } } + +func TestForkPreMigration(t *testing.T) { + logging.SetAllLoggers(logging.LevelInfo) + + cg, err := gen.NewGenerator() + if err != nil { + t.Fatal(err) + } + + fooCid, err := abi.CidBuilder.Sum([]byte("foo")) + require.NoError(t, err) + + barCid, err := abi.CidBuilder.Sum([]byte("bar")) + require.NoError(t, err) + + failCid, err := abi.CidBuilder.Sum([]byte("fail")) + require.NoError(t, err) + + var wait20 sync.WaitGroup + wait20.Add(3) + + wasCanceled := make(chan struct{}) + + checkCache := func(t *testing.T, cache MigrationCache) { + found, value, err := cache.Read("foo") + require.NoError(t, err) + require.True(t, found) + require.Equal(t, fooCid, value) + + found, value, err = cache.Read("bar") + require.NoError(t, err) + require.True(t, found) + require.Equal(t, barCid, value) + + found, _, err = cache.Read("fail") + require.NoError(t, err) + require.False(t, found) + } + + counter := make(chan struct{}, 10) + + sm, err := NewStateManagerWithUpgradeSchedule( + cg.ChainStore(), UpgradeSchedule{{ + Network: 1, + Height: testForkHeight, + Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, + root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + + // Make sure the test that should be canceled, is canceled. + select { + case <-wasCanceled: + case <-ctx.Done(): + return cid.Undef, ctx.Err() + } + + // the cache should be setup correctly. 
+ checkCache(t, cache) + + counter <- struct{}{} + + return root, nil + }, + PreMigrations: []PreMigration{{ + StartWithin: 20, + PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache, + _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error { + wait20.Done() + wait20.Wait() + + err := cache.Write("foo", fooCid) + require.NoError(t, err) + + counter <- struct{}{} + + return nil + }, + }, { + StartWithin: 20, + PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache, + _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error { + wait20.Done() + wait20.Wait() + + err := cache.Write("bar", barCid) + require.NoError(t, err) + + counter <- struct{}{} + + return nil + }, + }, { + StartWithin: 20, + PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache, + _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error { + wait20.Done() + wait20.Wait() + + err := cache.Write("fail", failCid) + require.NoError(t, err) + + counter <- struct{}{} + + // Fail this migration. The cached entry should not be persisted. 
+ return fmt.Errorf("failed") + }, + }, { + StartWithin: 15, + StopWithin: 5, + PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache, + _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error { + + <-ctx.Done() + close(wasCanceled) + + counter <- struct{}{} + + return nil + }, + }, { + StartWithin: 10, + PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache, + _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error { + + checkCache(t, cache) + + counter <- struct{}{} + + return nil + }, + }}}, + }) + if err != nil { + t.Fatal(err) + } + require.NoError(t, sm.Start(context.Background())) + defer func() { + require.NoError(t, sm.Stop(context.Background())) + }() + + inv := vm.NewActorRegistry() + inv.Register(nil, testActor{}) + + sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) { + nvm, err := vm.NewVM(ctx, vmopt) + if err != nil { + return nil, err + } + nvm.SetInvoker(inv) + return nvm, nil + }) + + cg.SetStateManager(sm) + + for i := 0; i < 50; i++ { + _, err := cg.NextTipSet() + if err != nil { + t.Fatal(err) + } + } + // We have 5 pre-migration steps, and the migration. They should all have written something + // to this channel. 
+ require.Equal(t, 6, len(counter)) +} diff --git a/chain/stmgr/read.go b/chain/stmgr/read.go index 9a9b8026576..3c7fb5d91e8 100644 --- a/chain/stmgr/read.go +++ b/chain/stmgr/read.go @@ -22,7 +22,7 @@ func (sm *StateManager) ParentStateTsk(tsk types.TipSetKey) (*state.StateTree, e } func (sm *StateManager) ParentState(ts *types.TipSet) (*state.StateTree, error) { - cst := cbor.NewCborStore(sm.cs.Blockstore()) + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) state, err := state.LoadStateTree(cst, sm.parentState(ts)) if err != nil { return nil, xerrors.Errorf("load state tree: %w", err) @@ -32,7 +32,7 @@ func (sm *StateManager) ParentState(ts *types.TipSet) (*state.StateTree, error) } func (sm *StateManager) StateTree(st cid.Cid) (*state.StateTree, error) { - cst := cbor.NewCborStore(sm.cs.Blockstore()) + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) state, err := state.LoadStateTree(cst, st) if err != nil { return nil, xerrors.Errorf("load state tree: %w", err) diff --git a/node/modules/rpcstatemanager.go b/chain/stmgr/rpc/rpcstatemanager.go similarity index 88% rename from node/modules/rpcstatemanager.go rename to chain/stmgr/rpc/rpcstatemanager.go index 7d7b9243798..dc719eb55e3 100644 --- a/node/modules/rpcstatemanager.go +++ b/chain/stmgr/rpc/rpcstatemanager.go @@ -1,4 +1,4 @@ -package modules +package rpcstmgr import ( "context" @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/stmgr" @@ -16,12 +16,12 @@ import ( ) type RPCStateManager struct { - gapi api.GatewayAPI + gapi api.Gateway cstore *cbor.BasicIpldStore } -func NewRPCStateManager(api api.GatewayAPI) *RPCStateManager { - cstore := cbor.NewCborStore(apibstore.NewAPIBlockstore(api)) +func 
NewRPCStateManager(api api.Gateway) *RPCStateManager { + cstore := cbor.NewCborStore(blockstore.NewAPIBlockstore(api)) return &RPCStateManager{gapi: api, cstore: cstore} } diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index 7e5809a840c..4f1351d2c58 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -5,11 +5,15 @@ import ( "errors" "fmt" "sync" + "sync/atomic" + + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log/v2" cbg "github.com/whyrusleeping/cbor-gen" + "go.opencensus.io/stats" "go.opencensus.io/trace" "golang.org/x/xerrors" @@ -20,6 +24,10 @@ import ( // Used for genesis. msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + "github.com/filecoin-project/specs-actors/v3/actors/migration/nv10" + + // we use the same adt for all receipts + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" @@ -39,9 +47,11 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/metrics" ) -const LookbackNoLimit = abi.ChainEpoch(-1) +const LookbackNoLimit = api.LookbackNoLimit +const ReceiptAmtBitwidth = 3 var log = logging.Logger("statemgr") @@ -58,27 +68,49 @@ type versionSpec struct { atOrBelow abi.ChainEpoch } +type migration struct { + upgrade MigrationFunc + preMigrations []PreMigration + cache *nv10.MemMigrationCache +} + type StateManager struct { cs *store.ChainStore + cancel context.CancelFunc + shutdown chan struct{} + // Determines the network version at any given epoch. networkVersions []versionSpec latestVersion network.Version - // Maps chain epochs to upgrade functions. - stateMigrations map[abi.ChainEpoch]UpgradeFunc + // Maps chain epochs to migrations. 
+ stateMigrations map[abi.ChainEpoch]*migration // A set of potentially expensive/time consuming upgrades. Explicit // calls for, e.g., gas estimation fail against this epoch with // ErrExpensiveFork. expensiveUpgrades map[abi.ChainEpoch]struct{} - stCache map[string][]cid.Cid - compWait map[string]chan struct{} - stlk sync.Mutex - genesisMsigLk sync.Mutex - newVM func(context.Context, *vm.VMOpts) (*vm.VM, error) - preIgnitionGenInfos *genesisInfo - postIgnitionGenInfos *genesisInfo + stCache map[string][]cid.Cid + tCache treeCache + compWait map[string]chan struct{} + stlk sync.Mutex + genesisMsigLk sync.Mutex + newVM func(context.Context, *vm.VMOpts) (*vm.VM, error) + preIgnitionVesting []msig0.State + postIgnitionVesting []msig0.State + postCalicoVesting []msig0.State + + genesisPledge abi.TokenAmount + genesisMarketFunds abi.TokenAmount + + tsExecMonitor ExecMonitor +} + +// Caches a single state tree +type treeCache struct { + root cid.Cid + tree *state.StateTree } func NewStateManager(cs *store.ChainStore) *StateManager { @@ -95,7 +127,7 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule return nil, err } - stateMigrations := make(map[abi.ChainEpoch]UpgradeFunc, len(us)) + stateMigrations := make(map[abi.ChainEpoch]*migration, len(us)) expensiveUpgrades := make(map[abi.ChainEpoch]struct{}, len(us)) var networkVersions []versionSpec lastVersion := network.Version0 @@ -103,8 +135,13 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule // If we have any upgrades, process them and create a version // schedule. 
for _, upgrade := range us { - if upgrade.Migration != nil { - stateMigrations[upgrade.Height] = upgrade.Migration + if upgrade.Migration != nil || upgrade.PreMigrations != nil { + migration := &migration{ + upgrade: upgrade.Migration, + preMigrations: upgrade.PreMigrations, + cache: nv10.NewMemMigrationCache(), + } + stateMigrations[upgrade.Height] = migration } if upgrade.Expensive { expensiveUpgrades[upgrade.Height] = struct{}{} @@ -128,10 +165,23 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule newVM: vm.NewVM, cs: cs, stCache: make(map[string][]cid.Cid), - compWait: make(map[string]chan struct{}), + tCache: treeCache{ + root: cid.Undef, + tree: nil, + }, + compWait: make(map[string]chan struct{}), }, nil } +func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, us UpgradeSchedule, em ExecMonitor) (*StateManager, error) { + sm, err := NewStateManagerWithUpgradeSchedule(cs, us) + if err != nil { + return nil, err + } + sm.tsExecMonitor = em + return sm, nil +} + func cidsToKey(cids []cid.Cid) string { var out string for _, c := range cids { @@ -140,6 +190,33 @@ func cidsToKey(cids []cid.Cid) string { return out } +// Start starts the state manager's optional background processes. At the moment, this schedules +// pre-migration functions to run ahead of network upgrades. +// +// This method is not safe to invoke from multiple threads or concurrently with Stop. +func (sm *StateManager) Start(context.Context) error { + var ctx context.Context + ctx, sm.cancel = context.WithCancel(context.Background()) + sm.shutdown = make(chan struct{}) + go sm.preMigrationWorker(ctx) + return nil +} + +// Stop starts the state manager's background processes. +// +// This method is not safe to invoke concurrently with Start. 
+func (sm *StateManager) Stop(ctx context.Context) error { + if sm.cancel != nil { + sm.cancel() + select { + case <-sm.shutdown: + case <-ctx.Done(): + return ctx.Err() + } + } + return nil +} + func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st cid.Cid, rec cid.Cid, err error) { ctx, span := trace.StartSpan(ctx, "tipSetState") defer span.End() @@ -189,7 +266,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil } - st, rec, err = sm.computeTipSetState(ctx, ts, nil) + st, rec, err = sm.computeTipSetState(ctx, ts, sm.tsExecMonitor) if err != nil { return cid.Undef, cid.Undef, err } @@ -197,46 +274,35 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c return st, rec, nil } -func traceFunc(trace *[]*api.InvocResult) func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { - return func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { - ir := &api.InvocResult{ - MsgCid: mcid, - Msg: msg, - MsgRct: &ret.MessageReceipt, - ExecutionTrace: ret.ExecutionTrace, - Duration: ret.Duration, - } - if ret.ActorErr != nil { - ir.Error = ret.ActorErr.Error() - } - if ret.GasCosts != nil { - ir.GasCost = MakeMsgGasCost(msg, ret) - } - *trace = append(*trace, ir) - return nil - } +func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, error) { + st, _, err := sm.computeTipSetState(ctx, ts, em) + return st, err } func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) { - var trace []*api.InvocResult - st, _, err := sm.computeTipSetState(ctx, ts, traceFunc(&trace)) + var invocTrace []*api.InvocResult + st, err := sm.ExecutionTraceWithMonitor(ctx, ts, &InvocationTracer{trace: &invocTrace}) if err != nil { return cid.Undef, nil, err } - - return st, trace, nil + return st, 
invocTrace, nil } -type ExecCallback func(cid.Cid, *types.Message, *vm.ApplyRet) error +func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, em ExecMonitor, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) { + done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal) + defer done() -func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) { + partDone := metrics.Timer(ctx, metrics.VMApplyEarly) + defer func() { + partDone() + }() makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) { vmopt := &vm.VMOpts{ StateBase: base, Epoch: epoch, Rand: r, - Bstore: sm.cs.Blockstore(), + Bstore: sm.cs.StateBlockstore(), Syscalls: sm.cs.VMSys(), CircSupplyCalc: sm.GetVMCirculatingSupply, NtwkVersion: sm.GetNtwkVersion, @@ -253,7 +319,6 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp } runCron := func(epoch abi.ChainEpoch) error { - cronMsg := &types.Message{ To: cron.Address, From: builtin.SystemActorAddr, @@ -269,8 +334,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp if err != nil { return err } - if cb != nil { - if err := cb(cronMsg.Cid(), cronMsg, ret); err != nil { + if em != nil { + if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil { return xerrors.Errorf("callback failed on cron message: %w", err) } } @@ -296,7 +361,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp // handle state forks // XXX: The state tree - newState, err := sm.handleStateForks(ctx, pstate, i, cb, ts) + newState, err := sm.handleStateForks(ctx, pstate, i, em, ts) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("error handling state 
forks: %w", err) } @@ -312,6 +377,9 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp pstate = newState } + partDone() + partDone = metrics.Timer(ctx, metrics.VMApplyMessages) + var receipts []cbg.CBORMarshaler processedMsgs := make(map[cid.Cid]struct{}) for _, b := range bms { @@ -332,8 +400,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp gasReward = big.Add(gasReward, r.GasCosts.MinerTip) penalty = big.Add(penalty, r.GasCosts.MinerPenalty) - if cb != nil { - if err := cb(cm.Cid(), m, r); err != nil { + if em != nil { + if err := em.MessageApplied(ctx, ts, cm.Cid(), m, r, false); err != nil { return cid.Undef, cid.Undef, err } } @@ -365,8 +433,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp if actErr != nil { return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr) } - if cb != nil { - if err := cb(rwMsg.Cid(), rwMsg, ret); err != nil { + if em != nil { + if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err) } } @@ -376,15 +444,17 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp } } + partDone() + partDone = metrics.Timer(ctx, metrics.VMApplyCron) + if err := runCron(epoch); err != nil { return cid.Cid{}, cid.Cid{}, err } - // XXX: Is the height correct? Or should it be epoch-1? 
- rectarr, err := adt.NewArray(sm.cs.Store(ctx), actors.VersionForNetwork(sm.GetNtwkVersion(ctx, epoch))) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to create receipts amt: %w", err) - } + partDone() + partDone = metrics.Timer(ctx, metrics.VMApplyFlush) + + rectarr := blockadt.MakeEmptyArray(sm.cs.ActorStore(ctx)) for i, receipt := range receipts { if err := rectarr.Set(uint64(i), receipt); err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err) @@ -400,10 +470,13 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp return cid.Undef, cid.Undef, xerrors.Errorf("vm flush failed: %w", err) } + stats.Record(ctx, metrics.VMSends.M(int64(atomic.LoadUint64(&vm.StatSends))), + metrics.VMApplied.M(int64(atomic.LoadUint64(&vm.StatApplied)))) + return st, rectroot, nil } -func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet, cb ExecCallback) (cid.Cid, cid.Cid, error) { +func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, cid.Cid, error) { ctx, span := trace.StartSpan(ctx, "computeTipSetState") defer span.End() @@ -439,7 +512,7 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet baseFee := blks[0].ParentBaseFee - return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, cb, baseFee, ts) + return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, em, baseFee, ts) } func (sm *StateManager) parentState(ts *types.TipSet) cid.Cid { @@ -469,13 +542,26 @@ func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Ad ts = sm.cs.GetHeaviestTipSet() } + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) + + // First try to resolve the actor in the parent state, so we don't have to compute anything. 
+ tree, err := state.LoadStateTree(cst, ts.ParentState()) + if err != nil { + return address.Undef, xerrors.Errorf("failed to load parent state tree: %w", err) + } + + resolved, err := vm.ResolveToKeyAddr(tree, cst, addr) + if err == nil { + return resolved, nil + } + + // If that fails, compute the tip-set and try again. st, _, err := sm.TipSetState(ctx, ts) if err != nil { return address.Undef, xerrors.Errorf("resolve address failed to get tipset state: %w", err) } - cst := cbor.NewCborStore(sm.cs.Blockstore()) - tree, err := state.LoadStateTree(cst, st) + tree, err = state.LoadStateTree(cst, st) if err != nil { return address.Undef, xerrors.Errorf("failed to load state tree") } @@ -483,6 +569,52 @@ func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Ad return vm.ResolveToKeyAddr(tree, cst, addr) } +// ResolveToKeyAddressAtFinality is similar to stmgr.ResolveToKeyAddress but fails if the ID address being resolved isn't reorg-stable yet. +// It should not be used for consensus-critical subsystems. 
+func (sm *StateManager) ResolveToKeyAddressAtFinality(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { + switch addr.Protocol() { + case address.BLS, address.SECP256K1: + return addr, nil + case address.Actor: + return address.Undef, xerrors.New("cannot resolve actor address to key address") + default: + } + + if ts == nil { + ts = sm.cs.GetHeaviestTipSet() + } + + var err error + if ts.Height() > policy.ChainFinality { + ts, err = sm.ChainStore().GetTipsetByHeight(ctx, ts.Height()-policy.ChainFinality, ts, true) + if err != nil { + return address.Undef, xerrors.Errorf("failed to load lookback tipset: %w", err) + } + } + + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) + tree := sm.tCache.tree + + if tree == nil || sm.tCache.root != ts.ParentState() { + tree, err = state.LoadStateTree(cst, ts.ParentState()) + if err != nil { + return address.Undef, xerrors.Errorf("failed to load parent state tree: %w", err) + } + + sm.tCache = treeCache{ + root: ts.ParentState(), + tree: tree, + } + } + + resolved, err := vm.ResolveToKeyAddr(tree, cst, addr) + if err == nil { + return resolved, nil + } + + return address.Undef, xerrors.New("ID address not found in lookback state") +} + func (sm *StateManager) GetBlsPublicKey(ctx context.Context, addr address.Address, ts *types.TipSet) (pubk []byte, err error) { kaddr, err := sm.ResolveToKeyAddress(ctx, addr, ts) if err != nil { @@ -497,7 +629,7 @@ func (sm *StateManager) GetBlsPublicKey(ctx context.Context, addr address.Addres } func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { - cst := cbor.NewCborStore(sm.cs.Blockstore()) + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) state, err := state.LoadStateTree(cst, sm.parentState(ts)) if err != nil { return address.Undef, xerrors.Errorf("load state tree: %w", err) @@ -505,24 +637,10 @@ func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts * return 
state.LookupID(addr) } -func (sm *StateManager) GetReceipt(ctx context.Context, msg cid.Cid, ts *types.TipSet) (*types.MessageReceipt, error) { - m, err := sm.cs.GetCMessage(msg) - if err != nil { - return nil, fmt.Errorf("failed to load message: %w", err) - } - - _, r, _, err := sm.searchBackForMsg(ctx, ts, m, LookbackNoLimit) - if err != nil { - return nil, fmt.Errorf("failed to look back through chain for message: %w", err) - } - - return r, nil -} - // WaitForMessage blocks until a message appears on chain. It looks backwards in the chain to see if this has already // happened, with an optional limit to how many epochs it will search. It guarantees that the message has been on // chain for at least confidence epochs without being reverted before returning. -func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confidence uint64, lookbackLimit abi.ChainEpoch) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { +func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confidence uint64, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -546,7 +664,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid return nil, nil, cid.Undef, fmt.Errorf("expected current head on SHC stream (got %s)", head[0].Type) } - r, foundMsg, err := sm.tipsetExecutedMessage(head[0].Val, mcid, msg.VMMessage()) + r, foundMsg, err := sm.tipsetExecutedMessage(head[0].Val, mcid, msg.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, err } @@ -560,9 +678,9 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid var backFm cid.Cid backSearchWait := make(chan struct{}) go func() { - fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head[0].Val, msg, lookbackLimit) + fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head[0].Val, msg, lookbackLimit, allowReplaced) if 
err != nil { - log.Warnf("failed to look back through chain for message: %w", err) + log.Warnf("failed to look back through chain for message: %v", err) return } @@ -599,7 +717,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid if candidateTs != nil && val.Val.Height() >= candidateTs.Height()+abi.ChainEpoch(confidence) { return candidateTs, candidateRcp, candidateFm, nil } - r, foundMsg, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage()) + r, foundMsg, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, err } @@ -635,15 +753,13 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid } } -func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { +func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet, mcid cid.Cid, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { msg, err := sm.cs.GetCMessage(mcid) if err != nil { return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err) } - head := sm.cs.GetHeaviestTipSet() - - r, foundMsg, err := sm.tipsetExecutedMessage(head, mcid, msg.VMMessage()) + r, foundMsg, err := sm.tipsetExecutedMessage(head, mcid, msg.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, err } @@ -652,7 +768,7 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*ty return head, r, foundMsg, nil } - fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, LookbackNoLimit) + fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, lookbackLimit, allowReplaced) if err != nil { log.Warnf("failed to look back through chain for message %s", mcid) @@ -672,7 +788,7 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*ty // - 0 then no tipsets are searched // - 5 
then five tipset are searched // - LookbackNoLimit then there is no limit -func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet, m types.ChainMsg, limit abi.ChainEpoch) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { +func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet, m types.ChainMsg, limit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { limitHeight := from.Height() - limit noLimit := limit == LookbackNoLimit @@ -722,7 +838,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet // check that between cur and parent tipset the nonce fell into range of our message if actorNoExist || (curActor.Nonce > mNonce && act.Nonce <= mNonce) { - r, foundMsg, err := sm.tipsetExecutedMessage(cur, m.Cid(), m.VMMessage()) + r, foundMsg, err := sm.tipsetExecutedMessage(cur, m.Cid(), m.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, xerrors.Errorf("checking for message execution during lookback: %w", err) } @@ -737,7 +853,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet } } -func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm *types.Message) (*types.MessageReceipt, cid.Cid, error) { +func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm *types.Message, allowReplaced bool) (*types.MessageReceipt, cid.Cid, error) { // The genesis block did not execute any messages if ts.Height() == 0 { return nil, cid.Undef, nil @@ -760,7 +876,7 @@ func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm if m.VMMessage().From == vmm.From { // cheaper to just check origin first if m.VMMessage().Nonce == vmm.Nonce { - if m.VMMessage().EqualCall(vmm) { + if allowReplaced && m.VMMessage().EqualCall(vmm) { if m.Cid() != msg { log.Warnw("found message with equal nonce and call params but different CID", "wanted", msg, "found", 
m.Cid(), "nonce", vmm.Nonce, "from", vmm.From) @@ -790,10 +906,7 @@ func (sm *StateManager) ListAllActors(ctx context.Context, ts *types.TipSet) ([] if ts == nil { ts = sm.cs.GetHeaviestTipSet() } - st, _, err := sm.TipSetState(ctx, ts) - if err != nil { - return nil, err - } + st := ts.ParentState() stateTree, err := sm.StateTree(st) if err != nil { @@ -823,7 +936,7 @@ func (sm *StateManager) MarketBalance(ctx context.Context, addr address.Address, return api.MarketBalance{}, err } - mstate, err := market.Load(sm.cs.Store(ctx), act) + mstate, err := market.Load(sm.cs.ActorStore(ctx), act) if err != nil { return api.MarketBalance{}, err } @@ -889,23 +1002,8 @@ func (sm *StateManager) SetVMConstructor(nvm func(context.Context, *vm.VMOpts) ( sm.newVM = nvm } -type genesisInfo struct { - genesisMsigs []msig0.State - // info about the Accounts in the genesis state - genesisActors []genesisActor - genesisPledge abi.TokenAmount - genesisMarketFunds abi.TokenAmount -} - -type genesisActor struct { - addr address.Address - initBal abi.TokenAmount -} - -// sets up information about the actors in the genesis state -func (sm *StateManager) setupGenesisActors(ctx context.Context) error { - - gi := genesisInfo{} +// sets up information about the vesting schedule +func (sm *StateManager) setupGenesisVestingSchedule(ctx context.Context) error { gb, err := sm.cs.GetGenesis() if err != nil { @@ -922,133 +1020,64 @@ func (sm *StateManager) setupGenesisActors(ctx context.Context) error { return xerrors.Errorf("getting genesis tipset state: %w", err) } - cst := cbor.NewCborStore(sm.cs.Blockstore()) + cst := cbor.NewCborStore(sm.cs.StateBlockstore()) sTree, err := state.LoadStateTree(cst, st) if err != nil { return xerrors.Errorf("loading state tree: %w", err) } - gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree) + gmf, err := getFilMarketLocked(ctx, sTree) if err != nil { return xerrors.Errorf("setting up genesis market funds: %w", err) } - gi.genesisPledge, err = 
getFilPowerLocked(ctx, sTree) + gp, err := getFilPowerLocked(ctx, sTree) if err != nil { return xerrors.Errorf("setting up genesis pledge: %w", err) } - totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) - err = sTree.ForEach(func(kaddr address.Address, act *types.Actor) error { - if builtin.IsMultisigActor(act.Code) { - s, err := multisig.Load(sm.cs.Store(ctx), act) - if err != nil { - return err - } - - se, err := s.StartEpoch() - if err != nil { - return err - } - - if se != 0 { - return xerrors.New("genesis multisig doesn't start vesting at epoch 0!") - } - - ud, err := s.UnlockDuration() - if err != nil { - return err - } + sm.genesisMarketFunds = gmf + sm.genesisPledge = gp - ib, err := s.InitialBalance() - if err != nil { - return err - } + totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) - ot, f := totalsByEpoch[ud] - if f { - totalsByEpoch[ud] = big.Add(ot, ib) - } else { - totalsByEpoch[ud] = ib - } + // 6 months + sixMonths := abi.ChainEpoch(183 * builtin.EpochsInDay) + totalsByEpoch[sixMonths] = big.NewInt(49_929_341) + totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700)) - } else if builtin.IsAccountActor(act.Code) { - // should exclude burnt funds actor and "remainder account actor" - // should only ever be "faucet" accounts in testnets - if kaddr == builtin.BurntFundsActorAddr { - return nil - } + // 1 year + oneYear := abi.ChainEpoch(365 * builtin.EpochsInDay) + totalsByEpoch[oneYear] = big.NewInt(22_421_712) - kid, err := sTree.LookupID(kaddr) - if err != nil { - return xerrors.Errorf("resolving address: %w", err) - } + // 2 years + twoYears := abi.ChainEpoch(2 * 365 * builtin.EpochsInDay) + totalsByEpoch[twoYears] = big.NewInt(7_223_364) - gi.genesisActors = append(gi.genesisActors, genesisActor{ - addr: kid, - initBal: act.Balance, - }) - } - return nil - }) + // 3 years + threeYears := abi.ChainEpoch(3 * 365 * builtin.EpochsInDay) + totalsByEpoch[threeYears] = big.NewInt(87_637_883) - if err != nil 
{ - return xerrors.Errorf("error setting up genesis infos: %w", err) - } + // 6 years + sixYears := abi.ChainEpoch(6 * 365 * builtin.EpochsInDay) + totalsByEpoch[sixYears] = big.NewInt(100_000_000) + totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) - // TODO: use network upgrade abstractions or always start at actors v0? - gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch)) + sm.preIgnitionVesting = make([]msig0.State, 0, len(totalsByEpoch)) for k, v := range totalsByEpoch { ns := msig0.State{ InitialBalance: v, UnlockDuration: k, PendingTxns: cid.Undef, } - gi.genesisMsigs = append(gi.genesisMsigs, ns) + sm.preIgnitionVesting = append(sm.preIgnitionVesting, ns) } - sm.preIgnitionGenInfos = &gi - return nil } -// sets up information about the actors in the genesis state -// For testnet we use a hardcoded set of multisig states, instead of what's actually in the genesis multisigs -// We also do not consider ANY account actors (including the faucet) -func (sm *StateManager) setupPreIgnitionGenesisActorsTestnet(ctx context.Context) error { - - gi := genesisInfo{} - - gb, err := sm.cs.GetGenesis() - if err != nil { - return xerrors.Errorf("getting genesis block: %w", err) - } - - gts, err := types.NewTipSet([]*types.BlockHeader{gb}) - if err != nil { - return xerrors.Errorf("getting genesis tipset: %w", err) - } - - st, _, err := sm.TipSetState(ctx, gts) - if err != nil { - return xerrors.Errorf("getting genesis tipset state: %w", err) - } - - cst := cbor.NewCborStore(sm.cs.Blockstore()) - sTree, err := state.LoadStateTree(cst, st) - if err != nil { - return xerrors.Errorf("loading state tree: %w", err) - } - - gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree) - if err != nil { - return xerrors.Errorf("setting up genesis market funds: %w", err) - } - - gi.genesisPledge, err = getFilPowerLocked(ctx, sTree) - if err != nil { - return xerrors.Errorf("setting up genesis pledge: %w", err) - } +// sets up information 
about the vesting schedule post the ignition upgrade +func (sm *StateManager) setupPostIgnitionVesting(ctx context.Context) error { totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) @@ -1074,69 +1103,40 @@ func (sm *StateManager) setupPreIgnitionGenesisActorsTestnet(ctx context.Context totalsByEpoch[sixYears] = big.NewInt(100_000_000) totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) - gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch)) + sm.postIgnitionVesting = make([]msig0.State, 0, len(totalsByEpoch)) for k, v := range totalsByEpoch { ns := msig0.State{ - InitialBalance: v, + // In the pre-ignition logic, we incorrectly set this value in Fil, not attoFil, an off-by-10^18 error + InitialBalance: big.Mul(v, big.NewInt(int64(build.FilecoinPrecision))), UnlockDuration: k, PendingTxns: cid.Undef, + // In the pre-ignition logic, the start epoch was 0. This changes in the fork logic of the Ignition upgrade itself. + StartEpoch: build.UpgradeLiftoffHeight, } - gi.genesisMsigs = append(gi.genesisMsigs, ns) + sm.postIgnitionVesting = append(sm.postIgnitionVesting, ns) } - sm.preIgnitionGenInfos = &gi - return nil } -// sets up information about the actors in the genesis state, post the ignition fork -func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) error { - - gi := genesisInfo{} - - gb, err := sm.cs.GetGenesis() - if err != nil { - return xerrors.Errorf("getting genesis block: %w", err) - } - - gts, err := types.NewTipSet([]*types.BlockHeader{gb}) - if err != nil { - return xerrors.Errorf("getting genesis tipset: %w", err) - } - - st, _, err := sm.TipSetState(ctx, gts) - if err != nil { - return xerrors.Errorf("getting genesis tipset state: %w", err) - } - - cst := cbor.NewCborStore(sm.cs.Blockstore()) - sTree, err := state.LoadStateTree(cst, st) - if err != nil { - return xerrors.Errorf("loading state tree: %w", err) - } - - // Unnecessary, should be removed - gi.genesisMarketFunds, err = 
getFilMarketLocked(ctx, sTree) - if err != nil { - return xerrors.Errorf("setting up genesis market funds: %w", err) - } - - // Unnecessary, should be removed - gi.genesisPledge, err = getFilPowerLocked(ctx, sTree) - if err != nil { - return xerrors.Errorf("setting up genesis pledge: %w", err) - } +// sets up information about the vesting schedule post the calico upgrade +func (sm *StateManager) setupPostCalicoVesting(ctx context.Context) error { totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) + // 0 days + zeroDays := abi.ChainEpoch(0) + totalsByEpoch[zeroDays] = big.NewInt(10_632_000) + // 6 months sixMonths := abi.ChainEpoch(183 * builtin.EpochsInDay) - totalsByEpoch[sixMonths] = big.NewInt(49_929_341) + totalsByEpoch[sixMonths] = big.NewInt(19_015_887) totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700)) // 1 year oneYear := abi.ChainEpoch(365 * builtin.EpochsInDay) totalsByEpoch[oneYear] = big.NewInt(22_421_712) + totalsByEpoch[oneYear] = big.Add(totalsByEpoch[oneYear], big.NewInt(9_400_000)) // 2 years twoYears := abi.ChainEpoch(2 * 365 * builtin.EpochsInDay) @@ -1145,27 +1145,25 @@ func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) erro // 3 years threeYears := abi.ChainEpoch(3 * 365 * builtin.EpochsInDay) totalsByEpoch[threeYears] = big.NewInt(87_637_883) + totalsByEpoch[threeYears] = big.Add(totalsByEpoch[threeYears], big.NewInt(898_958)) // 6 years sixYears := abi.ChainEpoch(6 * 365 * builtin.EpochsInDay) totalsByEpoch[sixYears] = big.NewInt(100_000_000) totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) + totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(9_805_053)) - gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch)) + sm.postCalicoVesting = make([]msig0.State, 0, len(totalsByEpoch)) for k, v := range totalsByEpoch { ns := msig0.State{ - // In the pre-ignition logic, we incorrectly set this value in Fil, not attoFil, an 
off-by-10^18 error InitialBalance: big.Mul(v, big.NewInt(int64(build.FilecoinPrecision))), UnlockDuration: k, PendingTxns: cid.Undef, - // In the pre-ignition logic, the start epoch was 0. This changes in the fork logic of the Ignition upgrade itself. - StartEpoch: build.UpgradeLiftoffHeight, + StartEpoch: build.UpgradeLiftoffHeight, } - gi.genesisMsigs = append(gi.genesisMsigs, ns) + sm.postCalicoVesting = append(sm.postCalicoVesting, ns) } - sm.postIgnitionGenInfos = &gi - return nil } @@ -1175,39 +1173,32 @@ func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) erro func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) { vf := big.Zero() if height <= build.UpgradeIgnitionHeight { - for _, v := range sm.preIgnitionGenInfos.genesisMsigs { + for _, v := range sm.preIgnitionVesting { au := big.Sub(v.InitialBalance, v.AmountLocked(height)) vf = big.Add(vf, au) } - } else { - for _, v := range sm.postIgnitionGenInfos.genesisMsigs { + } else if height <= build.UpgradeCalicoHeight { + for _, v := range sm.postIgnitionVesting { // In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0. // The start epoch changed in the Ignition upgrade. au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch)) vf = big.Add(vf, au) } - } - - // there should not be any such accounts in testnet (and also none in mainnet?) - // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch - for _, v := range sm.preIgnitionGenInfos.genesisActors { - act, err := st.GetActor(v.addr) - if err != nil { - return big.Zero(), xerrors.Errorf("failed to get actor: %w", err) - } - - diff := big.Sub(v.initBal, act.Balance) - if diff.GreaterThan(big.Zero()) { - vf = big.Add(vf, diff) + } else { + for _, v := range sm.postCalicoVesting { + // In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0. 
+ // The start epoch changed in the Ignition upgrade. + au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch)) + vf = big.Add(vf, au) } } - // After UpgradeActorsV2Height these funds are accounted for in GetFilReserveDisbursed - if height <= build.UpgradeActorsV2Height { + // After UpgradeAssemblyHeight these funds are accounted for in GetFilReserveDisbursed + if height <= build.UpgradeAssemblyHeight { // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch - vf = big.Add(vf, sm.preIgnitionGenInfos.genesisPledge) + vf = big.Add(vf, sm.genesisPledge) // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch - vf = big.Add(vf, sm.preIgnitionGenInfos.genesisMarketFunds) + vf = big.Add(vf, sm.genesisMarketFunds) } return vf, nil @@ -1301,16 +1292,22 @@ func (sm *StateManager) GetVMCirculatingSupply(ctx context.Context, height abi.C func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) { sm.genesisMsigLk.Lock() defer sm.genesisMsigLk.Unlock() - if sm.preIgnitionGenInfos == nil { - err := sm.setupPreIgnitionGenesisActorsTestnet(ctx) + if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() || sm.genesisMarketFunds.IsZero() { + err := sm.setupGenesisVestingSchedule(ctx) + if err != nil { + return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition vesting schedule: %w", err) + } + } + if sm.postIgnitionVesting == nil { + err := sm.setupPostIgnitionVesting(ctx) if err != nil { - return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition genesis information: %w", err) + return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition vesting schedule: %w", err) } } - if sm.postIgnitionGenInfos == nil { - err := sm.setupPostIgnitionGenesisActors(ctx) + if sm.postCalicoVesting == nil { + err := sm.setupPostCalicoVesting(ctx) if err != nil { - return 
api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition genesis information: %w", err) + return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-calico vesting schedule: %w", err) } } @@ -1320,7 +1317,7 @@ func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, heig } filReserveDisbursed := big.Zero() - if height > build.UpgradeActorsV2Height { + if height > build.UpgradeAssemblyHeight { filReserveDisbursed, err = GetFilReserveDisbursed(ctx, st) if err != nil { return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filReserveDisbursed: %w", err) @@ -1352,11 +1349,12 @@ func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, heig } return api.CirculatingSupply{ - FilVested: filVested, - FilMined: filMined, - FilBurnt: filBurnt, - FilLocked: filLocked, - FilCirculating: ret, + FilVested: filVested, + FilMined: filMined, + FilBurnt: filBurnt, + FilLocked: filLocked, + FilCirculating: ret, + FilReserveDisbursed: filReserveDisbursed, }, nil } @@ -1382,7 +1380,7 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha unCirc = big.Add(unCirc, actor.Balance) case a == market.Address: - mst, err := market.Load(sm.cs.Store(ctx), actor) + mst, err := market.Load(sm.cs.ActorStore(ctx), actor) if err != nil { return err } @@ -1399,7 +1397,7 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha circ = big.Add(circ, actor.Balance) case builtin.IsStorageMinerActor(actor.Code): - mst, err := miner.Load(sm.cs.Store(ctx), actor) + mst, err := miner.Load(sm.cs.ActorStore(ctx), actor) if err != nil { return err } @@ -1416,7 +1414,7 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha } case builtin.IsMultisigActor(actor.Code): - mst, err := multisig.Load(sm.cs.Store(ctx), actor) + mst, err := multisig.Load(sm.cs.ActorStore(ctx), actor) if err != nil { return err } @@ -1470,7 +1468,7 @@ func (sm *StateManager) 
GetPaychState(ctx context.Context, addr address.Address, return nil, nil, err } - actState, err := paych.Load(sm.cs.Store(ctx), act) + actState, err := paych.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, nil, err } @@ -1488,7 +1486,7 @@ func (sm *StateManager) GetMarketState(ctx context.Context, ts *types.TipSet) (m return nil, err } - actState, err := market.Load(sm.cs.Store(ctx), act) + actState, err := market.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, err } diff --git a/chain/stmgr/tracers.go b/chain/stmgr/tracers.go new file mode 100644 index 00000000000..6bcd7bc1595 --- /dev/null +++ b/chain/stmgr/tracers.go @@ -0,0 +1,56 @@ +package stmgr + +import ( + "context" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/ipfs/go-cid" +) + +type ExecMonitor interface { + // MessageApplied is called after a message has been applied. Returning an error will halt execution of any further messages. 
+ MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error +} + +var _ ExecMonitor = (*InvocationTracer)(nil) + +type InvocationTracer struct { + trace *[]*api.InvocResult +} + +func (i *InvocationTracer) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error { + ir := &api.InvocResult{ + MsgCid: mcid, + Msg: msg, + MsgRct: &ret.MessageReceipt, + ExecutionTrace: ret.ExecutionTrace, + Duration: ret.Duration, + } + if ret.ActorErr != nil { + ir.Error = ret.ActorErr.Error() + } + if ret.GasCosts != nil { + ir.GasCost = MakeMsgGasCost(msg, ret) + } + *i.trace = append(*i.trace, ir) + return nil +} + +var _ ExecMonitor = (*messageFinder)(nil) + +type messageFinder struct { + mcid cid.Cid // the message cid to find + outm *types.Message + outr *vm.ApplyRet +} + +func (m *messageFinder) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error { + if m.mcid == mcid { + m.outm = msg + m.outr = ret + return errHaltExecution // message was found, no need to continue + } + return nil +} diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index 5b144281d53..d2a2c6e604c 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -9,6 +9,8 @@ import ( "runtime" "strings" + exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/network" @@ -25,6 +27,8 @@ import ( exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported" exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported" + exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported" + exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported" "github.com/filecoin-project/lotus/api" 
"github.com/filecoin-project/lotus/chain/actors/builtin" @@ -47,7 +51,7 @@ func GetNetworkName(ctx context.Context, sm *StateManager, st cid.Cid) (dtypes.N if err != nil { return "", err } - ias, err := init_.Load(sm.cs.Store(ctx), act) + ias, err := init_.Load(sm.cs.ActorStore(ctx), act) if err != nil { return "", err } @@ -64,7 +68,7 @@ func GetMinerWorkerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr if err != nil { return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor: %w", err) } - mas, err := miner.Load(sm.cs.Store(ctx), act) + mas, err := miner.Load(sm.cs.ActorStore(ctx), act) if err != nil { return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) } @@ -74,7 +78,7 @@ func GetMinerWorkerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr return address.Undef, xerrors.Errorf("failed to load actor info: %w", err) } - return vm.ResolveToKeyAddr(state, sm.cs.Store(ctx), info.Worker) + return vm.ResolveToKeyAddr(state, sm.cs.ActorStore(ctx), info.Worker) } func GetPower(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (power.Claim, power.Claim, bool, error) { @@ -87,7 +91,7 @@ func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr addres return power.Claim{}, power.Claim{}, false, xerrors.Errorf("(get sset) failed to load power actor state: %w", err) } - pas, err := power.Load(sm.cs.Store(ctx), act) + pas, err := power.Load(sm.cs.ActorStore(ctx), act) if err != nil { return power.Claim{}, power.Claim{}, false, err } @@ -103,8 +107,7 @@ func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr addres var found bool mpow, found, err = pas.MinerPower(maddr) if err != nil || !found { - // TODO: return an error when not found? 
- return power.Claim{}, power.Claim{}, false, err + return power.Claim{}, tpow, false, err } minpow, err = pas.MinerNominalPowerMeetsConsensusMinimum(maddr) @@ -122,7 +125,7 @@ func PreCommitInfo(ctx context.Context, sm *StateManager, maddr address.Address, return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err) } - mas, err := miner.Load(sm.cs.Store(ctx), act) + mas, err := miner.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) } @@ -136,7 +139,7 @@ func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Addres return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err) } - mas, err := miner.Load(sm.cs.Store(ctx), act) + mas, err := miner.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) } @@ -144,46 +147,38 @@ func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Addres return mas.GetSector(sid) } -func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address, snos *bitfield.BitField) ([]*miner.SectorOnChainInfo, error) { - act, err := sm.LoadActor(ctx, maddr, ts) - if err != nil { - return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err) - } - - mas, err := miner.Load(sm.cs.Store(ctx), act) - if err != nil { - return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) - } - - return mas.LoadSectors(snos) -} - -func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) { +func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) { act, err := sm.LoadActorRaw(ctx, maddr, st) if 
err != nil { return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(sm.cs.Store(ctx), act) + mas, err := miner.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } - // TODO (!!): Actor Update: Make this active sectors - - allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors) - if err != nil { - return nil, xerrors.Errorf("get all sectors: %w", err) - } + var provingSectors bitfield.BitField + if nv < network.Version7 { + allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors) + if err != nil { + return nil, xerrors.Errorf("get all sectors: %w", err) + } - faultySectors, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors) - if err != nil { - return nil, xerrors.Errorf("get faulty sectors: %w", err) - } + faultySectors, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors) + if err != nil { + return nil, xerrors.Errorf("get faulty sectors: %w", err) + } - provingSectors, err := bitfield.SubtractBitField(allSectors, faultySectors) // TODO: This is wrong, as it can contain faaults, change to just ActiveSectors in an upgrade - if err != nil { - return nil, xerrors.Errorf("calc proving sectors: %w", err) + provingSectors, err = bitfield.SubtractBitField(allSectors, faultySectors) + if err != nil { + return nil, xerrors.Errorf("calc proving sectors: %w", err) + } + } else { + provingSectors, err = miner.AllPartSectors(mas, miner.Partition.ActiveSectors) + if err != nil { + return nil, xerrors.Errorf("get active sectors sectors: %w", err) + } } numProvSect, err := provingSectors.Count() @@ -201,22 +196,17 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S return nil, xerrors.Errorf("getting miner info: %w", err) } - spt, err := ffiwrapper.SealProofTypeFromSectorSize(info.SectorSize) - if err != nil { - return nil, xerrors.Errorf("getting seal proof type: %w", err) - } - - wpt, 
err := spt.RegisteredWinningPoStProof() + mid, err := address.IDFromAddress(maddr) if err != nil { - return nil, xerrors.Errorf("getting window proof type: %w", err) + return nil, xerrors.Errorf("getting miner ID: %w", err) } - mid, err := address.IDFromAddress(maddr) + proofType, err := miner.WinningPoStProofTypeFromWindowPoStProofType(nv, info.WindowPoStProofType) if err != nil { - return nil, xerrors.Errorf("getting miner ID: %w", err) + return nil, xerrors.Errorf("determining winning post proof type: %w", err) } - ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, wpt, abi.ActorID(mid), rand, numProvSect) + ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, proofType, abi.ActorID(mid), rand, numProvSect) if err != nil { return nil, xerrors.Errorf("generating winning post challenges: %w", err) } @@ -246,7 +236,7 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S out := make([]builtin.SectorInfo, len(sectors)) for i, sinfo := range sectors { out[i] = builtin.SectorInfo{ - SealProof: spt, + SealProof: sinfo.SealProof, SectorNumber: sinfo.SectorNumber, SealedCID: sinfo.SealedCID, } @@ -261,7 +251,7 @@ func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, ma return false, xerrors.Errorf("failed to load power actor: %w", err) } - spas, err := power.Load(sm.cs.Store(ctx), act) + spas, err := power.Load(sm.cs.ActorStore(ctx), act) if err != nil { return false, xerrors.Errorf("failed to load power actor state: %w", err) } @@ -284,7 +274,7 @@ func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts return nil, xerrors.Errorf("failed to load market actor: %w", err) } - state, err := market.Load(sm.cs.Store(ctx), act) + state, err := market.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load market actor state: %w", err) } @@ -299,7 +289,11 @@ func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts if err != nil { return 
nil, err } else if !found { - return nil, xerrors.Errorf("deal %d not found", dealID) + return nil, xerrors.Errorf( + "deal %d not found "+ + "- deal may not have completed sealing before deal proposal "+ + "start epoch, or deal may have been slashed", + dealID) } states, err := state.States() @@ -328,7 +322,7 @@ func ListMinerActors(ctx context.Context, sm *StateManager, ts *types.TipSet) ([ return nil, xerrors.Errorf("failed to load power actor: %w", err) } - powState, err := power.Load(sm.cs.Store(ctx), act) + powState, err := power.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load power actor state: %w", err) } @@ -348,7 +342,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, for i := ts.Height(); i < height; i++ { // handle state forks - base, err = sm.handleStateForks(ctx, base, i, traceFunc(&trace), ts) + base, err = sm.handleStateForks(ctx, base, i, &InvocationTracer{trace: &trace}, ts) if err != nil { return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err) } @@ -361,7 +355,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, StateBase: base, Epoch: height, Rand: r, - Bstore: sm.cs.Blockstore(), + Bstore: sm.cs.StateBlockstore(), Syscalls: sm.cs.VMSys(), CircSupplyCalc: sm.GetVMCirculatingSupply, NtwkVersion: sm.GetNtwkVersion, @@ -482,7 +476,7 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(sm.cs.Store(ctx), act) + mas, err := miner.Load(sm.cs.ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -497,7 +491,9 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule return nil, xerrors.Errorf("failed to get randomness for winning post: %w", err) } - sectors, err := GetSectorsForWinningPoSt(ctx, pv, sm, lbst, maddr, 
prand) + nv := sm.GetNtwkVersion(ctx, ts.Height()) + + sectors, err := GetSectorsForWinningPoSt(ctx, nv, pv, sm, lbst, maddr, prand) if err != nil { return nil, xerrors.Errorf("getting winning post proving set: %w", err) } @@ -553,6 +549,9 @@ func init() { var actors []rt.VMActor actors = append(actors, exported0.BuiltinActors()...) actors = append(actors, exported2.BuiltinActors()...) + actors = append(actors, exported3.BuiltinActors()...) + actors = append(actors, exported4.BuiltinActors()...) + actors = append(actors, exported5.BuiltinActors()...) for _, actor := range actors { exports := actor.Exports() @@ -614,13 +613,21 @@ func GetReturnType(ctx context.Context, sm *StateManager, to address.Address, me return reflect.New(m.Ret.Elem()).Interface().(cbg.CBORUnmarshaler), nil } +func GetParamType(actCode cid.Cid, method abi.MethodNum) (cbg.CBORUnmarshaler, error) { + m, found := MethodsMap[actCode][method] + if !found { + return nil, fmt.Errorf("unknown method %d for actor %s", method, actCode) + } + return reflect.New(m.Params.Elem()).Interface().(cbg.CBORUnmarshaler), nil +} + func minerHasMinPower(ctx context.Context, sm *StateManager, addr address.Address, ts *types.TipSet) (bool, error) { pact, err := sm.LoadActor(ctx, power.Address, ts) if err != nil { return false, xerrors.Errorf("loading power actor state: %w", err) } - ps, err := power.Load(sm.cs.Store(ctx), pact) + ps, err := power.Load(sm.cs.ActorStore(ctx), pact) if err != nil { return false, err } @@ -651,7 +658,7 @@ func MinerEligibleToMine(ctx context.Context, sm *StateManager, addr address.Add return false, xerrors.Errorf("loading power actor state: %w", err) } - pstate, err := power.Load(sm.cs.Store(ctx), pact) + pstate, err := power.Load(sm.cs.ActorStore(ctx), pact) if err != nil { return false, err } @@ -661,7 +668,7 @@ func MinerEligibleToMine(ctx context.Context, sm *StateManager, addr address.Add return false, xerrors.Errorf("loading miner actor state: %w", err) } - mstate, err := 
miner.Load(sm.cs.Store(ctx), mact) + mstate, err := miner.Load(sm.cs.ActorStore(ctx), mact) if err != nil { return false, err } @@ -693,7 +700,7 @@ func MinerEligibleToMine(ctx context.Context, sm *StateManager, addr address.Add } func CheckTotalFIL(ctx context.Context, sm *StateManager, ts *types.TipSet) (abi.TokenAmount, error) { - str, err := state.LoadStateTree(sm.ChainStore().Store(ctx), ts.ParentState()) + str, err := state.LoadStateTree(sm.ChainStore().ActorStore(ctx), ts.ParentState()) if err != nil { return abi.TokenAmount{}, err } diff --git a/chain/store/checkpoint_test.go b/chain/store/checkpoint_test.go new file mode 100644 index 00000000000..81bbab6ea43 --- /dev/null +++ b/chain/store/checkpoint_test.go @@ -0,0 +1,89 @@ +package store_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/chain/gen" +) + +func TestChainCheckpoint(t *testing.T) { + cg, err := gen.NewGenerator() + if err != nil { + t.Fatal(err) + } + + // Let the first miner mine some blocks. + last := cg.CurTipset.TipSet() + for i := 0; i < 4; i++ { + ts, err := cg.NextTipSetFromMiners(last, cg.Miners[:1], 0) + require.NoError(t, err) + + last = ts.TipSet.TipSet() + } + + cs := cg.ChainStore() + + checkpoint := last + checkpointParents, err := cs.GetTipSetFromKey(checkpoint.Parents()) + require.NoError(t, err) + + // Set the head to the block before the checkpoint. + err = cs.SetHead(checkpointParents) + require.NoError(t, err) + + // Verify it worked. + head := cs.GetHeaviestTipSet() + require.True(t, head.Equals(checkpointParents)) + + // Try to set the checkpoint in the future, it should fail. + err = cs.SetCheckpoint(checkpoint) + require.Error(t, err) + + // Then move the head back. + err = cs.SetHead(checkpoint) + require.NoError(t, err) + + // Verify it worked. + head = cs.GetHeaviestTipSet() + require.True(t, head.Equals(checkpoint)) + + // And checkpoint it. 
+ err = cs.SetCheckpoint(checkpoint) + require.NoError(t, err) + + // Let the second miner mine a fork + last = checkpointParents + for i := 0; i < 4; i++ { + ts, err := cg.NextTipSetFromMiners(last, cg.Miners[1:], 0) + require.NoError(t, err) + + last = ts.TipSet.TipSet() + } + + // See if the chain will take the fork, it shouldn't. + err = cs.MaybeTakeHeavierTipSet(context.Background(), last) + require.NoError(t, err) + head = cs.GetHeaviestTipSet() + require.True(t, head.Equals(checkpoint)) + + // Remove the checkpoint. + err = cs.RemoveCheckpoint() + require.NoError(t, err) + + // Now switch to the other fork. + err = cs.MaybeTakeHeavierTipSet(context.Background(), last) + require.NoError(t, err) + head = cs.GetHeaviestTipSet() + require.True(t, head.Equals(last)) + + // Setting a checkpoint on the other fork should fail. + err = cs.SetCheckpoint(checkpoint) + require.Error(t, err) + + // Setting a checkpoint on this fork should succeed. + err = cs.SetCheckpoint(checkpointParents) + require.NoError(t, err) +} diff --git a/chain/store/coalescer.go b/chain/store/coalescer.go new file mode 100644 index 00000000000..a6d066bcaab --- /dev/null +++ b/chain/store/coalescer.go @@ -0,0 +1,213 @@ +package store + +import ( + "context" + "time" + + "github.com/filecoin-project/lotus/chain/types" +) + +// WrapHeadChangeCoalescer wraps a ReorgNotifee with a head change coalescer. +// minDelay is the minimum coalesce delay; when a head change is first received, the coalescer will +// wait for that long to coalesce more head changes. +// maxDelay is the maximum coalesce delay; the coalescer will not delay delivery of a head change +// more than that. +// mergeInterval is the interval that triggers additional coalesce delay; if the last head change was +// within the merge interval when the coalesce timer fires, then the coalesce time is extended +// by min delay and up to max delay total.
+func WrapHeadChangeCoalescer(fn ReorgNotifee, minDelay, maxDelay, mergeInterval time.Duration) ReorgNotifee { + c := NewHeadChangeCoalescer(fn, minDelay, maxDelay, mergeInterval) + return c.HeadChange +} + +// HeadChangeCoalescer is a stateful reorg notifee which coalesces incoming head changes +// with pending head changes to reduce state computations from head change notifications. +type HeadChangeCoalescer struct { + notify ReorgNotifee + + ctx context.Context + cancel func() + + eventq chan headChange + + revert []*types.TipSet + apply []*types.TipSet +} + +type headChange struct { + revert, apply []*types.TipSet +} + +// NewHeadChangeCoalescer creates a HeadChangeCoalescer. +func NewHeadChangeCoalescer(fn ReorgNotifee, minDelay, maxDelay, mergeInterval time.Duration) *HeadChangeCoalescer { + ctx, cancel := context.WithCancel(context.Background()) + c := &HeadChangeCoalescer{ + notify: fn, + ctx: ctx, + cancel: cancel, + eventq: make(chan headChange), + } + + go c.background(minDelay, maxDelay, mergeInterval) + + return c +} + +// HeadChange is the ReorgNotifee callback for the stateful coalescer; it receives an incoming +// head change and schedules dispatch of a coalesced head change in the background. +func (c *HeadChangeCoalescer) HeadChange(revert, apply []*types.TipSet) error { + select { + case c.eventq <- headChange{revert: revert, apply: apply}: + return nil + case <-c.ctx.Done(): + return c.ctx.Err() + } +} + +// Close closes the coalescer and cancels the background dispatch goroutine. +// Any further notification will result in an error. 
+func (c *HeadChangeCoalescer) Close() error { + select { + case <-c.ctx.Done(): + default: + c.cancel() + } + + return nil +} + +// Implementation details + +func (c *HeadChangeCoalescer) background(minDelay, maxDelay, mergeInterval time.Duration) { + var timerC <-chan time.Time + var first, last time.Time + + for { + select { + case evt := <-c.eventq: + c.coalesce(evt.revert, evt.apply) + + now := time.Now() + last = now + if first.IsZero() { + first = now + } + + if timerC == nil { + timerC = time.After(minDelay) + } + + case now := <-timerC: + sinceFirst := now.Sub(first) + sinceLast := now.Sub(last) + + if sinceLast < mergeInterval && sinceFirst < maxDelay { + // coalesce some more + maxWait := maxDelay - sinceFirst + wait := minDelay + if maxWait < wait { + wait = maxWait + } + + timerC = time.After(wait) + } else { + // dispatch + c.dispatch() + + first = time.Time{} + last = time.Time{} + timerC = nil + } + + case <-c.ctx.Done(): + if c.revert != nil || c.apply != nil { + c.dispatch() + } + return + } + } +} + +func (c *HeadChangeCoalescer) coalesce(revert, apply []*types.TipSet) { + // newly reverted tipsets cancel out with pending applys. + // similarly, newly applied tipsets cancel out with pending reverts. 
+ + // pending tipsets + pendRevert := make(map[types.TipSetKey]struct{}, len(c.revert)) + for _, ts := range c.revert { + pendRevert[ts.Key()] = struct{}{} + } + + pendApply := make(map[types.TipSetKey]struct{}, len(c.apply)) + for _, ts := range c.apply { + pendApply[ts.Key()] = struct{}{} + } + + // incoming tipsets + reverting := make(map[types.TipSetKey]struct{}, len(revert)) + for _, ts := range revert { + reverting[ts.Key()] = struct{}{} + } + + applying := make(map[types.TipSetKey]struct{}, len(apply)) + for _, ts := range apply { + applying[ts.Key()] = struct{}{} + } + + // coalesced revert set + // - pending reverts are cancelled by incoming applys + // - incoming reverts are cancelled by pending applys + newRevert := c.merge(c.revert, revert, pendApply, applying) + + // coalesced apply set + // - pending applys are cancelled by incoming reverts + // - incoming applys are cancelled by pending reverts + newApply := c.merge(c.apply, apply, pendRevert, reverting) + + // commit the coalesced sets + c.revert = newRevert + c.apply = newApply +} + +func (c *HeadChangeCoalescer) merge(pend, incoming []*types.TipSet, cancel1, cancel2 map[types.TipSetKey]struct{}) []*types.TipSet { + result := make([]*types.TipSet, 0, len(pend)+len(incoming)) + for _, ts := range pend { + _, cancel := cancel1[ts.Key()] + if cancel { + continue + } + + _, cancel = cancel2[ts.Key()] + if cancel { + continue + } + + result = append(result, ts) + } + + for _, ts := range incoming { + _, cancel := cancel1[ts.Key()] + if cancel { + continue + } + + _, cancel = cancel2[ts.Key()] + if cancel { + continue + } + + result = append(result, ts) + } + + return result +} + +func (c *HeadChangeCoalescer) dispatch() { + err := c.notify(c.revert, c.apply) + if err != nil { + log.Errorf("error dispatching coalesced head change notification: %s", err) + } + + c.revert = nil + c.apply = nil +} diff --git a/chain/store/coalescer_test.go b/chain/store/coalescer_test.go new file mode 100644 index 
00000000000..d462851086e --- /dev/null +++ b/chain/store/coalescer_test.go @@ -0,0 +1,72 @@ +package store + +import ( + "testing" + "time" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/mock" +) + +func TestHeadChangeCoalescer(t *testing.T) { + notif := make(chan headChange, 1) + c := NewHeadChangeCoalescer(func(revert, apply []*types.TipSet) error { + notif <- headChange{apply: apply, revert: revert} + return nil + }, + 100*time.Millisecond, + 200*time.Millisecond, + 10*time.Millisecond, + ) + defer c.Close() //nolint + + b0 := mock.MkBlock(nil, 0, 0) + root := mock.TipSet(b0) + bA := mock.MkBlock(root, 1, 1) + tA := mock.TipSet(bA) + bB := mock.MkBlock(root, 1, 2) + tB := mock.TipSet(bB) + tAB := mock.TipSet(bA, bB) + bC := mock.MkBlock(root, 1, 3) + tABC := mock.TipSet(bA, bB, bC) + bD := mock.MkBlock(root, 1, 4) + tABCD := mock.TipSet(bA, bB, bC, bD) + bE := mock.MkBlock(root, 1, 5) + tABCDE := mock.TipSet(bA, bB, bC, bD, bE) + + c.HeadChange(nil, []*types.TipSet{tA}) //nolint + c.HeadChange(nil, []*types.TipSet{tB}) //nolint + c.HeadChange([]*types.TipSet{tA, tB}, []*types.TipSet{tAB}) //nolint + c.HeadChange([]*types.TipSet{tAB}, []*types.TipSet{tABC}) //nolint + + change := <-notif + + if len(change.revert) != 0 { + t.Fatalf("expected empty revert set but got %d elements", len(change.revert)) + } + if len(change.apply) != 1 { + t.Fatalf("expected single element apply set but got %d elements", len(change.apply)) + } + if change.apply[0] != tABC { + t.Fatalf("expected to apply tABC") + } + + c.HeadChange([]*types.TipSet{tABC}, []*types.TipSet{tABCD}) //nolint + c.HeadChange([]*types.TipSet{tABCD}, []*types.TipSet{tABCDE}) //nolint + + change = <-notif + + if len(change.revert) != 1 { + t.Fatalf("expected single element revert set but got %d elements", len(change.revert)) + } + if change.revert[0] != tABC { + t.Fatalf("expected to revert tABC") + } + if len(change.apply) != 1 { + t.Fatalf("expected single 
element apply set but got %d elements", len(change.apply)) + } + if change.apply[0] != tABCDE { + t.Fatalf("expected to revert tABC") + } + +} diff --git a/chain/store/index.go b/chain/store/index.go index a9da994af9d..324fb7a633a 100644 --- a/chain/store/index.go +++ b/chain/store/index.go @@ -107,6 +107,9 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) { } rheight -= ci.skipLength + if rheight < 0 { + rheight = 0 + } var skipTarget *types.TipSet if parent.Height() < rheight { diff --git a/chain/store/index_test.go b/chain/store/index_test.go index 5283d10dc3a..4470719016c 100644 --- a/chain/store/index_test.go +++ b/chain/store/index_test.go @@ -6,10 +6,10 @@ import ( "testing" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types/mock" - "github.com/filecoin-project/lotus/lib/blockstore" datastore "github.com/ipfs/go-datastore" syncds "github.com/ipfs/go-datastore/sync" "github.com/stretchr/testify/assert" @@ -30,8 +30,9 @@ func TestIndexSeeks(t *testing.T) { ctx := context.TODO() - nbs := blockstore.NewTemporarySync() - cs := store.NewChainStore(nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil, nil) + nbs := blockstore.NewMemorySync() + cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil, nil) + defer cs.Close() //nolint:errcheck _, err = cs.Import(bytes.NewReader(gencar)) if err != nil { diff --git a/chain/store/store.go b/chain/store/store.go index 00a78500ef9..523726863f1 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -5,11 +5,15 @@ import ( "context" "encoding/binary" "encoding/json" + "errors" "io" "os" "strconv" "strings" "sync" + "time" + + "github.com/filecoin-project/lotus/chain/state" "golang.org/x/sync/errgroup" @@ -22,12 +26,12 @@ import ( blockadt 
"github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/lotus/api" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/journal" - bstore "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/metrics" "go.opencensus.io/stats" @@ -44,21 +48,26 @@ import ( "github.com/ipfs/go-datastore/query" cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log/v2" - car "github.com/ipld/go-car" + "github.com/ipld/go-car" carutil "github.com/ipld/go-car/util" cbg "github.com/whyrusleeping/cbor-gen" - pubsub "github.com/whyrusleeping/pubsub" + "github.com/whyrusleeping/pubsub" "golang.org/x/xerrors" ) var log = logging.Logger("chainstore") -var chainHeadKey = dstore.NewKey("head") -var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation") +var ( + chainHeadKey = dstore.NewKey("head") + checkpointKey = dstore.NewKey("/chain/checks") + blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation") +) var DefaultTipSetCacheSize = 8192 var DefaultMsgMetaCacheSize = 2048 +var ErrNotifeeDone = errors.New("notifee is done and should be removed") + func init() { if s := os.Getenv("LOTUS_CHAIN_TIPSET_CACHE"); s != "" { tscs, err := strconv.Atoi(s) @@ -78,7 +87,7 @@ func init() { } // ReorgNotifee represents a callback that gets called upon reorgs. -type ReorgNotifee func(rev, app []*types.TipSet) error +type ReorgNotifee = func(rev, app []*types.TipSet) error // Journal event types. const ( @@ -104,11 +113,15 @@ type HeadChangeEvt struct { // 1. a tipset cache // 2. a block => messages references cache. 
type ChainStore struct { - bs bstore.Blockstore - ds dstore.Batching + chainBlockstore bstore.Blockstore + stateBlockstore bstore.Blockstore + metadataDs dstore.Batching - heaviestLk sync.Mutex + chainLocalBlockstore bstore.Blockstore + + heaviestLk sync.RWMutex heaviest *types.TipSet + checkpoint *types.TipSet bestTips *pubsub.PubSub pubLk sync.Mutex @@ -128,23 +141,34 @@ type ChainStore struct { evtTypes [1]journal.EventType journal journal.Journal + + cancelFn context.CancelFunc + wg sync.WaitGroup } -func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder, j journal.Journal) *ChainStore { +func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder, j journal.Journal) *ChainStore { c, _ := lru.NewARC(DefaultMsgMetaCacheSize) tsc, _ := lru.NewARC(DefaultTipSetCacheSize) if j == nil { j = journal.NilJournal() } + + ctx, cancel := context.WithCancel(context.Background()) + // unwraps the fallback store in case one is configured. + // some methods _need_ to operate on a local blockstore only. 
+ localbs, _ := bstore.UnwrapFallbackStore(chainBs) cs := &ChainStore{ - bs: bs, - ds: ds, - bestTips: pubsub.New(64), - tipsets: make(map[abi.ChainEpoch][]cid.Cid), - mmCache: c, - tsCache: tsc, - vmcalls: vmcalls, - journal: j, + chainBlockstore: chainBs, + stateBlockstore: stateBs, + chainLocalBlockstore: localbs, + metadataDs: ds, + bestTips: pubsub.New(64), + tipsets: make(map[abi.ChainEpoch][]cid.Cid), + mmCache: c, + tsCache: tsc, + vmcalls: vmcalls, + cancelFn: cancel, + journal: j, } cs.evtTypes = [1]journal.EventType{ @@ -179,21 +203,35 @@ func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallB } hcmetric := func(rev, app []*types.TipSet) error { - ctx := context.Background() for _, r := range app { - stats.Record(ctx, metrics.ChainNodeHeight.M(int64(r.Height()))) + stats.Record(context.Background(), metrics.ChainNodeHeight.M(int64(r.Height()))) } return nil } cs.reorgNotifeeCh = make(chan ReorgNotifee) - cs.reorgCh = cs.reorgWorker(context.TODO(), []ReorgNotifee{hcnf, hcmetric}) + cs.reorgCh = cs.reorgWorker(ctx, []ReorgNotifee{hcnf, hcmetric}) return cs } +func (cs *ChainStore) Close() error { + cs.cancelFn() + cs.wg.Wait() + return nil +} + func (cs *ChainStore) Load() error { - head, err := cs.ds.Get(chainHeadKey) + if err := cs.loadHead(); err != nil { + return err + } + if err := cs.loadCheckpoint(); err != nil { + return err + } + return nil +} +func (cs *ChainStore) loadHead() error { + head, err := cs.metadataDs.Get(chainHeadKey) if err == dstore.ErrNotFound { log.Warn("no previous chain state found") return nil @@ -217,13 +255,38 @@ func (cs *ChainStore) Load() error { return nil } +func (cs *ChainStore) loadCheckpoint() error { + tskBytes, err := cs.metadataDs.Get(checkpointKey) + if err == dstore.ErrNotFound { + return nil + } + if err != nil { + return xerrors.Errorf("failed to load checkpoint from datastore: %w", err) + } + + var tsk types.TipSetKey + err = json.Unmarshal(tskBytes, &tsk) + if err != nil { + return 
err + } + + ts, err := cs.LoadTipSet(tsk) + if err != nil { + return xerrors.Errorf("loading tipset: %w", err) + } + + cs.checkpoint = ts + + return nil +} + func (cs *ChainStore) writeHead(ts *types.TipSet) error { data, err := json.Marshal(ts.Cids()) if err != nil { return xerrors.Errorf("failed to marshal tipset: %w", err) } - if err := cs.ds.Put(chainHeadKey, data); err != nil { + if err := cs.metadataDs.Put(chainHeadKey, data); err != nil { return xerrors.Errorf("failed to write chain head to datastore: %w", err) } @@ -259,7 +322,7 @@ func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange log.Warn("chain head sub exit loop") return } - if len(out) > 0 { + if len(out) > 5 { log.Warnf("head change sub is slow, has %d buffered entries", len(out)) } select { @@ -283,13 +346,13 @@ func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) { func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - return cs.ds.Has(key) + return cs.metadataDs.Has(key) } func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - if err := cs.ds.Put(key, []byte{0}); err != nil { + if err := cs.metadataDs.Put(key, []byte{0}); err != nil { return xerrors.Errorf("cache block validation: %w", err) } @@ -299,7 +362,7 @@ func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) e func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - if err := cs.ds.Delete(key); err != nil { + if err := cs.metadataDs.Delete(key); err != nil { return xerrors.Errorf("removing from valid block cache: %w", err) } @@ -316,7 +379,7 @@ func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error { return err } - return cs.ds.Put(dstore.NewKey("0"), b.Cid().Bytes()) + return 
cs.metadataDs.Put(dstore.NewKey("0"), b.Cid().Bytes()) } func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error { @@ -340,9 +403,22 @@ func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error { // MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our // internal state as our new head, if and only if it is heavier than the current -// head. +// head and does not exceed the maximum fork length. func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error { - cs.heaviestLk.Lock() + for { + cs.heaviestLk.Lock() + if len(cs.reorgCh) < reorgChBuf/2 { + break + } + cs.heaviestLk.Unlock() + log.Errorf("reorg channel is heavily backlogged, waiting a bit before trying to take process new tipsets") + select { + case <-time.After(time.Second / 2): + case <-ctx.Done(): + return ctx.Err() + } + } + defer cs.heaviestLk.Unlock() w, err := cs.Weight(ctx, ts) if err != nil { @@ -357,22 +433,126 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS // TODO: don't do this for initial sync. Now that we don't have a // difference between 'bootstrap sync' and 'caught up' sync, we need // some other heuristic. + + exceeds, err := cs.exceedsForkLength(cs.heaviest, ts) + if err != nil { + return err + } + if exceeds { + return nil + } + return cs.takeHeaviestTipSet(ctx, ts) + } else if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) { + log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts) } return nil } +// Check if the two tipsets have a fork length above `ForkLengthThreshold`. +// `synced` is the head of the chain we are currently synced to and `external` +// is the incoming tipset potentially belonging to a forked chain. It assumes +// the external chain has already been validated and available in the ChainStore. +// The "fast forward" case is covered in this logic as a valid fork of length 0. 
+// +// FIXME: We may want to replace some of the logic in `syncFork()` with this. +// `syncFork()` counts the length on both sides of the fork at the moment (we +// need to settle on that) but here we just enforce it on the `synced` side. +func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, error) { + if synced == nil || external == nil { + // FIXME: If `cs.heaviest` is nil we should just bypass the entire + // `MaybeTakeHeavierTipSet` logic (instead of each of the called + // functions having to handle the nil case on their own). + return false, nil + } + + var err error + // `forkLength`: number of tipsets we need to walk back from our `synced` + // chain to the common ancestor with the new `external` head in order to + // adopt the fork. + for forkLength := 0; forkLength < int(build.ForkLengthThreshold); forkLength++ { + // First walk back as many tipsets in the external chain to match the + // `synced` height to compare them. If we go past the `synced` height + // the subsequent match will fail but it will still be useful to get + // closer to the `synced` head parent's height in the next loop. + for external.Height() > synced.Height() { + if external.Height() == 0 { + // We reached the genesis of the external chain without a match; + // this is considered a fork outside the allowed limit (of "infinite" + // length). + return true, nil + } + external, err = cs.LoadTipSet(external.Parents()) + if err != nil { + return false, xerrors.Errorf("failed to load parent tipset in external chain: %w", err) + } + } + + // Now check if we arrived at the common ancestor. + if synced.Equals(external) { + return false, nil + } + + // Now check to see if we've walked back to the checkpoint. + if synced.Equals(cs.checkpoint) { + return true, nil + } + + // If we didn't, go back *one* tipset on the `synced` side (incrementing + // the `forkLength`).
+ if synced.Height() == 0 { + // Same check as the `external` side, if we reach the start (genesis) + // there is no common ancestor. + return true, nil + } + synced, err = cs.LoadTipSet(synced.Parents()) + if err != nil { + return false, xerrors.Errorf("failed to load parent tipset in synced chain: %w", err) + } + } + + // We traversed the fork length allowed without finding a common ancestor. + return true, nil +} + +// ForceHeadSilent forces a chain head tipset without triggering a reorg +// operation. +// +// CAUTION: Use it only for testing, such as to teleport the chain to a +// particular tipset to carry out a benchmark, verification, etc. on a chain +// segment. +func (cs *ChainStore) ForceHeadSilent(_ context.Context, ts *types.TipSet) error { + log.Warnf("(!!!) forcing a new head silently; new head: %s", ts) + + cs.heaviestLk.Lock() + defer cs.heaviestLk.Unlock() + if err := cs.removeCheckpoint(); err != nil { + return err + } + cs.heaviest = ts + + err := cs.writeHead(ts) + if err != nil { + err = xerrors.Errorf("failed to write chain head: %s", err) + } + return err +} + type reorg struct { old *types.TipSet new *types.TipSet } +const reorgChBuf = 32 + func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNotifee) chan<- reorg { - out := make(chan reorg, 32) + out := make(chan reorg, reorgChBuf) notifees := make([]ReorgNotifee, len(initialNotifees)) copy(notifees, initialNotifees) + cs.wg.Add(1) go func() { + defer cs.wg.Done() defer log.Warn("reorgWorker quit") for { @@ -404,11 +584,36 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo apply[i], apply[opp] = apply[opp], apply[i] } - for _, hcf := range notifees { - if err := hcf(revert, apply); err != nil { + var toremove map[int]struct{} + for i, hcf := range notifees { + err := hcf(revert, apply) + + switch err { + case nil: + + case ErrNotifeeDone: + if toremove == nil { + toremove = make(map[int]struct{}) + } + toremove[i] = struct{}{} + + 
default: log.Error("head change func errored (BAD): ", err) } } + + if len(toremove) > 0 { + newNotifees := make([]ReorgNotifee, 0, len(notifees)-len(toremove)) + for i, hcf := range notifees { + _, remove := toremove[i] + if remove { + continue + } + newNotifees = append(newNotifees, hcf) + } + notifees = newNotifees + } + case <-ctx.Done(): return } @@ -452,9 +657,13 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) // FlushValidationCache removes all results of block validation from the // chain metadata store. Usually the first step after a new chain import. func (cs *ChainStore) FlushValidationCache() error { + return FlushValidationCache(cs.metadataDs) +} + +func FlushValidationCache(ds datastore.Batching) error { log.Infof("clearing block validation cache...") - dsWalk, err := cs.ds.Query(query.Query{ + dsWalk, err := ds.Query(query.Query{ // Potential TODO: the validation cache is not a namespace on its own // but is rather constructed as prefixed-key `foo:bar` via .Instance(), which // in turn does not work with the filter, which can match only on `foo/bar` @@ -474,7 +683,7 @@ func (cs *ChainStore) FlushValidationCache() error { return xerrors.Errorf("failed to run key listing query: %w", err) } - batch, err := cs.ds.Batch() + batch, err := ds.Batch() if err != nil { return xerrors.Errorf("failed to open a DS batch: %w", err) } @@ -497,17 +706,84 @@ func (cs *ChainStore) FlushValidationCache() error { } // SetHead sets the chainstores current 'best' head node. -// This should only be called if something is broken and needs fixing +// This should only be called if something is broken and needs fixing. +// +// This function will bypass and remove any checkpoints. 
func (cs *ChainStore) SetHead(ts *types.TipSet) error { cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() + if err := cs.removeCheckpoint(); err != nil { + return err + } return cs.takeHeaviestTipSet(context.TODO(), ts) } +// RemoveCheckpoint removes the current checkpoint. +func (cs *ChainStore) RemoveCheckpoint() error { + cs.heaviestLk.Lock() + defer cs.heaviestLk.Unlock() + return cs.removeCheckpoint() +} + +func (cs *ChainStore) removeCheckpoint() error { + if err := cs.metadataDs.Delete(checkpointKey); err != nil { + return err + } + cs.checkpoint = nil + return nil +} + +// SetCheckpoint will set a checkpoint past which the chainstore will not allow forks. +// +// NOTE: Checkpoints cannot be set beyond ForkLengthThreshold epochs in the past. +func (cs *ChainStore) SetCheckpoint(ts *types.TipSet) error { + tskBytes, err := json.Marshal(ts.Key()) + if err != nil { + return err + } + + cs.heaviestLk.Lock() + defer cs.heaviestLk.Unlock() + + if ts.Height() > cs.heaviest.Height() { + return xerrors.Errorf("cannot set a checkpoint in the future") + } + + // Otherwise, this operation could get _very_ expensive. + if cs.heaviest.Height()-ts.Height() > build.ForkLengthThreshold { + return xerrors.Errorf("cannot set a checkpoint before the fork threshold") + } + + if !ts.Equals(cs.heaviest) { + anc, err := cs.IsAncestorOf(ts, cs.heaviest) + if err != nil { + return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err) + } + + if !anc { + return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err) + } + } + err = cs.metadataDs.Put(checkpointKey, tskBytes) + if err != nil { + return err + } + + cs.checkpoint = ts + return nil +} + +func (cs *ChainStore) GetCheckpoint() *types.TipSet { + cs.heaviestLk.RLock() + chkpt := cs.checkpoint + cs.heaviestLk.RUnlock() + return chkpt +} + // Contains returns whether our BlockStore has all blocks in the supplied TipSet. 
func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { for _, c := range ts.Cids() { - has, err := cs.bs.Has(c) + has, err := cs.chainBlockstore.Has(c) if err != nil { return false, err } @@ -522,12 +798,12 @@ func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { // GetBlock fetches a BlockHeader with the supplied CID. It returns // blockstore.ErrNotFound if the block was not found in the BlockStore. func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) { - sb, err := cs.bs.Get(c) - if err != nil { - return nil, err - } - - return types.DecodeBlock(sb.RawData()) + var blk *types.BlockHeader + err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { + blk, err = types.DecodeBlock(b) + return err + }) + return blk, err } func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { @@ -595,6 +871,14 @@ func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet, return cs.LoadTipSet(l[len(l)-1].Parents()) } +// ReorgOps takes two tipsets (which can be at different heights), and walks +// their corresponding chains backwards one step at a time until we find +// a common ancestor. It then returns the respective chain segments that fork +// from the identified ancestor, in reverse order, where the first element of +// each slice is the supplied tipset, and the last element is the common +// ancestor. +// +// If an error happens along the way, we return the error with nil slices. func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { return ReorgOps(cs.LoadTipSet, a, b) } @@ -630,10 +914,11 @@ func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipS } // GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head). 
-func (cs *ChainStore) GetHeaviestTipSet() *types.TipSet {
-	cs.heaviestLk.Lock()
-	defer cs.heaviestLk.Unlock()
-	return cs.heaviest
+func (cs *ChainStore) GetHeaviestTipSet() (ts *types.TipSet) {
+	cs.heaviestLk.RLock()
+	ts = cs.heaviest
+	cs.heaviestLk.RUnlock()
+	return
 }
 
 func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error {
@@ -646,12 +931,32 @@ func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error {
 			log.Debug("tried to add block to tipset tracker that was already there")
 			return nil
 		}
+		h, err := cs.GetBlock(oc)
+		if err == nil && h != nil {
+			if h.Miner == b.Miner {
+				log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", b.Miner, b.Height, b.Cid(), h.Cid())
+			}
+		}
+	}
+	// This function is called 5 times per epoch on average
+	// It is also called with tipsets that are done with initial validation
+	// so they cannot be from the future.
+	// We are guaranteed not to use tipsets older than 900 epochs (fork limit)
+	// This means that we ideally want to keep only most recent 900 epochs in here
+	// Golang's map iteration starts at a random point in a map.
+	// With 5 tries per epoch, and 900 entries to keep, on average we will have
+	// ~136 garbage entries in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
+	// Seems good enough to me
+
+	for height := range cs.tipsets {
+		if height < b.Height-build.Finality {
+			delete(cs.tipsets, height)
+		}
+		break
 	}
 
 	cs.tipsets[b.Height] = append(tss, b.Cid())
 
-	// TODO: do we want to look for slashable submissions here? might as well...
- return nil } @@ -677,7 +982,7 @@ func (cs *ChainStore) PersistBlockHeaders(b ...*types.BlockHeader) error { end = len(b) } - err = multierr.Append(err, cs.bs.PutMany(sbs[start:end])) + err = multierr.Append(err, cs.chainLocalBlockstore.PutMany(sbs[start:end])) } return err @@ -701,7 +1006,7 @@ func PutMessage(bs bstore.Blockstore, m storable) (cid.Cid, error) { } func (cs *ChainStore) PutMessage(m storable) (cid.Cid, error) { - return PutMessage(cs.bs, m) + return PutMessage(cs.chainBlockstore, m) } func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) { @@ -717,7 +1022,7 @@ func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) return types.NewTipSet(all) } - inclMiners := map[address.Address]bool{b.Miner: true} + inclMiners := map[address.Address]cid.Cid{b.Miner: b.Cid()} for _, bhc := range tsets { if bhc == b.Cid() { continue @@ -728,14 +1033,14 @@ func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err) } - if inclMiners[h.Miner] { - log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache", h.Miner, h.Height) + if cid, found := inclMiners[h.Miner]; found { + log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", h.Miner, h.Height, h.Cid(), cid) continue } if types.CidArrsEqual(h.Parents, b.Parents) { all = append(all, h) - inclMiners[h.Miner] = true + inclMiners[h.Miner] = bhc } } @@ -762,7 +1067,7 @@ func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error } func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) { - data, err := cs.ds.Get(dstore.NewKey("0")) + data, err := cs.metadataDs.Get(dstore.NewKey("0")) if err != nil { return nil, err } @@ -772,12 +1077,7 @@ func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) { return nil, err } - genb, err := cs.bs.Get(c) - if err != nil { - return nil, err - 
} - - return types.DecodeBlock(genb.RawData()) + return cs.GetBlock(c) } func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) { @@ -793,29 +1093,27 @@ func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) { } func (cs *ChainStore) GetMessage(c cid.Cid) (*types.Message, error) { - sb, err := cs.bs.Get(c) - if err != nil { - log.Errorf("get message get failed: %s: %s", c, err) - return nil, err - } - - return types.DecodeMessage(sb.RawData()) + var msg *types.Message + err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { + msg, err = types.DecodeMessage(b) + return err + }) + return msg, err } func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error) { - sb, err := cs.bs.Get(c) - if err != nil { - log.Errorf("get message get failed: %s: %s", c, err) - return nil, err - } - - return types.DecodeSignedMessage(sb.RawData()) + var msg *types.SignedMessage + err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { + msg, err = types.DecodeSignedMessage(b) + return err + }) + return msg, err } func (cs *ChainStore) readAMTCids(root cid.Cid) ([]cid.Cid, error) { ctx := context.TODO() // block headers use adt0, for now. 
- a, err := blockadt.AsArray(cs.Store(ctx), root) + a, err := blockadt.AsArray(cs.ActorStore(ctx), root) if err != nil { return nil, xerrors.Errorf("amt load: %w", err) } @@ -849,17 +1147,33 @@ type BlockMessages struct { func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) { applied := make(map[address.Address]uint64) + cst := cbor.NewCborStore(cs.stateBlockstore) + st, err := state.LoadStateTree(cst, ts.Blocks()[0].ParentStateRoot) + if err != nil { + return nil, xerrors.Errorf("failed to load state tree") + } + selectMsg := func(m *types.Message) (bool, error) { + var sender address.Address + if ts.Height() >= build.UpgradeHyperdriveHeight { + sender, err = st.LookupID(m.From) + if err != nil { + return false, err + } + } else { + sender = m.From + } + // The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise - if _, ok := applied[m.From]; !ok { - applied[m.From] = m.Nonce + if _, ok := applied[sender]; !ok { + applied[sender] = m.Nonce } - if applied[m.From] != m.Nonce { + if applied[sender] != m.Nonce { return false, nil } - applied[m.From]++ + applied[sender]++ return true, nil } @@ -939,7 +1253,7 @@ func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) return mmcids.bls, mmcids.secpk, nil } - cst := cbor.NewCborStore(cs.bs) + cst := cbor.NewCborStore(cs.chainLocalBlockstore) var msgmeta types.MsgMeta if err := cst.Get(context.TODO(), mmc, &msgmeta); err != nil { return nil, nil, xerrors.Errorf("failed to load msgmeta (%s): %w", mmc, err) @@ -963,6 +1277,9 @@ func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) return blscids, secpkcids, nil } +// GetPath returns the sequence of atomic head change operations that +// need to be applied in order to switch the head of the chain from the `from` +// tipset to the `to` tipset. 
 func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) {
 	fts, err := cs.LoadTipSet(from)
 	if err != nil {
@@ -1009,7 +1326,7 @@ func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message,
 func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.MessageReceipt, error) {
 	ctx := context.TODO() // block headers use adt0, for now.
 
-	a, err := blockadt.AsArray(cs.Store(ctx), b.ParentMessageReceipts)
+	a, err := blockadt.AsArray(cs.ActorStore(ctx), b.ParentMessageReceipts)
 	if err != nil {
 		return nil, xerrors.Errorf("amt load: %w", err)
 	}
@@ -1052,16 +1369,26 @@ func (cs *ChainStore) LoadSignedMessagesFromCids(cids []cid.Cid) ([]*types.Signe
 	return msgs, nil
 }
 
-func (cs *ChainStore) Blockstore() bstore.Blockstore {
-	return cs.bs
+// ChainBlockstore returns the chain blockstore. Currently the chain and state
+// stores are both backed by the same physical store, albeit with different
+// caching policies, but in the future they will segregate.
+func (cs *ChainStore) ChainBlockstore() bstore.Blockstore {
+	return cs.chainBlockstore
+}
+
+// StateBlockstore returns the state blockstore. Currently the chain and state
+// stores are both backed by the same physical store, albeit with different
+// caching policies, but in the future they will segregate.
+func (cs *ChainStore) StateBlockstore() bstore.Blockstore { + return cs.stateBlockstore } func ActorStore(ctx context.Context, bs bstore.Blockstore) adt.Store { return adt.WrapStore(ctx, cbor.NewCborStore(bs)) } -func (cs *ChainStore) Store(ctx context.Context) adt.Store { - return ActorStore(ctx, cs.bs) +func (cs *ChainStore) ActorStore(ctx context.Context) adt.Store { + return ActorStore(ctx, cs.stateBlockstore) } func (cs *ChainStore) VMSys() vm.SyscallBuilder { @@ -1111,7 +1438,15 @@ func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.Cha return h.Sum(nil), nil } -func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (cs *ChainStore) GetBeaconRandomnessLookingBack(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cs.GetBeaconRandomness(ctx, blks, pers, round, entropy, true) +} + +func (cs *ChainStore) GetBeaconRandomnessLookingForward(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cs.GetBeaconRandomness(ctx, blks, pers, round, entropy, false) +} + +func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { _, span := trace.StartSpan(ctx, "store.GetBeaconRandomness") defer span.End() span.AddAttributes(trace.Int64Attribute("round", int64(round))) @@ -1130,7 +1465,7 @@ func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, p searchHeight = 0 } - randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true) + randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, lookback) if err != nil { return nil, err } @@ -1145,7 +1480,15 @@ func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks 
[]cid.Cid, p return DrawRandomness(be.Data, pers, round, entropy) } -func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (cs *ChainStore) GetChainRandomnessLookingBack(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cs.GetChainRandomness(ctx, blks, pers, round, entropy, true) +} + +func (cs *ChainStore) GetChainRandomnessLookingForward(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cs.GetChainRandomness(ctx, blks, pers, round, entropy, false) +} + +func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { _, span := trace.StartSpan(ctx, "store.GetChainRandomness") defer span.End() span.AddAttributes(trace.Int64Attribute("round", int64(round))) @@ -1164,7 +1507,7 @@ func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pe searchHeight = 0 } - randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true) + randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, lookback) if err != nil { return nil, err } @@ -1259,8 +1602,9 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo return xerrors.Errorf("failed to write car header: %s", err) } - return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, func(c cid.Cid) error { - blk, err := cs.bs.Get(c) + unionBs := bstore.Union(cs.stateBlockstore, cs.chainBlockstore) + return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, true, func(c cid.Cid) error { + blk, err := unionBs.Get(c) if err != nil { return xerrors.Errorf("writing object to car, bs.Get: %w", err) } @@ -1273,7 +1617,7 @@ func (cs *ChainStore) Export(ctx context.Context, ts 
*types.TipSet, inclRecentRo }) } -func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, cb func(cid.Cid) error) error { +func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs, skipMsgReceipts bool, cb func(cid.Cid) error) error { if ts == nil { ts = cs.GetHeaviestTipSet() } @@ -1293,7 +1637,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe return err } - data, err := cs.bs.Get(blk) + data, err := cs.chainBlockstore.Get(blk) if err != nil { return xerrors.Errorf("getting block: %w", err) } @@ -1313,7 +1657,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe var cids []cid.Cid if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots { if walked.Visit(b.Messages) { - mcids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages}) + mcids, err := recurseLinks(cs.chainBlockstore, walked, b.Messages, []cid.Cid{b.Messages}) if err != nil { return xerrors.Errorf("recursing messages failed: %w", err) } @@ -1334,13 +1678,17 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots { if walked.Visit(b.ParentStateRoot) { - cids, err := recurseLinks(cs.bs, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) + cids, err := recurseLinks(cs.stateBlockstore, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) if err != nil { return xerrors.Errorf("recursing genesis state failed: %w", err) } out = append(out, cids...) 
} + + if !skipMsgReceipts && walked.Visit(b.ParentMessageReceipts) { + out = append(out, b.ParentMessageReceipts) + } } for _, c := range out { @@ -1376,7 +1724,12 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe } func (cs *ChainStore) Import(r io.Reader) (*types.TipSet, error) { - header, err := car.LoadCar(cs.Blockstore(), r) + // TODO: writing only to the state blockstore is incorrect. + // At this time, both the state and chain blockstores are backed by the + // universal store. When we physically segregate the stores, we will need + // to route state objects to the state blockstore, and chain objects to + // the chain blockstore. + header, err := car.LoadCar(cs.StateBlockstore(), r) if err != nil { return nil, xerrors.Errorf("loadcar failed: %w", err) } @@ -1429,12 +1782,20 @@ func NewChainRand(cs *ChainStore, blks []cid.Cid) vm.Rand { } } -func (cr *chainRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return cr.cs.GetChainRandomness(ctx, cr.blks, pers, round, entropy) +func (cr *chainRand) GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cr.cs.GetChainRandomnessLookingBack(ctx, cr.blks, pers, round, entropy) +} + +func (cr *chainRand) GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cr.cs.GetChainRandomnessLookingForward(ctx, cr.blks, pers, round, entropy) +} + +func (cr *chainRand) GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cr.cs.GetBeaconRandomnessLookingBack(ctx, cr.blks, pers, round, entropy) } -func (cr *chainRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) 
([]byte, error) { - return cr.cs.GetBeaconRandomness(ctx, cr.blks, pers, round, entropy) +func (cr *chainRand) GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cr.cs.GetBeaconRandomnessLookingForward(ctx, cr.blks, pers, round, entropy) } func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) { diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 16052710446..62a0430e301 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -3,6 +3,7 @@ package store_test import ( "bytes" "context" + "io" "testing" datastore "github.com/ipfs/go-datastore" @@ -10,12 +11,12 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/node/repo" ) @@ -51,24 +52,31 @@ func BenchmarkGetRandomness(b *testing.B) { b.Fatal(err) } - bds, err := lr.Datastore("/chain") + bs, err := lr.Blockstore(context.TODO(), repo.UniversalBlockstore) if err != nil { b.Fatal(err) } - mds, err := lr.Datastore("/metadata") + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + b.Logf("WARN: failed to close blockstore: %s", err) + } + } + }() + + mds, err := lr.Datastore(context.Background(), "/metadata") if err != nil { b.Fatal(err) } - bs := blockstore.NewBlockstore(bds) - - cs := store.NewChainStore(bs, mds, nil, nil) + cs := store.NewChainStore(bs, bs, mds, nil, nil) + defer cs.Close() //nolint:errcheck b.ResetTimer() for i := 0; i < b.N; i++ { - _, err := 
cs.GetChainRandomness(context.TODO(), last.Cids(), crypto.DomainSeparationTag_SealRandomness, 500, nil) + _, err := cs.GetChainRandomnessLookingBack(context.TODO(), last.Cids(), crypto.DomainSeparationTag_SealRandomness, 500, nil) if err != nil { b.Fatal(err) } @@ -96,8 +104,9 @@ func TestChainExportImport(t *testing.T) { t.Fatal(err) } - nbs := blockstore.NewTemporary() - cs := store.NewChainStore(nbs, datastore.NewMapDatastore(), nil, nil) + nbs := blockstore.NewMemory() + cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil, nil) + defer cs.Close() //nolint:errcheck root, err := cs.Import(buf) if err != nil { @@ -130,8 +139,10 @@ func TestChainExportImportFull(t *testing.T) { t.Fatal(err) } - nbs := blockstore.NewTemporary() - cs := store.NewChainStore(nbs, datastore.NewMapDatastore(), nil, nil) + nbs := blockstore.NewMemory() + cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil, nil) + defer cs.Close() //nolint:errcheck + root, err := cs.Import(buf) if err != nil { t.Fatal(err) diff --git a/chain/store/weight.go b/chain/store/weight.go index 9100df31547..42546d5e3d9 100644 --- a/chain/store/weight.go +++ b/chain/store/weight.go @@ -28,7 +28,7 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn tpow := big2.Zero() { - cst := cbor.NewCborStore(cs.Blockstore()) + cst := cbor.NewCborStore(cs.StateBlockstore()) state, err := state.LoadStateTree(cst, ts.ParentState()) if err != nil { return types.NewInt(0), xerrors.Errorf("load state tree: %w", err) @@ -39,7 +39,7 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn return types.NewInt(0), xerrors.Errorf("get power actor: %w", err) } - powState, err := power.Load(cs.Store(ctx), act) + powState, err := power.Load(cs.ActorStore(ctx), act) if err != nil { return types.NewInt(0), xerrors.Errorf("failed to load power actor state: %w", err) } diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go index 
1701866eb6f..115c3326193 100644 --- a/chain/sub/incoming.go +++ b/chain/sub/incoming.go @@ -6,9 +6,18 @@ import ( "fmt" "time" - "golang.org/x/xerrors" - address "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain" + "github.com/filecoin-project/lotus/chain/messagepool" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/sigs" + "github.com/filecoin-project/lotus/metrics" + "github.com/filecoin-project/lotus/node/impl/client" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" lru "github.com/hashicorp/golang-lru" blocks "github.com/ipfs/go-block-format" bserv "github.com/ipfs/go-blockservice" @@ -21,19 +30,7 @@ import ( cbg "github.com/whyrusleeping/cbor-gen" "go.opencensus.io/stats" "go.opencensus.io/tag" - - blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain" - "github.com/filecoin-project/lotus/chain/messagepool" - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/blockstore" - "github.com/filecoin-project/lotus/lib/sigs" - "github.com/filecoin-project/lotus/metrics" - "github.com/filecoin-project/lotus/node/impl/client" + "golang.org/x/xerrors" ) var log = logging.Logger("sub") @@ -84,20 +81,27 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha log.Debug("about to fetch messages for block from pubsub") bmsgs, err := FetchMessagesByCids(ctx, ses, blk.BlsMessages) if err != nil { - log.Errorf("failed to fetch all bls messages for block received over pubusb: %s; source: %s", err, src) + log.Errorf("failed to fetch 
all bls messages for block received over pubsub: %s; source: %s", err, src) return } smsgs, err := FetchSignedMessagesByCids(ctx, ses, blk.SecpkMessages) if err != nil { - log.Errorf("failed to fetch all secpk messages for block received over pubusb: %s; source: %s", err, src) + log.Errorf("failed to fetch all secpk messages for block received over pubsub: %s; source: %s", err, src) return } took := build.Clock.Since(start) - log.Infow("new block over pubsub", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took) + log.Debugw("new block over pubsub", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took) + if took > 3*time.Second { + log.Warnw("Slow msg fetch", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took) + } if delay := build.Clock.Now().Unix() - int64(blk.Header.Timestamp); delay > 5 { - log.Warnf("Received block with large delay %d from miner %s", delay, blk.Header.Miner) + _ = stats.RecordWithTags(ctx, + []tag.Mutator{tag.Insert(metrics.MinerID, blk.Header.Miner.String())}, + metrics.BlockDelay.M(delay), + ) + log.Warnw("received block with large delay from miner", "block", blk.Cid(), "delay", delay, "miner", blk.Header.Miner) } if s.InformNewBlock(msg.ReceivedFrom, &types.FullBlock{ @@ -337,11 +341,16 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub func (bv *BlockValidator) validateLocalBlock(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult { stats.Record(ctx, metrics.BlockPublished.M(1)) + if size := msg.Size(); size > 1<<20-1<<15 { + log.Errorf("ignoring oversize block (%dB)", size) + recordFailure(ctx, metrics.BlockValidationFailure, "oversize_block") + return pubsub.ValidationIgnore + } + blk, what, err := bv.decodeAndCheckBlock(msg) if err != nil { log.Errorf("got invalid local block: %s", err) - ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, what)) - stats.Record(ctx, metrics.BlockValidationFailure.M(1)) + recordFailure(ctx, 
metrics.BlockValidationFailure, what) return pubsub.ValidationIgnore } @@ -383,7 +392,7 @@ func (bv *BlockValidator) isChainNearSynced() bool { func (bv *BlockValidator) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error { // TODO there has to be a simpler way to do this without the blockstore dance // block headers use adt0 - store := blockadt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewTemporary())) + store := blockadt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewMemory())) bmArr := blockadt.MakeEmptyArray(store) smArr := blockadt.MakeEmptyArray(store) @@ -498,6 +507,12 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs return mv.validateLocalMessage(ctx, msg) } + start := time.Now() + defer func() { + ms := time.Now().Sub(start).Microseconds() + stats.Record(ctx, metrics.MessageValidationDuration.M(float64(ms)/1000)) + }() + stats.Record(ctx, metrics.MessageReceived.M(1)) m, err := types.DecodeSignedMessage(msg.Message.GetData()) if err != nil { @@ -507,7 +522,7 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs return pubsub.ValidationReject } - if err := mv.mpool.Add(m); err != nil { + if err := mv.mpool.Add(ctx, m); err != nil { log.Debugf("failed to add message from network to message pool (From: %s, To: %s, Nonce: %d, Value: %s): %s", m.Message.From, m.Message.To, m.Message.Nonce, types.FIL(m.Message.Value), err) ctx, _ = tag.New( ctx, @@ -529,6 +544,12 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs return pubsub.ValidationReject } } + + ctx, _ = tag.New( + ctx, + tag.Upsert(metrics.MsgValid, "true"), + ) + stats.Record(ctx, metrics.MessageValidationSuccess.M(1)) return pubsub.ValidationAccept } @@ -538,6 +559,13 @@ func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsu ctx, tag.Upsert(metrics.Local, "true"), ) + + start := time.Now() + defer func() { + ms := time.Now().Sub(start).Microseconds() + 
stats.Record(ctx, metrics.MessageValidationDuration.M(float64(ms)/1000)) + }() + // do some lightweight validation stats.Record(ctx, metrics.MessagePublished.M(1)) @@ -548,7 +576,7 @@ func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsu return pubsub.ValidationIgnore } - if m.Size() > 32*1024 { + if m.Size() > messagepool.MaxMessageSize { log.Warnf("local message is too large! (%dB)", m.Size()) recordFailure(ctx, metrics.MessageValidationFailure, "oversize") return pubsub.ValidationIgnore @@ -572,6 +600,11 @@ func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsu return pubsub.ValidationIgnore } + ctx, _ = tag.New( + ctx, + tag.Upsert(metrics.MsgValid, "true"), + ) + stats.Record(ctx, metrics.MessageValidationSuccess.M(1)) return pubsub.ValidationAccept } diff --git a/chain/sync.go b/chain/sync.go index 1410dd2a707..167856927f3 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -32,8 +32,10 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - blst "github.com/supranational/blst/bindings/go" + + ffi "github.com/filecoin-project/filecoin-ffi" // named msgarray here to make it clear that these are the types used by // messages, regardless of specs-actors version. 
@@ -42,6 +44,7 @@ import ( proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/filecoin-project/lotus/api" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/beacon" @@ -52,9 +55,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - bstore "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/sigs" - "github.com/filecoin-project/lotus/lib/sigs/bls" "github.com/filecoin-project/lotus/metrics" ) @@ -130,10 +131,6 @@ type Syncer struct { tickerCtxCancel context.CancelFunc - checkptLk sync.Mutex - - checkpt types.TipSetKey - ds dtypes.MetadataDS } @@ -151,14 +148,8 @@ func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.C return nil, err } - cp, err := loadCheckpoint(ds) - if err != nil { - return nil, xerrors.Errorf("error loading mpool config: %w", err) - } - s := &Syncer{ ds: ds, - checkpt: cp, beacon: beacon, bad: NewBadBlockCache(), Genesis: gent, @@ -249,18 +240,6 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool { syncer.incoming.Pub(fts.TipSet().Blocks(), LocalIncoming) - if from == syncer.self { - // TODO: this is kindof a hack... 
- log.Debug("got block from ourselves") - - if err := syncer.Sync(ctx, fts.TipSet()); err != nil { - log.Errorf("failed to sync our own block %s: %+v", fts.TipSet().Cids(), err) - return false - } - - return true - } - // TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of // the blockstore if err := syncer.store.PersistBlockHeaders(fts.TipSet().Blocks()...); err != nil { @@ -278,7 +257,7 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool { for _, blk := range fts.TipSet().Blocks() { miners = append(miners, blk.Miner.String()) } - log.Infow("incoming tipset does not appear to be better than our best chain, ignoring for now", "miners", miners, "bestPweight", bestPweight, "bestTS", hts.Cids(), "incomingWeight", targetWeight, "incomingTS", fts.TipSet().Cids()) + log.Debugw("incoming tipset does not appear to be better than our best chain, ignoring for now", "miners", miners, "bestPweight", bestPweight, "bestTS", hts.Cids(), "incomingWeight", targetWeight, "incomingTS", fts.TipSet().Cids()) return false } @@ -332,7 +311,7 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { // We use a temporary bstore here to avoid writing intermediate pieces // into the blockstore. - blockstore := bstore.NewTemporary() + blockstore := bstore.NewMemory() cst := cbor.NewCborStore(blockstore) var bcids, scids []cid.Cid @@ -365,7 +344,7 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { } // Finally, flush. 
- return vm.Copy(context.TODO(), blockstore, syncer.store.Blockstore(), smroot) + return vm.Copy(context.TODO(), blockstore, syncer.store.ChainBlockstore(), smroot) } func (syncer *Syncer) LocalPeer() peer.ID { @@ -563,15 +542,16 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error { ) } - if syncer.store.GetHeaviestTipSet().ParentWeight().GreaterThan(maybeHead.ParentWeight()) { + hts := syncer.store.GetHeaviestTipSet() + + if hts.ParentWeight().GreaterThan(maybeHead.ParentWeight()) { return nil } - - if syncer.Genesis.Equals(maybeHead) || syncer.store.GetHeaviestTipSet().Equals(maybeHead) { + if syncer.Genesis.Equals(maybeHead) || hts.Equals(maybeHead) { return nil } - if err := syncer.collectChain(ctx, maybeHead); err != nil { + if err := syncer.collectChain(ctx, maybeHead, hts, false); err != nil { span.AddAttributes(trace.StringAttribute("col_error", err.Error())) span.SetStatus(trace.Status{ Code: 13, @@ -650,7 +630,7 @@ func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, b return xerrors.Errorf("failed to load power actor: %w", err) } - powState, err := power.Load(syncer.store.Store(ctx), act) + powState, err := power.Load(syncer.store.ActorStore(ctx), act) if err != nil { return xerrors.Errorf("failed to load power actor state: %w", err) } @@ -686,6 +666,10 @@ func blockSanityChecks(h *types.BlockHeader) error { return xerrors.Errorf("block had nil bls aggregate signature") } + if h.Miner.Protocol() != address.ID { + return xerrors.Errorf("block had non-ID miner address") + } + return nil } @@ -730,6 +714,8 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err) } + winPoStNv := syncer.sm.GetNtwkVersion(ctx, baseTs.Height()) + lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height) if err != nil { return xerrors.Errorf("failed to get lookback tipset for block: %w", err) @@ 
-755,6 +741,10 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use } msgsCheck := async.Err(func() error { + if b.Cid() == build.WhitelistedBlock { + return nil + } + if err := syncer.checkBlockMessages(ctx, b, baseTs); err != nil { return xerrors.Errorf("block had invalid messages: %w", err) } @@ -923,7 +913,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use }) wproofCheck := async.Err(func() error { - if err := syncer.VerifyWinningPoStProof(ctx, h, *prevBeacon, lbst, waddr); err != nil { + if err := syncer.VerifyWinningPoStProof(ctx, winPoStNv, h, *prevBeacon, lbst, waddr); err != nil { return xerrors.Errorf("invalid election post: %w", err) } return nil @@ -975,7 +965,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use return nil } -func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error { +func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, nv network.Version, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error { if build.InsecurePoStValidation { if len(h.WinPoStProof) == 0 { return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given") @@ -1007,7 +997,7 @@ func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.Block return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err) } - sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, syncer.verifier, syncer.sm, lbst, h.Miner, rand) + sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, syncer.verifier, syncer.sm, lbst, h.Miner, rand) if err != nil { return xerrors.Errorf("getting winning post sector set: %w", err) } @@ -1059,7 +1049,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock return err } - st, err := state.LoadStateTree(syncer.store.Store(ctx), 
stateroot) + st, err := state.LoadStateTree(syncer.store.ActorStore(ctx), stateroot) if err != nil { return xerrors.Errorf("failed to load base state tree: %w", err) } @@ -1071,7 +1061,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock // Phase 1: syntactic validation, as defined in the spec minGas := pl.OnChainMessage(msg.ChainLength()) - if err := m.ValidForBlockInclusion(minGas.Total()); err != nil { + if err := m.ValidForBlockInclusion(minGas.Total(), syncer.sm.GetNtwkVersion(ctx, b.Header.Height)); err != nil { return err } @@ -1084,9 +1074,19 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock // Phase 2: (Partial) semantic validation: // the sender exists and is an account actor, and the nonces make sense - if _, ok := nonces[m.From]; !ok { + var sender address.Address + if syncer.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version13 { + sender, err = st.LookupID(m.From) + if err != nil { + return err + } + } else { + sender = m.From + } + + if _, ok := nonces[sender]; !ok { // `GetActor` does not validate that this is an account actor. - act, err := st.GetActor(m.From) + act, err := st.GetActor(sender) if err != nil { return xerrors.Errorf("failed to get actor: %w", err) } @@ -1094,19 +1094,19 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock if !builtin.IsAccountActor(act.Code) { return xerrors.New("Sender must be an account actor") } - nonces[m.From] = act.Nonce + nonces[sender] = act.Nonce } - if nonces[m.From] != m.Nonce { - return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[m.From], m.Nonce) + if nonces[sender] != m.Nonce { + return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce) } - nonces[m.From]++ + nonces[sender]++ return nil } // Validate message arrays in a temporary blockstore. 
- tmpbs := bstore.NewTemporary() + tmpbs := bstore.NewMemory() tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs)) bmArr := blockadt.MakeEmptyArray(tmpstore) @@ -1176,7 +1176,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock } // Finally, flush. - return vm.Copy(ctx, tmpbs, syncer.store.Blockstore(), mrcid) + return vm.Copy(ctx, tmpbs, syncer.store.ChainBlockstore(), mrcid) } func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks [][]byte) error { @@ -1186,17 +1186,21 @@ func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signat trace.Int64Attribute("msgCount", int64(len(msgs))), ) - msgsS := make([]blst.Message, len(msgs)) + msgsS := make([]ffi.Message, len(msgs)) + pubksS := make([]ffi.PublicKey, len(msgs)) for i := 0; i < len(msgs); i++ { msgsS[i] = msgs[i].Bytes() + copy(pubksS[i][:], pubks[i][:ffi.PublicKeyBytes]) } + sigS := new(ffi.Signature) + copy(sigS[:], sig.Data[:ffi.SignatureBytes]) + if len(msgs) == 0 { return nil } - valid := new(bls.Signature).AggregateVerifyCompressed(sig.Data, pubks, - msgsS, []byte(bls.DST)) + valid := ffi.HashVerify(sigS, msgsS, pubksS) if !valid { return xerrors.New("bls aggregate signature failed to verify") } @@ -1243,7 +1247,7 @@ func extractSyncState(ctx context.Context) *SyncerState { // // All throughout the process, we keep checking if the received blocks are in // the deny list, and short-circuit the process if so. 
-func (syncer *Syncer) collectHeaders(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) { +func (syncer *Syncer) collectHeaders(ctx context.Context, incoming *types.TipSet, known *types.TipSet, ignoreCheckpoint bool) ([]*types.TipSet, error) { ctx, span := trace.StartSpan(ctx, "collectHeaders") defer span.End() ss := extractSyncState(ctx) @@ -1327,7 +1331,7 @@ loop: continue } if !xerrors.Is(err, bstore.ErrNotFound) { - log.Warn("loading local tipset: %s", err) + log.Warnf("loading local tipset: %s", err) } // NB: GetBlocks validates that the blocks are in-fact the ones we @@ -1412,7 +1416,7 @@ loop: // We have now ascertained that this is *not* a 'fast forward' log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", incoming.Cids(), incoming.Height(), known.Cids(), known.Height()) - fork, err := syncer.syncFork(ctx, base, known) + fork, err := syncer.syncFork(ctx, base, known, ignoreCheckpoint) if err != nil { if xerrors.Is(err, ErrForkTooLong) || xerrors.Is(err, ErrForkCheckpoint) { // TODO: we're marking this block bad in the same way that we mark invalid blocks bad. Maybe distinguish? @@ -1438,14 +1442,17 @@ var ErrForkCheckpoint = fmt.Errorf("fork would require us to diverge from checkp // If the fork is too long (build.ForkLengthThreshold), or would cause us to diverge from the checkpoint (ErrForkCheckpoint), // we add the entire subchain to the denylist. Else, we find the common ancestor, and add the missing chain // fragment until the fork point to the returned []TipSet. 
-func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) { +func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, known *types.TipSet, ignoreCheckpoint bool) ([]*types.TipSet, error) { - chkpt := syncer.GetCheckpoint() - if known.Key() == chkpt { - return nil, ErrForkCheckpoint + var chkpt *types.TipSet + if !ignoreCheckpoint { + chkpt = syncer.store.GetCheckpoint() + if known.Equals(chkpt) { + return nil, ErrForkCheckpoint + } } - // TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2? + // TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2? Yes. // Would it not be better to ask in smaller chunks, given that an ~ForkLengthThreshold is very rare? tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold)) if err != nil { @@ -1456,6 +1463,10 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know if err != nil { return nil, xerrors.Errorf("failed to load next local tipset: %w", err) } + // Track the fork length on our side of the synced chain to enforce + // `ForkLengthThreshold`. Initialized to 1 because we already walked back + // one tipset from `known` (our synced head). + forkLengthInHead := 1 for cur := 0; cur < len(tips); { if nts.Height() == 0 { @@ -1472,8 +1483,15 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know if nts.Height() < tips[cur].Height() { cur++ } else { + // Walk back one block in our synced chain to try to meet the fork's + // height. 
+ forkLengthInHead++ + if forkLengthInHead > int(build.ForkLengthThreshold) { + return nil, ErrForkTooLong + } + // We will be forking away from nts, check that it isn't checkpointed - if nts.Key() == chkpt { + if nts.Equals(chkpt) { return nil, ErrForkCheckpoint } @@ -1542,7 +1560,7 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS for bsi := 0; bsi < len(bstout); bsi++ { // temp storage so we don't persist data we dont want to - bs := bstore.NewTemporary() + bs := bstore.NewMemory() blks := cbor.NewCborStore(bs) this := headers[i-bsi] @@ -1563,7 +1581,7 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS return err } - if err := copyBlockstore(ctx, bs, syncer.store.Blockstore()); err != nil { + if err := copyBlockstore(ctx, bs, syncer.store.ChainBlockstore()); err != nil { return xerrors.Errorf("message processing failed: %w", err) } } @@ -1684,14 +1702,14 @@ func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.Co // // 3. StageMessages: having acquired the headers and found a common tipset, // we then move forward, requesting the full blocks, including the messages. 
-func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error { +func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *types.TipSet, ignoreCheckpoint bool) error { ctx, span := trace.StartSpan(ctx, "collectChain") defer span.End() ss := extractSyncState(ctx) - ss.Init(syncer.store.GetHeaviestTipSet(), ts) + ss.Init(hts, ts) - headers, err := syncer.collectHeaders(ctx, ts, syncer.store.GetHeaviestTipSet()) + headers, err := syncer.collectHeaders(ctx, ts, hts, ignoreCheckpoint) if err != nil { ss.Error(err) return err @@ -1780,11 +1798,10 @@ func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) } func (syncer *Syncer) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool { - g, err := syncer.store.GetGenesis() - if err != nil { + if syncer.Genesis == nil { return false } now := uint64(build.Clock.Now().Unix()) - return epoch > (abi.ChainEpoch((now-g.Timestamp)/build.BlockDelaySecs) + MaxHeightDrift) + return epoch > (abi.ChainEpoch((now-syncer.Genesis.MinTimestamp())/build.BlockDelaySecs) + MaxHeightDrift) } diff --git a/chain/sync_manager.go b/chain/sync_manager.go index c25068f60c2..685e05df6ca 100644 --- a/chain/sync_manager.go +++ b/chain/sync_manager.go @@ -4,30 +4,43 @@ import ( "context" "os" "sort" + "strconv" "strings" "sync" + "time" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" + peer "github.com/libp2p/go-libp2p-core/peer" ) -const BootstrapPeerThreshold = 2 +var ( + BootstrapPeerThreshold = build.BootstrapPeerThreshold + + RecentSyncBufferSize = 10 + MaxSyncWorkers = 5 + SyncWorkerHistory = 3 -var coalesceForksParents = false + InitialSyncTimeThreshold = 15 * time.Minute + + coalesceTipsets = false +) func init() { - if os.Getenv("LOTUS_SYNC_REL_PARENT") == "yes" { - coalesceForksParents = true + coalesceTipsets = os.Getenv("LOTUS_SYNC_FORMTS_PEND") == "yes" + + if bootstrapPeerThreshold := 
os.Getenv("LOTUS_SYNC_BOOTSTRAP_PEERS"); bootstrapPeerThreshold != "" { + threshold, err := strconv.Atoi(bootstrapPeerThreshold) + if err != nil { + log.Errorf("failed to parse 'LOTUS_SYNC_BOOTSTRAP_PEERS' env var: %s", err) + } else { + BootstrapPeerThreshold = threshold + } } } -const ( - BSStateInit = 0 - BSStateSelected = 1 - BSStateScheduled = 2 - BSStateComplete = 3 -) - type SyncFunc func(context.Context, *types.TipSet) error // SyncManager manages the chain synchronization process, both at bootstrap time @@ -52,108 +65,468 @@ type SyncManager interface { } type syncManager struct { - lk sync.Mutex - peerHeads map[peer.ID]*types.TipSet + ctx context.Context + cancel func() - bssLk sync.Mutex - bootstrapState int + workq chan peerHead + statusq chan workerStatus - bspThresh int + nextWorker uint64 + pend syncBucketSet + deferred syncBucketSet + heads map[peer.ID]*types.TipSet + recent *syncBuffer - incomingTipSets chan *types.TipSet - syncTargets chan *types.TipSet - syncResults chan *syncResult + initialSyncDone bool - syncStates []*SyncerState - - // Normally this handler is set to `(*Syncer).Sync()`. 
- doSync func(context.Context, *types.TipSet) error + mx sync.Mutex + state map[uint64]*workerState - stop chan struct{} + history []*workerState + historyI int - // Sync Scheduler fields - activeSyncs map[types.TipSetKey]*types.TipSet - syncQueue syncBucketSet - activeSyncTips syncBucketSet - nextSyncTarget *syncTargetBucket - workerChan chan *types.TipSet + doSync func(context.Context, *types.TipSet) error } var _ SyncManager = (*syncManager)(nil) -type syncResult struct { - ts *types.TipSet - success bool +type peerHead struct { + p peer.ID + ts *types.TipSet } -const syncWorkerCount = 3 +type workerState struct { + id uint64 + ts *types.TipSet + ss *SyncerState + dt time.Duration +} +type workerStatus struct { + id uint64 + err error +} + +// sync manager interface func NewSyncManager(sync SyncFunc) SyncManager { - sm := &syncManager{ - bspThresh: 1, - peerHeads: make(map[peer.ID]*types.TipSet), - syncTargets: make(chan *types.TipSet), - syncResults: make(chan *syncResult), - syncStates: make([]*SyncerState, syncWorkerCount), - incomingTipSets: make(chan *types.TipSet), - activeSyncs: make(map[types.TipSetKey]*types.TipSet), - doSync: sync, - stop: make(chan struct{}), - } - for i := range sm.syncStates { - sm.syncStates[i] = new(SyncerState) + ctx, cancel := context.WithCancel(context.Background()) + return &syncManager{ + ctx: ctx, + cancel: cancel, + + workq: make(chan peerHead), + statusq: make(chan workerStatus), + + heads: make(map[peer.ID]*types.TipSet), + state: make(map[uint64]*workerState), + recent: newSyncBuffer(RecentSyncBufferSize), + history: make([]*workerState, SyncWorkerHistory), + + doSync: sync, } - return sm } func (sm *syncManager) Start() { - go sm.syncScheduler() - for i := 0; i < syncWorkerCount; i++ { - go sm.syncWorker(i) - } + go sm.scheduler() } func (sm *syncManager) Stop() { - close(sm.stop) + select { + case <-sm.ctx.Done(): + default: + sm.cancel() + } } func (sm *syncManager) SetPeerHead(ctx context.Context, p peer.ID, ts 
*types.TipSet) { - sm.lk.Lock() - defer sm.lk.Unlock() - sm.peerHeads[p] = ts - - if sm.getBootstrapState() == BSStateInit { - spc := sm.syncedPeerCount() - if spc >= sm.bspThresh { - // Its go time! - target, err := sm.selectSyncTarget() - if err != nil { - log.Error("failed to select sync target: ", err) - return + select { + case sm.workq <- peerHead{p: p, ts: ts}: + case <-sm.ctx.Done(): + case <-ctx.Done(): + } +} + +func (sm *syncManager) State() []SyncerStateSnapshot { + sm.mx.Lock() + workerStates := make([]*workerState, 0, len(sm.state)+len(sm.history)) + for _, ws := range sm.state { + workerStates = append(workerStates, ws) + } + for _, ws := range sm.history { + if ws != nil { + workerStates = append(workerStates, ws) + } + } + sm.mx.Unlock() + + sort.Slice(workerStates, func(i, j int) bool { + return workerStates[i].id < workerStates[j].id + }) + + result := make([]SyncerStateSnapshot, 0, len(workerStates)) + for _, ws := range workerStates { + result = append(result, ws.ss.Snapshot()) + } + + return result +} + +// sync manager internals +func (sm *syncManager) scheduler() { + ticker := time.NewTicker(time.Minute) + tickerC := ticker.C + for { + select { + case head := <-sm.workq: + sm.handlePeerHead(head) + case status := <-sm.statusq: + sm.handleWorkerStatus(status) + case <-tickerC: + if sm.initialSyncDone { + ticker.Stop() + tickerC = nil + sm.handleInitialSyncDone() } - sm.setBootstrapState(BSStateSelected) + case <-sm.ctx.Done(): + return + } + } +} + +func (sm *syncManager) handlePeerHead(head peerHead) { + log.Debugf("new peer head: %s %s", head.p, head.ts) + + // have we started syncing yet? + if sm.nextWorker == 0 { + // track the peer head until we start syncing + sm.heads[head.p] = head.ts + + // not yet; do we have enough peers? 
+ if len(sm.heads) < BootstrapPeerThreshold { + log.Debugw("not tracking enough peers to start sync worker", "have", len(sm.heads), "need", BootstrapPeerThreshold) + // not enough peers; track it and wait + return + } - sm.incomingTipSets <- target + // we are ready to start syncing; select the sync target and spawn a worker + target, err := sm.selectInitialSyncTarget() + if err != nil { + log.Errorf("failed to select initial sync target: %s", err) + return } - log.Infof("sync bootstrap has %d peers", spc) + + log.Infof("selected initial sync target: %s", target) + sm.spawnWorker(target) + return + } + + // we have started syncing, add peer head to the queue if applicable and maybe spawn a worker + // if there is work to do (possibly in a fork) + target, work, err := sm.addSyncTarget(head.ts) + if err != nil { + log.Warnf("failed to add sync target: %s", err) return } - sm.incomingTipSets <- ts + if work { + log.Infof("selected sync target: %s", target) + sm.spawnWorker(target) + } } -func (sm *syncManager) State() []SyncerStateSnapshot { - ret := make([]SyncerStateSnapshot, 0, len(sm.syncStates)) - for _, s := range sm.syncStates { - ret = append(ret, s.Snapshot()) +func (sm *syncManager) handleWorkerStatus(status workerStatus) { + log.Debugf("worker %d done; status error: %s", status.id, status.err) + + sm.mx.Lock() + ws := sm.state[status.id] + delete(sm.state, status.id) + + // we track the last few workers for debug purposes + sm.history[sm.historyI] = ws + sm.historyI++ + sm.historyI %= len(sm.history) + sm.mx.Unlock() + + if status.err != nil { + // we failed to sync this target -- log it and try to work on an extended chain + // if there is nothing related to be worked on, we stop working on this chain. 
+ log.Errorf("error during sync in %s: %s", ws.ts, status.err) + } else { + // add to the recently synced buffer + sm.recent.Push(ws.ts) + // if we are still in initial sync and this was fast enough, mark the end of the initial sync + if !sm.initialSyncDone && ws.dt < InitialSyncTimeThreshold { + sm.initialSyncDone = true + } + } + + // we are done with this target, select the next sync target and spawn a worker if there is work + // to do, because of an extension of this chain. + target, work, err := sm.selectSyncTarget(ws.ts) + if err != nil { + log.Warnf("failed to select sync target: %s", err) + return + } + + if work { + log.Infof("selected sync target: %s", target) + sm.spawnWorker(target) + } +} + +func (sm *syncManager) handleInitialSyncDone() { + // we have just finished the initial sync; spawn some additional workers in deferred syncs + // as needed (and up to MaxSyncWorkers) to ramp up chain sync + for len(sm.state) < MaxSyncWorkers { + target, work, err := sm.selectDeferredSyncTarget() + if err != nil { + log.Errorf("error selecting deferred sync target: %s", err) + return + } + + if !work { + return + } + + log.Infof("selected deferred sync target: %s", target) + sm.spawnWorker(target) + } +} + +func (sm *syncManager) spawnWorker(target *types.TipSet) { + id := sm.nextWorker + sm.nextWorker++ + ws := &workerState{ + id: id, + ts: target, + ss: new(SyncerState), + } + ws.ss.data.WorkerID = id + + sm.mx.Lock() + sm.state[id] = ws + sm.mx.Unlock() + + go sm.worker(ws) +} + +func (sm *syncManager) worker(ws *workerState) { + log.Infof("worker %d syncing in %s", ws.id, ws.ts) + + start := build.Clock.Now() + + ctx := context.WithValue(sm.ctx, syncStateKey{}, ws.ss) + err := sm.doSync(ctx, ws.ts) + + ws.dt = build.Clock.Since(start) + log.Infof("worker %d done; took %s", ws.id, ws.dt) + select { + case sm.statusq <- workerStatus{id: ws.id, err: err}: + case <-sm.ctx.Done(): + } +} + +// selects the initial sync target by examining known peer heads; only 
called once for the initial +// sync. +func (sm *syncManager) selectInitialSyncTarget() (*types.TipSet, error) { + var buckets syncBucketSet + + var peerHeads []*types.TipSet + for _, ts := range sm.heads { + peerHeads = append(peerHeads, ts) + } + // clear the map, we don't use it any longer + sm.heads = nil + + sort.Slice(peerHeads, func(i, j int) bool { + return peerHeads[i].Height() < peerHeads[j].Height() + }) + + for _, ts := range peerHeads { + buckets.Insert(ts) + } + + if len(buckets.buckets) > 1 { + log.Warn("caution, multiple distinct chains seen during head selections") + // TODO: we *could* refuse to sync here without user intervention. + // For now, just select the best cluster + } + + return buckets.Heaviest(), nil +} + +// adds a tipset to the potential sync targets; returns true if there is a a tipset to work on. +// this could be either a restart, eg because there is no currently scheduled sync work or a worker +// failed or a potential fork. +func (sm *syncManager) addSyncTarget(ts *types.TipSet) (*types.TipSet, bool, error) { + // Note: we don't need the state lock here to access the active worker states, as the only + // competing threads that may access it do so through State() which is read only. 
+ + // if we have recently synced this or any heavier tipset we just ignore it; this can happen + // with an empty worker set after we just finished syncing to a target + if sm.recent.Synced(ts) { + return nil, false, nil + } + + // if the worker set is empty, we have finished syncing and were waiting for the next tipset + // in this case, we just return the tipset as work to be done + if len(sm.state) == 0 { + return ts, true, nil + } + + // check if it is related to any active sync; if so insert into the pending sync queue + for _, ws := range sm.state { + if ts.Equals(ws.ts) { + // ignore it, we are already syncing it + return nil, false, nil + } + + if ts.Parents() == ws.ts.Key() { + // schedule for syncing next; it's an extension of an active sync + sm.pend.Insert(ts) + return nil, false, nil + } + } + + // check to see if it is related to any pending sync; if so insert it into the pending sync queue + if sm.pend.RelatedToAny(ts) { + sm.pend.Insert(ts) + return nil, false, nil + } + + // it's not related to any active or pending sync; this could be a fork in which case we + // start a new worker to sync it, if it is *heavier* than any active or pending set; + // if it is not, we ignore it. 
+ for _, ws := range sm.state { + if isHeavier(ws.ts, ts) { + return nil, false, nil + } + } + + pendHeaviest := sm.pend.Heaviest() + if pendHeaviest != nil && isHeavier(pendHeaviest, ts) { + return nil, false, nil + } + + // if we have not finished the initial sync or have too many workers, add it to the deferred queue; + // it will be processed once a worker is freed from syncing a chain (or the initial sync finishes) + if !sm.initialSyncDone || len(sm.state) >= MaxSyncWorkers { + log.Debugf("deferring sync on %s", ts) + sm.deferred.Insert(ts) + return nil, false, nil + } + + // start a new worker, seems heavy enough and unrelated to active or pending syncs + return ts, true, nil +} + +// selects the next sync target after a worker sync has finished; returns true and a target +// TipSet if this chain should continue to sync because there is a heavier related tipset. +func (sm *syncManager) selectSyncTarget(done *types.TipSet) (*types.TipSet, bool, error) { + // we pop the related bucket and if there is any related tipset, we work on the heaviest one next + // if we are not already working on a heavier tipset + related := sm.pend.PopRelated(done) + if related == nil { + return sm.selectDeferredSyncTarget() + } + + heaviest := related.heaviestTipSet() + if isHeavier(done, heaviest) { + return sm.selectDeferredSyncTarget() + } + + for _, ws := range sm.state { + if isHeavier(ws.ts, heaviest) { + return sm.selectDeferredSyncTarget() + } + } + + if sm.recent.Synced(heaviest) { + return sm.selectDeferredSyncTarget() + } + + return heaviest, true, nil +} + +// selects a deferred sync target if there is any; these are sync targets that were not related to +// active syncs and were deferred because there were too many workers running +func (sm *syncManager) selectDeferredSyncTarget() (*types.TipSet, bool, error) { +deferredLoop: + for !sm.deferred.Empty() { + bucket := sm.deferred.Pop() + heaviest := bucket.heaviestTipSet() + + if sm.recent.Synced(heaviest) { + // we have 
synced it or something heavier recently, skip it + continue deferredLoop + } + + if sm.pend.RelatedToAny(heaviest) { + // this has converged to a pending sync, insert it to the pending queue + sm.pend.Insert(heaviest) + continue deferredLoop + } + + for _, ws := range sm.state { + if ws.ts.Equals(heaviest) || isHeavier(ws.ts, heaviest) { + // we have converged and are already syncing it or we are syncing on something heavier + // ignore it and pop the next deferred bucket + continue deferredLoop + } + + if heaviest.Parents() == ws.ts.Key() { + // we have converged and we are syncing its parent; insert it to the pending queue + sm.pend.Insert(heaviest) + continue deferredLoop + } + + // it's not related to any active or pending sync and this worker is free, so sync it! + return heaviest, true, nil + } } - return ret + + return nil, false, nil } +func isHeavier(a, b *types.TipSet) bool { + return a.ParentWeight().GreaterThan(b.ParentWeight()) +} + +// sync buffer -- this is a circular buffer of recently synced tipsets +type syncBuffer struct { + buf []*types.TipSet + next int +} + +func newSyncBuffer(size int) *syncBuffer { + return &syncBuffer{buf: make([]*types.TipSet, size)} +} + +func (sb *syncBuffer) Push(ts *types.TipSet) { + sb.buf[sb.next] = ts + sb.next++ + sb.next %= len(sb.buf) +} + +func (sb *syncBuffer) Synced(ts *types.TipSet) bool { + for _, rts := range sb.buf { + if rts != nil && (rts.Equals(ts) || isHeavier(rts, ts)) { + return true + } + } + + return false +} + +// sync buckets and related utilities type syncBucketSet struct { buckets []*syncTargetBucket } +type syncTargetBucket struct { + tips []*types.TipSet +} + func newSyncTargetBucket(tipsets ...*types.TipSet) *syncTargetBucket { var stb syncTargetBucket for _, ts := range tipsets { @@ -250,10 +623,6 @@ func (sbs *syncBucketSet) Empty() bool { return len(sbs.buckets) == 0 } -type syncTargetBucket struct { - tips []*types.TipSet -} - func (stb *syncTargetBucket) sameChainAs(ts *types.TipSet) 
bool { for _, t := range stb.tips { if ts.Equals(t) { @@ -265,19 +634,43 @@ func (stb *syncTargetBucket) sameChainAs(ts *types.TipSet) bool { if ts.Parents() == t.Key() { return true } - if coalesceForksParents && ts.Parents() == t.Parents() { - return true - } } return false } func (stb *syncTargetBucket) add(ts *types.TipSet) { - - for _, t := range stb.tips { + for i, t := range stb.tips { if t.Equals(ts) { return } + if coalesceTipsets && t.Height() == ts.Height() && + types.CidArrsEqual(t.Blocks()[0].Parents, ts.Blocks()[0].Parents) { + miners := make(map[address.Address]struct{}) + newTs := []*types.BlockHeader{} + for _, b := range t.Blocks() { + _, have := miners[b.Miner] + if !have { + newTs = append(newTs, b) + miners[b.Miner] = struct{}{} + } + } + for _, b := range ts.Blocks() { + _, have := miners[b.Miner] + if !have { + newTs = append(newTs, b) + miners[b.Miner] = struct{}{} + } + } + + ts2, err := types.NewTipSet(newTs) + if err != nil { + log.Warnf("error while trying to recombine a tipset in a bucket: %+v", err) + continue + } + stb.tips[i] = ts2 + return + } + } stb.tips = append(stb.tips, ts) @@ -296,196 +689,3 @@ func (stb *syncTargetBucket) heaviestTipSet() *types.TipSet { } return best } - -func (sm *syncManager) selectSyncTarget() (*types.TipSet, error) { - var buckets syncBucketSet - - var peerHeads []*types.TipSet - for _, ts := range sm.peerHeads { - peerHeads = append(peerHeads, ts) - } - sort.Slice(peerHeads, func(i, j int) bool { - return peerHeads[i].Height() < peerHeads[j].Height() - }) - - for _, ts := range peerHeads { - buckets.Insert(ts) - } - - if len(buckets.buckets) > 1 { - log.Warn("caution, multiple distinct chains seen during head selections") - // TODO: we *could* refuse to sync here without user intervention. 
- // For now, just select the best cluster - } - - return buckets.Heaviest(), nil -} - -func (sm *syncManager) syncScheduler() { - for { - select { - case ts, ok := <-sm.incomingTipSets: - if !ok { - log.Info("shutting down sync scheduler") - return - } - - sm.scheduleIncoming(ts) - case res := <-sm.syncResults: - sm.scheduleProcessResult(res) - case sm.workerChan <- sm.nextSyncTarget.heaviestTipSet(): - sm.scheduleWorkSent() - case <-sm.stop: - log.Info("sync scheduler shutting down") - return - } - } -} - -func (sm *syncManager) scheduleIncoming(ts *types.TipSet) { - log.Debug("scheduling incoming tipset sync: ", ts.Cids()) - if sm.getBootstrapState() == BSStateSelected { - sm.setBootstrapState(BSStateScheduled) - sm.syncTargets <- ts - return - } - - var relatedToActiveSync bool - for _, acts := range sm.activeSyncs { - if ts.Equals(acts) { - // ignore, we are already syncing it - return - } - - if ts.Parents() == acts.Key() { - // sync this next, after that sync process finishes - relatedToActiveSync = true - } - } - - if !relatedToActiveSync && sm.activeSyncTips.RelatedToAny(ts) { - relatedToActiveSync = true - } - - // if this is related to an active sync process, immediately bucket it - // we don't want to start a parallel sync process that duplicates work - if relatedToActiveSync { - sm.activeSyncTips.Insert(ts) - return - } - - if sm.getBootstrapState() == BSStateScheduled { - sm.syncQueue.Insert(ts) - return - } - - if sm.nextSyncTarget != nil && sm.nextSyncTarget.sameChainAs(ts) { - sm.nextSyncTarget.add(ts) - } else { - sm.syncQueue.Insert(ts) - - if sm.nextSyncTarget == nil { - sm.nextSyncTarget = sm.syncQueue.Pop() - sm.workerChan = sm.syncTargets - } - } -} - -func (sm *syncManager) scheduleProcessResult(res *syncResult) { - if res.success && sm.getBootstrapState() != BSStateComplete { - sm.setBootstrapState(BSStateComplete) - } - - delete(sm.activeSyncs, res.ts.Key()) - relbucket := sm.activeSyncTips.PopRelated(res.ts) - if relbucket != nil { - if 
res.success { - if sm.nextSyncTarget == nil { - sm.nextSyncTarget = relbucket - sm.workerChan = sm.syncTargets - } else { - for _, t := range relbucket.tips { - sm.syncQueue.Insert(t) - } - } - return - } - // TODO: this is the case where we try to sync a chain, and - // fail, and we have more blocks on top of that chain that - // have come in since. The question is, should we try to - // sync these? or just drop them? - log.Error("failed to sync chain but have new unconnected blocks from chain") - } - - if sm.nextSyncTarget == nil && !sm.syncQueue.Empty() { - next := sm.syncQueue.Pop() - if next != nil { - sm.nextSyncTarget = next - sm.workerChan = sm.syncTargets - } - } -} - -func (sm *syncManager) scheduleWorkSent() { - hts := sm.nextSyncTarget.heaviestTipSet() - sm.activeSyncs[hts.Key()] = hts - - if !sm.syncQueue.Empty() { - sm.nextSyncTarget = sm.syncQueue.Pop() - } else { - sm.nextSyncTarget = nil - sm.workerChan = nil - } -} - -func (sm *syncManager) syncWorker(id int) { - ss := sm.syncStates[id] - for { - select { - case ts, ok := <-sm.syncTargets: - if !ok { - log.Info("sync manager worker shutting down") - return - } - - ctx := context.WithValue(context.TODO(), syncStateKey{}, ss) - err := sm.doSync(ctx, ts) - if err != nil { - log.Errorf("sync error: %+v", err) - } - - sm.syncResults <- &syncResult{ - ts: ts, - success: err == nil, - } - } - } -} - -func (sm *syncManager) syncedPeerCount() int { - var count int - for _, ts := range sm.peerHeads { - if ts.Height() > 0 { - count++ - } - } - return count -} - -func (sm *syncManager) getBootstrapState() int { - sm.bssLk.Lock() - defer sm.bssLk.Unlock() - return sm.bootstrapState -} - -func (sm *syncManager) setBootstrapState(v int) { - sm.bssLk.Lock() - defer sm.bssLk.Unlock() - sm.bootstrapState = v -} - -func (sm *syncManager) IsBootstrapped() bool { - sm.bssLk.Lock() - defer sm.bssLk.Unlock() - return sm.bootstrapState == BSStateComplete -} diff --git a/chain/sync_manager_test.go 
b/chain/sync_manager_test.go index 709e03a4108..5f23e67c071 100644 --- a/chain/sync_manager_test.go +++ b/chain/sync_manager_test.go @@ -10,6 +10,10 @@ import ( "github.com/filecoin-project/lotus/chain/types/mock" ) +func init() { + BootstrapPeerThreshold = 1 +} + var genTs = mock.TipSet(mock.MkBlock(nil, 0, 0)) type syncOp struct { @@ -28,7 +32,12 @@ func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T, <-ch return nil }).(*syncManager) - sm.bspThresh = thresh + + oldBootstrapPeerThreshold := BootstrapPeerThreshold + BootstrapPeerThreshold = thresh + defer func() { + BootstrapPeerThreshold = oldBootstrapPeerThreshold + }() sm.Start() defer sm.Stop() @@ -87,47 +96,67 @@ func TestSyncManagerEdgeCase(t *testing.T) { runSyncMgrTest(t, "edgeCase", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) { sm.SetPeerHead(ctx, "peer1", a) - assertGetSyncOp(t, stc, a) sm.SetPeerHead(ctx, "peer1", b1) sm.SetPeerHead(ctx, "peer1", b2) - // b1 and b2 are being processed - b1op := <-stc - b2op := <-stc - if !b1op.ts.Equals(b1) { - b1op, b2op = b2op, b1op + assertGetSyncOp(t, stc, a) + + // b1 and b2 are in queue after a; the sync manager should pick the heaviest one which is b2 + bop := <-stc + if !bop.ts.Equals(b2) { + t.Fatalf("Expected tipset %s to sync, but got %s", b2, bop.ts) } - sm.SetPeerHead(ctx, "peer2", c2) // c2 is put into activeSyncTips at index 0 - sm.SetPeerHead(ctx, "peer2", c1) // c1 is put into activeSyncTips at index 1 - sm.SetPeerHead(ctx, "peer3", b2) // b2 is related to c2 and even though it is actively synced it is put into activeSyncTips index 0 - sm.SetPeerHead(ctx, "peer1", a) // a is related to b2 and is put into activeSyncTips index 0 + sm.SetPeerHead(ctx, "peer2", c2) + sm.SetPeerHead(ctx, "peer2", c1) + sm.SetPeerHead(ctx, "peer3", b2) + sm.SetPeerHead(ctx, "peer1", a) - b1op.done() // b1 completes first, is related to a, so it pops activeSyncTips index 0 - // even though correct one is index 1 + bop.done() - 
b2op.done() - // b2 completes and is not related to c1, so it leaves activeSyncTips as it is + // get the next sync target; it should be c1 as the heaviest tipset but added last (same weight as c2) + bop = <-stc + if bop.ts.Equals(c2) { + // there's a small race and we might get c2 first. + // But we should still end on c1. + bop.done() + bop = <-stc + } - waitUntilAllWorkersAreDone(stc) + if !bop.ts.Equals(c1) { + t.Fatalf("Expected tipset %s to sync, but got %s", c1, bop.ts) + } - if len(sm.activeSyncTips.buckets) != 0 { - t.Errorf("activeSyncTips expected empty but got: %s", sm.activeSyncTips.String()) + sm.SetPeerHead(ctx, "peer4", d1) + sm.SetPeerHead(ctx, "peer5", e1) + bop.done() + + // get the last sync target; it should be e1 + var last *types.TipSet + for i := 0; i < 10; { + select { + case bop = <-stc: + bop.done() + if last == nil || bop.ts.Height() > last.Height() { + last = bop.ts + } + default: + i++ + time.Sleep(10 * time.Millisecond) + } + } + if !last.Equals(e1) { + t.Fatalf("Expected tipset %s to sync, but got %s", e1, last) } - }) -} -func waitUntilAllWorkersAreDone(stc chan *syncOp) { - for i := 0; i < 10; { - select { - case so := <-stc: - so.done() - default: - i++ - time.Sleep(10 * time.Millisecond) + sm.mx.Lock() + activeSyncs := len(sm.state) + sm.mx.Unlock() + if activeSyncs != 0 { + t.Errorf("active syncs expected empty but got: %d", activeSyncs) } - } + }) } func TestSyncManager(t *testing.T) { diff --git a/chain/sync_test.go b/chain/sync_test.go index 559a73bf525..5312dff0bed 100644 --- a/chain/sync_test.go +++ b/chain/sync_test.go @@ -7,6 +7,11 @@ import ( "testing" "time" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" @@ -80,6 +85,7 @@ type syncTestUtil struct { blocks []*store.FullTipSet nds []api.FullNode + us stmgr.UpgradeSchedule } func prepSyncTest(t 
testing.TB, h int) *syncTestUtil { @@ -99,9 +105,11 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil { mn: mocknet.New(ctx), g: g, + us: stmgr.DefaultUpgradeSchedule(), } tu.addSourceNode(h) + //tu.checkHeight("source", source, h) // separate logs @@ -110,6 +118,54 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil { return tu } +func prepSyncTestWithV5Height(t testing.TB, h int, v5height abi.ChainEpoch) *syncTestUtil { + logging.SetLogLevel("*", "INFO") + + sched := stmgr.UpgradeSchedule{{ + // prepare for upgrade. + Network: network.Version9, + Height: 1, + Migration: stmgr.UpgradeActorsV2, + }, { + Network: network.Version10, + Height: 2, + Migration: stmgr.UpgradeActorsV3, + }, { + Network: network.Version12, + Height: 3, + Migration: stmgr.UpgradeActorsV4, + }, { + Network: network.Version13, + Height: v5height, + Migration: stmgr.UpgradeActorsV5, + }} + + g, err := gen.NewGeneratorWithUpgradeSchedule(sched) + + if err != nil { + t.Fatalf("%+v", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + + tu := &syncTestUtil{ + t: t, + ctx: ctx, + cancel: cancel, + + mn: mocknet.New(ctx), + g: g, + us: sched, + } + + tu.addSourceNode(h) + //tu.checkHeight("source", source, h) + + // separate logs + fmt.Println("\x1b[31m///////////////////////////////////////////////////\x1b[39b") + return tu +} + func (tu *syncTestUtil) Shutdown() { tu.cancel() } @@ -174,7 +230,7 @@ func (tu *syncTestUtil) pushTsExpectErr(to int, fts *store.FullTipSet, experr bo } } -func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, wait, fail bool, msgs [][]*types.SignedMessage) *store.FullTipSet { +func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, wait, fail bool, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch) *store.FullTipSet { if miners == nil { for i := range tu.g.Miners { miners = append(miners, i) @@ -191,10 +247,10 @@ func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners 
[]int, var nts *store.FullTipSet var err error if msgs != nil { - nts, err = tu.g.NextTipSetFromMinersWithMessages(blk.TipSet(), maddrs, msgs) + nts, err = tu.g.NextTipSetFromMinersWithMessagesAndNulls(blk.TipSet(), maddrs, msgs, 0) require.NoError(tu.t, err) } else { - mt, err := tu.g.NextTipSetFromMiners(blk.TipSet(), maddrs) + mt, err := tu.g.NextTipSetFromMiners(blk.TipSet(), maddrs, nulls) require.NoError(tu.t, err) nts = mt.TipSet } @@ -209,7 +265,7 @@ func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, } func (tu *syncTestUtil) mineNewBlock(src int, miners []int) { - mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false, nil) + mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false, nil, 0) tu.g.CurTipset = mts } @@ -223,12 +279,13 @@ func (tu *syncTestUtil) addSourceNode(gen int) { stop, err := node.New(tu.ctx, node.FullAPI(&out), - node.Online(), + node.Base(), node.Repo(sourceRepo), node.MockHost(tu.mn), node.Test(), node.Override(new(modules.Genesis), modules.LoadGenesis(genesis)), + node.Override(new(stmgr.UpgradeSchedule), tu.us), ) require.NoError(tu.t, err) tu.t.Cleanup(func() { _ = stop(context.Background()) }) @@ -253,14 +310,16 @@ func (tu *syncTestUtil) addClientNode() int { var out api.FullNode + r := repo.NewMemory(nil) stop, err := node.New(tu.ctx, node.FullAPI(&out), - node.Online(), - node.Repo(repo.NewMemory(nil)), + node.Base(), + node.Repo(r), node.MockHost(tu.mn), node.Test(), node.Override(new(modules.Genesis), modules.LoadGenesis(tu.genesis)), + node.Override(new(stmgr.UpgradeSchedule), tu.us), ) require.NoError(tu.t, err) tu.t.Cleanup(func() { _ = stop(context.Background()) }) @@ -346,12 +405,15 @@ func (tu *syncTestUtil) checkpointTs(node int, tsk types.TipSetKey) { require.NoError(tu.t, tu.nds[node].SyncCheckpoint(context.TODO(), tsk)) } +func (tu *syncTestUtil) nodeHasTs(node int, tsk types.TipSetKey) bool { + _, err := tu.nds[node].ChainGetTipSet(context.TODO(), tsk) + return err == 
nil +} + func (tu *syncTestUtil) waitUntilNodeHasTs(node int, tsk types.TipSetKey) { - for { - _, err := tu.nds[node].ChainGetTipSet(context.TODO(), tsk) - if err != nil { - break - } + for !tu.nodeHasTs(node, tsk) { + // Time to allow for syncing and validation + time.Sleep(10 * time.Millisecond) } // Time to allow for syncing and validation @@ -376,12 +438,18 @@ func (tu *syncTestUtil) waitUntilSyncTarget(to int, target *types.TipSet) { tu.t.Fatal(err) } - // TODO: some sort of timeout? - for n := range hc { - for _, c := range n { - if c.Val.Equals(target) { - return + timeout := time.After(5 * time.Second) + + for { + select { + case n := <-hc: + for _, c := range n { + if c.Val.Equals(target) { + return + } } + case <-timeout: + tu.t.Fatal("waitUntilSyncTarget timeout") } } } @@ -442,7 +510,7 @@ func TestSyncBadTimestamp(t *testing.T) { fmt.Println("BASE: ", base.Cids()) tu.printHeads() - a1 := tu.mineOnBlock(base, 0, nil, false, true, nil) + a1 := tu.mineOnBlock(base, 0, nil, false, true, nil, 0) tu.g.Timestamper = nil require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) @@ -451,7 +519,7 @@ func TestSyncBadTimestamp(t *testing.T) { fmt.Println("After mine bad block!") tu.printHeads() - a2 := tu.mineOnBlock(base, 0, nil, true, false, nil) + a2 := tu.mineOnBlock(base, 0, nil, true, false, nil, 0) tu.waitUntilSync(0, client) @@ -495,7 +563,7 @@ func TestSyncBadWinningPoSt(t *testing.T) { tu.g.SetWinningPoStProver(tu.g.Miners[1], &badWpp{}) // now ensure that new blocks are not accepted - tu.mineOnBlock(base, client, nil, false, true, nil) + tu.mineOnBlock(base, client, nil, false, true, nil, 0) } func (tu *syncTestUtil) loadChainToNode(to int) { @@ -518,15 +586,20 @@ func TestSyncFork(t *testing.T) { tu.loadChainToNode(p1) tu.loadChainToNode(p2) - phead := func() { + printHead := func() { h1, err := tu.nds[1].ChainHead(tu.ctx) require.NoError(tu.t, err) h2, err := tu.nds[2].ChainHead(tu.ctx) require.NoError(tu.t, err) - fmt.Println("Node 1: ", h1.Cids(), 
h1.Parents(), h1.Height()) - fmt.Println("Node 2: ", h2.Cids(), h1.Parents(), h2.Height()) + w1, err := tu.nds[1].(*impl.FullNodeAPI).ChainAPI.Chain.Weight(tu.ctx, h1) + require.NoError(tu.t, err) + w2, err := tu.nds[2].(*impl.FullNodeAPI).ChainAPI.Chain.Weight(tu.ctx, h2) + require.NoError(tu.t, err) + + fmt.Println("Node 1: ", h1.Cids(), h1.Parents(), h1.Height(), w1) + fmt.Println("Node 2: ", h2.Cids(), h2.Parents(), h2.Height(), w2) //time.Sleep(time.Second * 2) fmt.Println() fmt.Println() @@ -534,26 +607,28 @@ func TestSyncFork(t *testing.T) { fmt.Println() } - phead() + printHead() base := tu.g.CurTipset fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) // The two nodes fork at this point into 'a' and 'b' - a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil) - a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil) - a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil) + a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0) + a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0) + a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) // chain B will now be heaviest - b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) + b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) fmt.Println("A: ", a.Cids(), a.TipSet().Height()) fmt.Println("B: ", b.Cids(), b.TipSet().Height()) + printHead() + // Now for the fun part!! 
require.NoError(t, tu.mn.LinkAll()) @@ -561,7 +636,7 @@ func TestSyncFork(t *testing.T) { tu.waitUntilSyncTarget(p1, b.TipSet()) tu.waitUntilSyncTarget(p2, b.TipSet()) - phead() + printHead() } // This test crafts a tipset with 2 blocks, A and B. @@ -573,11 +648,14 @@ func TestDuplicateNonce(t *testing.T) { base := tu.g.CurTipset + // Get the banker from computed tipset state, not the parent. + st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet()) + require.NoError(t, err) + ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st) + require.NoError(t, err) + // Produce a message from the banker to the rcvr makeMsg := func(rcvr address.Address) *types.SignedMessage { - - ba, err := tu.nds[0].StateGetActor(context.TODO(), tu.g.Banker(), base.TipSet().Key()) - require.NoError(t, err) msg := types.Message{ To: rcvr, From: tu.g.Banker(), @@ -608,28 +686,28 @@ func TestDuplicateNonce(t *testing.T) { msgs[k] = []*types.SignedMessage{makeMsg(tu.g.Miners[k])} } - ts1 := tu.mineOnBlock(base, 0, []int{0, 1}, true, false, msgs) + ts1 := tu.mineOnBlock(base, 0, []int{0, 1}, true, false, msgs, 0) tu.waitUntilSyncTarget(0, ts1.TipSet()) // mine another tipset - ts2 := tu.mineOnBlock(ts1, 0, []int{0, 1}, true, false, make([][]*types.SignedMessage, 2)) + ts2 := tu.mineOnBlock(ts1, 0, []int{0, 1}, true, false, make([][]*types.SignedMessage, 2), 0) tu.waitUntilSyncTarget(0, ts2.TipSet()) var includedMsg cid.Cid var skippedMsg cid.Cid - r0, err0 := tu.nds[0].StateGetReceipt(context.TODO(), msgs[0][0].Cid(), ts2.TipSet().Key()) - r1, err1 := tu.nds[0].StateGetReceipt(context.TODO(), msgs[1][0].Cid(), ts2.TipSet().Key()) + r0, err0 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[0][0].Cid(), api.LookbackNoLimit, true) + r1, err1 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[1][0].Cid(), api.LookbackNoLimit, true) if err0 == nil { require.Error(t, err1, "at least one of the StateGetReceipt calls 
should fail") - require.True(t, r0.ExitCode.IsSuccess()) + require.True(t, r0.Receipt.ExitCode.IsSuccess()) includedMsg = msgs[0][0].Message.Cid() skippedMsg = msgs[1][0].Message.Cid() } else { require.NoError(t, err1, "both the StateGetReceipt calls should not fail") - require.True(t, r1.ExitCode.IsSuccess()) + require.True(t, r1.Receipt.ExitCode.IsSuccess()) includedMsg = msgs[1][0].Message.Cid() skippedMsg = msgs[0][0].Message.Cid() } @@ -665,11 +743,14 @@ func TestBadNonce(t *testing.T) { base := tu.g.CurTipset + // Get the banker from computed tipset state, not the parent. + st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet()) + require.NoError(t, err) + ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st) + require.NoError(t, err) + // Produce a message from the banker with a bad nonce makeBadMsg := func() *types.SignedMessage { - - ba, err := tu.nds[0].StateGetActor(context.TODO(), tu.g.Banker(), base.TipSet().Key()) - require.NoError(t, err) msg := types.Message{ To: tu.g.Banker(), From: tu.g.Banker(), @@ -697,7 +778,115 @@ func TestBadNonce(t *testing.T) { msgs := make([][]*types.SignedMessage, 1) msgs[0] = []*types.SignedMessage{makeBadMsg()} - tu.mineOnBlock(base, 0, []int{0}, true, true, msgs) + tu.mineOnBlock(base, 0, []int{0}, true, true, msgs, 0) +} + +// This test introduces a block that has 2 messages, with the same sender, and same nonce. +// One of the messages uses the sender's robust address, the other uses the ID address. +// Such a block is invalid and should not sync. +func TestMismatchedNoncesRobustID(t *testing.T) { + v5h := abi.ChainEpoch(4) + tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h) + + base := tu.g.CurTipset + + // Get the banker from computed tipset state, not the parent. 
+ st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet()) + require.NoError(t, err) + ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st) + require.NoError(t, err) + + // Produce a message from the banker + makeMsg := func(id bool) *types.SignedMessage { + sender := tu.g.Banker() + if id { + s, err := tu.nds[0].StateLookupID(context.TODO(), sender, base.TipSet().Key()) + require.NoError(t, err) + sender = s + } + + msg := types.Message{ + To: tu.g.Banker(), + From: sender, + + Nonce: ba.Nonce, + + Value: types.NewInt(1), + + Method: 0, + + GasLimit: 100_000_000, + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), + } + + sig, err := tu.g.Wallet().WalletSign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes(), api.MsgMeta{}) + require.NoError(t, err) + + return &types.SignedMessage{ + Message: msg, + Signature: *sig, + } + } + + msgs := make([][]*types.SignedMessage, 1) + msgs[0] = []*types.SignedMessage{makeMsg(false), makeMsg(true)} + + tu.mineOnBlock(base, 0, []int{0}, true, true, msgs, 0) +} + +// This test introduces a block that has 2 messages, with the same sender, and nonces N and N+1 (so both can be included in a block) +// One of the messages uses the sender's robust address, the other uses the ID address. +// Such a block is valid and should sync. +func TestMatchedNoncesRobustID(t *testing.T) { + v5h := abi.ChainEpoch(4) + tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h) + + base := tu.g.CurTipset + + // Get the banker from computed tipset state, not the parent. 
+ st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet()) + require.NoError(t, err) + ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st) + require.NoError(t, err) + + // Produce a message from the banker with specified nonce + makeMsg := func(n uint64, id bool) *types.SignedMessage { + sender := tu.g.Banker() + if id { + s, err := tu.nds[0].StateLookupID(context.TODO(), sender, base.TipSet().Key()) + require.NoError(t, err) + sender = s + } + + msg := types.Message{ + To: tu.g.Banker(), + From: sender, + + Nonce: n, + + Value: types.NewInt(1), + + Method: 0, + + GasLimit: 100_000_000, + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), + } + + sig, err := tu.g.Wallet().WalletSign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes(), api.MsgMeta{}) + require.NoError(t, err) + + return &types.SignedMessage{ + Message: msg, + Signature: *sig, + } + } + + msgs := make([][]*types.SignedMessage, 1) + msgs[0] = []*types.SignedMessage{makeMsg(ba.Nonce, false), makeMsg(ba.Nonce+1, true)} + + tu.mineOnBlock(base, 0, []int{0}, true, false, msgs, 0) } func BenchmarkSyncBasic(b *testing.B) { @@ -762,19 +951,19 @@ func TestSyncCheckpointHead(t *testing.T) { fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) // The two nodes fork at this point into 'a' and 'b' - a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil) - a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil) - a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil) + a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0) + a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0) + a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0) tu.waitUntilSyncTarget(p1, a.TipSet()) tu.checkpointTs(p1, a.TipSet().Key()) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) // chain B will now be heaviest - b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) - b = 
tu.mineOnBlock(b, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) + b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) fmt.Println("A: ", a.Cids(), a.TipSet().Height()) fmt.Println("B: ", b.Cids(), b.TipSet().Height()) @@ -785,8 +974,13 @@ func TestSyncCheckpointHead(t *testing.T) { tu.connect(p1, p2) tu.waitUntilNodeHasTs(p1, b.TipSet().Key()) p1Head := tu.getHead(p1) - require.Equal(tu.t, p1Head, a.TipSet()) + require.True(tu.t, p1Head.Equals(a.TipSet())) tu.assertBad(p1, b.TipSet()) + + // Should be able to switch forks. + tu.checkpointTs(p1, b.TipSet().Key()) + p1Head = tu.getHead(p1) + require.True(tu.t, p1Head.Equals(b.TipSet())) } func TestSyncCheckpointEarlierThanHead(t *testing.T) { @@ -804,19 +998,19 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) { fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) // The two nodes fork at this point into 'a' and 'b' - a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil) - a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil) - a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil) + a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0) + a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0) + a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0) tu.waitUntilSyncTarget(p1, a.TipSet()) tu.checkpointTs(p1, a1.TipSet().Key()) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) // chain B will now be heaviest - b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) - b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) + b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, 
nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0) fmt.Println("A: ", a.Cids(), a.TipSet().Height()) fmt.Println("B: ", b.Cids(), b.TipSet().Height()) @@ -827,6 +1021,86 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) { tu.connect(p1, p2) tu.waitUntilNodeHasTs(p1, b.TipSet().Key()) p1Head := tu.getHead(p1) - require.Equal(tu.t, p1Head, a.TipSet()) + require.True(tu.t, p1Head.Equals(a.TipSet())) tu.assertBad(p1, b.TipSet()) + + // Should be able to switch forks. + tu.checkpointTs(p1, b.TipSet().Key()) + p1Head = tu.getHead(p1) + require.True(tu.t, p1Head.Equals(b.TipSet())) +} + +func TestDrandNull(t *testing.T) { + H := 10 + v5h := abi.ChainEpoch(50) + ov5h := build.UpgradeHyperdriveHeight + build.UpgradeHyperdriveHeight = v5h + tu := prepSyncTestWithV5Height(t, H, v5h) + + p0 := tu.addClientNode() + p1 := tu.addClientNode() + + tu.loadChainToNode(p0) + tu.loadChainToNode(p1) + + entropy := []byte{0, 2, 3, 4} + // arbitrarily chosen + pers := crypto.DomainSeparationTag_WinningPoStChallengeSeed + + beforeNull := tu.g.CurTipset + afterNull := tu.mineOnBlock(beforeNull, p0, nil, false, false, nil, 2) + nullHeight := beforeNull.TipSet().Height() + 1 + if afterNull.TipSet().Height() == nullHeight { + t.Fatal("didn't inject nulls as expected") + } + + rand, err := tu.nds[p0].ChainGetRandomnessFromBeacon(tu.ctx, afterNull.TipSet().Key(), pers, nullHeight, entropy) + require.NoError(t, err) + + // calculate the expected randomness based on the beacon BEFORE the null + expectedBE := beforeNull.Blocks[0].Header.BeaconEntries + expectedRand, err := store.DrawRandomness(expectedBE[len(expectedBE)-1].Data, pers, nullHeight, entropy) + require.NoError(t, err) + + require.Equal(t, []byte(rand), expectedRand) + + // zoom zoom to past the v5 upgrade by injecting many many nulls + postUpgrade := tu.mineOnBlock(afterNull, p0, nil, false, false, nil, v5h) + nv, err := 
tu.nds[p0].StateNetworkVersion(tu.ctx, postUpgrade.TipSet().Key()) + require.NoError(t, err) + if nv != network.Version13 { + t.Fatal("expect to be v13 by now") + } + + afterNull = tu.mineOnBlock(postUpgrade, p0, nil, false, false, nil, 2) + nullHeight = postUpgrade.TipSet().Height() + 1 + if afterNull.TipSet().Height() == nullHeight { + t.Fatal("didn't inject nulls as expected") + } + + rand0, err := tu.nds[p0].ChainGetRandomnessFromBeacon(tu.ctx, afterNull.TipSet().Key(), pers, nullHeight, entropy) + require.NoError(t, err) + + // calculate the expected randomness based on the beacon AFTER the null + expectedBE = afterNull.Blocks[0].Header.BeaconEntries + expectedRand, err = store.DrawRandomness(expectedBE[len(expectedBE)-1].Data, pers, nullHeight, entropy) + require.NoError(t, err) + + require.Equal(t, []byte(rand0), expectedRand) + + // Introduce p1 to friendly p0 who has all the blocks + require.NoError(t, tu.mn.LinkAll()) + tu.connect(p0, p1) + tu.waitUntilNodeHasTs(p1, afterNull.TipSet().Key()) + p1Head := tu.getHead(p1) + + // Yes, p1 syncs well to p0's chain + require.Equal(tu.t, p1Head.Key(), afterNull.TipSet().Key()) + + // Yes, p1 sources the same randomness as p0 + rand1, err := tu.nds[p1].ChainGetRandomnessFromBeacon(tu.ctx, afterNull.TipSet().Key(), pers, nullHeight, entropy) + require.NoError(t, err) + require.Equal(t, rand0, rand1) + + build.UpgradeHyperdriveHeight = ov5h } diff --git a/chain/syncstate.go b/chain/syncstate.go index 26f9f1c39f0..527d6be4832 100644 --- a/chain/syncstate.go +++ b/chain/syncstate.go @@ -12,13 +12,14 @@ import ( ) type SyncerStateSnapshot struct { - Target *types.TipSet - Base *types.TipSet - Stage api.SyncStateStage - Height abi.ChainEpoch - Message string - Start time.Time - End time.Time + WorkerID uint64 + Target *types.TipSet + Base *types.TipSet + Stage api.SyncStateStage + Height abi.ChainEpoch + Message string + Start time.Time + End time.Time } type SyncerState struct { diff --git a/chain/types/bigint.go 
b/chain/types/bigint.go index da4857d5b4d..72ef5212862 100644 --- a/chain/types/bigint.go +++ b/chain/types/bigint.go @@ -47,6 +47,11 @@ func BigDiv(a, b BigInt) BigInt { return BigInt{Int: big.NewInt(0).Div(a.Int, b.Int)} } +func BigDivFloat(num, den BigInt) float64 { + res, _ := new(big.Rat).SetFrac(num.Int, den.Int).Float64() + return res +} + func BigMod(a, b BigInt) BigInt { return BigInt{Int: big.NewInt(0).Mod(a.Int, b.Int)} } diff --git a/chain/types/blockheader.go b/chain/types/blockheader.go index 4db6788e1b8..66e711cabe9 100644 --- a/chain/types/blockheader.go +++ b/chain/types/blockheader.go @@ -47,41 +47,24 @@ func NewBeaconEntry(round uint64, data []byte) BeaconEntry { } type BlockHeader struct { - Miner address.Address // 0 - - Ticket *Ticket // 1 - - ElectionProof *ElectionProof // 2 - - BeaconEntries []BeaconEntry // 3 - - WinPoStProof []proof2.PoStProof // 4 - - Parents []cid.Cid // 5 - - ParentWeight BigInt // 6 - - Height abi.ChainEpoch // 7 - - ParentStateRoot cid.Cid // 8 - - ParentMessageReceipts cid.Cid // 8 - - Messages cid.Cid // 10 - - BLSAggregate *crypto.Signature // 11 - - Timestamp uint64 // 12 - - BlockSig *crypto.Signature // 13 - - ForkSignaling uint64 // 14 - - // ParentBaseFee is the base fee after executing parent tipset - ParentBaseFee abi.TokenAmount // 15 - - // internal - validated bool // true if the signature has been validated + Miner address.Address // 0 unique per block/miner + Ticket *Ticket // 1 unique per block/miner: should be a valid VRF + ElectionProof *ElectionProof // 2 unique per block/miner: should be a valid VRF + BeaconEntries []BeaconEntry // 3 identical for all blocks in same tipset + WinPoStProof []proof2.PoStProof // 4 unique per block/miner + Parents []cid.Cid // 5 identical for all blocks in same tipset + ParentWeight BigInt // 6 identical for all blocks in same tipset + Height abi.ChainEpoch // 7 identical for all blocks in same tipset + ParentStateRoot cid.Cid // 8 identical for all blocks in same 
tipset + ParentMessageReceipts cid.Cid // 9 identical for all blocks in same tipset + Messages cid.Cid // 10 unique per block + BLSAggregate *crypto.Signature // 11 unique per block: aggrregate of BLS messages from above + Timestamp uint64 // 12 identical for all blocks in same tipset / hard-tied to the value of Height above + BlockSig *crypto.Signature // 13 unique per block/miner: miner signature + ForkSignaling uint64 // 14 currently unused/undefined + ParentBaseFee abi.TokenAmount // 15 identical for all blocks in same tipset: the base fee after executing parent tipset + + validated bool // internal, true if the signature has been validated } func (blk *BlockHeader) ToStorageBlock() (block.Block, error) { diff --git a/chain/types/cbor_gen.go b/chain/types/cbor_gen.go index d063ce8c9f7..db1f3cdb238 100644 --- a/chain/types/cbor_gen.go +++ b/chain/types/cbor_gen.go @@ -5,6 +5,7 @@ package types import ( "fmt" "io" + "sort" abi "github.com/filecoin-project/go-state-types/abi" crypto "github.com/filecoin-project/go-state-types/crypto" @@ -16,6 +17,8 @@ import ( ) var _ = xerrors.Errorf +var _ = cid.Undef +var _ = sort.Sort var lengthBufBlockHeader = []byte{144} diff --git a/chain/types/fil.go b/chain/types/fil.go index 0ea77660c3a..21125e6d617 100644 --- a/chain/types/fil.go +++ b/chain/types/fil.go @@ -23,6 +23,43 @@ func (f FIL) Unitless() string { return strings.TrimRight(strings.TrimRight(r.FloatString(18), "0"), ".") } +var AttoFil = NewInt(1) +var FemtoFil = BigMul(AttoFil, NewInt(1000)) +var PicoFil = BigMul(FemtoFil, NewInt(1000)) +var NanoFil = BigMul(PicoFil, NewInt(1000)) + +var unitPrefixes = []string{"a", "f", "p", "n", "μ", "m"} + +func (f FIL) Short() string { + n := BigInt(f).Abs() + + dn := uint64(1) + var prefix string + for _, p := range unitPrefixes { + if n.LessThan(NewInt(dn * 1000)) { + prefix = p + break + } + dn *= 1000 + } + + r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(dn))) + if r.Sign() == 0 { + return "0" + } + + return 
strings.TrimRight(strings.TrimRight(r.FloatString(3), "0"), ".") + " " + prefix + "FIL" +} + +func (f FIL) Nano() string { + r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(1e9))) + if r.Sign() == 0 { + return "0" + } + + return strings.TrimRight(strings.TrimRight(r.FloatString(9), "0"), ".") + " nFIL" +} + func (f FIL) Format(s fmt.State, ch rune) { switch ch { case 's', 'v': @@ -47,7 +84,7 @@ func (f FIL) UnmarshalText(text []byte) error { } func ParseFIL(s string) (FIL, error) { - suffix := strings.TrimLeft(s, ".1234567890") + suffix := strings.TrimLeft(s, "-.1234567890") s = s[:len(s)-len(suffix)] var attofil bool if suffix != "" { @@ -61,6 +98,10 @@ func ParseFIL(s string) (FIL, error) { } } + if len(s) > 50 { + return FIL{}, fmt.Errorf("string length too large: %d", len(s)) + } + r, ok := new(big.Rat).SetString(s) if !ok { return FIL{}, fmt.Errorf("failed to parse %q as a decimal number", s) diff --git a/chain/types/fil_test.go b/chain/types/fil_test.go new file mode 100644 index 00000000000..7bf2a802ede --- /dev/null +++ b/chain/types/fil_test.go @@ -0,0 +1,114 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFilShort(t *testing.T) { + for _, s := range []struct { + fil string + expect string + }{ + + {fil: "1", expect: "1 FIL"}, + {fil: "1.1", expect: "1.1 FIL"}, + {fil: "12", expect: "12 FIL"}, + {fil: "123", expect: "123 FIL"}, + {fil: "123456", expect: "123456 FIL"}, + {fil: "123.23", expect: "123.23 FIL"}, + {fil: "123456.234", expect: "123456.234 FIL"}, + {fil: "123456.2341234", expect: "123456.234 FIL"}, + {fil: "123456.234123445", expect: "123456.234 FIL"}, + + {fil: "0.1", expect: "100 mFIL"}, + {fil: "0.01", expect: "10 mFIL"}, + {fil: "0.001", expect: "1 mFIL"}, + + {fil: "0.0001", expect: "100 μFIL"}, + {fil: "0.00001", expect: "10 μFIL"}, + {fil: "0.000001", expect: "1 μFIL"}, + + {fil: "0.0000001", expect: "100 nFIL"}, + {fil: "0.00000001", expect: "10 nFIL"}, + {fil: "0.000000001", expect: "1 
nFIL"}, + + {fil: "0.0000000001", expect: "100 pFIL"}, + {fil: "0.00000000001", expect: "10 pFIL"}, + {fil: "0.000000000001", expect: "1 pFIL"}, + + {fil: "0.0000000000001", expect: "100 fFIL"}, + {fil: "0.00000000000001", expect: "10 fFIL"}, + {fil: "0.000000000000001", expect: "1 fFIL"}, + + {fil: "0.0000000000000001", expect: "100 aFIL"}, + {fil: "0.00000000000000001", expect: "10 aFIL"}, + {fil: "0.000000000000000001", expect: "1 aFIL"}, + + {fil: "0.0000012", expect: "1.2 μFIL"}, + {fil: "0.00000123", expect: "1.23 μFIL"}, + {fil: "0.000001234", expect: "1.234 μFIL"}, + {fil: "0.0000012344", expect: "1.234 μFIL"}, + {fil: "0.00000123444", expect: "1.234 μFIL"}, + + {fil: "0.0002212", expect: "221.2 μFIL"}, + {fil: "0.00022123", expect: "221.23 μFIL"}, + {fil: "0.000221234", expect: "221.234 μFIL"}, + {fil: "0.0002212344", expect: "221.234 μFIL"}, + {fil: "0.00022123444", expect: "221.234 μFIL"}, + + {fil: "-1", expect: "-1 FIL"}, + {fil: "-1.1", expect: "-1.1 FIL"}, + {fil: "-12", expect: "-12 FIL"}, + {fil: "-123", expect: "-123 FIL"}, + {fil: "-123456", expect: "-123456 FIL"}, + {fil: "-123.23", expect: "-123.23 FIL"}, + {fil: "-123456.234", expect: "-123456.234 FIL"}, + {fil: "-123456.2341234", expect: "-123456.234 FIL"}, + {fil: "-123456.234123445", expect: "-123456.234 FIL"}, + + {fil: "-0.1", expect: "-100 mFIL"}, + {fil: "-0.01", expect: "-10 mFIL"}, + {fil: "-0.001", expect: "-1 mFIL"}, + + {fil: "-0.0001", expect: "-100 μFIL"}, + {fil: "-0.00001", expect: "-10 μFIL"}, + {fil: "-0.000001", expect: "-1 μFIL"}, + + {fil: "-0.0000001", expect: "-100 nFIL"}, + {fil: "-0.00000001", expect: "-10 nFIL"}, + {fil: "-0.000000001", expect: "-1 nFIL"}, + + {fil: "-0.0000000001", expect: "-100 pFIL"}, + {fil: "-0.00000000001", expect: "-10 pFIL"}, + {fil: "-0.000000000001", expect: "-1 pFIL"}, + + {fil: "-0.0000000000001", expect: "-100 fFIL"}, + {fil: "-0.00000000000001", expect: "-10 fFIL"}, + {fil: "-0.000000000000001", expect: "-1 fFIL"}, + + {fil: 
"-0.0000000000000001", expect: "-100 aFIL"}, + {fil: "-0.00000000000000001", expect: "-10 aFIL"}, + {fil: "-0.000000000000000001", expect: "-1 aFIL"}, + + {fil: "-0.0000012", expect: "-1.2 μFIL"}, + {fil: "-0.00000123", expect: "-1.23 μFIL"}, + {fil: "-0.000001234", expect: "-1.234 μFIL"}, + {fil: "-0.0000012344", expect: "-1.234 μFIL"}, + {fil: "-0.00000123444", expect: "-1.234 μFIL"}, + + {fil: "-0.0002212", expect: "-221.2 μFIL"}, + {fil: "-0.00022123", expect: "-221.23 μFIL"}, + {fil: "-0.000221234", expect: "-221.234 μFIL"}, + {fil: "-0.0002212344", expect: "-221.234 μFIL"}, + {fil: "-0.00022123444", expect: "-221.234 μFIL"}, + } { + s := s + t.Run(s.fil, func(t *testing.T) { + f, err := ParseFIL(s.fil) + require.NoError(t, err) + require.Equal(t, s.expect, f.Short()) + }) + } +} diff --git a/chain/types/message.go b/chain/types/message.go index c53ecc7c160..4f6bb78224b 100644 --- a/chain/types/message.go +++ b/chain/types/message.go @@ -5,6 +5,8 @@ import ( "encoding/json" "fmt" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/build" @@ -144,7 +146,7 @@ func (m *Message) EqualCall(o *Message) bool { return (&m1).Equals(&m2) } -func (m *Message) ValidForBlockInclusion(minGas int64) error { +func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version) error { if m.Version != 0 { return xerrors.New("'Version' unsupported") } @@ -153,6 +155,10 @@ func (m *Message) ValidForBlockInclusion(minGas int64) error { return xerrors.New("'To' address cannot be empty") } + if m.To == build.ZeroAddress && version >= network.Version7 { + return xerrors.New("invalid 'To' address") + } + if m.From == address.Undef { return xerrors.New("'From' address cannot be empty") } diff --git a/chain/types/state.go b/chain/types/state.go index a96883604be..c8f8f1cd984 100644 --- a/chain/types/state.go +++ b/chain/types/state.go @@ 
-9,8 +9,14 @@ type StateTreeVersion uint64 const ( // StateTreeVersion0 corresponds to actors < v2. StateTreeVersion0 StateTreeVersion = iota - // StateTreeVersion1 corresponds to actors >= v2. + // StateTreeVersion1 corresponds to actors v2 StateTreeVersion1 + // StateTreeVersion2 corresponds to actors v3. + StateTreeVersion2 + // StateTreeVersion3 corresponds to actors v4. + StateTreeVersion3 + // StateTreeVersion4 corresponds to actors v5. + StateTreeVersion4 ) type StateRoot struct { diff --git a/chain/types/tipset_key.go b/chain/types/tipset_key.go index e5bc7750de3..9f98877964b 100644 --- a/chain/types/tipset_key.go +++ b/chain/types/tipset_key.go @@ -47,7 +47,7 @@ func NewTipSetKey(cids ...cid.Cid) TipSetKey { func TipSetKeyFromBytes(encoded []byte) (TipSetKey, error) { _, err := decodeKey(encoded) if err != nil { - return TipSetKey{}, err + return EmptyTSK, err } return TipSetKey{string(encoded)}, nil } diff --git a/chain/types/tipset_key_test.go b/chain/types/tipset_key_test.go index 7b3ce439db9..73c1ca9df43 100644 --- a/chain/types/tipset_key_test.go +++ b/chain/types/tipset_key_test.go @@ -19,7 +19,7 @@ func TestTipSetKey(t *testing.T) { fmt.Println(len(c1.Bytes())) t.Run("zero value", func(t *testing.T) { - assert.Equal(t, TipSetKey{}, NewTipSetKey()) + assert.Equal(t, EmptyTSK, NewTipSetKey()) }) t.Run("CID extraction", func(t *testing.T) { diff --git a/chain/vm/burn.go b/chain/vm/burn.go index 9f9b95755b7..a214d198b66 100644 --- a/chain/vm/burn.go +++ b/chain/vm/burn.go @@ -67,7 +67,7 @@ func ComputeGasOverestimationBurn(gasUsed, gasLimit int64) (int64, int64) { return gasLimit - gasUsed - gasToBurn.Int64(), gasToBurn.Int64() } -func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi.TokenAmount) GasOutputs { +func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi.TokenAmount, chargeNetworkFee bool) GasOutputs { gasUsedBig := big.NewInt(gasUsed) out := ZeroGasOutputs() @@ -76,7 +76,12 @@ func 
ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi. baseFeeToPay = feeCap out.MinerPenalty = big.Mul(big.Sub(baseFee, feeCap), gasUsedBig) } - out.BaseFeeBurn = big.Mul(baseFeeToPay, gasUsedBig) + + // If chargeNetworkFee is disabled, just skip computing the BaseFeeBurn. However, + // we charge all the other fees regardless. + if chargeNetworkFee { + out.BaseFeeBurn = big.Mul(baseFeeToPay, gasUsedBig) + } minerTip := gasPremium if big.Cmp(big.Add(baseFeeToPay, minerTip), feeCap) > 0 { diff --git a/chain/vm/burn_test.go b/chain/vm/burn_test.go index 58e1336057b..e4fc69affd6 100644 --- a/chain/vm/burn_test.go +++ b/chain/vm/burn_test.go @@ -63,7 +63,7 @@ func TestGasOutputs(t *testing.T) { for _, test := range tests { test := test t.Run(fmt.Sprintf("%v", test), func(t *testing.T) { - output := ComputeGasOutputs(test.used, test.limit, baseFee, types.NewInt(test.feeCap), types.NewInt(test.premium)) + output := ComputeGasOutputs(test.used, test.limit, baseFee, types.NewInt(test.feeCap), types.NewInt(test.premium), true) i2s := func(i uint64) string { return fmt.Sprintf("%d", i) } diff --git a/chain/vm/gas.go b/chain/vm/gas.go index cbe5bab13e1..c860ce9a0c2 100644 --- a/chain/vm/gas.go +++ b/chain/vm/gas.go @@ -3,21 +3,17 @@ package vm import ( "fmt" - vmr2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/go-address" addr "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" + vmr5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-cid" ) -const ( - GasStorageMulti = 1000 - GasComputeMulti = 1 -) - type GasCharge struct { Name string Extra interface{} @@ -30,7 +26,7 @@ type GasCharge struct { } func 
(g GasCharge) Total() int64 { - return g.ComputeGas*GasComputeMulti + g.StorageGas*GasStorageMulti + return g.ComputeGas + g.StorageGas } func (g GasCharge) WithVirtual(compute, storage int64) GasCharge { out := g @@ -78,13 +74,17 @@ type Pricelist interface { OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error) OnHashing(dataSize int) GasCharge OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge - OnVerifySeal(info proof2.SealVerifyInfo) GasCharge - OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge + OnVerifySeal(info proof5.SealVerifyInfo) GasCharge + OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge + OnVerifyPost(info proof5.WindowPoStVerifyInfo) GasCharge OnVerifyConsensusFault() GasCharge } var prices = map[abi.ChainEpoch]Pricelist{ abi.ChainEpoch(0): &pricelistV0{ + computeGasMulti: 1, + storageGasMulti: 1000, + onChainMessageComputeBase: 38863, onChainMessageStorageBase: 36, onChainMessageStoragePerByte: 1, @@ -112,6 +112,7 @@ var prices = map[abi.ChainEpoch]Pricelist{ hashingBase: 31355, computeUnsealedSectorCidBase: 98647, verifySealBase: 2000, // TODO gas , it VerifySeal syscall is not used + verifyAggregateSealBase: 0, verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{ abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: { flat: 123861062, @@ -126,6 +127,83 @@ var prices = map[abi.ChainEpoch]Pricelist{ scale: 85639, }, }, + verifyPostDiscount: true, + verifyConsensusFault: 495422, + }, + abi.ChainEpoch(build.UpgradeCalicoHeight): &pricelistV0{ + computeGasMulti: 1, + storageGasMulti: 1300, + + onChainMessageComputeBase: 38863, + onChainMessageStorageBase: 36, + onChainMessageStoragePerByte: 1, + + onChainReturnValuePerByte: 1, + + sendBase: 29233, + sendTransferFunds: 27500, + sendTransferOnlyPremium: 159672, + sendInvokeMethod: -5377, + + ipldGetBase: 114617, + ipldPutBase: 353640, + ipldPutPerByte: 1, + + createActorCompute: 1108454, + 
createActorStorage: 36 + 40, + deleteActor: -(36 + 40), // -createActorStorage + + verifySignature: map[crypto.SigType]int64{ + crypto.SigTypeBLS: 16598605, + crypto.SigTypeSecp256k1: 1637292, + }, + + hashingBase: 31355, + computeUnsealedSectorCidBase: 98647, + verifySealBase: 2000, // TODO gas, it VerifySeal syscall is not used + + verifyAggregateSealPer: map[abi.RegisteredSealProof]int64{ + abi.RegisteredSealProof_StackedDrg32GiBV1_1: 449900, + abi.RegisteredSealProof_StackedDrg64GiBV1_1: 359272, + }, + verifyAggregateSealSteps: map[abi.RegisteredSealProof]stepCost{ + abi.RegisteredSealProof_StackedDrg32GiBV1_1: { + {4, 103994170}, + {7, 112356810}, + {13, 122912610}, + {26, 137559930}, + {52, 162039100}, + {103, 210960780}, + {205, 318351180}, + {410, 528274980}, + }, + abi.RegisteredSealProof_StackedDrg64GiBV1_1: { + {4, 102581240}, + {7, 110803030}, + {13, 120803700}, + {26, 134642130}, + {52, 157357890}, + {103, 203017690}, + {205, 304253590}, + {410, 509880640}, + }, + }, + + verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{ + abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: { + flat: 117680921, + scale: 43780, + }, + abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: { + flat: 117680921, + scale: 43780, + }, + abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: { + flat: 117680921, + scale: 43780, + }, + }, + verifyPostDiscount: false, verifyConsensusFault: 495422, }, } @@ -150,7 +228,7 @@ func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist { } type pricedSyscalls struct { - under vmr2.Syscalls + under vmr5.Syscalls pl Pricelist chargeGas func(GasCharge) } @@ -184,7 +262,7 @@ func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, p } // Verifies a sector seal proof. 
-func (ps pricedSyscalls) VerifySeal(vi proof2.SealVerifyInfo) error { +func (ps pricedSyscalls) VerifySeal(vi proof5.SealVerifyInfo) error { ps.chargeGas(ps.pl.OnVerifySeal(vi)) defer ps.chargeGas(gasOnActorExec) @@ -192,7 +270,7 @@ func (ps pricedSyscalls) VerifySeal(vi proof2.SealVerifyInfo) error { } // Verifies a proof of spacetime. -func (ps pricedSyscalls) VerifyPoSt(vi proof2.WindowPoStVerifyInfo) error { +func (ps pricedSyscalls) VerifyPoSt(vi proof5.WindowPoStVerifyInfo) error { ps.chargeGas(ps.pl.OnVerifyPost(vi)) defer ps.chargeGas(gasOnActorExec) @@ -209,14 +287,14 @@ func (ps pricedSyscalls) VerifyPoSt(vi proof2.WindowPoStVerifyInfo) error { // the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the // blocks in the parent of h2 (i.e. h2's grandparent). // Returns nil and an error if the headers don't prove a fault. -func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr2.ConsensusFault, error) { +func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr5.ConsensusFault, error) { ps.chargeGas(ps.pl.OnVerifyConsensusFault()) defer ps.chargeGas(gasOnActorExec) return ps.under.VerifyConsensusFault(h1, h2, extra) } -func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof2.SealVerifyInfo) (map[address.Address][]bool, error) { +func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof5.SealVerifyInfo) (map[address.Address][]bool, error) { count := int64(0) for _, svis := range inp { count += int64(len(svis)) @@ -229,3 +307,10 @@ func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof2.SealV return ps.under.BatchVerifySeals(inp) } + +func (ps pricedSyscalls) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) error { + ps.chargeGas(ps.pl.OnVerifyAggregateSeals(aggregate)) + defer ps.chargeGas(gasOnActorExec) + + return ps.under.VerifyAggregateSeals(aggregate) +} diff --git 
a/chain/vm/gas_v0.go b/chain/vm/gas_v0.go index 7a7fb364d18..13c5fdd86ad 100644 --- a/chain/vm/gas_v0.go +++ b/chain/vm/gas_v0.go @@ -4,6 +4,7 @@ import ( "fmt" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -17,7 +18,31 @@ type scalingCost struct { scale int64 } +type stepCost []step + +type step struct { + start int64 + cost int64 +} + +func (sc stepCost) Lookup(x int64) int64 { + i := 0 + for ; i < len(sc); i++ { + if sc[i].start > x { + break + } + } + i-- // look at previous item + if i < 0 { + return 0 + } + + return sc[i].cost +} + type pricelistV0 struct { + computeGasMulti int64 + storageGasMulti int64 /////////////////////////////////////////////////////////////////////////// // System operations /////////////////////////////////////////////////////////////////////////// @@ -89,8 +114,13 @@ type pricelistV0 struct { computeUnsealedSectorCidBase int64 verifySealBase int64 - verifyPostLookup map[abi.RegisteredPoStProof]scalingCost - verifyConsensusFault int64 + verifyAggregateSealBase int64 + verifyAggregateSealPer map[abi.RegisteredSealProof]int64 + verifyAggregateSealSteps map[abi.RegisteredSealProof]stepCost + + verifyPostLookup map[abi.RegisteredPoStProof]scalingCost + verifyPostDiscount bool + verifyConsensusFault int64 } var _ Pricelist = (*pricelistV0)(nil) @@ -98,12 +128,12 @@ var _ Pricelist = (*pricelistV0)(nil) // OnChainMessage returns the gas used for storing a message of a given size in the chain. 
func (pl *pricelistV0) OnChainMessage(msgSize int) GasCharge { return newGasCharge("OnChainMessage", pl.onChainMessageComputeBase, - pl.onChainMessageStorageBase+pl.onChainMessageStoragePerByte*int64(msgSize)) + (pl.onChainMessageStorageBase+pl.onChainMessageStoragePerByte*int64(msgSize))*pl.storageGasMulti) } // OnChainReturnValue returns the gas used for storing the response of a message in the chain. func (pl *pricelistV0) OnChainReturnValue(dataSize int) GasCharge { - return newGasCharge("OnChainReturnValue", 0, int64(dataSize)*pl.onChainReturnValuePerByte) + return newGasCharge("OnChainReturnValue", 0, int64(dataSize)*pl.onChainReturnValuePerByte*pl.storageGasMulti) } // OnMethodInvocation returns the gas used when invoking a method. @@ -130,23 +160,23 @@ func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.M // OnIpldGet returns the gas used for storing an object func (pl *pricelistV0) OnIpldGet() GasCharge { - return newGasCharge("OnIpldGet", pl.ipldGetBase, 0) + return newGasCharge("OnIpldGet", pl.ipldGetBase, 0).WithVirtual(114617, 0) } // OnIpldPut returns the gas used for storing an object func (pl *pricelistV0) OnIpldPut(dataSize int) GasCharge { - return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte). - WithExtra(dataSize) + return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte*pl.storageGasMulti). 
+ WithExtra(dataSize).WithVirtual(400000, int64(dataSize)*1300) } // OnCreateActor returns the gas used for creating an actor func (pl *pricelistV0) OnCreateActor() GasCharge { - return newGasCharge("OnCreateActor", pl.createActorCompute, pl.createActorStorage) + return newGasCharge("OnCreateActor", pl.createActorCompute, pl.createActorStorage*pl.storageGasMulti) } // OnDeleteActor returns the gas used for deleting an actor func (pl *pricelistV0) OnDeleteActor() GasCharge { - return newGasCharge("OnDeleteActor", 0, pl.deleteActor) + return newGasCharge("OnDeleteActor", 0, pl.deleteActor*pl.storageGasMulti) } // OnVerifySignature @@ -182,6 +212,22 @@ func (pl *pricelistV0) OnVerifySeal(info proof2.SealVerifyInfo) GasCharge { return newGasCharge("OnVerifySeal", pl.verifySealBase, 0) } +// OnVerifyAggregateSeals +func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge { + proofType := aggregate.SealProof + perProof, ok := pl.verifyAggregateSealPer[proofType] + if !ok { + perProof = pl.verifyAggregateSealPer[abi.RegisteredSealProof_StackedDrg32GiBV1_1] + } + + step, ok := pl.verifyAggregateSealSteps[proofType] + if !ok { + step = pl.verifyAggregateSealSteps[abi.RegisteredSealProof_StackedDrg32GiBV1_1] + } + num := int64(len(aggregate.Infos)) + return newGasCharge("OnVerifyAggregateSeals", perProof*num+step.Lookup(num), 0) +} + // OnVerifyPost func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge { sectorSize := "unknown" @@ -201,9 +247,12 @@ func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge } gasUsed := cost.flat + int64(len(info.ChallengedSectors))*cost.scale - gasUsed /= 2 // XXX: this is an artificial discount + if pl.verifyPostDiscount { + gasUsed /= 2 // XXX: this is an artificial discount + } return newGasCharge("OnVerifyPost", gasUsed, 0). + WithVirtual(117680921+43780*int64(len(info.ChallengedSectors)), 0). 
WithExtra(map[string]interface{}{ "type": sectorSize, "size": len(info.ChallengedSectors), diff --git a/chain/vm/gas_v0_test.go b/chain/vm/gas_v0_test.go new file mode 100644 index 00000000000..447e4f70c5c --- /dev/null +++ b/chain/vm/gas_v0_test.go @@ -0,0 +1,32 @@ +package vm + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStepGasCost(t *testing.T) { + s := stepCost{ + {4, 103994170}, + {7, 112356810}, + {13, 122912610}, + {26, 137559930}, + {52, 162039100}, + {103, 210960780}, + {205, 318351180}, + {410, 528274980}, + } + + assert.EqualValues(t, 0, s.Lookup(0)) + assert.EqualValues(t, 0, s.Lookup(3)) + assert.EqualValues(t, 103994170, s.Lookup(4)) + assert.EqualValues(t, 103994170, s.Lookup(6)) + assert.EqualValues(t, 112356810, s.Lookup(7)) + assert.EqualValues(t, 210960780, s.Lookup(103)) + assert.EqualValues(t, 210960780, s.Lookup(204)) + assert.EqualValues(t, 318351180, s.Lookup(205)) + assert.EqualValues(t, 318351180, s.Lookup(409)) + assert.EqualValues(t, 528274980, s.Lookup(410)) + assert.EqualValues(t, 528274980, s.Lookup(10000000000)) +} diff --git a/chain/vm/invoker.go b/chain/vm/invoker.go index 661e31178ee..e4b15403187 100644 --- a/chain/vm/invoker.go +++ b/chain/vm/invoker.go @@ -6,6 +6,8 @@ import ( "fmt" "reflect" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/ipfs/go-cid" @@ -14,7 +16,10 @@ import ( exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported" exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported" - vmr "github.com/filecoin-project/specs-actors/v2/actors/runtime" + exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported" + exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported" + exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported" + vmr "github.com/filecoin-project/specs-actors/v5/actors/runtime" 
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/exitcode" @@ -34,9 +39,9 @@ type ActorPredicate func(vmr.Runtime, rtt.VMActor) error func ActorsVersionPredicate(ver actors.Version) ActorPredicate { return func(rt vmr.Runtime, v rtt.VMActor) error { - nver := actors.VersionForNetwork(rt.NetworkVersion()) - if nver != ver { - return xerrors.Errorf("actor %s is a version %d actor; chain only supports actor version %d at height %d", v.Code(), ver, nver, rt.CurrEpoch()) + aver := actors.VersionForNetwork(rt.NetworkVersion()) + if aver != ver { + return xerrors.Errorf("actor %s is a version %d actor; chain only supports actor version %d at height %d and nver %d", v.Code(), ver, aver, rt.CurrEpoch(), rt.NetworkVersion()) } return nil } @@ -60,6 +65,9 @@ func NewActorRegistry() *ActorRegistry { // add builtInCode using: register(cid, singleton) inv.Register(ActorsVersionPredicate(actors.Version0), exported0.BuiltinActors()...) inv.Register(ActorsVersionPredicate(actors.Version2), exported2.BuiltinActors()...) + inv.Register(ActorsVersionPredicate(actors.Version3), exported3.BuiltinActors()...) + inv.Register(ActorsVersionPredicate(actors.Version4), exported4.BuiltinActors()...) + inv.Register(ActorsVersionPredicate(actors.Version5), exported5.BuiltinActors()...) 
return inv } @@ -147,7 +155,7 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) { "vmr.Runtime, ") } if !runtimeType.Implements(t.In(0)) { - return nil, newErr("first arguemnt should be vmr.Runtime") + return nil, newErr("first argument should be vmr.Runtime") } if t.In(1).Kind() != reflect.Ptr { return nil, newErr("second argument should be of kind reflect.Ptr") @@ -173,9 +181,14 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) { paramT := meth.Type().In(1).Elem() param := reflect.New(paramT) + rt := in[0].Interface().(*Runtime) inBytes := in[1].Interface().([]byte) if err := DecodeParams(inBytes, param.Interface()); err != nil { - aerr := aerrors.Absorb(err, 1, "failed to decode parameters") + ec := exitcode.ErrSerialization + if rt.NetworkVersion() < network.Version7 { + ec = 1 + } + aerr := aerrors.Absorb(err, ec, "failed to decode parameters") return []reflect.Value{ reflect.ValueOf([]byte{}), // Below is a hack, fixed in Go 1.13 @@ -183,7 +196,6 @@ func (*ActorRegistry) transform(instance invokee) (nativeCode, error) { reflect.ValueOf(&aerr).Elem(), } } - rt := in[0].Interface().(*Runtime) rval, aerror := rt.shimCall(func() interface{} { ret := meth.Call([]reflect.Value{ reflect.ValueOf(rt), diff --git a/chain/vm/invoker_test.go b/chain/vm/invoker_test.go index bce385b02ba..6822e2371f5 100644 --- a/chain/vm/invoker_test.go +++ b/chain/vm/invoker_test.go @@ -1,10 +1,13 @@ package vm import ( + "context" "fmt" "io" "testing" + "github.com/filecoin-project/go-state-types/network" + cbor "github.com/ipfs/go-ipld-cbor" "github.com/stretchr/testify/assert" cbg "github.com/whyrusleeping/cbor-gen" @@ -105,10 +108,27 @@ func TestInvokerBasic(t *testing.T) { } } - _, aerr := code[1](&Runtime{}, []byte{99}) - if aerrors.IsFatal(aerr) { - t.Fatal("err should not be fatal") + { + _, aerr := code[1](&Runtime{ + vm: &VM{ntwkVersion: func(ctx context.Context, epoch abi.ChainEpoch) network.Version { + return network.Version0 + 
}}, + }, []byte{99}) + if aerrors.IsFatal(aerr) { + t.Fatal("err should not be fatal") + } + assert.Equal(t, exitcode.ExitCode(1), aerrors.RetCode(aerr), "return code should be 1") } - assert.Equal(t, exitcode.ExitCode(1), aerrors.RetCode(aerr), "return code should be 1") + { + _, aerr := code[1](&Runtime{ + vm: &VM{ntwkVersion: func(ctx context.Context, epoch abi.ChainEpoch) network.Version { + return network.Version7 + }}, + }, []byte{99}) + if aerrors.IsFatal(aerr) { + t.Fatal("err should not be fatal") + } + assert.Equal(t, exitcode.ErrSerialization, aerrors.RetCode(aerr), "return code should be %s", 1) + } } diff --git a/chain/vm/mkactor.go b/chain/vm/mkactor.go index 885d3c0db2b..669c1450f1a 100644 --- a/chain/vm/mkactor.go +++ b/chain/vm/mkactor.go @@ -3,6 +3,10 @@ package vm import ( "context" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/lotus/chain/actors" @@ -12,6 +16,9 @@ import ( builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/actors/aerrors" @@ -38,6 +45,10 @@ func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, add return nil, address.Undef, err } + if addr == build.ZeroAddress && rt.NetworkVersion() >= network.Version10 { + return nil, address.Undef, aerrors.New(exitcode.ErrIllegalArgument, "cannot create the zero bls actor") + } + addrID, err := rt.state.RegisterNewAddress(addr) if err != nil { return nil, address.Undef, aerrors.Escalate(err, "registering 
actor address") @@ -91,6 +102,12 @@ func newAccountActor(ver actors.Version) *types.Actor { code = builtin0.AccountActorCodeID case actors.Version2: code = builtin2.AccountActorCodeID + case actors.Version3: + code = builtin3.AccountActorCodeID + case actors.Version4: + code = builtin4.AccountActorCodeID + case actors.Version5: + code = builtin5.AccountActorCodeID default: panic("unsupported actors version") } diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go index 6e36e8e8739..2845c7696ea 100644 --- a/chain/vm/runtime.go +++ b/chain/vm/runtime.go @@ -5,6 +5,7 @@ import ( "context" "encoding/binary" "fmt" + gruntime "runtime" "time" "github.com/filecoin-project/go-address" @@ -15,7 +16,7 @@ import ( "github.com/filecoin-project/go-state-types/network" rtt "github.com/filecoin-project/go-state-types/rt" rt0 "github.com/filecoin-project/specs-actors/actors/runtime" - rt2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" "github.com/ipfs/go-cid" ipldcbor "github.com/ipfs/go-ipld-cbor" "go.opencensus.io/trace" @@ -53,8 +54,8 @@ func (m *Message) ValueReceived() abi.TokenAmount { var EnableGasTracing = false type Runtime struct { - rt2.Message - rt2.Syscalls + rt5.Message + rt5.Syscalls ctx context.Context @@ -80,6 +81,10 @@ type Runtime struct { lastGasCharge *types.GasTrace } +func (rt *Runtime) BaseFee() abi.TokenAmount { + return rt.vm.baseFee +} + func (rt *Runtime) NetworkVersion() network.Version { return rt.vm.GetNtwkVersion(rt.ctx, rt.CurrEpoch()) } @@ -135,7 +140,7 @@ func (rt *Runtime) StorePut(x cbor.Marshaler) cid.Cid { } var _ rt0.Runtime = (*Runtime)(nil) -var _ rt2.Runtime = (*Runtime)(nil) +var _ rt5.Runtime = (*Runtime)(nil) func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) { defer func() { @@ -207,17 +212,31 @@ func (rt *Runtime) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool) } func (rt *Runtime) 
GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { - res, err := rt.vm.rand.GetChainRandomness(rt.ctx, personalization, randEpoch, entropy) + var err error + var res []byte + if randEpoch > build.UpgradeHyperdriveHeight { + res, err = rt.vm.rand.GetChainRandomnessLookingForward(rt.ctx, personalization, randEpoch, entropy) + } else { + res, err = rt.vm.rand.GetChainRandomnessLookingBack(rt.ctx, personalization, randEpoch, entropy) + } + if err != nil { - panic(aerrors.Fatalf("could not get randomness: %s", err)) + panic(aerrors.Fatalf("could not get ticket randomness: %s", err)) } return res } func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { - res, err := rt.vm.rand.GetBeaconRandomness(rt.ctx, personalization, randEpoch, entropy) + var err error + var res []byte + if randEpoch > build.UpgradeHyperdriveHeight { + res, err = rt.vm.rand.GetBeaconRandomnessLookingForward(rt.ctx, personalization, randEpoch, entropy) + } else { + res, err = rt.vm.rand.GetBeaconRandomnessLookingBack(rt.ctx, personalization, randEpoch, entropy) + } + if err != nil { - panic(aerrors.Fatalf("could not get randomness: %s", err)) + panic(aerrors.Fatalf("could not get beacon randomness: %s", err)) } return res } @@ -244,20 +263,23 @@ func (rt *Runtime) NewActorAddress() address.Address { return addr } -func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) { +func (rt *Runtime) CreateActor(codeID cid.Cid, addr address.Address) { + if addr == address.Undef && rt.NetworkVersion() >= network.Version7 { + rt.Abortf(exitcode.SysErrorIllegalArgument, "CreateActor with Undef address") + } act, aerr := rt.vm.areg.Create(codeID, rt) if aerr != nil { rt.Abortf(aerr.RetCode(), aerr.Error()) } - _, err := rt.state.GetActor(address) + _, err := rt.state.GetActor(addr) if err == nil { 
rt.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists") } rt.chargeGas(rt.Pricelist().OnCreateActor()) - err = rt.state.SetActor(address, act) + err = rt.state.SetActor(addr, act) if err != nil { panic(aerrors.Fatalf("creating actor entry: %v", err)) } @@ -266,7 +288,7 @@ func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) { // DeleteActor deletes the executing actor from the state tree, transferring // any balance to beneficiary. -// Aborts if the beneficiary does not exist. +// Aborts if the beneficiary does not exist or is the calling actor. // May only be called by the actor itself. func (rt *Runtime) DeleteActor(beneficiary address.Address) { rt.chargeGas(rt.Pricelist().OnDeleteActor()) @@ -278,6 +300,19 @@ func (rt *Runtime) DeleteActor(beneficiary address.Address) { panic(aerrors.Fatalf("failed to get actor: %s", err)) } if !act.Balance.IsZero() { + // TODO: Should be safe to drop the version-check, + // since only the paych actor called this pre-version 7, but let's leave it for now + if rt.NetworkVersion() >= network.Version7 { + beneficiaryId, found := rt.ResolveAddress(beneficiary) + if !found { + rt.Abortf(exitcode.SysErrorIllegalArgument, "beneficiary doesn't exist") + } + + if beneficiaryId == rt.Receiver() { + rt.Abortf(exitcode.SysErrorIllegalArgument, "benefactor cannot be beneficiary") + } + } + // Transfer the executing actor's balance to the beneficiary if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance); err != nil { panic(aerrors.Fatalf("failed to transfer balance to beneficiary actor: %s", err)) @@ -518,7 +553,7 @@ func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError if EnableGasTracing { var callers [10]uintptr - cout := 0 //gruntime.Callers(2+skip, callers[:]) + cout := gruntime.Callers(2+skip, callers[:]) now := build.Clock.Now() if rt.lastGasCharge != nil { @@ -533,12 +568,19 @@ func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError 
ComputeGas: gas.ComputeGas, StorageGas: gas.StorageGas, - TotalVirtualGas: gas.VirtualCompute*GasComputeMulti + gas.VirtualStorage*GasStorageMulti, VirtualComputeGas: gas.VirtualCompute, VirtualStorageGas: gas.VirtualStorage, Callers: callers[:cout], } + if gasTrace.VirtualStorageGas == 0 { + gasTrace.VirtualStorageGas = gasTrace.StorageGas + } + if gasTrace.VirtualComputeGas == 0 { + gasTrace.VirtualComputeGas = gasTrace.ComputeGas + } + gasTrace.TotalVirtualGas = gasTrace.VirtualComputeGas + gasTrace.VirtualStorageGas + rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace) rt.lastGasChargeTime = now rt.lastGasCharge = &gasTrace @@ -546,9 +588,10 @@ func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError // overflow safe if rt.gasUsed > rt.gasAvailable-toUse { + gasUsed := rt.gasUsed rt.gasUsed = rt.gasAvailable - return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gas: used=%d, available=%d", - rt.gasUsed, rt.gasAvailable) + return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gas: used=%d, available=%d, use=%d", + gasUsed, rt.gasAvailable, toUse) } rt.gasUsed += toUse return nil diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go index d2f1f77d314..0cbefd1fd7f 100644 --- a/chain/vm/syscalls.go +++ b/chain/vm/syscalls.go @@ -7,25 +7,27 @@ import ( goruntime "runtime" "sync" - "github.com/filecoin-project/go-address" "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" "github.com/minio/blake2b-simd" mh "github.com/multiformats/go-multihash" "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" 
"github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/lib/sigs" - runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" - - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" ) func init() { @@ -34,13 +36,15 @@ func init() { // Actual type is defined in chain/types/vmcontext.go because the VMContext interface is there -type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime2.Syscalls +type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime5.Syscalls func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder { - return func(ctx context.Context, rt *Runtime) runtime2.Syscalls { + return func(ctx context.Context, rt *Runtime) runtime5.Syscalls { return &syscallShim{ - ctx: ctx, + ctx: ctx, + epoch: rt.CurrEpoch(), + networkVersion: rt.NetworkVersion(), actor: rt.Receiver(), cstate: rt.state, @@ -55,11 +59,13 @@ func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder { type syscallShim struct { ctx context.Context - lbState LookbackStateGetter - actor address.Address - cstate *state.StateTree - cst cbor.IpldStore - verifier ffiwrapper.Verifier + epoch abi.ChainEpoch + networkVersion network.Version + lbState LookbackStateGetter + actor address.Address + cstate *state.StateTree + cst cbor.IpldStore + verifier ffiwrapper.Verifier } func (ss *syscallShim) ComputeUnsealedSectorCID(st abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { @@ -84,7 +90,7 @@ func (ss *syscallShim) HashBlake2b(data []byte) [32]byte { // Checks validity of the submitted consensus fault with the two block headers needed to prove the fault // and an optional 
extra one to check common ancestry (as needed). // Note that the blocks are ordered: the method requires a.Epoch() <= b.Epoch(). -func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.ConsensusFault, error) { +func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.ConsensusFault, error) { // Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions. // Whether or not it could ever have been accepted in a chain is not checked/does not matter here. // for that reason when checking block parent relationships, rather than instantiating a Tipset to do so @@ -102,11 +108,18 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse return nil, xerrors.Errorf("cannot decode second block header: %f", decodeErr) } + // workaround chain halt + if build.IsNearUpgrade(blockA.Height, build.UpgradeOrangeHeight) { + return nil, xerrors.Errorf("consensus reporting disabled around Upgrade Orange") + } + if build.IsNearUpgrade(blockB.Height, build.UpgradeOrangeHeight) { + return nil, xerrors.Errorf("consensus reporting disabled around Upgrade Orange") + } + // are blocks the same? if blockA.Cid().Equals(blockB.Cid()) { return nil, fmt.Errorf("no consensus fault: submitted blocks are the same") } - // (1) check conditions necessary to any consensus fault // were blocks mined by same miner? 
@@ -120,14 +133,14 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse } // (2) check for the consensus faults themselves - var consensusFault *runtime2.ConsensusFault + var consensusFault *runtime5.ConsensusFault // (a) double-fork mining fault if blockA.Height == blockB.Height { - consensusFault = &runtime2.ConsensusFault{ + consensusFault = &runtime5.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime2.ConsensusFaultDoubleForkMining, + Type: runtime5.ConsensusFaultDoubleForkMining, } } @@ -135,10 +148,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse // strictly speaking no need to compare heights based on double fork mining check above, // but at same height this would be a different fault. if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height { - consensusFault = &runtime2.ConsensusFault{ + consensusFault = &runtime5.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime2.ConsensusFaultTimeOffsetMining, + Type: runtime5.ConsensusFaultTimeOffsetMining, } } @@ -158,10 +171,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime2.Conse if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height && types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) { - consensusFault = &runtime2.ConsensusFault{ + consensusFault = &runtime5.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime2.ConsensusFaultParentGrinding, + Type: runtime5.ConsensusFaultParentGrinding, } } } @@ -202,6 +215,10 @@ func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error { } func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Address, error) { + if ss.networkVersion >= network.Version7 && height < ss.epoch-policy.ChainFinality { + return address.Undef, xerrors.Errorf("cannot get worker key 
(currEpoch %d, height %d)", ss.epoch, height) + } + lbState, err := ss.lbState(ss.ctx, height) if err != nil { return address.Undef, err @@ -226,7 +243,7 @@ func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Addre return ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker) } -func (ss *syscallShim) VerifyPoSt(proof proof2.WindowPoStVerifyInfo) error { +func (ss *syscallShim) VerifyPoSt(proof proof5.WindowPoStVerifyInfo) error { ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof) if err != nil { return err @@ -237,7 +254,7 @@ func (ss *syscallShim) VerifyPoSt(proof proof2.WindowPoStVerifyInfo) error { return nil } -func (ss *syscallShim) VerifySeal(info proof2.SealVerifyInfo) error { +func (ss *syscallShim) VerifySeal(info proof5.SealVerifyInfo) error { //_, span := trace.StartSpan(ctx, "ValidatePoRep") //defer span.End() @@ -250,7 +267,7 @@ func (ss *syscallShim) VerifySeal(info proof2.SealVerifyInfo) error { proof := info.Proof seed := []byte(info.InteractiveRandomness) - log.Debugf("Verif r:%x; d:%x; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof) + log.Debugf("Verif r:%s; d:%s; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof) //func(ctx context.Context, maddr address.Address, ssize abi.SectorSize, commD, commR, ticket, proof, seed []byte, sectorID abi.SectorNumber) ok, err := ss.verifier.VerifySeal(info) @@ -264,6 +281,18 @@ func (ss *syscallShim) VerifySeal(info proof2.SealVerifyInfo) error { return nil } +func (ss *syscallShim) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) error { + ok, err := ss.verifier.VerifyAggregateSeals(aggregate) + if err != nil { + return xerrors.Errorf("failed to verify aggregated PoRep: %w", err) + } + if !ok { + return fmt.Errorf("invalid aggregate proof") + } + + return nil +} + func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr 
address.Address, input []byte) error { // TODO: in genesis setup, we are currently faking signatures @@ -277,7 +306,7 @@ func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Addres var BatchSealVerifyParallelism = goruntime.NumCPU() -func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof2.SealVerifyInfo) (map[address.Address][]bool, error) { +func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof5.SealVerifyInfo) (map[address.Address][]bool, error) { out := make(map[address.Address][]bool) sema := make(chan struct{}, BatchSealVerifyParallelism) @@ -289,12 +318,12 @@ func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof2.SealVer for i, s := range seals { wg.Add(1) - go func(ma address.Address, ix int, svi proof2.SealVerifyInfo, res []bool) { + go func(ma address.Address, ix int, svi proof5.SealVerifyInfo, res []bool) { defer wg.Done() sema <- struct{}{} if err := ss.VerifySeal(svi); err != nil { - log.Warnw("seal verify in batch failed", "miner", ma, "index", ix, "err", err) + log.Warnw("seal verify in batch failed", "miner", ma, "sectorNumber", svi.SectorID.Number, "err", err) res[ix] = false } else { res[ix] = true diff --git a/chain/vm/vm.go b/chain/vm/vm.go index 8b7f78074a0..5a31187b7b9 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -4,11 +4,11 @@ import ( "bytes" "context" "fmt" - "reflect" "sync/atomic" "time" "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/metrics" block "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -16,6 +16,7 @@ import ( logging "github.com/ipfs/go-log/v2" mh "github.com/multiformats/go-multihash" cbg "github.com/whyrusleeping/cbor-gen" + "go.opencensus.io/stats" "go.opencensus.io/trace" "golang.org/x/xerrors" @@ -26,23 +27,24 @@ import ( "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/blockstore" 
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/aerrors" "github.com/filecoin-project/lotus/chain/actors/builtin/account" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/blockstore" - bstore "github.com/filecoin-project/lotus/lib/blockstore" - "github.com/filecoin-project/lotus/lib/bufbstore" ) const MaxCallDepth = 4096 -var log = logging.Logger("vm") -var actorLog = logging.Logger("actors") -var gasOnActorExec = newGasCharge("OnActorExec", 0, 0) +var ( + log = logging.Logger("vm") + actorLog = logging.Logger("actors") + gasOnActorExec = newGasCharge("OnActorExec", 0, 0) +) // stat counters var ( @@ -69,7 +71,10 @@ func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Ad return aast.PubkeyAddress() } -var _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil) +var ( + _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil) + _ blockstore.Viewer = (*gasChargingBlocks)(nil) +) type gasChargingBlocks struct { chargeGas func(GasCharge) @@ -77,6 +82,24 @@ type gasChargingBlocks struct { under cbor.IpldBlockstore } +func (bs *gasChargingBlocks) View(c cid.Cid, cb func([]byte) error) error { + if v, ok := bs.under.(blockstore.Viewer); ok { + bs.chargeGas(bs.pricelist.OnIpldGet()) + return v.View(c, func(b []byte) error { + // we have successfully retrieved the value; charge for it, even if the user-provided function fails. + bs.chargeGas(newGasCharge("OnIpldViewEnd", 0, 0).WithExtra(len(b))) + bs.chargeGas(gasOnActorExec) + return cb(b) + }) + } + // the underlying blockstore doesn't implement the viewer interface, fall back to normal Get behaviour. 
+ blk, err := bs.Get(c) + if err == nil && blk != nil { + return cb(blk.RawData()) + } + return err +} + func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) { bs.chargeGas(bs.pricelist.OnIpldGet()) blk, err := bs.under.Get(c) @@ -119,6 +142,10 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti } if parent != nil { + // TODO: The version check here should be unnecessary, but we can wait to take it out + if !parent.allowInternal && rt.NetworkVersion() >= network.Version7 { + rt.Abortf(exitcode.SysErrForbidden, "internal calls currently disabled") + } rt.gasUsed = parent.gasUsed rt.origin = parent.origin rt.originNonce = parent.originNonce @@ -130,10 +157,10 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti rt.Abortf(exitcode.SysErrForbidden, "message execution exceeds call depth") } - rt.cst = &cbor.BasicIpldStore{ - Blocks: &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks}, - Atlas: vm.cst.Atlas, - } + cbb := &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks} + cst := cbor.NewCborStore(cbb) + cst.Atlas = vm.cst.Atlas // associate the atlas. 
+ rt.cst = cst vmm := *msg resF, ok := rt.ResolveAddress(msg.From) @@ -168,15 +195,18 @@ func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message) *Runtim return vm.VM.makeRuntime(ctx, msg, nil) } -type CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) -type NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version -type LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error) +type ( + CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) + NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version + LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error) +) type VM struct { - cstate *state.StateTree + cstate *state.StateTree + // TODO: Is base actually used? Can we delete it? base cid.Cid cst *cbor.BasicIpldStore - buf *bufbstore.BufferedBS + buf *blockstore.BufferedBlockstore blockHeight abi.ChainEpoch areg *ActorRegistry rand Rand @@ -192,7 +222,7 @@ type VMOpts struct { StateBase cid.Cid Epoch abi.ChainEpoch Rand Rand - Bstore bstore.Blockstore + Bstore blockstore.Blockstore Syscalls SyscallBuilder CircSupplyCalc CircSupplyCalculator NtwkVersion NtwkVersionGetter // TODO: stebalien: In what cases do we actually need this? 
It seems like even when creating new networks we want to use the 'global'/build-default version getter @@ -201,7 +231,7 @@ type VMOpts struct { } func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { - buf := bufbstore.NewBufferedBstore(opts.Bstore) + buf := blockstore.NewBuffered(opts.Bstore) cst := cbor.NewCborStore(buf) state, err := state.LoadStateTree(cst, opts.StateBase) if err != nil { @@ -225,8 +255,10 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { } type Rand interface { - GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) - GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) } type ApplyRet struct { @@ -239,7 +271,6 @@ type ApplyRet struct { func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, gasCharge *GasCharge, start time.Time) ([]byte, aerrors.ActorError, *Runtime) { - defer atomic.AddUint64(&StatSends, 1) st := vm.cstate @@ -408,6 +439,8 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, }, GasCosts: &gasOutputs, Duration: time.Since(start), + ActorErr: aerrors.Newf(exitcode.SysErrOutOfGas, + "message gas limit does not cover on-chain gas costs"), }, nil } @@ -536,7 +569,13 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, if 
gasUsed < 0 { gasUsed = 0 } - gasOutputs := ComputeGasOutputs(gasUsed, msg.GasLimit, vm.baseFee, msg.GasFeeCap, msg.GasPremium) + + burn, err := vm.ShouldBurn(ctx, st, msg, errcode) + if err != nil { + return nil, xerrors.Errorf("deciding whether should burn failed: %w", err) + } + + gasOutputs := ComputeGasOutputs(gasUsed, msg.GasLimit, vm.baseFee, msg.GasFeeCap, msg.GasPremium, burn) if err := vm.transferFromGasHolder(builtin.BurntFundsActorAddr, gasHolder, gasOutputs.BaseFeeBurn); err != nil { @@ -574,6 +613,34 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, }, nil } +func (vm *VM) ShouldBurn(ctx context.Context, st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) { + if vm.ntwkVersion(ctx, vm.blockHeight) <= network.Version12 { + // Check to see if we should burn funds. We avoid burning on successful + // window post. This won't catch _indirect_ window post calls, but this + // is the best we can get for now. + if vm.blockHeight > build.UpgradeClausHeight && errcode == exitcode.Ok && msg.Method == miner.Methods.SubmitWindowedPoSt { + // Ok, we've checked the _method_, but we still need to check + // the target actor. It would be nice if we could just look at + // the trace, but I'm not sure if that's safe? + if toActor, err := st.GetActor(msg.To); err != nil { + // If the actor wasn't found, we probably deleted it or something. Move on. + if !xerrors.Is(err, types.ErrActorNotFound) { + // Otherwise, this should never fail and something is very wrong. + return false, xerrors.Errorf("failed to lookup target actor: %w", err) + } + } else if builtin.IsStorageMinerActor(toActor.Code) { + // Ok, this is a storage miner and we've processed a window post. Remove the burn. 
+ return false, nil + } + } + + return true, nil + } + + // Any "don't burn" rules from Network v13 onwards go here, for now we always return true + return true, nil +} + func (vm *VM) ActorBalance(addr address.Address) (types.BigInt, aerrors.ActorError) { act, err := vm.cstate.GetActor(addr) if err != nil { @@ -583,6 +650,8 @@ func (vm *VM) ActorBalance(addr address.Address) (types.BigInt, aerrors.ActorErr return act.Balance, nil } +type vmFlushKey struct{} + func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) { _, span := trace.StartSpan(ctx, "vm.Flush") defer span.End() @@ -595,42 +664,17 @@ func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) { return cid.Undef, xerrors.Errorf("flushing vm: %w", err) } - if err := Copy(ctx, from, to, root); err != nil { + if err := Copy(context.WithValue(ctx, vmFlushKey{}, true), from, to, root); err != nil { return cid.Undef, xerrors.Errorf("copying tree: %w", err) } return root, nil } -// MutateState usage: MutateState(ctx, idAddr, func(cst cbor.IpldStore, st *ActorStateType) error {...}) -func (vm *VM) MutateState(ctx context.Context, addr address.Address, fn interface{}) error { - act, err := vm.cstate.GetActor(addr) - if err != nil { - return xerrors.Errorf("actor not found: %w", err) - } - - st := reflect.New(reflect.TypeOf(fn).In(1).Elem()) - if err := vm.cst.Get(ctx, act.Head, st.Interface()); err != nil { - return xerrors.Errorf("read actor head: %w", err) - } - - out := reflect.ValueOf(fn).Call([]reflect.Value{reflect.ValueOf(vm.cst), st}) - if !out[0].IsNil() && out[0].Interface().(error) != nil { - return out[0].Interface().(error) - } - - head, err := vm.cst.Put(ctx, st.Interface()) - if err != nil { - return xerrors.Errorf("put new actor head: %w", err) - } - - act.Head = head - - if err := vm.cstate.SetActor(addr, act); err != nil { - return xerrors.Errorf("set actor: %w", err) - } - - return nil +// Get the buffered blockstore associated with the VM. 
This includes any temporary blocks produced +// during this VM's execution. +func (vm *VM) ActorStore(ctx context.Context) adt.Store { + return adt.WrapStore(ctx, vm.cst) } func linksForObj(blk block.Block, cb func(cid.Cid)) error { @@ -652,21 +696,48 @@ func linksForObj(blk block.Block, cb func(cid.Cid)) error { func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) error { ctx, span := trace.StartSpan(ctx, "vm.Copy") // nolint defer span.End() + start := time.Now() var numBlocks int var totalCopySize int - var batch []block.Block + const batchSize = 128 + const bufCount = 3 + freeBufs := make(chan []block.Block, bufCount) + toFlush := make(chan []block.Block, bufCount) + for i := 0; i < bufCount; i++ { + freeBufs <- make([]block.Block, 0, batchSize) + } + + errFlushChan := make(chan error) + + go func() { + for b := range toFlush { + if err := to.PutMany(b); err != nil { + close(freeBufs) + errFlushChan <- xerrors.Errorf("batch put in copy: %w", err) + return + } + freeBufs <- b[:0] + } + close(errFlushChan) + close(freeBufs) + }() + + batch := <-freeBufs batchCp := func(blk block.Block) error { numBlocks++ totalCopySize += len(blk.RawData()) batch = append(batch, blk) - if len(batch) > 100 { - if err := to.PutMany(batch); err != nil { - return xerrors.Errorf("batch put in copy: %w", err) + + if len(batch) >= batchSize { + toFlush <- batch + var ok bool + batch, ok = <-freeBufs + if !ok { + return <-errFlushChan } - batch = batch[:0] } return nil } @@ -676,15 +747,22 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err } if len(batch) > 0 { - if err := to.PutMany(batch); err != nil { - return xerrors.Errorf("batch put in copy: %w", err) - } + toFlush <- batch + } + close(toFlush) // close the toFlush triggering the loop to end + err := <-errFlushChan // get error out or get nil if it was closed + if err != nil { + return err } span.AddAttributes( trace.Int64Attribute("numBlocks", int64(numBlocks)), 
trace.Int64Attribute("copySize", int64(totalCopySize)), ) + if yes, ok := ctx.Value(vmFlushKey{}).(bool); yes && ok { + took := metrics.SinceInMilliseconds(start) + stats.Record(ctx, metrics.VMFlushCopyCount.M(int64(numBlocks)), metrics.VMFlushCopyDuration.M(took)) + } return nil } diff --git a/chain/wallet/ledger/ledger.go b/chain/wallet/ledger/ledger.go index 07f92e7ff77..eb16f646036 100644 --- a/chain/wallet/ledger/ledger.go +++ b/chain/wallet/ledger/ledger.go @@ -9,7 +9,7 @@ import ( "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ledgerfil "github.com/whyrusleeping/ledger-filecoin-go" "golang.org/x/xerrors" @@ -36,7 +36,7 @@ type LedgerKeyInfo struct { Path []uint32 } -var _ api.WalletAPI = (*LedgerWallet)(nil) +var _ api.Wallet = (*LedgerWallet)(nil) func (lw LedgerWallet) WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta api.MsgMeta) (*crypto.Signature, error) { ki, err := lw.getKeyInfo(signer) @@ -227,7 +227,7 @@ func (lw LedgerWallet) WalletNew(ctx context.Context, t types.KeyType) (address. 
return lw.importKey(lki) } -func (lw *LedgerWallet) Get() api.WalletAPI { +func (lw *LedgerWallet) Get() api.Wallet { if lw == nil { return nil } diff --git a/chain/wallet/multi.go b/chain/wallet/multi.go index 532ad217bf8..a88475c2e3e 100644 --- a/chain/wallet/multi.go +++ b/chain/wallet/multi.go @@ -4,6 +4,7 @@ import ( "context" "go.uber.org/fx" + "go.uber.org/multierr" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -24,13 +25,13 @@ type MultiWallet struct { } type getif interface { - api.WalletAPI + api.Wallet // workaround for the fact that iface(*struct(nil)) != nil - Get() api.WalletAPI + Get() api.Wallet } -func firstNonNil(wallets ...getif) api.WalletAPI { +func firstNonNil(wallets ...getif) api.Wallet { for _, w := range wallets { if w.Get() != nil { return w @@ -40,8 +41,8 @@ func firstNonNil(wallets ...getif) api.WalletAPI { return nil } -func nonNil(wallets ...getif) []api.WalletAPI { - var out []api.WalletAPI +func nonNil(wallets ...getif) []api.Wallet { + var out []api.Wallet for _, w := range wallets { if w.Get() == nil { continue @@ -53,21 +54,21 @@ func nonNil(wallets ...getif) []api.WalletAPI { return out } -func (m MultiWallet) find(ctx context.Context, address address.Address, wallets ...getif) (api.WalletAPI, error) { +func (m MultiWallet) find(ctx context.Context, address address.Address, wallets ...getif) (api.Wallet, error) { ws := nonNil(wallets...) 
+ var merr error + for _, w := range ws { have, err := w.WalletHas(ctx, address) - if err != nil { - return nil, err - } + merr = multierr.Append(merr, err) - if have { + if err == nil && have { return w, nil } } - return nil, nil + return nil, merr } func (m MultiWallet) WalletNew(ctx context.Context, keyType types.KeyType) (address.Address, error) { @@ -90,7 +91,7 @@ func (m MultiWallet) WalletHas(ctx context.Context, address address.Address) (bo } func (m MultiWallet) WalletList(ctx context.Context) ([]address.Address, error) { - var out []address.Address + out := make([]address.Address, 0) seen := map[address.Address]struct{}{} ws := nonNil(m.Remote, m.Ledger, m.Local) @@ -167,4 +168,4 @@ func (m MultiWallet) WalletDelete(ctx context.Context, address address.Address) } } -var _ api.WalletAPI = MultiWallet{} +var _ api.Wallet = MultiWallet{} diff --git a/chain/wallet/remotewallet/remote.go b/chain/wallet/remotewallet/remote.go index aa44271326f..d1734518e1b 100644 --- a/chain/wallet/remotewallet/remote.go +++ b/chain/wallet/remotewallet/remote.go @@ -13,19 +13,19 @@ import ( ) type RemoteWallet struct { - api.WalletAPI + api.Wallet } func SetupRemoteWallet(info string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (*RemoteWallet, error) { return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (*RemoteWallet, error) { ai := cliutil.ParseApiInfo(info) - url, err := ai.DialArgs() + url, err := ai.DialArgs("v0") if err != nil { return nil, err } - wapi, closer, err := client.NewWalletRPC(mctx, url, ai.AuthHeader()) + wapi, closer, err := client.NewWalletRPCV0(mctx, url, ai.AuthHeader()) if err != nil { return nil, xerrors.Errorf("creating jsonrpc client: %w", err) } @@ -41,7 +41,7 @@ func SetupRemoteWallet(info string) func(mctx helpers.MetricsCtx, lc fx.Lifecycl } } -func (w *RemoteWallet) Get() api.WalletAPI { +func (w *RemoteWallet) Get() api.Wallet { if w == nil { return nil } diff --git a/chain/wallet/wallet.go b/chain/wallet/wallet.go index 
33fa3135e3b..cbe78a9e8fa 100644 --- a/chain/wallet/wallet.go +++ b/chain/wallet/wallet.go @@ -6,18 +6,16 @@ import ( "strings" "sync" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/crypto" logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/api" - _ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures - _ "github.com/filecoin-project/lotus/lib/sigs/secp" // enable secp signatures - "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/sigs" + _ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures + _ "github.com/filecoin-project/lotus/lib/sigs/secp" // enable secp signatures ) var log = logging.Logger("wallet") @@ -270,7 +268,7 @@ func (w *LocalWallet) WalletHas(ctx context.Context, addr address.Address) (bool return k != nil, nil } -func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) error { +func (w *LocalWallet) walletDelete(ctx context.Context, addr address.Address) error { k, err := w.findKey(addr) if err != nil { @@ -308,7 +306,30 @@ func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) er return nil } -func (w *LocalWallet) Get() api.WalletAPI { +func (w *LocalWallet) deleteDefault() { + w.lk.Lock() + defer w.lk.Unlock() + if err := w.keystore.Delete(KDefault); err != nil { + if !xerrors.Is(err, types.ErrKeyInfoNotFound) { + log.Warnf("failed to unregister current default key: %s", err) + } + } +} + +func (w *LocalWallet) WalletDelete(ctx context.Context, addr address.Address) error { + if err := w.walletDelete(ctx, addr); err != nil { + return xerrors.Errorf("wallet delete: %w", err) + } + + if def, err := w.GetDefault(); err == nil { + if def == addr { + w.deleteDefault() + } + } + return nil +} + +func (w *LocalWallet) Get() api.Wallet { if w == nil { return nil } @@ -316,7 +337,7 @@ func (w *LocalWallet) 
Get() api.WalletAPI { return w } -var _ api.WalletAPI = &LocalWallet{} +var _ api.Wallet = &LocalWallet{} func swapMainnetForTestnetPrefix(addr string) (string, error) { aChars := []rune(addr) diff --git a/cli/auth.go b/cli/auth.go index ba20b2bccf6..20b9bb39428 100644 --- a/cli/auth.go +++ b/cli/auth.go @@ -8,20 +8,21 @@ import ( "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/lotus/api/apistruct" + "github.com/filecoin-project/lotus/api" + cliutil "github.com/filecoin-project/lotus/cli/util" "github.com/filecoin-project/lotus/node/repo" ) -var authCmd = &cli.Command{ +var AuthCmd = &cli.Command{ Name: "auth", Usage: "Manage RPC permissions", Subcommands: []*cli.Command{ - authCreateAdminToken, - authApiInfoToken, + AuthCreateAdminToken, + AuthApiInfoToken, }, } -var authCreateAdminToken = &cli.Command{ +var AuthCreateAdminToken = &cli.Command{ Name: "create-token", Usage: "Create token", Flags: []cli.Flag{ @@ -46,18 +47,18 @@ var authCreateAdminToken = &cli.Command{ perm := cctx.String("perm") idx := 0 - for i, p := range apistruct.AllPermissions { + for i, p := range api.AllPermissions { if auth.Permission(perm) == p { idx = i + 1 } } if idx == 0 { - return fmt.Errorf("--perm flag has to be one of: %s", apistruct.AllPermissions) + return fmt.Errorf("--perm flag has to be one of: %s", api.AllPermissions) } // slice on [:idx] so for example: 'sign' gives you [read, write, sign] - token, err := napi.AuthNew(ctx, apistruct.AllPermissions[:idx]) + token, err := napi.AuthNew(ctx, api.AllPermissions[:idx]) if err != nil { return err } @@ -69,7 +70,7 @@ var authCreateAdminToken = &cli.Command{ }, } -var authApiInfoToken = &cli.Command{ +var AuthApiInfoToken = &cli.Command{ Name: "api-info", Usage: "Get token with API info required to connect to this node", Flags: []cli.Flag{ @@ -89,23 +90,23 @@ var authApiInfoToken = &cli.Command{ ctx := ReqContext(cctx) if !cctx.IsSet("perm") { - return xerrors.New("--perm flag not set") + return 
xerrors.New("--perm flag not set, use with one of: read, write, sign, admin") } perm := cctx.String("perm") idx := 0 - for i, p := range apistruct.AllPermissions { + for i, p := range api.AllPermissions { if auth.Permission(perm) == p { idx = i + 1 } } if idx == 0 { - return fmt.Errorf("--perm flag has to be one of: %s", apistruct.AllPermissions) + return fmt.Errorf("--perm flag has to be one of: %s", api.AllPermissions) } // slice on [:idx] so for example: 'sign' gives you [read, write, sign] - token, err := napi.AuthNew(ctx, apistruct.AllPermissions[:idx]) + token, err := napi.AuthNew(ctx, api.AllPermissions[:idx]) if err != nil { return err } @@ -127,7 +128,7 @@ var authApiInfoToken = &cli.Command{ // TODO: Log in audit log when it is implemented - fmt.Printf("%s=%s:%s\n", envForRepo(t), string(token), ainfo.Addr) + fmt.Printf("%s=%s:%s\n", cliutil.EnvForRepo(t), string(token), ainfo.Addr) return nil }, } diff --git a/cli/backup.go b/cli/backup.go index c748e47c438..856e098dd36 100644 --- a/cli/backup.go +++ b/cli/backup.go @@ -46,12 +46,15 @@ func BackupCmd(repoFlag string, rt repo.RepoType, getApi BackupApiFn) *cli.Comma } defer lr.Close() // nolint:errcheck - mds, err := lr.Datastore("/metadata") + mds, err := lr.Datastore(context.TODO(), "/metadata") if err != nil { return xerrors.Errorf("getting metadata datastore: %w", err) } - bds := backupds.Wrap(mds) + bds, err := backupds.Wrap(mds, backupds.NoLogdir) + if err != nil { + return err + } fpath, err := homedir.Expand(cctx.Args().First()) if err != nil { diff --git a/cli/chain.go b/cli/chain.go index e2d0ebb4ad6..e30a685dd84 100644 --- a/cli/chain.go +++ b/cli/chain.go @@ -3,12 +3,14 @@ package cli import ( "bytes" "context" + "encoding/base64" "encoding/hex" "encoding/json" "fmt" "os" "os/exec" "path" + "reflect" "sort" "strconv" "strings" @@ -29,36 +31,40 @@ import ( cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/api" lapi 
"github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/stmgr" types "github.com/filecoin-project/lotus/chain/types" ) -var chainCmd = &cli.Command{ +var ChainCmd = &cli.Command{ Name: "chain", Usage: "Interact with filecoin blockchain", Subcommands: []*cli.Command{ - chainHeadCmd, - chainGetBlock, - chainReadObjCmd, - chainDeleteObjCmd, - chainStatObjCmd, - chainGetMsgCmd, - chainSetHeadCmd, - chainListCmd, - chainGetCmd, - chainBisectCmd, - chainExportCmd, - slashConsensusFault, - chainGasPriceCmd, - chainInspectUsage, - chainDecodeCmd, + ChainHeadCmd, + ChainGetBlock, + ChainReadObjCmd, + ChainDeleteObjCmd, + ChainStatObjCmd, + ChainGetMsgCmd, + ChainSetHeadCmd, + ChainListCmd, + ChainGetCmd, + ChainBisectCmd, + ChainExportCmd, + SlashConsensusFault, + ChainGasPriceCmd, + ChainInspectUsage, + ChainDecodeCmd, + ChainEncodeCmd, + ChainDisputeSetCmd, }, } -var chainHeadCmd = &cli.Command{ +var ChainHeadCmd = &cli.Command{ Name: "head", Usage: "Print chain head", Action: func(cctx *cli.Context) error { @@ -81,7 +87,7 @@ var chainHeadCmd = &cli.Command{ }, } -var chainGetBlock = &cli.Command{ +var ChainGetBlock = &cli.Command{ Name: "getblock", Usage: "Get a block and print its details", ArgsUsage: "[blockCid]", @@ -172,7 +178,7 @@ func apiMsgCids(in []lapi.Message) []cid.Cid { return out } -var chainReadObjCmd = &cli.Command{ +var ChainReadObjCmd = &cli.Command{ Name: "read-obj", Usage: "Read the raw bytes of an object", ArgsUsage: "[objectCid]", @@ -199,7 +205,7 @@ var chainReadObjCmd = &cli.Command{ }, } -var chainDeleteObjCmd = &cli.Command{ +var ChainDeleteObjCmd = &cli.Command{ Name: "delete-obj", Usage: "Delete an object from the chain blockstore", Description: "WARNING: Removing wrong objects from the chain blockstore may lead to sync issues", @@ -236,7 +242,7 @@ var chainDeleteObjCmd = 
&cli.Command{ }, } -var chainStatObjCmd = &cli.Command{ +var ChainStatObjCmd = &cli.Command{ Name: "stat-obj", Usage: "Collect size and ipld link counts for objs", ArgsUsage: "[cid]", @@ -283,7 +289,7 @@ var chainStatObjCmd = &cli.Command{ }, } -var chainGetMsgCmd = &cli.Command{ +var ChainGetMsgCmd = &cli.Command{ Name: "getmessage", Usage: "Get and print a message by its cid", ArgsUsage: "[messageCid]", @@ -331,7 +337,7 @@ var chainGetMsgCmd = &cli.Command{ }, } -var chainSetHeadCmd = &cli.Command{ +var ChainSetHeadCmd = &cli.Command{ Name: "sethead", Usage: "manually set the local nodes head tipset (Caution: normally only used for recovery)", ArgsUsage: "[tipsetkey]", @@ -380,7 +386,7 @@ var chainSetHeadCmd = &cli.Command{ }, } -var chainInspectUsage = &cli.Command{ +var ChainInspectUsage = &cli.Command{ Name: "inspect-usage", Usage: "Inspect block space usage of a given tipset", Flags: []cli.Flag{ @@ -465,6 +471,9 @@ var chainInspectUsage = &cli.Command{ code, err := lookupActorCode(m.Message.To) if err != nil { + if strings.Contains(err.Error(), types.ErrActorNotFound.Error()) { + continue + } return err } @@ -522,12 +531,12 @@ var chainInspectUsage = &cli.Command{ }, } -var chainListCmd = &cli.Command{ +var ChainListCmd = &cli.Command{ Name: "list", Aliases: []string{"love"}, Usage: "View a segment of the chain", Flags: []cli.Flag{ - &cli.Uint64Flag{Name: "height"}, + &cli.Uint64Flag{Name: "height", DefaultText: "current head"}, &cli.IntFlag{Name: "count", Value: 30}, &cli.StringFlag{ Name: "format", @@ -635,7 +644,10 @@ var chainListCmd = &cli.Command{ gasUsed += r.GasUsed } - fmt.Printf("\ttipset: \t%d msgs, %d / %d (%0.2f%%)\n", len(msgs), gasUsed, limitSum, 100*float64(gasUsed)/float64(limitSum)) + gasEfficiency := 100 * float64(gasUsed) / float64(limitSum) + gasCapacity := 100 * float64(limitSum) / float64(build.BlockGasLimit) + + fmt.Printf("\ttipset: \t%d msgs, %d (%0.2f%%) / %d (%0.2f%%)\n", len(msgs), gasUsed, gasEfficiency, limitSum, gasCapacity) } 
fmt.Println() } @@ -648,7 +660,7 @@ var chainListCmd = &cli.Command{ }, } -var chainGetCmd = &cli.Command{ +var ChainGetCmd = &cli.Command{ Name: "get", Usage: "Get chain DAG node by path", ArgsUsage: "[path]", @@ -712,12 +724,6 @@ var chainGetCmd = &cli.Command{ return err } - if ts == nil { - ts, err = api.ChainHead(ctx) - if err != nil { - return err - } - } p = "/ipfs/" + ts.ParentState().String() + p if cctx.Bool("verbose") { fmt.Println(p) @@ -796,7 +802,7 @@ var chainGetCmd = &cli.Command{ type apiIpldStore struct { ctx context.Context - api lapi.FullNode + api v0api.FullNode } func (ht *apiIpldStore) Context() context.Context { @@ -824,7 +830,7 @@ func (ht *apiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) panic("No mutations allowed") } -func handleAmt(ctx context.Context, api lapi.FullNode, r cid.Cid) error { +func handleAmt(ctx context.Context, api v0api.FullNode, r cid.Cid) error { s := &apiIpldStore{ctx, api} mp, err := adt.AsArray(s, r) if err != nil { @@ -837,7 +843,7 @@ func handleAmt(ctx context.Context, api lapi.FullNode, r cid.Cid) error { }) } -func handleHamtEpoch(ctx context.Context, api lapi.FullNode, r cid.Cid) error { +func handleHamtEpoch(ctx context.Context, api v0api.FullNode, r cid.Cid) error { s := &apiIpldStore{ctx, api} mp, err := adt.AsMap(s, r) if err != nil { @@ -855,7 +861,7 @@ func handleHamtEpoch(ctx context.Context, api lapi.FullNode, r cid.Cid) error { }) } -func handleHamtAddress(ctx context.Context, api lapi.FullNode, r cid.Cid) error { +func handleHamtAddress(ctx context.Context, api v0api.FullNode, r cid.Cid) error { s := &apiIpldStore{ctx, api} mp, err := adt.AsMap(s, r) if err != nil { @@ -895,7 +901,7 @@ func printTipSet(format string, ts *types.TipSet) { fmt.Println(format) } -var chainBisectCmd = &cli.Command{ +var ChainBisectCmd = &cli.Command{ Name: "bisect", Usage: "bisect chain for an event", ArgsUsage: "[minHeight maxHeight path shellCommand ]", @@ -1018,13 +1024,15 @@ var chainBisectCmd = 
&cli.Command{ }, } -var chainExportCmd = &cli.Command{ +var ChainExportCmd = &cli.Command{ Name: "export", Usage: "export chain to a car file", ArgsUsage: "[outputPath]", Flags: []cli.Flag{ &cli.StringFlag{ - Name: "tipset", + Name: "tipset", + Usage: "specify tipset to start the export from", + Value: "@head", }, &cli.Int64Flag{ Name: "recent-stateroots", @@ -1096,14 +1104,14 @@ var chainExportCmd = &cli.Command{ }, } -var slashConsensusFault = &cli.Command{ +var SlashConsensusFault = &cli.Command{ Name: "slash-consensus", Usage: "Report consensus fault", ArgsUsage: "[blockCid1 blockCid2]", Flags: []cli.Flag{ &cli.StringFlag{ - Name: "miner", - Usage: "Miner address", + Name: "from", + Usage: "optionally specify the account to report consensus from", }, &cli.StringFlag{ Name: "extra", @@ -1111,11 +1119,13 @@ var slashConsensusFault = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + a := srv.FullNodeAPI() ctx := ReqContext(cctx) c1, err := cid.Parse(cctx.Args().Get(0)) @@ -1123,7 +1133,7 @@ var slashConsensusFault = &cli.Command{ return xerrors.Errorf("parsing cid 1: %w", err) } - b1, err := api.ChainGetBlock(ctx, c1) + b1, err := a.ChainGetBlock(ctx, c1) if err != nil { return xerrors.Errorf("getting block 1: %w", err) } @@ -1133,14 +1143,30 @@ var slashConsensusFault = &cli.Command{ return xerrors.Errorf("parsing cid 2: %w", err) } - b2, err := api.ChainGetBlock(ctx, c2) + b2, err := a.ChainGetBlock(ctx, c2) if err != nil { return xerrors.Errorf("getting block 2: %w", err) } - def, err := api.WalletDefaultAddress(ctx) - if err != nil { - return err + if b1.Miner != b2.Miner { + return xerrors.Errorf("block1.miner:%s block2.miner:%s", b1.Miner, b2.Miner) + } + + var fromAddr address.Address + if from := cctx.String("from"); from == "" { + defaddr, err := a.WalletDefaultAddress(ctx) + 
if err != nil { + return err + } + + fromAddr = defaddr + } else { + addr, err := address.NewFromString(from) + if err != nil { + return err + } + + fromAddr = addr } bh1, err := cborutil.Dump(b1) @@ -1164,7 +1190,7 @@ var slashConsensusFault = &cli.Command{ return xerrors.Errorf("parsing cid extra: %w", err) } - bExtra, err := api.ChainGetBlock(ctx, cExtra) + bExtra, err := a.ChainGetBlock(ctx, cExtra) if err != nil { return xerrors.Errorf("getting block extra: %w", err) } @@ -1182,24 +1208,17 @@ var slashConsensusFault = &cli.Command{ return err } - if cctx.String("miner") == "" { - return xerrors.Errorf("--miner flag is required") - } - - maddr, err := address.NewFromString(cctx.String("miner")) - if err != nil { - return err - } - - msg := &types.Message{ - To: maddr, - From: def, - Value: types.NewInt(0), - Method: builtin.MethodsMiner.ReportConsensusFault, - Params: enc, + proto := &api.MessagePrototype{ + Message: types.Message{ + To: b2.Miner, + From: fromAddr, + Value: types.NewInt(0), + Method: builtin.MethodsMiner.ReportConsensusFault, + Params: enc, + }, } - smsg, err := api.MpoolPushMessage(ctx, msg, nil) + smsg, err := InteractiveSend(ctx, cctx, srv, proto) if err != nil { return err } @@ -1210,7 +1229,7 @@ var slashConsensusFault = &cli.Command{ }, } -var chainGasPriceCmd = &cli.Command{ +var ChainGasPriceCmd = &cli.Command{ Name: "gas-price", Usage: "Estimate gas prices", Action: func(cctx *cli.Context) error { @@ -1237,7 +1256,7 @@ var chainGasPriceCmd = &cli.Command{ }, } -var chainDecodeCmd = &cli.Command{ +var ChainDecodeCmd = &cli.Command{ Name: "decode", Usage: "decode various types", Subcommands: []*cli.Command{ @@ -1246,14 +1265,19 @@ var chainDecodeCmd = &cli.Command{ } var chainDecodeParamsCmd = &cli.Command{ - Name: "params", - Usage: "Decode message params", + Name: "params", + Usage: "Decode message params", + ArgsUsage: "[toAddr method params]", Flags: []cli.Flag{ &cli.StringFlag{ Name: "tipset", }, + &cli.StringFlag{ + Name: 
"encoding", + Value: "base64", + Usage: "specify input encoding to parse", + }, }, - ArgsUsage: "[toAddr method hexParams]", Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -1276,11 +1300,21 @@ var chainDecodeParamsCmd = &cli.Command{ return xerrors.Errorf("parsing method id: %w", err) } - params, err := hex.DecodeString(cctx.Args().Get(2)) - if err != nil { - return xerrors.Errorf("parsing hex params: %w", err) + var params []byte + switch cctx.String("encoding") { + case "base64": + params, err = base64.StdEncoding.DecodeString(cctx.Args().Get(2)) + if err != nil { + return xerrors.Errorf("decoding base64 value: %w", err) + } + case "hex": + params, err = hex.DecodeString(cctx.Args().Get(2)) + if err != nil { + return xerrors.Errorf("decoding hex value: %w", err) + } + default: + return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding")) } - ts, err := LoadTipSet(ctx, cctx, api) if err != nil { return err @@ -1301,3 +1335,86 @@ var chainDecodeParamsCmd = &cli.Command{ return nil }, } + +var ChainEncodeCmd = &cli.Command{ + Name: "encode", + Usage: "encode various types", + Subcommands: []*cli.Command{ + chainEncodeParamsCmd, + }, +} + +var chainEncodeParamsCmd = &cli.Command{ + Name: "params", + Usage: "Encodes the given JSON params", + ArgsUsage: "[toAddr method params]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "tipset", + }, + &cli.StringFlag{ + Name: "encoding", + Value: "base64", + Usage: "specify input encoding to parse", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + if cctx.Args().Len() != 3 { + return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) + } + + to, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("parsing toAddr: %w", err) + } + + method, err := strconv.ParseInt(cctx.Args().Get(1), 10, 
64) + if err != nil { + return xerrors.Errorf("parsing method id: %w", err) + } + + ts, err := LoadTipSet(ctx, cctx, api) + if err != nil { + return err + } + + act, err := api.StateGetActor(ctx, to, ts.Key()) + if err != nil { + return xerrors.Errorf("getting actor: %w", err) + } + + methodMeta, found := stmgr.MethodsMap[act.Code][abi.MethodNum(method)] + if !found { + return fmt.Errorf("method %d not found on actor %s", method, act.Code) + } + + p := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORMarshaler) + + if err := json.Unmarshal([]byte(cctx.Args().Get(2)), p); err != nil { + return fmt.Errorf("unmarshaling input into params type: %w", err) + } + + buf := new(bytes.Buffer) + if err := p.MarshalCBOR(buf); err != nil { + return err + } + + switch cctx.String("encoding") { + case "base64": + fmt.Println(base64.StdEncoding.EncodeToString(buf.Bytes())) + case "hex": + fmt.Println(hex.EncodeToString(buf.Bytes())) + default: + return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding")) + } + + return nil + }, +} diff --git a/cli/client.go b/cli/client.go index 07e3cb2c877..774d9aa5ff9 100644 --- a/cli/client.go +++ b/cli/client.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "io" + "math" "math/rand" "os" "path/filepath" @@ -39,6 +40,7 @@ import ( "github.com/filecoin-project/lotus/api" lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/market" @@ -81,15 +83,19 @@ var clientCmd = &cli.Command{ WithCategory("storage", clientListDeals), WithCategory("storage", clientGetDealCmd), WithCategory("storage", clientListAsksCmd), + WithCategory("storage", clientDealStatsCmd), + WithCategory("storage", clientInspectDealCmd), WithCategory("data", clientImportCmd), WithCategory("data", clientDropCmd), WithCategory("data", clientLocalCmd), WithCategory("data", 
clientStat), WithCategory("retrieval", clientFindCmd), WithCategory("retrieval", clientRetrieveCmd), + WithCategory("retrieval", clientCancelRetrievalDealCmd), + WithCategory("retrieval", clientListRetrievalsCmd), WithCategory("util", clientCommPCmd), WithCategory("util", clientCarGenCmd), - WithCategory("util", clientInfoCmd), + WithCategory("util", clientBalancesCmd), WithCategory("util", clientListTransfers), WithCategory("util", clientRestartTransfer), WithCategory("util", clientCancelTransfer), @@ -297,8 +303,16 @@ var clientLocalCmd = &cli.Command{ } var clientDealCmd = &cli.Command{ - Name: "deal", - Usage: "Initialize storage deal with a miner", + Name: "deal", + Usage: "Initialize storage deal with a miner", + Description: `Make a deal with a miner. +dataCid comes from running 'lotus client import'. +miner is the address of the miner you wish to make a deal with. +price is measured in FIL/Epoch. Miners usually don't accept a bid +lower than their advertised ask (which is in FIL/GiB/Epoch). You can check a miners listed price +with 'lotus client query-ask '. +duration is how long the miner should store the data for, in blocks. 
+The minimum value is 518400 (6 months).`, ArgsUsage: "[dataCid miner price duration]", Flags: []cli.Flag{ &cli.StringFlag{ @@ -309,6 +323,10 @@ var clientDealCmd = &cli.Command{ Name: "manual-piece-size", Usage: "if manually specifying piece cid, used to specify size (dataCid must be to a car file)", }, + &cli.BoolFlag{ + Name: "manual-stateless-deal", + Usage: "instructs the node to send an offline deal without registering it with the deallist/fsm", + }, &cli.StringFlag{ Name: "from", Usage: "specify address to fund the deal with", @@ -324,9 +342,9 @@ var clientDealCmd = &cli.Command{ Value: true, }, &cli.BoolFlag{ - Name: "verified-deal", - Usage: "indicate that the deal counts towards verified client total", - Value: false, + Name: "verified-deal", + Usage: "indicate that the deal counts towards verified client total", + DefaultText: "true if client is verified, false otherwise", }, &cli.StringFlag{ Name: "provider-collateral", @@ -385,6 +403,9 @@ var clientDealCmd = &cli.Command{ if abi.ChainEpoch(dur) < build.MinDealDuration { return xerrors.Errorf("minimum deal duration is %d blocks", build.MinDealDuration) } + if abi.ChainEpoch(dur) > build.MaxDealDuration { + return xerrors.Errorf("maximum deal duration is %d blocks", build.MaxDealDuration) + } var a address.Address if from := cctx.String("from"); from != "" { @@ -445,7 +466,7 @@ var clientDealCmd = &cli.Command{ isVerified = verifiedDealParam } - proposal, err := api.ClientStartDeal(ctx, &lapi.StartDealParams{ + sdParams := &lapi.StartDealParams{ Data: ref, Wallet: a, Miner: miner, @@ -455,7 +476,18 @@ var clientDealCmd = &cli.Command{ FastRetrieval: cctx.Bool("fast-retrieval"), VerifiedDeal: isVerified, ProviderCollateral: provCol, - }) + } + + var proposal *cid.Cid + if cctx.Bool("manual-stateless-deal") { + if ref.TransferType != storagemarket.TTManual || price.Int64() != 0 { + return xerrors.New("when manual-stateless-deal is enabled, you must also provide a 'price' of 0 and specify 'manual-piece-cid' 
and 'manual-piece-size'") + } + proposal, err = api.ClientStatelessDeal(ctx, sdParams) + } else { + proposal, err = api.ClientStartDeal(ctx, sdParams) + } + if err != nil { return err } @@ -496,9 +528,10 @@ func interactiveDeal(cctx *cli.Context) error { var ds lapi.DataCIDSize // find - var candidateAsks []*storagemarket.StorageAsk + var candidateAsks []QueriedAsk var budget types.FIL var dealCount int64 + var medianPing, maxAcceptablePing time.Duration var a address.Address if from := cctx.String("from"); from != "" { @@ -653,22 +686,72 @@ uiLoop: state = "find" } case "find": - asks, err := getAsks(ctx, api) + asks, err := GetAsks(ctx, api) if err != nil { return err } + if len(asks) == 0 { + printErr(xerrors.Errorf("no asks found")) + continue uiLoop + } + + medianPing = asks[len(asks)/2].Ping + var avgPing time.Duration + for _, ask := range asks { + avgPing += ask.Ping + } + avgPing /= time.Duration(len(asks)) + for _, ask := range asks { - if ask.MinPieceSize > ds.PieceSize { + if ask.Ask.MinPieceSize > ds.PieceSize { continue } - if ask.MaxPieceSize < ds.PieceSize { + if ask.Ask.MaxPieceSize < ds.PieceSize { continue } candidateAsks = append(candidateAsks, ask) } afmt.Printf("Found %d candidate asks\n", len(candidateAsks)) + afmt.Printf("Average network latency: %s; Median latency: %s\n", avgPing.Truncate(time.Millisecond), medianPing.Truncate(time.Millisecond)) + state = "max-ping" + case "max-ping": + maxAcceptablePing = medianPing + + afmt.Printf("Maximum network latency (default: %s) (ms): ", maxAcceptablePing.Truncate(time.Millisecond)) + _latStr, _, err := rl.ReadLine() + latStr := string(_latStr) + if err != nil { + printErr(xerrors.Errorf("reading maximum latency: %w", err)) + continue + } + + if latStr != "" { + maxMs, err := strconv.ParseInt(latStr, 10, 64) + if err != nil { + printErr(xerrors.Errorf("parsing FIL: %w", err)) + continue uiLoop + } + + maxAcceptablePing = time.Millisecond * time.Duration(maxMs) + } + + var goodAsks []QueriedAsk + 
for _, candidateAsk := range candidateAsks { + if candidateAsk.Ping < maxAcceptablePing { + goodAsks = append(goodAsks, candidateAsk) + } + } + + if len(goodAsks) == 0 { + afmt.Printf("no asks left after filtering for network latency\n") + continue uiLoop + } + + afmt.Printf("%d asks left after filtering for network latency\n", len(goodAsks)) + candidateAsks = goodAsks + state = "find-budget" case "find-budget": afmt.Printf("Proposing from %s, Current Balance: %s\n", a, types.FIL(fromBal)) @@ -687,11 +770,11 @@ uiLoop: continue uiLoop } - var goodAsks []*storagemarket.StorageAsk + var goodAsks []QueriedAsk for _, ask := range candidateAsks { - p := ask.Price + p := ask.Ask.Price if verified { - p = ask.VerifiedPrice + p = ask.Ask.VerifiedPrice } epochPrice := types.BigDiv(types.BigMul(p, types.NewInt(uint64(ds.PieceSize))), gib) @@ -731,9 +814,9 @@ uiLoop: pickedAsks = []*storagemarket.StorageAsk{} for _, ask := range candidateAsks { - p := ask.Price + p := ask.Ask.Price if verified { - p = ask.VerifiedPrice + p = ask.Ask.VerifiedPrice } epochPrice := types.BigDiv(types.BigMul(p, types.NewInt(uint64(ds.PieceSize))), gib) @@ -743,7 +826,7 @@ uiLoop: continue } - pickedAsks = append(pickedAsks, ask) + pickedAsks = append(pickedAsks, ask.Ask) remainingBudget = big.Sub(remainingBudget, totalPrice) if len(pickedAsks) == int(dealCount) { @@ -943,7 +1026,7 @@ var clientFindCmd = &cli.Command{ }, } -const DefaultMaxRetrievePrice = 1 +const DefaultMaxRetrievePrice = "0.01" var clientRetrieveCmd = &cli.Command{ Name: "retrieve", @@ -964,12 +1047,15 @@ var clientRetrieveCmd = &cli.Command{ }, &cli.StringFlag{ Name: "maxPrice", - Usage: fmt.Sprintf("maximum price the client is willing to consider (default: %d FIL)", DefaultMaxRetrievePrice), + Usage: fmt.Sprintf("maximum price the client is willing to consider (default: %s FIL)", DefaultMaxRetrievePrice), }, &cli.StringFlag{ Name: "pieceCid", Usage: "require data to be retrieved from a specific Piece CID", }, + &cli.BoolFlag{ 
+ Name: "allow-local", + }, }, Action: func(cctx *cli.Context) error { if cctx.NArg() != 2 { @@ -999,18 +1085,6 @@ var clientRetrieveCmd = &cli.Command{ return err } - // Check if we already have this data locally - - /*has, err := api.ClientHasLocal(ctx, file) - if err != nil { - return err - } - - if has { - fmt.Println("Success: Already in local storage") - return nil - }*/ // TODO: fix - var pieceCid *cid.Cid if cctx.String("pieceCid") != "" { parsed, err := cid.Parse(cctx.String("pieceCid")) @@ -1020,73 +1094,99 @@ var clientRetrieveCmd = &cli.Command{ pieceCid = &parsed } - var offer api.QueryOffer - minerStrAddr := cctx.String("miner") - if minerStrAddr == "" { // Local discovery - offers, err := fapi.ClientFindData(ctx, file, pieceCid) + var order *lapi.RetrievalOrder + if cctx.Bool("allow-local") { + imports, err := fapi.ClientListImports(ctx) + if err != nil { + return err + } + + for _, i := range imports { + if i.Root != nil && i.Root.Equals(file) { + order = &lapi.RetrievalOrder{ + Root: file, + LocalStore: &i.Key, - var cleaned []api.QueryOffer - // filter out offers that errored - for _, o := range offers { - if o.Err == "" { - cleaned = append(cleaned, o) + Total: big.Zero(), + UnsealPrice: big.Zero(), + } + break } } + } - offers = cleaned + if order == nil { + var offer api.QueryOffer + minerStrAddr := cctx.String("miner") + if minerStrAddr == "" { // Local discovery + offers, err := fapi.ClientFindData(ctx, file, pieceCid) - // sort by price low to high - sort.Slice(offers, func(i, j int) bool { - return offers[i].MinPrice.LessThan(offers[j].MinPrice) - }) - if err != nil { - return err - } + var cleaned []api.QueryOffer + // filter out offers that errored + for _, o := range offers { + if o.Err == "" { + cleaned = append(cleaned, o) + } + } - // TODO: parse offer strings from `client find`, make this smarter - if len(offers) < 1 { - fmt.Println("Failed to find file") - return nil - } - offer = offers[0] - } else { // Directed retrieval - 
minerAddr, err := address.NewFromString(minerStrAddr) - if err != nil { - return err + offers = cleaned + + // sort by price low to high + sort.Slice(offers, func(i, j int) bool { + return offers[i].MinPrice.LessThan(offers[j].MinPrice) + }) + if err != nil { + return err + } + + // TODO: parse offer strings from `client find`, make this smarter + if len(offers) < 1 { + fmt.Println("Failed to find file") + return nil + } + offer = offers[0] + } else { // Directed retrieval + minerAddr, err := address.NewFromString(minerStrAddr) + if err != nil { + return err + } + offer, err = fapi.ClientMinerQueryOffer(ctx, minerAddr, file, pieceCid) + if err != nil { + return err + } } - offer, err = fapi.ClientMinerQueryOffer(ctx, minerAddr, file, pieceCid) - if err != nil { - return err + if offer.Err != "" { + return fmt.Errorf("The received offer errored: %s", offer.Err) } - } - if offer.Err != "" { - return fmt.Errorf("The received offer errored: %s", offer.Err) - } - maxPrice := types.FromFil(DefaultMaxRetrievePrice) + maxPrice := types.MustParseFIL(DefaultMaxRetrievePrice) - if cctx.String("maxPrice") != "" { - maxPriceFil, err := types.ParseFIL(cctx.String("maxPrice")) - if err != nil { - return xerrors.Errorf("parsing maxPrice: %w", err) + if cctx.String("maxPrice") != "" { + maxPrice, err = types.ParseFIL(cctx.String("maxPrice")) + if err != nil { + return xerrors.Errorf("parsing maxPrice: %w", err) + } } - maxPrice = types.BigInt(maxPriceFil) - } + if offer.MinPrice.GreaterThan(big.Int(maxPrice)) { + return xerrors.Errorf("failed to find offer satisfying maxPrice: %s", maxPrice) + } - if offer.MinPrice.GreaterThan(maxPrice) { - return xerrors.Errorf("failed to find offer satisfying maxPrice: %s", maxPrice) + o := offer.Order(payer) + order = &o } - ref := &lapi.FileRef{ Path: cctx.Args().Get(1), IsCAR: cctx.Bool("car"), } - updates, err := fapi.ClientRetrieveWithEvents(ctx, offer.Order(payer), ref) + + updates, err := fapi.ClientRetrieveWithEvents(ctx, *order, ref) if 
err != nil { return xerrors.Errorf("error setting up retrieval: %w", err) } + var prevStatus retrievalmarket.DealStatus + for { select { case evt, ok := <-updates: @@ -1097,14 +1197,23 @@ var clientRetrieveCmd = &cli.Command{ retrievalmarket.ClientEvents[evt.Event], retrievalmarket.DealStatuses[evt.Status], ) - } else { - afmt.Println("Success") - return nil + prevStatus = evt.Status } if evt.Err != "" { return xerrors.Errorf("retrieval failed: %s", evt.Err) } + + if !ok { + if prevStatus == retrievalmarket.DealStatusCompleted { + afmt.Println("Success") + } else { + afmt.Printf("saw final deal state %s instead of expected success state DealStatusCompleted\n", + retrievalmarket.DealStatuses[prevStatus]) + } + return nil + } + case <-ctx.Done(): return xerrors.Errorf("retrieval timed out") } @@ -1112,9 +1221,309 @@ var clientRetrieveCmd = &cli.Command{ }, } +var clientListRetrievalsCmd = &cli.Command{ + Name: "list-retrievals", + Usage: "List retrieval market deals", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "verbose", + Aliases: []string{"v"}, + Usage: "print verbose deal details", + }, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + }, + &cli.BoolFlag{ + Name: "show-failed", + Usage: "show failed/failing deals", + Value: true, + }, + &cli.BoolFlag{ + Name: "completed", + Usage: "show completed retrievals", + }, + &cli.BoolFlag{ + Name: "watch", + Usage: "watch deal updates in real-time, rather than a one time list", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + verbose := cctx.Bool("verbose") + watch := cctx.Bool("watch") + showFailed := cctx.Bool("show-failed") + completed := cctx.Bool("completed") + + localDeals, err := api.ClientListRetrievals(ctx) + if err != nil { + return err + } + 
+ if watch { + updates, err := api.ClientGetRetrievalUpdates(ctx) + if err != nil { + return err + } + + for { + tm.Clear() + tm.MoveCursor(1, 1) + + err = outputRetrievalDeals(ctx, tm.Screen, localDeals, verbose, showFailed, completed) + if err != nil { + return err + } + + tm.Flush() + + select { + case <-ctx.Done(): + return nil + case updated := <-updates: + var found bool + for i, existing := range localDeals { + if existing.ID == updated.ID { + localDeals[i] = updated + found = true + break + } + } + if !found { + localDeals = append(localDeals, updated) + } + } + } + } + + return outputRetrievalDeals(ctx, cctx.App.Writer, localDeals, verbose, showFailed, completed) + }, +} + +func isTerminalError(status retrievalmarket.DealStatus) bool { + // should patch this in go-fil-markets but to solve the problem immediate and not have buggy output + return retrievalmarket.IsTerminalError(status) || status == retrievalmarket.DealStatusErrored || status == retrievalmarket.DealStatusCancelled +} +func outputRetrievalDeals(ctx context.Context, out io.Writer, localDeals []lapi.RetrievalInfo, verbose bool, showFailed bool, completed bool) error { + var deals []api.RetrievalInfo + for _, deal := range localDeals { + if !showFailed && isTerminalError(deal.Status) { + continue + } + if !completed && retrievalmarket.IsTerminalSuccess(deal.Status) { + continue + } + deals = append(deals, deal) + } + + tableColumns := []tablewriter.Column{ + tablewriter.Col("PayloadCID"), + tablewriter.Col("DealId"), + tablewriter.Col("Provider"), + tablewriter.Col("Status"), + tablewriter.Col("PricePerByte"), + tablewriter.Col("Received"), + tablewriter.Col("TotalPaid"), + } + + if verbose { + tableColumns = append(tableColumns, + tablewriter.Col("PieceCID"), + tablewriter.Col("UnsealPrice"), + tablewriter.Col("BytesPaidFor"), + tablewriter.Col("TransferChannelID"), + tablewriter.Col("TransferStatus"), + ) + } + tableColumns = append(tableColumns, tablewriter.NewLineCol("Message")) + + w := 
tablewriter.New(tableColumns...) + + for _, d := range deals { + w.Write(toRetrievalOutput(d, verbose)) + } + + return w.Flush(out) +} + +func toRetrievalOutput(d api.RetrievalInfo, verbose bool) map[string]interface{} { + + payloadCID := d.PayloadCID.String() + provider := d.Provider.String() + if !verbose { + payloadCID = ellipsis(payloadCID, 8) + provider = ellipsis(provider, 8) + } + + retrievalOutput := map[string]interface{}{ + "PayloadCID": payloadCID, + "DealId": d.ID, + "Provider": provider, + "Status": retrievalStatusString(d.Status), + "PricePerByte": types.FIL(d.PricePerByte), + "Received": units.BytesSize(float64(d.BytesReceived)), + "TotalPaid": types.FIL(d.TotalPaid), + "Message": d.Message, + } + + if verbose { + transferChannelID := "" + if d.TransferChannelID != nil { + transferChannelID = d.TransferChannelID.String() + } + transferStatus := "" + if d.DataTransfer != nil { + transferStatus = datatransfer.Statuses[d.DataTransfer.Status] + } + pieceCID := "" + if d.PieceCID != nil { + pieceCID = d.PieceCID.String() + } + + retrievalOutput["PieceCID"] = pieceCID + retrievalOutput["UnsealPrice"] = types.FIL(d.UnsealPrice) + retrievalOutput["BytesPaidFor"] = units.BytesSize(float64(d.BytesPaidFor)) + retrievalOutput["TransferChannelID"] = transferChannelID + retrievalOutput["TransferStatus"] = transferStatus + } + return retrievalOutput +} + +func retrievalStatusString(status retrievalmarket.DealStatus) string { + s := retrievalmarket.DealStatuses[status] + + switch { + case isTerminalError(status): + return color.RedString(s) + case retrievalmarket.IsTerminalSuccess(status): + return color.GreenString(s) + default: + return s + } +} + +var clientInspectDealCmd = &cli.Command{ + Name: "inspect-deal", + Usage: "Inspect detailed information about deal's lifecycle and the various stages it goes through", + Flags: []cli.Flag{ + &cli.IntFlag{ + Name: "deal-id", + }, + &cli.StringFlag{ + Name: "proposal-cid", + }, + }, + Action: func(cctx *cli.Context) error 
{ + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := ReqContext(cctx) + return inspectDealCmd(ctx, api, cctx.String("proposal-cid"), cctx.Int("deal-id")) + }, +} + +var clientDealStatsCmd = &cli.Command{ + Name: "deal-stats", + Usage: "Print statistics about local storage deals", + Flags: []cli.Flag{ + &cli.DurationFlag{ + Name: "newer-than", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + localDeals, err := api.ClientListDeals(ctx) + if err != nil { + return err + } + + var totalSize uint64 + byState := map[storagemarket.StorageDealStatus][]uint64{} + for _, deal := range localDeals { + if cctx.IsSet("newer-than") { + if time.Now().Sub(deal.CreationTime) > cctx.Duration("newer-than") { + continue + } + } + + totalSize += deal.Size + byState[deal.State] = append(byState[deal.State], deal.Size) + } + + fmt.Printf("Total: %d deals, %s\n", len(localDeals), types.SizeStr(types.NewInt(totalSize))) + + type stateStat struct { + state storagemarket.StorageDealStatus + count int + bytes uint64 + } + + stateStats := make([]stateStat, 0, len(byState)) + for state, deals := range byState { + if state == storagemarket.StorageDealActive { + state = math.MaxUint64 // for sort + } + + st := stateStat{ + state: state, + count: len(deals), + } + for _, b := range deals { + st.bytes += b + } + + stateStats = append(stateStats, st) + } + + sort.Slice(stateStats, func(i, j int) bool { + return int64(stateStats[i].state) < int64(stateStats[j].state) + }) + + for _, st := range stateStats { + if st.state == math.MaxUint64 { + st.state = storagemarket.StorageDealActive + } + fmt.Printf("%s: %d deals, %s\n", storagemarket.DealStates[st.state], st.count, types.SizeStr(types.NewInt(st.bytes))) + } + + return nil + }, +} + var clientListAsksCmd = &cli.Command{ Name: "list-asks", Usage: "List asks for top 
miners", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "by-ping", + Usage: "sort by ping", + }, + &cli.StringFlag{ + Name: "output-format", + Value: "text", + Usage: "Either 'text' or 'csv'", + }, + }, Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -1123,17 +1532,31 @@ var clientListAsksCmd = &cli.Command{ defer closer() ctx := ReqContext(cctx) - asks, err := getAsks(ctx, api) + asks, err := GetAsks(ctx, api) if err != nil { return err } - for _, ask := range asks { - fmt.Printf("%s: min:%s max:%s price:%s/GiB/Epoch verifiedPrice:%s/GiB/Epoch\n", ask.Miner, + if cctx.Bool("by-ping") { + sort.Slice(asks, func(i, j int) bool { + return asks[i].Ping < asks[j].Ping + }) + } + pfmt := "%s: min:%s max:%s price:%s/GiB/Epoch verifiedPrice:%s/GiB/Epoch ping:%s\n" + if cctx.String("output-format") == "csv" { + fmt.Printf("Miner,Min,Max,Price,VerifiedPrice,Ping\n") + pfmt = "%s,%s,%s,%s,%s,%s\n" + } + + for _, a := range asks { + ask := a.Ask + + fmt.Printf(pfmt, ask.Miner, types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))), types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))), types.FIL(ask.Price), types.FIL(ask.VerifiedPrice), + a.Ping, ) } @@ -1141,8 +1564,19 @@ var clientListAsksCmd = &cli.Command{ }, } -func getAsks(ctx context.Context, api lapi.FullNode) ([]*storagemarket.StorageAsk, error) { - color.Blue(".. getting miner list") +type QueriedAsk struct { + Ask *storagemarket.StorageAsk + Ping time.Duration +} + +func GetAsks(ctx context.Context, api v0api.FullNode) ([]QueriedAsk, error) { + isTTY := true + if fileInfo, _ := os.Stdout.Stat(); (fileInfo.Mode() & os.ModeCharDevice) == 0 { + isTTY = false + } + if isTTY { + color.Blue(".. 
getting miner list") + } miners, err := api.StateListMiners(ctx, types.EmptyTSK) if err != nil { return nil, xerrors.Errorf("getting miner list: %w", err) @@ -1187,16 +1621,20 @@ loop: for { select { case <-time.After(150 * time.Millisecond): - fmt.Printf("\r* Found %d miners with power", atomic.LoadInt64(&found)) + if isTTY { + fmt.Printf("\r* Found %d miners with power", atomic.LoadInt64(&found)) + } case <-done: break loop } } - fmt.Printf("\r* Found %d miners with power\n", atomic.LoadInt64(&found)) + if isTTY { + fmt.Printf("\r* Found %d miners with power\n", atomic.LoadInt64(&found)) - color.Blue(".. querying asks") + color.Blue(".. querying asks") + } - var asks []*storagemarket.StorageAsk + var asks []QueriedAsk var queried, got int64 done = make(chan struct{}) @@ -1232,9 +1670,19 @@ loop: return } + rt := time.Now() + _, err = api.ClientQueryAsk(ctx, *mi.PeerId, miner) + if err != nil { + return + } + pingDuration := time.Now().Sub(rt) + atomic.AddInt64(&got, 1) lk.Lock() - asks = append(asks, ask) + asks = append(asks, QueriedAsk{ + Ask: ask, + Ping: pingDuration, + }) lk.Unlock() }(miner) } @@ -1244,15 +1692,19 @@ loop2: for { select { case <-time.After(150 * time.Millisecond): - fmt.Printf("\r* Queried %d asks, got %d responses", atomic.LoadInt64(&queried), atomic.LoadInt64(&got)) + if isTTY { + fmt.Printf("\r* Queried %d asks, got %d responses", atomic.LoadInt64(&queried), atomic.LoadInt64(&got)) + } case <-done: break loop2 } } - fmt.Printf("\r* Queried %d asks, got %d responses\n", atomic.LoadInt64(&queried), atomic.LoadInt64(&got)) + if isTTY { + fmt.Printf("\r* Queried %d asks, got %d responses\n", atomic.LoadInt64(&queried), atomic.LoadInt64(&got)) + } sort.Slice(asks, func(i, j int) bool { - return asks[i].Price.LessThan(asks[j].Price) + return asks[i].Ask.Price.LessThan(asks[j].Ask.Price) }) return asks, nil @@ -1308,7 +1760,7 @@ var clientQueryAskCmd = &cli.Command{ return xerrors.Errorf("failed to get peerID for miner: %w", err) } - if 
*mi.PeerId == peer.ID("SETME") { + if mi.PeerId == nil || *mi.PeerId == peer.ID("SETME") { return fmt.Errorf("the miner hasn't initialized yet") } @@ -1324,6 +1776,7 @@ var clientQueryAskCmd = &cli.Command{ afmt.Printf("Price per GiB: %s\n", types.FIL(ask.Price)) afmt.Printf("Verified Price per GiB: %s\n", types.FIL(ask.VerifiedPrice)) afmt.Printf("Max Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize)))) + afmt.Printf("Min Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.MinPieceSize)))) size := cctx.Int64("size") if size == 0 { @@ -1352,9 +1805,9 @@ var clientListDeals = &cli.Command{ Usage: "print verbose deal details", }, &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, &cli.BoolFlag{ Name: "show-failed", @@ -1366,6 +1819,10 @@ var clientListDeals = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -1374,7 +1831,6 @@ var clientListDeals = &cli.Command{ ctx := ReqContext(cctx) verbose := cctx.Bool("verbose") - color := cctx.Bool("color") watch := cctx.Bool("watch") showFailed := cctx.Bool("show-failed") @@ -1393,7 +1849,7 @@ var clientListDeals = &cli.Command{ tm.Clear() tm.MoveCursor(1, 1) - err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, color, showFailed) + err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, showFailed) if err != nil { return err } @@ -1419,11 +1875,11 @@ var clientListDeals = &cli.Command{ } } - return outputStorageDeals(ctx, cctx.App.Writer, api, localDeals, cctx.Bool("verbose"), cctx.Bool("color"), showFailed) + return outputStorageDeals(ctx, cctx.App.Writer, api, localDeals, verbose, showFailed) }, } -func dealFromDealInfo(ctx context.Context, full api.FullNode, head *types.TipSet, v 
api.DealInfo) deal { +func dealFromDealInfo(ctx context.Context, full v0api.FullNode, head *types.TipSet, v api.DealInfo) deal { if v.DealID == 0 { return deal{ LocalDeal: v, @@ -1442,7 +1898,7 @@ func dealFromDealInfo(ctx context.Context, full api.FullNode, head *types.TipSet } } -func outputStorageDeals(ctx context.Context, out io.Writer, full lapi.FullNode, localDeals []lapi.DealInfo, verbose bool, color bool, showFailed bool) error { +func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, localDeals []lapi.DealInfo, verbose bool, showFailed bool) error { sort.Slice(localDeals, func(i, j int) bool { return localDeals[i].CreationTime.Before(localDeals[j].CreationTime) }) @@ -1461,7 +1917,7 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full lapi.FullNode, if verbose { w := tabwriter.NewWriter(out, 2, 4, 2, ' ', 0) - fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tVerified\tMessage\n") + fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tTransferChannelID\tTransferStatus\tVerified\tMessage\n") for _, d := range deals { onChain := "N" if d.OnChainDealState.SectorStartEpoch != -1 { @@ -1474,7 +1930,37 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full lapi.FullNode, } price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration))) - fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%v\t%s\n", d.LocalDeal.CreationTime.Format(time.Stamp), d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, dealStateString(color, d.LocalDeal.State), onChain, slashed, d.LocalDeal.PieceCID, types.SizeStr(types.NewInt(d.LocalDeal.Size)), price, d.LocalDeal.Duration, d.LocalDeal.Verified, d.LocalDeal.Message) + transferChannelID := "" + if d.LocalDeal.TransferChannelID != nil { + transferChannelID = d.LocalDeal.TransferChannelID.String() + } + 
transferStatus := "" + if d.LocalDeal.DataTransfer != nil { + transferStatus = datatransfer.Statuses[d.LocalDeal.DataTransfer.Status] + // TODO: Include the transferred percentage once this bug is fixed: + // https://github.com/ipfs/go-graphsync/issues/126 + //fmt.Printf("transferred: %d / size: %d\n", d.LocalDeal.DataTransfer.Transferred, d.LocalDeal.Size) + //if d.LocalDeal.Size > 0 { + // pct := (100 * d.LocalDeal.DataTransfer.Transferred) / d.LocalDeal.Size + // transferPct = fmt.Sprintf("%d%%", pct) + //} + } + fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%v\t%s\n", + d.LocalDeal.CreationTime.Format(time.Stamp), + d.LocalDeal.ProposalCid, + d.LocalDeal.DealID, + d.LocalDeal.Provider, + dealStateString(d.LocalDeal.State), + onChain, + slashed, + d.LocalDeal.PieceCID, + types.SizeStr(types.NewInt(d.LocalDeal.Size)), + price, + d.LocalDeal.Duration, + transferChannelID, + transferStatus, + d.LocalDeal.Verified, + d.LocalDeal.Message) } return w.Flush() } @@ -1513,7 +1999,7 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full lapi.FullNode, "DealCid": propcid, "DealId": d.LocalDeal.DealID, "Provider": d.LocalDeal.Provider, - "State": dealStateString(color, d.LocalDeal.State), + "State": dealStateString(d.LocalDeal.State), "On Chain?": onChain, "Slashed?": slashed, "PieceCID": piece, @@ -1528,12 +2014,8 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full lapi.FullNode, return w.Flush(out) } -func dealStateString(c bool, state storagemarket.StorageDealStatus) string { +func dealStateString(state storagemarket.StorageDealStatus) string { s := storagemarket.DealStates[state] - if !c { - return s - } - switch state { case storagemarket.StorageDealError, storagemarket.StorageDealExpired: return color.RedString(s) @@ -1596,9 +2078,9 @@ var clientGetDealCmd = &cli.Command{ }, } -var clientInfoCmd = &cli.Command{ - Name: "info", - Usage: "Print storage market client information", +var clientBalancesCmd = &cli.Command{ + 
Name: "balances", + Usage: "Print storage market client balances", Flags: []cli.Flag{ &cli.StringFlag{ Name: "client", @@ -1615,7 +2097,7 @@ var clientInfoCmd = &cli.Command{ var addr address.Address if clientFlag := cctx.String("client"); clientFlag != "" { - ca, err := address.NewFromString("client") + ca, err := address.NewFromString(clientFlag) if err != nil { return err } @@ -1634,10 +2116,22 @@ var clientInfoCmd = &cli.Command{ return err } - fmt.Printf("Client Market Info:\n") + reserved, err := api.MarketGetReserved(ctx, addr) + if err != nil { + return err + } + + avail := big.Sub(big.Sub(balance.Escrow, balance.Locked), reserved) + if avail.LessThan(big.Zero()) { + avail = big.Zero() + } - fmt.Printf("Locked Funds:\t%s\n", types.FIL(balance.Locked)) - fmt.Printf("Escrowed Funds:\t%s\n", types.FIL(balance.Escrow)) + fmt.Printf("Client Market Balance for address %s:\n", addr) + + fmt.Printf(" Escrowed Funds: %s\n", types.FIL(balance.Escrow)) + fmt.Printf(" Locked Funds: %s\n", types.FIL(balance.Locked)) + fmt.Printf(" Reserved Funds: %s\n", types.FIL(reserved)) + fmt.Printf(" Available to Withdraw: %s\n", types.FIL(avail)) return nil }, @@ -1749,6 +2243,11 @@ var clientCancelTransfer = &cli.Command{ Usage: "specify only transfers where peer is/is not initiator", Value: true, }, + &cli.DurationFlag{ + Name: "cancel-timeout", + Usage: "time to wait for cancel to be sent to storage provider", + Value: 5 * time.Second, + }, }, Action: func(cctx *cli.Context) error { if !cctx.Args().Present() { @@ -1792,7 +2291,36 @@ var clientCancelTransfer = &cli.Command{ } } - return api.ClientCancelDataTransfer(ctx, transferID, other, initiator) + timeoutCtx, cancel := context.WithTimeout(ctx, cctx.Duration("cancel-timeout")) + defer cancel() + return api.ClientCancelDataTransfer(timeoutCtx, transferID, other, initiator) + }, +} + +var clientCancelRetrievalDealCmd = &cli.Command{ + Name: "cancel-retrieval", + Usage: "Cancel a retrieval deal by deal ID; this also cancels the 
associated transfer", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "deal-id", + Usage: "specify retrieval deal by deal ID", + Required: true, + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + id := cctx.Int64("deal-id") + if id < 0 { + return errors.New("deal id cannot be negative") + } + + return api.ClientCancelRetrievalDeal(ctx, retrievalmarket.DealID(id)) }, } @@ -1801,9 +2329,14 @@ var clientListTransfers = &cli.Command{ Usage: "List ongoing data transfers for deals", Flags: []cli.Flag{ &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - Value: true, + Name: "verbose", + Aliases: []string{"v"}, + Usage: "print verbose transfer details", + }, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, &cli.BoolFlag{ Name: "completed", @@ -1819,6 +2352,10 @@ var clientListTransfers = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -1831,8 +2368,8 @@ var clientListTransfers = &cli.Command{ return err } + verbose := cctx.Bool("verbose") completed := cctx.Bool("completed") - color := cctx.Bool("color") watch := cctx.Bool("watch") showFailed := cctx.Bool("show-failed") if watch { @@ -1846,7 +2383,7 @@ var clientListTransfers = &cli.Command{ tm.MoveCursor(1, 1) - OutputDataTransferChannels(tm.Screen, channels, completed, color, showFailed) + OutputDataTransferChannels(tm.Screen, channels, verbose, completed, showFailed) tm.Flush() @@ -1871,13 +2408,13 @@ var clientListTransfers = &cli.Command{ } } } - OutputDataTransferChannels(os.Stdout, channels, completed, color, showFailed) + OutputDataTransferChannels(os.Stdout, channels, verbose, completed, showFailed) return nil }, } // OutputDataTransferChannels 
generates table output for a list of channels -func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, completed bool, color bool, showFailed bool) { +func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, verbose, completed, showFailed bool) { sort.Slice(channels, func(i, j int) bool { return channels[i].TransferID < channels[j].TransferID }) @@ -1907,7 +2444,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann tablewriter.Col("Voucher"), tablewriter.NewLineCol("Message")) for _, channel := range sendingChannels { - w.Write(toChannelOutput(color, "Sending To", channel)) + w.Write(toChannelOutput("Sending To", channel, verbose)) } w.Flush(out) //nolint:errcheck @@ -1921,17 +2458,13 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann tablewriter.Col("Voucher"), tablewriter.NewLineCol("Message")) for _, channel := range receivingChannels { - w.Write(toChannelOutput(color, "Receiving From", channel)) + w.Write(toChannelOutput("Receiving From", channel, verbose)) } w.Flush(out) //nolint:errcheck } -func channelStatusString(useColor bool, status datatransfer.Status) string { +func channelStatusString(status datatransfer.Status) string { s := datatransfer.Statuses[status] - if !useColor { - return s - } - switch status { case datatransfer.Failed, datatransfer.Cancelled: return color.RedString(s) @@ -1942,9 +2475,13 @@ func channelStatusString(useColor bool, status datatransfer.Status) string { } } -func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTransferChannel) map[string]interface{} { - rootCid := ellipsis(channel.BaseCID.String(), 8) - otherParty := ellipsis(channel.OtherPeer.String(), 8) +func toChannelOutput(otherPartyColumn string, channel lapi.DataTransferChannel, verbose bool) map[string]interface{} { + rootCid := channel.BaseCID.String() + otherParty := channel.OtherPeer.String() + if !verbose { + rootCid = 
ellipsis(rootCid, 8) + otherParty = ellipsis(otherParty, 8) + } initiated := "N" if channel.IsInitiator { @@ -1952,13 +2489,13 @@ func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTr } voucher := channel.Voucher - if len(voucher) > 40 { + if len(voucher) > 40 && !verbose { voucher = ellipsis(voucher, 37) } return map[string]interface{}{ "ID": channel.TransferID, - "Status": channelStatusString(useColor, channel.Status), + "Status": channelStatusString(channel.Status), otherPartyColumn: otherParty, "Root Cid": rootCid, "Initiated?": initiated, @@ -1974,3 +2511,77 @@ func ellipsis(s string, length int) string { } return s } + +func inspectDealCmd(ctx context.Context, api v0api.FullNode, proposalCid string, dealId int) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + deals, err := api.ClientListDeals(ctx) + if err != nil { + return err + } + + var di *lapi.DealInfo + for i, cdi := range deals { + if proposalCid != "" && cdi.ProposalCid.String() == proposalCid { + di = &deals[i] + break + } + + if dealId != 0 && int(cdi.DealID) == dealId { + di = &deals[i] + break + } + } + + if di == nil { + if proposalCid != "" { + return fmt.Errorf("cannot find deal with proposal cid: %s", proposalCid) + } + if dealId != 0 { + return fmt.Errorf("cannot find deal with deal id: %v", dealId) + } + return errors.New("you must specify proposal cid or deal id in order to inspect a deal") + } + + // populate DealInfo.DealStages and DataTransfer.Stages + di, err = api.ClientGetDealInfo(ctx, di.ProposalCid) + if err != nil { + return fmt.Errorf("cannot get deal info for proposal cid: %v", di.ProposalCid) + } + + renderDeal(di) + + return nil +} + +func renderDeal(di *lapi.DealInfo) { + color.Blue("Deal ID: %d\n", int(di.DealID)) + color.Blue("Proposal CID: %s\n\n", di.ProposalCid.String()) + + if di.DealStages == nil { + color.Yellow("Deal was made with an older version of Lotus and Lotus did not collect detailed information about its stages") 
+ return + } + + for _, stg := range di.DealStages.Stages { + msg := fmt.Sprintf("%s %s: %s (expected duration: %s)", color.BlueString("Stage:"), color.BlueString(strings.TrimPrefix(stg.Name, "StorageDeal")), stg.Description, color.GreenString(stg.ExpectedDuration)) + if stg.UpdatedTime.Time().IsZero() { + msg = color.YellowString(msg) + } + fmt.Println(msg) + + for _, l := range stg.Logs { + fmt.Printf(" %s %s\n", color.YellowString(l.UpdatedTime.Time().UTC().Round(time.Second).Format(time.Stamp)), l.Log) + } + + if stg.Name == "StorageDealStartDataTransfer" { + for _, dtStg := range di.DataTransfer.Stages.Stages { + fmt.Printf(" %s %s %s\n", color.YellowString(dtStg.CreatedTime.Time().UTC().Round(time.Second).Format(time.Stamp)), color.BlueString("Data transfer stage:"), color.BlueString(dtStg.Name)) + for _, l := range dtStg.Logs { + fmt.Printf(" %s %s\n", color.YellowString(l.UpdatedTime.Time().UTC().Round(time.Second).Format(time.Stamp)), l.Log) + } + } + } + } +} diff --git a/cli/client_test.go b/cli/client_test.go deleted file mode 100644 index f0e8efda846..00000000000 --- a/cli/client_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package cli - -import ( - "context" - "os" - "testing" - "time" - - clitest "github.com/filecoin-project/lotus/cli/test" -) - -// TestClient does a basic test to exercise the client CLI -// commands -func TestClient(t *testing.T) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() - - blocktime := 5 * time.Millisecond - ctx := context.Background() - clientNode, _ := clitest.StartOneNodeOneMiner(ctx, t, blocktime) - clitest.RunClientTest(t, Commands, clientNode) -} diff --git a/cli/cmd.go b/cli/cmd.go index 02ef06002af..630aae1bc75 100644 --- a/cli/cmd.go +++ b/cli/cmd.go @@ -1,34 +1,17 @@ package cli import ( - "context" - "fmt" - "net/http" - "net/url" - "os" - "os/signal" "strings" - "syscall" logging "github.com/ipfs/go-log/v2" - "github.com/mitchellh/go-homedir" "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - 
"github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/client" cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/node/repo" ) var log = logging.Logger("cli") -const ( - metadataTraceContext = "traceContext" -) - // custom CLI error type ErrCmdFailed struct { @@ -46,261 +29,40 @@ func NewCliError(s string) error { // ApiConnector returns API instance type ApiConnector func() api.FullNode -// The flag passed on the command line with the listen address of the API -// server (only used by the tests) -func flagForAPI(t repo.RepoType) string { - switch t { - case repo.FullNode: - return "api-url" - case repo.StorageMiner: - return "miner-api-url" - case repo.Worker: - return "worker-api-url" - default: - panic(fmt.Sprintf("Unknown repo type: %v", t)) - } -} - -func flagForRepo(t repo.RepoType) string { - switch t { - case repo.FullNode: - return "repo" - case repo.StorageMiner: - return "miner-repo" - case repo.Worker: - return "worker-repo" - default: - panic(fmt.Sprintf("Unknown repo type: %v", t)) - } -} - -func envForRepo(t repo.RepoType) string { - switch t { - case repo.FullNode: - return "FULLNODE_API_INFO" - case repo.StorageMiner: - return "MINER_API_INFO" - case repo.Worker: - return "WORKER_API_INFO" - default: - panic(fmt.Sprintf("Unknown repo type: %v", t)) - } -} - -// TODO remove after deprecation period -func envForRepoDeprecation(t repo.RepoType) string { - switch t { - case repo.FullNode: - return "FULLNODE_API_INFO" - case repo.StorageMiner: - return "STORAGE_API_INFO" - case repo.Worker: - return "WORKER_API_INFO" - default: - panic(fmt.Sprintf("Unknown repo type: %v", t)) - } -} - -func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (cliutil.APIInfo, error) { - // Check if there was a flag passed with the listen address of the API - // server (only used by the tests) - apiFlag := flagForAPI(t) - if ctx.IsSet(apiFlag) { - strma := 
ctx.String(apiFlag) - strma = strings.TrimSpace(strma) - - return cliutil.APIInfo{Addr: strma}, nil - } - - envKey := envForRepo(t) - env, ok := os.LookupEnv(envKey) - if !ok { - // TODO remove after deprecation period - envKey = envForRepoDeprecation(t) - env, ok = os.LookupEnv(envKey) - if ok { - log.Warnf("Use deprecation env(%s) value, please use env(%s) instead.", envKey, envForRepo(t)) - } - } - if ok { - return cliutil.ParseApiInfo(env), nil - } - - repoFlag := flagForRepo(t) - - p, err := homedir.Expand(ctx.String(repoFlag)) - if err != nil { - return cliutil.APIInfo{}, xerrors.Errorf("could not expand home dir (%s): %w", repoFlag, err) - } - - r, err := repo.NewFS(p) - if err != nil { - return cliutil.APIInfo{}, xerrors.Errorf("could not open repo at path: %s; %w", p, err) - } - - ma, err := r.APIEndpoint() - if err != nil { - return cliutil.APIInfo{}, xerrors.Errorf("could not get api endpoint: %w", err) - } - - token, err := r.APIToken() - if err != nil { - log.Warnf("Couldn't load CLI token, capabilities may be limited: %v", err) - } - - return cliutil.APIInfo{ - Addr: ma.String(), - Token: token, - }, nil -} - -func GetRawAPI(ctx *cli.Context, t repo.RepoType) (string, http.Header, error) { - ainfo, err := GetAPIInfo(ctx, t) - if err != nil { - return "", nil, xerrors.Errorf("could not get API info: %w", err) +func GetFullNodeServices(ctx *cli.Context) (ServicesAPI, error) { + if tn, ok := ctx.App.Metadata["test-services"]; ok { + return tn.(ServicesAPI), nil } - addr, err := ainfo.DialArgs() + api, c, err := GetFullNodeAPIV1(ctx) if err != nil { - return "", nil, xerrors.Errorf("could not get DialArgs: %w", err) + return nil, err } - return addr, ainfo.AuthHeader(), nil + return &ServicesImpl{api: api, closer: c}, nil } -func GetAPI(ctx *cli.Context) (api.Common, jsonrpc.ClientCloser, error) { - ti, ok := ctx.App.Metadata["repoType"] - if !ok { - log.Errorf("unknown repo type, are you sure you want to use GetAPI?") - ti = repo.FullNode - } - t, ok := 
ti.(repo.RepoType) - if !ok { - log.Errorf("repoType type does not match the type of repo.RepoType") - } +var GetAPIInfo = cliutil.GetAPIInfo +var GetRawAPI = cliutil.GetRawAPI +var GetAPI = cliutil.GetAPI - if tn, ok := ctx.App.Metadata["testnode-storage"]; ok { - return tn.(api.StorageMiner), func() {}, nil - } - if tn, ok := ctx.App.Metadata["testnode-full"]; ok { - return tn.(api.FullNode), func() {}, nil - } +var DaemonContext = cliutil.DaemonContext +var ReqContext = cliutil.ReqContext - addr, headers, err := GetRawAPI(ctx, t) - if err != nil { - return nil, nil, err - } +var GetFullNodeAPI = cliutil.GetFullNodeAPI +var GetFullNodeAPIV1 = cliutil.GetFullNodeAPIV1 +var GetGatewayAPI = cliutil.GetGatewayAPI - return client.NewCommonRPC(ctx.Context, addr, headers) -} - -func GetFullNodeAPI(ctx *cli.Context) (api.FullNode, jsonrpc.ClientCloser, error) { - if tn, ok := ctx.App.Metadata["testnode-full"]; ok { - return tn.(api.FullNode), func() {}, nil - } - - addr, headers, err := GetRawAPI(ctx, repo.FullNode) - if err != nil { - return nil, nil, err - } - - return client.NewFullNodeRPC(ctx.Context, addr, headers) -} - -type GetStorageMinerOptions struct { - PreferHttp bool -} - -type GetStorageMinerOption func(*GetStorageMinerOptions) - -func StorageMinerUseHttp(opts *GetStorageMinerOptions) { - opts.PreferHttp = true -} - -func GetStorageMinerAPI(ctx *cli.Context, opts ...GetStorageMinerOption) (api.StorageMiner, jsonrpc.ClientCloser, error) { - var options GetStorageMinerOptions - for _, opt := range opts { - opt(&options) - } - - if tn, ok := ctx.App.Metadata["testnode-storage"]; ok { - return tn.(api.StorageMiner), func() {}, nil - } - - addr, headers, err := GetRawAPI(ctx, repo.StorageMiner) - if err != nil { - return nil, nil, err - } - - if options.PreferHttp { - u, err := url.Parse(addr) - if err != nil { - return nil, nil, xerrors.Errorf("parsing miner api URL: %w", err) - } - - switch u.Scheme { - case "ws": - u.Scheme = "http" - case "wss": - u.Scheme = 
"https" - } - - addr = u.String() - } - - return client.NewStorageMinerRPC(ctx.Context, addr, headers) -} - -func GetWorkerAPI(ctx *cli.Context) (api.WorkerAPI, jsonrpc.ClientCloser, error) { - addr, headers, err := GetRawAPI(ctx, repo.Worker) - if err != nil { - return nil, nil, err - } - - return client.NewWorkerRPC(ctx.Context, addr, headers) -} - -func GetGatewayAPI(ctx *cli.Context) (api.GatewayAPI, jsonrpc.ClientCloser, error) { - addr, headers, err := GetRawAPI(ctx, repo.FullNode) - if err != nil { - return nil, nil, err - } - - return client.NewGatewayRPC(ctx.Context, addr, headers) -} - -func DaemonContext(cctx *cli.Context) context.Context { - if mtCtx, ok := cctx.App.Metadata[metadataTraceContext]; ok { - return mtCtx.(context.Context) - } - - return context.Background() -} - -// ReqContext returns context for cli execution. Calling it for the first time -// installs SIGTERM handler that will close returned context. -// Not safe for concurrent execution. -func ReqContext(cctx *cli.Context) context.Context { - tCtx := DaemonContext(cctx) - - ctx, done := context.WithCancel(tCtx) - sigChan := make(chan os.Signal, 2) - go func() { - <-sigChan - done() - }() - signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP) - - return ctx -} +var GetStorageMinerAPI = cliutil.GetStorageMinerAPI +var GetWorkerAPI = cliutil.GetWorkerAPI var CommonCommands = []*cli.Command{ - netCmd, - authCmd, - logCmd, - waitApiCmd, - fetchParamCmd, - pprofCmd, + NetCmd, + AuthCmd, + LogCmd, + WaitApiCmd, + FetchParamCmd, + PprofCmd, VersionCmd, } @@ -309,21 +71,23 @@ var Commands = []*cli.Command{ WithCategory("basic", walletCmd), WithCategory("basic", clientCmd), WithCategory("basic", multisigCmd), + WithCategory("basic", filplusCmd), WithCategory("basic", paychCmd), - WithCategory("developer", authCmd), - WithCategory("developer", mpoolCmd), - WithCategory("developer", stateCmd), - WithCategory("developer", chainCmd), - WithCategory("developer", logCmd), - 
WithCategory("developer", waitApiCmd), - WithCategory("developer", fetchParamCmd), - WithCategory("network", netCmd), - WithCategory("network", syncCmd), - pprofCmd, + WithCategory("developer", AuthCmd), + WithCategory("developer", MpoolCmd), + WithCategory("developer", StateCmd), + WithCategory("developer", ChainCmd), + WithCategory("developer", LogCmd), + WithCategory("developer", WaitApiCmd), + WithCategory("developer", FetchParamCmd), + WithCategory("network", NetCmd), + WithCategory("network", SyncCmd), + WithCategory("status", StatusCmd), + PprofCmd, VersionCmd, } func WithCategory(cat string, cmd *cli.Command) *cli.Command { - cmd.Category = cat + cmd.Category = strings.ToUpper(cat) return cmd } diff --git a/cli/disputer.go b/cli/disputer.go new file mode 100644 index 00000000000..ceebeb9397b --- /dev/null +++ b/cli/disputer.go @@ -0,0 +1,446 @@ +package cli + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/lotus/chain/actors" + + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + + "github.com/filecoin-project/go-state-types/big" + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + "golang.org/x/xerrors" + + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/chain/store" + "github.com/urfave/cli/v2" +) + +var disputeLog = logging.Logger("disputer") + +const Confidence = 10 + +type minerDeadline struct { + miner address.Address + index uint64 +} + +var ChainDisputeSetCmd = &cli.Command{ + Name: "disputer", + Usage: "interact with the window post disputer", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "max-fee", + Usage: "Spend up to X FIL per DisputeWindowedPoSt message", + }, + &cli.StringFlag{ + Name: "from", + 
Usage: "optionally specify the account to send messages from", + }, + }, + Subcommands: []*cli.Command{ + disputerStartCmd, + disputerMsgCmd, + }, +} + +var disputerMsgCmd = &cli.Command{ + Name: "dispute", + Usage: "Send a specific DisputeWindowedPoSt message", + ArgsUsage: "[minerAddress index postIndex]", + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 3 { + fmt.Println("Usage: dispute [minerAddress index postIndex]") + return nil + } + + ctx := ReqContext(cctx) + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + toa, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return fmt.Errorf("given 'miner' address %q was invalid: %w", cctx.Args().First(), err) + } + + deadline, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64) + if err != nil { + return err + } + + postIndex, err := strconv.ParseUint(cctx.Args().Get(2), 10, 64) + if err != nil { + return err + } + + fromAddr, err := getSender(ctx, api, cctx.String("from")) + if err != nil { + return err + } + + dpp, aerr := actors.SerializeParams(&miner3.DisputeWindowedPoStParams{ + Deadline: deadline, + PoStIndex: postIndex, + }) + + if aerr != nil { + return xerrors.Errorf("failed to serailize params: %w", aerr) + } + + dmsg := &types.Message{ + To: toa, + From: fromAddr, + Value: big.Zero(), + Method: builtin3.MethodsMiner.DisputeWindowedPoSt, + Params: dpp, + } + + rslt, err := api.StateCall(ctx, dmsg, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("failed to simulate dispute: %w", err) + } + + if rslt.MsgRct.ExitCode == 0 { + mss, err := getMaxFee(cctx.String("max-fee")) + if err != nil { + return err + } + + sm, err := api.MpoolPushMessage(ctx, dmsg, mss) + if err != nil { + return err + } + + fmt.Println("dispute message ", sm.Cid()) + } else { + fmt.Println("dispute is unsuccessful") + } + + return nil + }, +} + +var disputerStartCmd = &cli.Command{ + Name: "start", + Usage: "Start the window 
post disputer", + ArgsUsage: "[minerAddress]", + Flags: []cli.Flag{ + &cli.Uint64Flag{ + Name: "start-epoch", + Usage: "only start disputing PoSts after this epoch ", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := ReqContext(cctx) + + fromAddr, err := getSender(ctx, api, cctx.String("from")) + if err != nil { + return err + } + + mss, err := getMaxFee(cctx.String("max-fee")) + if err != nil { + return err + } + + startEpoch := abi.ChainEpoch(0) + if cctx.IsSet("height") { + startEpoch = abi.ChainEpoch(cctx.Uint64("height")) + } + + disputeLog.Info("checking sync status") + + if err := SyncWait(ctx, api, false); err != nil { + return xerrors.Errorf("sync wait: %w", err) + } + + disputeLog.Info("setting up window post disputer") + + // subscribe to head changes and validate the current value + + headChanges, err := api.ChainNotify(ctx) + if err != nil { + return err + } + + head, ok := <-headChanges + if !ok { + return xerrors.Errorf("Notify stream was invalid") + } + + if len(head) != 1 { + return xerrors.Errorf("Notify first entry should have been one item") + } + + if head[0].Type != store.HCCurrent { + return xerrors.Errorf("expected current head on Notify stream (got %s)", head[0].Type) + } + + lastEpoch := head[0].Val.Height() + lastStatusCheckEpoch := lastEpoch + + // build initial deadlineMap + + minerList, err := api.StateListMiners(ctx, types.EmptyTSK) + if err != nil { + return err + } + + knownMiners := make(map[address.Address]struct{}) + deadlineMap := make(map[abi.ChainEpoch][]minerDeadline) + for _, miner := range minerList { + dClose, dl, err := makeMinerDeadline(ctx, api, miner) + if err != nil { + return xerrors.Errorf("making deadline: %w", err) + } + + deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl) + + knownMiners[miner] = struct{}{} + } + + // when this fires, check for newly created miners, and purge any 
"missed" epochs from deadlineMap + statusCheckTicker := time.NewTicker(time.Hour) + defer statusCheckTicker.Stop() + + disputeLog.Info("starting up window post disputer") + + applyTsk := func(tsk types.TipSetKey) error { + disputeLog.Infow("last checked epoch", "epoch", lastEpoch) + dls, ok := deadlineMap[lastEpoch] + delete(deadlineMap, lastEpoch) + if !ok || startEpoch >= lastEpoch { + // no deadlines closed at this epoch - Confidence, or we haven't reached the start cutoff yet + return nil + } + + dpmsgs := make([]*types.Message, 0) + + startTime := time.Now() + proofsChecked := uint64(0) + + // TODO: Parallelizeable + for _, dl := range dls { + fullDeadlines, err := api.StateMinerDeadlines(ctx, dl.miner, tsk) + if err != nil { + return xerrors.Errorf("failed to load deadlines: %w", err) + } + + if int(dl.index) >= len(fullDeadlines) { + return xerrors.Errorf("deadline index %d not found in deadlines", dl.index) + } + + disputableProofs := fullDeadlines[dl.index].DisputableProofCount + proofsChecked += disputableProofs + + ms, err := makeDisputeWindowedPosts(ctx, api, dl, disputableProofs, fromAddr) + if err != nil { + return xerrors.Errorf("failed to check for disputes: %w", err) + } + + dpmsgs = append(dpmsgs, ms...) 
+ + dClose, dl, err := makeMinerDeadline(ctx, api, dl.miner) + if err != nil { + return xerrors.Errorf("making deadline: %w", err) + } + + deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl) + } + + disputeLog.Infow("checked proofs", "count", proofsChecked, "duration", time.Since(startTime)) + + // TODO: Parallelizeable / can be integrated into the previous deadline-iterating for loop + for _, dpmsg := range dpmsgs { + disputeLog.Infow("disputing a PoSt", "miner", dpmsg.To) + m, err := api.MpoolPushMessage(ctx, dpmsg, mss) + if err != nil { + disputeLog.Errorw("failed to dispute post message", "err", err.Error(), "miner", dpmsg.To) + } else { + disputeLog.Infow("submited dispute", "mcid", m.Cid(), "miner", dpmsg.To) + } + } + + return nil + } + + disputeLoop := func() error { + select { + case notif, ok := <-headChanges: + if !ok { + return xerrors.Errorf("head change channel errored") + } + + for _, val := range notif { + switch val.Type { + case store.HCApply: + for ; lastEpoch <= val.Val.Height(); lastEpoch++ { + err := applyTsk(val.Val.Key()) + if err != nil { + return err + } + } + case store.HCRevert: + // do nothing + default: + return xerrors.Errorf("unexpected head change type %s", val.Type) + } + } + case <-statusCheckTicker.C: + disputeLog.Infof("running status check") + + minerList, err = api.StateListMiners(ctx, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting miner list: %w", err) + } + + for _, m := range minerList { + _, ok := knownMiners[m] + if !ok { + dClose, dl, err := makeMinerDeadline(ctx, api, m) + if err != nil { + return xerrors.Errorf("making deadline: %w", err) + } + + deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl) + + knownMiners[m] = struct{}{} + } + } + + for ; lastStatusCheckEpoch < lastEpoch; lastStatusCheckEpoch++ { + // if an epoch got "skipped" from the deadlineMap somehow, just fry it now instead of letting it sit around forever + _, ok := 
deadlineMap[lastStatusCheckEpoch] + if ok { + disputeLog.Infow("epoch skipped during execution, deleting it from deadlineMap", "epoch", lastStatusCheckEpoch) + delete(deadlineMap, lastStatusCheckEpoch) + } + } + + log.Infof("status check complete") + case <-ctx.Done(): + return ctx.Err() + } + + return nil + } + + for { + err := disputeLoop() + if err == context.Canceled { + disputeLog.Info("disputer shutting down") + break + } + if err != nil { + disputeLog.Errorw("disputer shutting down", "err", err) + return err + } + } + + return nil + }, +} + +// for a given miner, index, and maxPostIndex, tries to dispute posts from 0...postsSnapshotted-1 +// returns a list of DisputeWindowedPoSt msgs that are expected to succeed if sent +func makeDisputeWindowedPosts(ctx context.Context, api v0api.FullNode, dl minerDeadline, postsSnapshotted uint64, sender address.Address) ([]*types.Message, error) { + disputes := make([]*types.Message, 0) + + for i := uint64(0); i < postsSnapshotted; i++ { + + dpp, aerr := actors.SerializeParams(&miner3.DisputeWindowedPoStParams{ + Deadline: dl.index, + PoStIndex: i, + }) + + if aerr != nil { + return nil, xerrors.Errorf("failed to serailize params: %w", aerr) + } + + dispute := &types.Message{ + To: dl.miner, + From: sender, + Value: big.Zero(), + Method: builtin3.MethodsMiner.DisputeWindowedPoSt, + Params: dpp, + } + + rslt, err := api.StateCall(ctx, dispute, types.EmptyTSK) + if err == nil && rslt.MsgRct.ExitCode == 0 { + disputes = append(disputes, dispute) + } + + } + + return disputes, nil +} + +func makeMinerDeadline(ctx context.Context, api v0api.FullNode, mAddr address.Address) (abi.ChainEpoch, *minerDeadline, error) { + dl, err := api.StateMinerProvingDeadline(ctx, mAddr, types.EmptyTSK) + if err != nil { + return -1, nil, xerrors.Errorf("getting proving index list: %w", err) + } + + return dl.Close, &minerDeadline{ + miner: mAddr, + index: dl.Index, + }, nil +} + +func getSender(ctx context.Context, api v0api.FullNode, fromStr 
string) (address.Address, error) { + if fromStr == "" { + return api.WalletDefaultAddress(ctx) + } + + addr, err := address.NewFromString(fromStr) + if err != nil { + return address.Undef, err + } + + has, err := api.WalletHas(ctx, addr) + if err != nil { + return address.Undef, err + } + + if !has { + return address.Undef, xerrors.Errorf("wallet doesn't contain: %s ", addr) + } + + return addr, nil +} + +func getMaxFee(maxStr string) (*lapi.MessageSendSpec, error) { + if maxStr != "" { + maxFee, err := types.ParseFIL(maxStr) + if err != nil { + return nil, xerrors.Errorf("parsing max-fee: %w", err) + } + return &lapi.MessageSendSpec{ + MaxFee: types.BigInt(maxFee), + }, nil + } + + return nil, nil +} diff --git a/cli/filplus.go b/cli/filplus.go new file mode 100644 index 00000000000..007071ea297 --- /dev/null +++ b/cli/filplus.go @@ -0,0 +1,276 @@ +package cli + +import ( + "context" + "fmt" + + verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg" + + "github.com/filecoin-project/go-state-types/big" + + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + "github.com/filecoin-project/lotus/chain/types" + cbor "github.com/ipfs/go-ipld-cbor" +) + +var filplusCmd = &cli.Command{ + Name: "filplus", + Usage: "Interact with the verified registry actor used by Filplus", + Flags: []cli.Flag{}, + Subcommands: []*cli.Command{ + filplusVerifyClientCmd, + filplusListNotariesCmd, + filplusListClientsCmd, + filplusCheckClientCmd, + filplusCheckNotaryCmd, + }, +} + +var filplusVerifyClientCmd = &cli.Command{ + Name: "grant-datacap", + Usage: 
"give allowance to the specified verified client address", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "specify your notary address to send the message from", + Required: true, + }, + }, + Action: func(cctx *cli.Context) error { + froms := cctx.String("from") + if froms == "" { + return fmt.Errorf("must specify from address with --from") + } + + fromk, err := address.NewFromString(froms) + if err != nil { + return err + } + + if cctx.Args().Len() != 2 { + return fmt.Errorf("must specify two arguments: address and allowance") + } + + target, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return err + } + + allowance, err := types.BigFromString(cctx.Args().Get(1)) + if err != nil { + return err + } + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + found, dcap, err := checkNotary(ctx, api, fromk) + if err != nil { + return err + } + + if !found { + return xerrors.New("sender address must be a notary") + } + + if dcap.Cmp(allowance.Int) < 0 { + return xerrors.Errorf("cannot allot more allowance than notary data cap: %s < %s", dcap, allowance) + } + + // TODO: This should be abstracted over actor versions + params, err := actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: target, Allowance: allowance}) + if err != nil { + return err + } + + msg := &types.Message{ + To: verifreg.Address, + From: fromk, + Method: verifreg.Methods.AddVerifiedClient, + Params: params, + } + + smsg, err := api.MpoolPushMessage(ctx, msg, nil) + if err != nil { + return err + } + + fmt.Printf("message sent, now waiting on cid: %s\n", smsg.Cid()) + + mwait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + if mwait.Receipt.ExitCode != 0 { + return fmt.Errorf("failed to add verified client: %d", mwait.Receipt.ExitCode) + } + + return nil + }, +} + +var filplusListNotariesCmd = &cli.Command{ + Name: 
"list-notaries", + Usage: "list all notaries", + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + act, err := api.StateGetActor(ctx, verifreg.Address, types.EmptyTSK) + if err != nil { + return err + } + + apibs := blockstore.NewAPIBlockstore(api) + store := adt.WrapStore(ctx, cbor.NewCborStore(apibs)) + + st, err := verifreg.Load(store, act) + if err != nil { + return err + } + return st.ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error { + _, err := fmt.Printf("%s: %s\n", addr, dcap) + return err + }) + }, +} + +var filplusListClientsCmd = &cli.Command{ + Name: "list-clients", + Usage: "list all verified clients", + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + act, err := api.StateGetActor(ctx, verifreg.Address, types.EmptyTSK) + if err != nil { + return err + } + + apibs := blockstore.NewAPIBlockstore(api) + store := adt.WrapStore(ctx, cbor.NewCborStore(apibs)) + + st, err := verifreg.Load(store, act) + if err != nil { + return err + } + return st.ForEachClient(func(addr address.Address, dcap abi.StoragePower) error { + _, err := fmt.Printf("%s: %s\n", addr, dcap) + return err + }) + }, +} + +var filplusCheckClientCmd = &cli.Command{ + Name: "check-client-datacap", + Usage: "check verified client remaining bytes", + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must specify client address to check") + } + + caddr, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + dcap, err := api.StateVerifiedClientStatus(ctx, caddr, types.EmptyTSK) + if err != nil { + return err + } + if dcap == nil { + return 
xerrors.Errorf("client %s is not a verified client", caddr) + } + + fmt.Println(*dcap) + + return nil + }, +} + +var filplusCheckNotaryCmd = &cli.Command{ + Name: "check-notaries-datacap", + Usage: "check notaries remaining bytes", + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must specify notary address to check") + } + + vaddr, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + found, dcap, err := checkNotary(ctx, api, vaddr) + if err != nil { + return err + } + if !found { + return fmt.Errorf("not found") + } + + fmt.Println(dcap) + + return nil + }, +} + +func checkNotary(ctx context.Context, api v0api.FullNode, vaddr address.Address) (bool, abi.StoragePower, error) { + vid, err := api.StateLookupID(ctx, vaddr, types.EmptyTSK) + if err != nil { + return false, big.Zero(), err + } + + act, err := api.StateGetActor(ctx, verifreg.Address, types.EmptyTSK) + if err != nil { + return false, big.Zero(), err + } + + apibs := blockstore.NewAPIBlockstore(api) + store := adt.WrapStore(ctx, cbor.NewCborStore(apibs)) + + st, err := verifreg.Load(store, act) + if err != nil { + return false, big.Zero(), err + } + + return st.VerifierDataCap(vid) +} diff --git a/cli/init_test.go b/cli/init_test.go new file mode 100644 index 00000000000..8c343bcfabe --- /dev/null +++ b/cli/init_test.go @@ -0,0 +1,9 @@ +package cli + +import ( + logging "github.com/ipfs/go-log/v2" +) + +func init() { + logging.SetLogLevel("watchdog", "ERROR") +} diff --git a/cli/log.go b/cli/log.go index ed624eb8df8..4ab6aa74813 100644 --- a/cli/log.go +++ b/cli/log.go @@ -7,16 +7,16 @@ import ( "golang.org/x/xerrors" ) -var logCmd = &cli.Command{ +var LogCmd = &cli.Command{ Name: "log", Usage: "Manage logging", Subcommands: []*cli.Command{ - logList, - logSetLevel, + LogList, + LogSetLevel, }, } -var 
logList = &cli.Command{ +var LogList = &cli.Command{ Name: "list", Usage: "List log systems", Action: func(cctx *cli.Context) error { @@ -41,7 +41,7 @@ var logList = &cli.Command{ }, } -var logSetLevel = &cli.Command{ +var LogSetLevel = &cli.Command{ Name: "set-level", Usage: "Set log level", ArgsUsage: "[level]", @@ -93,7 +93,7 @@ var logSetLevel = &cli.Command{ for _, system := range systems { if err := api.LogSetLevel(ctx, system, cctx.Args().First()); err != nil { - return xerrors.Errorf("setting log level on %s: %w", system, err) + return xerrors.Errorf("setting log level on %s: %v", system, err) } } diff --git a/cli/mpool.go b/cli/mpool.go index 8f3e937b6ad..b128ccc159f 100644 --- a/cli/mpool.go +++ b/cli/mpool.go @@ -19,24 +19,26 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/config" ) -var mpoolCmd = &cli.Command{ +var MpoolCmd = &cli.Command{ Name: "mpool", Usage: "Manage message pool", Subcommands: []*cli.Command{ - mpoolPending, - mpoolClear, - mpoolSub, - mpoolStat, - mpoolReplaceCmd, - mpoolFindCmd, - mpoolConfig, - mpoolGasPerfCmd, + MpoolPending, + MpoolClear, + MpoolSub, + MpoolStat, + MpoolReplaceCmd, + MpoolFindCmd, + MpoolConfig, + MpoolGasPerfCmd, + mpoolManage, }, } -var mpoolPending = &cli.Command{ +var MpoolPending = &cli.Command{ Name: "pending", Usage: "Get pending messages", Flags: []cli.Flag{ @@ -48,6 +50,14 @@ var mpoolPending = &cli.Command{ Name: "cids", Usage: "only print cids of messages in output", }, + &cli.StringFlag{ + Name: "to", + Usage: "return messages to a given address", + }, + &cli.StringFlag{ + Name: "from", + Usage: "return messages from a given address", + }, }, Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) @@ -58,6 +68,23 @@ var mpoolPending = &cli.Command{ ctx := ReqContext(cctx) + var toa, froma address.Address + if tos := 
cctx.String("to"); tos != "" { + a, err := address.NewFromString(tos) + if err != nil { + return fmt.Errorf("given 'to' address %q was invalid: %w", tos, err) + } + toa = a + } + + if froms := cctx.String("from"); froms != "" { + a, err := address.NewFromString(froms) + if err != nil { + return fmt.Errorf("given 'from' address %q was invalid: %w", froms, err) + } + froma = a + } + var filter map[address.Address]struct{} if cctx.Bool("local") { filter = map[address.Address]struct{}{} @@ -84,6 +111,13 @@ var mpoolPending = &cli.Command{ } } + if toa != address.Undef && msg.Message.To != toa { + continue + } + if froma != address.Undef && msg.Message.From != froma { + continue + } + if cctx.Bool("cids") { fmt.Println(msg.Cid()) } else { @@ -99,9 +133,11 @@ var mpoolPending = &cli.Command{ }, } -var mpoolClear = &cli.Command{ - Name: "clear", - Usage: "Clear all pending messages from the mpool (USE WITH CARE)", +// Deprecated: MpoolClear is now available at `lotus-shed mpool clear` +var MpoolClear = &cli.Command{ + Name: "clear", + Usage: "Clear all pending messages from the mpool (USE WITH CARE) (DEPRECATED)", + Hidden: true, Flags: []cli.Flag{ &cli.BoolFlag{ Name: "local", @@ -113,6 +149,7 @@ var mpoolClear = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + fmt.Println("DEPRECATED: This behavior is being moved to `lotus-shed mpool clear`") api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -132,7 +169,7 @@ var mpoolClear = &cli.Command{ }, } -var mpoolSub = &cli.Command{ +var MpoolSub = &cli.Command{ Name: "sub", Usage: "Subscribe to mpool changes", Action: func(cctx *cli.Context) error { @@ -164,7 +201,7 @@ var mpoolSub = &cli.Command{ }, } -var mpoolStat = &cli.Command{ +var MpoolStat = &cli.Command{ Name: "stat", Usage: "print mempool stats", Flags: []cli.Flag{ @@ -232,6 +269,7 @@ var mpoolStat = &cli.Command{ addr string past, cur, future uint64 belowCurr, belowPast uint64 + gasLimit big.Int } buckets := 
map[address.Address]*statBucket{} @@ -273,6 +311,7 @@ var mpoolStat = &cli.Command{ var s mpStat s.addr = a.String() + s.gasLimit = big.Zero() for _, m := range bkt.msgs { if m.Message.Nonce < act.Nonce { @@ -289,6 +328,8 @@ var mpoolStat = &cli.Command{ if m.Message.GasFeeCap.LessThan(minBF) { s.belowPast++ } + + s.gasLimit = big.Add(s.gasLimit, types.NewInt(uint64(m.Message.GasLimit))) } out = append(out, s) @@ -299,6 +340,7 @@ var mpoolStat = &cli.Command{ }) var total mpStat + total.gasLimit = big.Zero() for _, stat := range out { total.past += stat.past @@ -306,32 +348,33 @@ var mpoolStat = &cli.Command{ total.future += stat.future total.belowCurr += stat.belowCurr total.belowPast += stat.belowPast + total.gasLimit = big.Add(total.gasLimit, stat.gasLimit) - fmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d \n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast) + fmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast, stat.gasLimit) } fmt.Println("-----") - fmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d \n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast) + fmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast, total.gasLimit) return nil }, } -var mpoolReplaceCmd = &cli.Command{ +var MpoolReplaceCmd = &cli.Command{ Name: "replace", Usage: "replace a message in the mempool", Flags: []cli.Flag{ &cli.StringFlag{ Name: "gas-feecap", - Usage: "gas feecap for new message", + Usage: "gas feecap for new message (burn and pay to miner, attoFIL/GasUnit)", }, &cli.StringFlag{ Name: "gas-premium", - Usage: 
"gas price for new message", + Usage: "gas price for new message (pay to miner, attoFIL/GasUnit)", }, &cli.Int64Flag{ Name: "gas-limit", - Usage: "gas price for new message", + Usage: "gas limit for new message (GasUnit)", }, &cli.BoolFlag{ Name: "auto", @@ -339,7 +382,7 @@ var mpoolReplaceCmd = &cli.Command{ }, &cli.StringFlag{ Name: "max-fee", - Usage: "Spend up to X FIL for this message (applicable for auto mode)", + Usage: "Spend up to X attoFIL for this message (applicable for auto mode)", }, }, ArgsUsage: " | ", @@ -434,9 +477,16 @@ var mpoolReplaceCmd = &cli.Command{ msg.GasPremium = big.Max(retm.GasPremium, minRBF) msg.GasFeeCap = big.Max(retm.GasFeeCap, msg.GasPremium) - messagepool.CapGasFee(&msg, mss.Get().MaxFee) + + mff := func() (abi.TokenAmount, error) { + return abi.TokenAmount(config.DefaultDefaultMaxFee), nil + } + + messagepool.CapGasFee(mff, &msg, mss) } else { - msg.GasLimit = cctx.Int64("gas-limit") + if cctx.IsSet("gas-limit") { + msg.GasLimit = cctx.Int64("gas-limit") + } msg.GasPremium, err = types.BigFromString(cctx.String("gas-premium")) if err != nil { return fmt.Errorf("parsing gas-premium: %w", err) @@ -463,7 +513,7 @@ var mpoolReplaceCmd = &cli.Command{ }, } -var mpoolFindCmd = &cli.Command{ +var MpoolFindCmd = &cli.Command{ Name: "find", Usage: "find a message in the mempool", Flags: []cli.Flag{ @@ -546,7 +596,7 @@ var mpoolFindCmd = &cli.Command{ }, } -var mpoolConfig = &cli.Command{ +var MpoolConfig = &cli.Command{ Name: "config", Usage: "get or set current mpool configuration", ArgsUsage: "[new-config]", @@ -591,7 +641,7 @@ var mpoolConfig = &cli.Command{ }, } -var mpoolGasPerfCmd = &cli.Command{ +var MpoolGasPerfCmd = &cli.Command{ Name: "gas-perf", Usage: "Check gas performance of messages in mempool", Flags: []cli.Flag{ diff --git a/cli/mpool_manage.go b/cli/mpool_manage.go new file mode 100644 index 00000000000..164a0584241 --- /dev/null +++ b/cli/mpool_manage.go @@ -0,0 +1,360 @@ +package cli + +import ( + "context" + "fmt" + 
"sort" + + "github.com/Kubuxu/imtui" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/messagepool" + types "github.com/filecoin-project/lotus/chain/types" + "github.com/gdamore/tcell/v2" + cid "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var mpoolManage = &cli.Command{ + Name: "manage", + Action: func(cctx *cli.Context) error { + srv, err := GetFullNodeServices(cctx) + if err != nil { + return err + } + defer srv.Close() //nolint:errcheck + + ctx := ReqContext(cctx) + + _, localAddr, err := srv.LocalAddresses(ctx) + if err != nil { + return xerrors.Errorf("getting local addresses: %w", err) + } + + msgs, err := srv.MpoolPendingFilter(ctx, func(sm *types.SignedMessage) bool { + if sm.Message.From.Empty() { + return false + } + for _, a := range localAddr { + if a == sm.Message.From { + return true + } + } + return false + }, types.EmptyTSK) + if err != nil { + return err + } + + t, err := imtui.NewTui() + if err != nil { + panic(err) + } + + mm := &mmUI{ + ctx: ctx, + srv: srv, + addrs: localAddr, + messages: msgs, + } + sort.Slice(mm.addrs, func(i, j int) bool { + return mm.addrs[i].String() < mm.addrs[j].String() + }) + t.PushScene(mm.addrSelect()) + + err = t.Run() + + if err != nil { + panic(err) + } + + return nil + }, +} + +type mmUI struct { + ctx context.Context + srv ServicesAPI + addrs []address.Address + messages []*types.SignedMessage +} + +func (mm *mmUI) addrSelect() func(*imtui.Tui) error { + rows := [][]string{{"Address", "No. 
Messages"}} + mCount := map[address.Address]int{} + for _, sm := range mm.messages { + mCount[sm.Message.From]++ + } + for _, a := range mm.addrs { + rows = append(rows, []string{a.String(), fmt.Sprintf("%d", mCount[a])}) + } + + flex := []int{4, 1} + sel := 0 + scroll := 0 + return func(t *imtui.Tui) error { + if t.CurrentKey != nil && t.CurrentKey.Key() == tcell.KeyEnter { + if sel > 0 { + t.ReplaceScene(mm.messageLising(mm.addrs[sel-1])) + } + } + t.FlexTable(0, 0, 0, &sel, &scroll, rows, flex, true) + return nil + } +} + +func errUI(err error) func(*imtui.Tui) error { + return func(t *imtui.Tui) error { + return err + } +} + +type msgInfo struct { + sm *types.SignedMessage + checks []api.MessageCheckStatus +} + +func (mi *msgInfo) Row() []string { + cidStr := mi.sm.Cid().String() + failedChecks := 0 + for _, c := range mi.checks { + if !c.OK { + failedChecks++ + } + } + shortAddr := mi.sm.Message.To.String() + if len(shortAddr) > 16 { + shortAddr = "…" + shortAddr[len(shortAddr)-16:] + } + var fCk string + if failedChecks == 0 { + fCk = "[:green:]OK" + } else { + fCk = "[:orange:]" + fmt.Sprintf("%d", failedChecks) + } + return []string{"…" + cidStr[len(cidStr)-32:], shortAddr, + fmt.Sprintf("%d", mi.sm.Message.Nonce), types.FIL(mi.sm.Message.Value).String(), + fmt.Sprintf("%d", mi.sm.Message.Method), fCk} + +} + +func (mm *mmUI) messageLising(a address.Address) func(*imtui.Tui) error { + genMsgInfos := func() ([]msgInfo, error) { + msgs, err := mm.srv.MpoolPendingFilter(mm.ctx, func(sm *types.SignedMessage) bool { + if sm.Message.From.Empty() { + return false + } + if a == sm.Message.From { + return true + } + return false + }, types.EmptyTSK) + + if err != nil { + return nil, xerrors.Errorf("getting pending: %w", err) + } + + msgIdx := map[cid.Cid]*types.SignedMessage{} + for _, sm := range msgs { + if sm.Message.From == a { + msgIdx[sm.Message.Cid()] = sm + msgIdx[sm.Cid()] = sm + } + } + + checks, err := mm.srv.MpoolCheckPendingMessages(mm.ctx, a) + if err 
!= nil { + return nil, xerrors.Errorf("checking pending: %w", err) + } + msgInfos := make([]msgInfo, 0, len(checks)) + for _, msgChecks := range checks { + failingChecks := []api.MessageCheckStatus{} + for _, c := range msgChecks { + if !c.OK { + failingChecks = append(failingChecks, c) + } + } + msgInfos = append(msgInfos, msgInfo{ + sm: msgIdx[msgChecks[0].Cid], + checks: failingChecks, + }) + } + return msgInfos, nil + } + + sel := 0 + scroll := 0 + + var msgInfos []msgInfo + var rows [][]string + flex := []int{3, 2, 1, 1, 1, 1} + refresh := true + + return func(t *imtui.Tui) error { + if refresh { + var err error + msgInfos, err = genMsgInfos() + if err != nil { + return xerrors.Errorf("getting msgInfos: %w", err) + } + + rows = [][]string{{"Message Cid", "To", "Nonce", "Value", "Method", "Checks"}} + for _, mi := range msgInfos { + rows = append(rows, mi.Row()) + } + refresh = false + } + + if t.CurrentKey != nil && t.CurrentKey.Key() == tcell.KeyEnter { + if sel > 0 { + t.PushScene(mm.messageDetail(msgInfos[sel-1])) + refresh = true + return nil + } + } + + t.Label(0, 0, fmt.Sprintf("Address: %s", a), tcell.StyleDefault) + t.FlexTable(1, 0, 0, &sel, &scroll, rows, flex, true) + return nil + } +} + +func (mm *mmUI) messageDetail(mi msgInfo) func(*imtui.Tui) error { + baseFee, err := mm.srv.GetBaseFee(mm.ctx) + if err != nil { + return errUI(err) + } + _ = baseFee + + m := mi.sm.Message + maxFee := big.Mul(m.GasFeeCap, big.NewInt(m.GasLimit)) + + issues := [][]string{} + for _, c := range mi.checks { + issues = append(issues, []string{c.Code.String(), c.Err}) + } + issuesFlex := []int{1, 3} + var sel, scroll int + + executeReprice := false + executeNoop := false + return func(t *imtui.Tui) error { + if executeReprice { + m.GasFeeCap = big.Div(maxFee, big.NewInt(m.GasLimit)) + m.GasPremium = messagepool.ComputeMinRBF(m.GasPremium) + m.GasFeeCap = big.Max(m.GasFeeCap, m.GasPremium) + + _, _, err := mm.srv.PublishMessage(mm.ctx, &api.MessagePrototype{ + Message: 
m, + ValidNonce: true, + }, true) + if err != nil { + return err + } + t.PopScene() + return nil + } + if executeNoop { + nop := types.Message{ + To: builtin.BurntFundsActorAddr, + From: m.From, + + Nonce: m.Nonce, + Value: big.Zero(), + } + + nop.GasPremium = messagepool.ComputeMinRBF(m.GasPremium) + + _, _, err := mm.srv.PublishMessage(mm.ctx, &api.MessagePrototype{ + Message: nop, + ValidNonce: true, + }, true) + + if err != nil { + return xerrors.Errorf("publishing noop message: %w", err) + } + + t.PopScene() + return nil + } + + if t.CurrentKey != nil { + if t.CurrentKey.Key() == tcell.KeyLeft { + t.PopScene() + return nil + } + if t.CurrentKey.Key() == tcell.KeyRune { + switch t.CurrentKey.Rune() { + case 'R', 'r': + t.PushScene(feeUI(baseFee, m.GasLimit, &maxFee, &executeReprice)) + return nil + case 'N', 'n': + t.PushScene(confirmationScene( + &executeNoop, + "Are you sure you want to cancel the message by", + "replacing it with a message with no effects?")) + return nil + } + } + } + + row := 0 + defS := tcell.StyleDefault + display := func(f string, args ...interface{}) { + t.Label(0, row, fmt.Sprintf(f, args...), defS) + row++ + } + + display("Message CID: %s", m.Cid()) + display("Signed Message CID: %s", mi.sm.Cid()) + row++ + display("From: %s", m.From) + display("To: %s", m.To) + row++ + display("Nonce: %d", m.Nonce) + display("Value: %s", types.FIL(m.Value)) + row++ + display("GasLimit: %d", m.GasLimit) + display("GasPremium: %s", types.FIL(m.GasPremium).Short()) + display("GasFeeCap %s", types.FIL(m.GasFeeCap).Short()) + row++ + display("Press R to reprice this message") + display("Press N to replace this message with no-operation message") + row++ + + t.FlexTable(row, 0, 0, &sel, &scroll, issues, issuesFlex, false) + + return nil + } +} + +func confirmationScene(yes *bool, ask ...string) func(*imtui.Tui) error { + return func(t *imtui.Tui) error { + row := 0 + defS := tcell.StyleDefault + display := func(f string, args ...interface{}) { + 
t.Label(0, row, fmt.Sprintf(f, args...), defS) + row++ + } + + for _, a := range ask { + display(a) + } + row++ + display("Enter to confirm") + display("Esc to cancel") + + if t.CurrentKey != nil { + if t.CurrentKey.Key() == tcell.KeyEnter { + *yes = true + t.PopScene() + return nil + } + } + + return nil + } +} diff --git a/cli/multisig.go b/cli/multisig.go index 8abae51820b..c51677d85ca 100644 --- a/cli/multisig.go +++ b/cli/multisig.go @@ -29,7 +29,7 @@ import ( init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" msig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" @@ -95,11 +95,13 @@ var msigCreateCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("multisigs must have at least one signer")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) var addrs []address.Address @@ -146,13 +148,20 @@ var msigCreateCmd = &cli.Command{ gp := types.NewInt(1) - msgCid, err := api.MsigCreate(ctx, required, addrs, d, intVal, sendAddr, gp) + proto, err := api.MsigCreate(ctx, required, addrs, d, intVal, sendAddr, gp) + if err != nil { + return err + } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) if err != nil { return err } + msgCid := sm.Cid() + // wait for it to get mined into a block - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -202,7 +211,7 @@ var msigInspectCmd = &cli.Command{ defer closer() ctx := ReqContext(cctx) - store := 
adt.WrapStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(api))) + store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api))) maddr, err := address.NewFromString(cctx.Args().First()) if err != nil { @@ -364,11 +373,13 @@ var msigProposeCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must either pass three or five arguments")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -426,14 +437,21 @@ var msigProposeCmd = &cli.Command{ return fmt.Errorf("actor %s is not a multisig actor", msig) } - msgCid, err := api.MsigPropose(ctx, msig, dest, types.BigInt(value), from, method, params) + proto, err := api.MsigPropose(ctx, msig, dest, types.BigInt(value), from, method, params) + if err != nil { + return err + } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) if err != nil { return err } + msgCid := sm.Cid() + fmt.Println("send proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -473,19 +491,21 @@ var msigApproveCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass at least multisig address and message ID")) } - if cctx.Args().Len() > 5 && cctx.Args().Len() != 7 { - return ShowHelp(cctx, fmt.Errorf("usage: msig approve [ ]")) + if cctx.Args().Len() > 2 && cctx.Args().Len() < 5 { + return ShowHelp(cctx, fmt.Errorf("usage: msig approve ")) } - if cctx.Args().Len() > 2 && cctx.Args().Len() != 5 { - return ShowHelp(cctx, fmt.Errorf("usage: msig approve ")) + if cctx.Args().Len() > 5 && cctx.Args().Len() != 7 { + return ShowHelp(cctx, fmt.Errorf("usage: msig approve [ ]")) } - api, closer, err := GetFullNodeAPI(cctx) + 
srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -515,10 +535,17 @@ var msigApproveCmd = &cli.Command{ var msgCid cid.Cid if cctx.Args().Len() == 2 { - msgCid, err = api.MsigApprove(ctx, msig, txid, from) + proto, err := api.MsigApprove(ctx, msig, txid, from) + if err != nil { + return err + } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) if err != nil { return err } + + msgCid = sm.Cid() } else { proposer, err := address.NewFromString(cctx.Args().Get(2)) if err != nil { @@ -558,15 +585,22 @@ var msigApproveCmd = &cli.Command{ params = p } - msgCid, err = api.MsigApproveTxnHash(ctx, msig, txid, proposer, dest, types.BigInt(value), from, method, params) + proto, err := api.MsigApproveTxnHash(ctx, msig, txid, proposer, dest, types.BigInt(value), from, method, params) if err != nil { return err } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid = sm.Cid() } fmt.Println("sent approval in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -598,11 +632,13 @@ var msigRemoveProposeCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address and signer address")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -630,14 +666,21 @@ var msigRemoveProposeCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigRemoveSigner(ctx, msig, from, addr, cctx.Bool("decrease-threshold")) + proto, err := 
api.MsigRemoveSigner(ctx, msig, from, addr, cctx.Bool("decrease-threshold")) + if err != nil { + return err + } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) if err != nil { return err } + msgCid := sm.Cid() + fmt.Println("sent remove proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -676,11 +719,13 @@ var msigAddProposeCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address and signer address")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -708,14 +753,21 @@ var msigAddProposeCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigAddPropose(ctx, msig, from, addr, cctx.Bool("increase-threshold")) + proto, err := api.MsigAddPropose(ctx, msig, from, addr, cctx.Bool("increase-threshold")) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Fprintln(cctx.App.Writer, "sent add proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -743,11 +795,13 @@ var msigAddApproveCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, proposer address, transaction id, new signer address, whether to increase threshold")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := 
ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -790,14 +844,21 @@ var msigAddApproveCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigAddApprove(ctx, msig, from, txid, prop, newAdd, inc) + proto, err := api.MsigAddApprove(ctx, msig, from, txid, prop, newAdd, inc) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent add approval in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -825,11 +886,13 @@ var msigAddCancelCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, transaction id, new signer address, whether to increase threshold")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -867,14 +930,21 @@ var msigAddCancelCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigAddCancel(ctx, msig, from, txid, newAdd, inc) + proto, err := api.MsigAddCancel(ctx, msig, from, txid, newAdd, inc) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent add cancellation in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -902,11 +972,13 @@ var msigSwapProposeCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, old signer address, new signer address")) } - api, closer, err := 
GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -939,14 +1011,21 @@ var msigSwapProposeCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigSwapPropose(ctx, msig, from, oldAdd, newAdd) + proto, err := api.MsigSwapPropose(ctx, msig, from, oldAdd, newAdd) + if err != nil { + return err + } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) if err != nil { return err } + msgCid := sm.Cid() + fmt.Println("sent swap proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -974,11 +1053,13 @@ var msigSwapApproveCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, proposer address, transaction id, old signer address, new signer address")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -1021,14 +1102,21 @@ var msigSwapApproveCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigSwapApprove(ctx, msig, from, txid, prop, oldAdd, newAdd) + proto, err := api.MsigSwapApprove(ctx, msig, from, txid, prop, oldAdd, newAdd) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent swap approval in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ 
-1056,11 +1144,13 @@ var msigSwapCancelCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, transaction id, old signer address, new signer address")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -1098,14 +1188,21 @@ var msigSwapCancelCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigSwapCancel(ctx, msig, from, txid, oldAdd, newAdd) + proto, err := api.MsigSwapCancel(ctx, msig, from, txid, oldAdd, newAdd) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent swap cancellation in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -1133,11 +1230,13 @@ var msigLockProposeCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, start epoch, unlock duration, and amount")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -1178,21 +1277,28 @@ var msigLockProposeCmd = &cli.Command{ params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{ StartEpoch: abi.ChainEpoch(start), UnlockDuration: abi.ChainEpoch(duration), - Amount: abi.NewTokenAmount(amount.Int64()), + Amount: big.Int(amount), }) if actErr != nil { return actErr } - msgCid, err := api.MsigPropose(ctx, msig, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) + proto, err := 
api.MsigPropose(ctx, msig, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent lock proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -1220,11 +1326,13 @@ var msigLockApproveCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, proposer address, tx id, start epoch, unlock duration, and amount")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -1275,21 +1383,28 @@ var msigLockApproveCmd = &cli.Command{ params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{ StartEpoch: abi.ChainEpoch(start), UnlockDuration: abi.ChainEpoch(duration), - Amount: abi.NewTokenAmount(amount.Int64()), + Amount: big.Int(amount), }) if actErr != nil { return actErr } - msgCid, err := api.MsigApproveTxnHash(ctx, msig, txid, prop, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) + proto, err := api.MsigApproveTxnHash(ctx, msig, txid, prop, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent lock approval in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -1313,15 +1428,17 @@ var msigLockCancelCmd 
= &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - if cctx.Args().Len() != 6 { + if cctx.Args().Len() != 5 { return ShowHelp(cctx, fmt.Errorf("must pass multisig address, tx id, start epoch, unlock duration, and amount")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -1367,21 +1484,28 @@ var msigLockCancelCmd = &cli.Command{ params, actErr := actors.SerializeParams(&msig2.LockBalanceParams{ StartEpoch: abi.ChainEpoch(start), UnlockDuration: abi.ChainEpoch(duration), - Amount: abi.NewTokenAmount(amount.Int64()), + Amount: big.Int(amount), }) if actErr != nil { return actErr } - msgCid, err := api.MsigCancel(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) + proto, err := api.MsigCancel(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) + if err != nil { + return err + } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) if err != nil { return err } + msgCid := sm.Cid() + fmt.Println("sent lock cancellation in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -1471,11 +1595,13 @@ var msigProposeThresholdCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address and new threshold value")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -1511,14 +1637,21 @@ var msigProposeThresholdCmd = &cli.Command{ return actErr } - msgCid, 
err := api.MsigPropose(ctx, msig, msig, types.NewInt(0), from, uint64(multisig.Methods.ChangeNumApprovalsThreshold), params) + proto, err := api.MsigPropose(ctx, msig, msig, types.NewInt(0), from, uint64(multisig.Methods.ChangeNumApprovalsThreshold), params) if err != nil { return fmt.Errorf("failed to propose change of threshold: %w", err) } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent change threshold proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } diff --git a/cli/multisig_test.go b/cli/multisig_test.go deleted file mode 100644 index 82472cd627b..00000000000 --- a/cli/multisig_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package cli - -import ( - "context" - "os" - "testing" - "time" - - clitest "github.com/filecoin-project/lotus/cli/test" -) - -// TestMultisig does a basic test to exercise the multisig CLI -// commands -func TestMultisig(t *testing.T) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() - - blocktime := 5 * time.Millisecond - ctx := context.Background() - clientNode, _ := clitest.StartOneNodeOneMiner(ctx, t, blocktime) - clitest.RunMultisigTest(t, Commands, clientNode) -} diff --git a/cli/net.go b/cli/net.go index 9c40c70c7d7..fdd0a13d656 100644 --- a/cli/net.go +++ b/cli/net.go @@ -18,22 +18,24 @@ import ( "github.com/filecoin-project/go-address" + atypes "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/addrutil" ) -var netCmd = &cli.Command{ +var NetCmd = &cli.Command{ Name: "net", Usage: "Manage P2P Network", Subcommands: []*cli.Command{ NetPeers, - netConnect, + NetConnect, NetListen, NetId, - netFindPeer, - netScores, + NetFindPeer, + NetScores, NetReachability, NetBandwidthCmd, + 
NetBlockCmd, }, } @@ -46,6 +48,11 @@ var NetPeers = &cli.Command{ Aliases: []string{"a"}, Usage: "Print agent name", }, + &cli.BoolFlag{ + Name: "extended", + Aliases: []string{"x"}, + Usage: "Print extended peer information in json", + }, }, Action: func(cctx *cli.Context) error { api, closer, err := GetAPI(cctx) @@ -63,31 +70,56 @@ var NetPeers = &cli.Command{ return strings.Compare(string(peers[i].ID), string(peers[j].ID)) > 0 }) - for _, peer := range peers { - var agent string - if cctx.Bool("agent") { - agent, err = api.NetAgentVersion(ctx, peer.ID) + if cctx.Bool("extended") { + // deduplicate + seen := make(map[peer.ID]struct{}) + + for _, peer := range peers { + _, dup := seen[peer.ID] + if dup { + continue + } + seen[peer.ID] = struct{}{} + + info, err := api.NetPeerInfo(ctx, peer.ID) if err != nil { - log.Warnf("getting agent version: %s", err) + log.Warnf("error getting extended peer info: %s", err) } else { - agent = ", " + agent + bytes, err := json.Marshal(&info) + if err != nil { + log.Warnf("error marshalling extended peer info: %s", err) + } else { + fmt.Println(string(bytes)) + } } } - - fmt.Printf("%s, %s%s\n", peer.ID, peer.Addrs, agent) + } else { + for _, peer := range peers { + var agent string + if cctx.Bool("agent") { + agent, err = api.NetAgentVersion(ctx, peer.ID) + if err != nil { + log.Warnf("getting agent version: %s", err) + } else { + agent = ", " + agent + } + } + fmt.Printf("%s, %s%s\n", peer.ID, peer.Addrs, agent) + } } return nil }, } -var netScores = &cli.Command{ +var NetScores = &cli.Command{ Name: "scores", Usage: "Print peers' pubsub scores", Flags: []cli.Flag{ &cli.BoolFlag{ - Name: "extended", - Usage: "print extended peer scores in json", + Name: "extended", + Aliases: []string{"x"}, + Usage: "print extended peer scores in json", }, }, Action: func(cctx *cli.Context) error { @@ -143,7 +175,7 @@ var NetListen = &cli.Command{ }, } -var netConnect = &cli.Command{ +var NetConnect = &cli.Command{ Name: "connect", Usage: 
"Connect to a peer", ArgsUsage: "[peerMultiaddr|minerActorAddress]", @@ -232,7 +264,7 @@ var NetId = &cli.Command{ }, } -var netFindPeer = &cli.Command{ +var NetFindPeer = &cli.Command{ Name: "findpeer", Usage: "Find the addresses of a given peerID", ArgsUsage: "[peerId]", @@ -375,3 +407,202 @@ var NetBandwidthCmd = &cli.Command{ }, } + +var NetBlockCmd = &cli.Command{ + Name: "block", + Usage: "Manage network connection gating rules", + Subcommands: []*cli.Command{ + NetBlockAddCmd, + NetBlockRemoveCmd, + NetBlockListCmd, + }, +} + +var NetBlockAddCmd = &cli.Command{ + Name: "add", + Usage: "Add connection gating rules", + Subcommands: []*cli.Command{ + NetBlockAddPeer, + NetBlockAddIP, + NetBlockAddSubnet, + }, +} + +var NetBlockAddPeer = &cli.Command{ + Name: "peer", + Usage: "Block a peer", + ArgsUsage: " ...", + Action: func(cctx *cli.Context) error { + api, closer, err := GetAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + var peers []peer.ID + for _, s := range cctx.Args().Slice() { + p, err := peer.Decode(s) + if err != nil { + return err + } + + peers = append(peers, p) + } + + return api.NetBlockAdd(ctx, atypes.NetBlockList{Peers: peers}) + }, +} + +var NetBlockAddIP = &cli.Command{ + Name: "ip", + Usage: "Block an IP address", + ArgsUsage: " ...", + Action: func(cctx *cli.Context) error { + api, closer, err := GetAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + return api.NetBlockAdd(ctx, atypes.NetBlockList{IPAddrs: cctx.Args().Slice()}) + }, +} + +var NetBlockAddSubnet = &cli.Command{ + Name: "subnet", + Usage: "Block an IP subnet", + ArgsUsage: " ...", + Action: func(cctx *cli.Context) error { + api, closer, err := GetAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + return api.NetBlockAdd(ctx, atypes.NetBlockList{IPSubnets: cctx.Args().Slice()}) + }, +} + +var NetBlockRemoveCmd = &cli.Command{ + Name: "remove", + Usage: 
"Remove connection gating rules", + Subcommands: []*cli.Command{ + NetBlockRemovePeer, + NetBlockRemoveIP, + NetBlockRemoveSubnet, + }, +} + +var NetBlockRemovePeer = &cli.Command{ + Name: "peer", + Usage: "Unblock a peer", + ArgsUsage: " ...", + Action: func(cctx *cli.Context) error { + api, closer, err := GetAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + var peers []peer.ID + for _, s := range cctx.Args().Slice() { + p, err := peer.Decode(s) + if err != nil { + return err + } + + peers = append(peers, p) + } + + return api.NetBlockRemove(ctx, atypes.NetBlockList{Peers: peers}) + }, +} + +var NetBlockRemoveIP = &cli.Command{ + Name: "ip", + Usage: "Unblock an IP address", + ArgsUsage: " ...", + Action: func(cctx *cli.Context) error { + api, closer, err := GetAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + return api.NetBlockRemove(ctx, atypes.NetBlockList{IPAddrs: cctx.Args().Slice()}) + }, +} + +var NetBlockRemoveSubnet = &cli.Command{ + Name: "subnet", + Usage: "Unblock an IP subnet", + ArgsUsage: " ...", + Action: func(cctx *cli.Context) error { + api, closer, err := GetAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + return api.NetBlockRemove(ctx, atypes.NetBlockList{IPSubnets: cctx.Args().Slice()}) + }, +} + +var NetBlockListCmd = &cli.Command{ + Name: "list", + Usage: "list connection gating rules", + Action: func(cctx *cli.Context) error { + api, closer, err := GetAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + acl, err := api.NetBlockList(ctx) + if err != nil { + return err + } + + if len(acl.Peers) != 0 { + sort.Slice(acl.Peers, func(i, j int) bool { + return strings.Compare(string(acl.Peers[i]), string(acl.Peers[j])) > 0 + }) + + fmt.Println("Blocked Peers:") + for _, p := range acl.Peers { + fmt.Printf("\t%s\n", p) + } + } + + if len(acl.IPAddrs) != 0 { + sort.Slice(acl.IPAddrs, 
func(i, j int) bool { + return strings.Compare(acl.IPAddrs[i], acl.IPAddrs[j]) < 0 + }) + + fmt.Println("Blocked IPs:") + for _, a := range acl.IPAddrs { + fmt.Printf("\t%s\n", a) + } + } + + if len(acl.IPSubnets) != 0 { + sort.Slice(acl.IPSubnets, func(i, j int) bool { + return strings.Compare(acl.IPSubnets[i], acl.IPSubnets[j]) < 0 + }) + + fmt.Println("Blocked Subnets:") + for _, n := range acl.IPSubnets { + fmt.Printf("\t%s\n", n) + } + } + + return nil + }, +} diff --git a/cli/params.go b/cli/params.go index 05c0a4cda7d..1aa6555c527 100644 --- a/cli/params.go +++ b/cli/params.go @@ -9,7 +9,7 @@ import ( "github.com/filecoin-project/lotus/build" ) -var fetchParamCmd = &cli.Command{ +var FetchParamCmd = &cli.Command{ Name: "fetch-params", Usage: "Fetch proving parameters", ArgsUsage: "[sectorSize]", @@ -23,7 +23,7 @@ var fetchParamCmd = &cli.Command{ } sectorSize := uint64(sectorSizeInt) - err = paramfetch.GetParams(ReqContext(cctx), build.ParametersJSON(), sectorSize) + err = paramfetch.GetParams(ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), sectorSize) if err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } diff --git a/cli/pprof.go b/cli/pprof.go index dccb97f9a9f..0da24591034 100644 --- a/cli/pprof.go +++ b/cli/pprof.go @@ -11,7 +11,7 @@ import ( "github.com/filecoin-project/lotus/node/repo" ) -var pprofCmd = &cli.Command{ +var PprofCmd = &cli.Command{ Name: "pprof", Hidden: true, Subcommands: []*cli.Command{ diff --git a/cli/send.go b/cli/send.go index 14c1b263b01..a5200d3b8e0 100644 --- a/cli/send.go +++ b/cli/send.go @@ -1,21 +1,16 @@ package cli import ( - "bytes" - "context" "encoding/hex" - "encoding/json" "fmt" - "reflect" "github.com/urfave/cli/v2" - cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/stmgr" + 
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" ) @@ -43,15 +38,15 @@ var sendCmd = &cli.Command{ Usage: "specify gas limit", Value: 0, }, - &cli.Int64Flag{ + &cli.Uint64Flag{ Name: "nonce", Usage: "specify the nonce to use", - Value: -1, + Value: 0, }, &cli.Uint64Flag{ Name: "method", Usage: "specify method to invoke", - Value: 0, + Value: uint64(builtin.MethodSend), }, &cli.StringFlag{ Name: "params-json", @@ -61,21 +56,30 @@ var sendCmd = &cli.Command{ Name: "params-hex", Usage: "specify invocation parameters in hex", }, + &cli.BoolFlag{ + Name: "force", + Usage: "Deprecated: use global 'force-send'", + }, }, Action: func(cctx *cli.Context) error { + if cctx.IsSet("force") { + fmt.Println("'force' flag is deprecated, use global flag 'force-send'") + } + if cctx.Args().Len() != 2 { return ShowHelp(cctx, fmt.Errorf("'send' expects two arguments, target and amount")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck ctx := ReqContext(cctx) + var params SendParams - toAddr, err := address.NewFromString(cctx.Args().Get(0)) + params.To, err = address.NewFromString(cctx.Args().Get(0)) if err != nil { return ShowHelp(cctx, fmt.Errorf("failed to parse target address: %w", err)) } @@ -84,109 +88,74 @@ var sendCmd = &cli.Command{ if err != nil { return ShowHelp(cctx, fmt.Errorf("failed to parse amount: %w", err)) } + params.Val = abi.TokenAmount(val) - var fromAddr address.Address - if from := cctx.String("from"); from == "" { - defaddr, err := api.WalletDefaultAddress(ctx) + if from := cctx.String("from"); from != "" { + addr, err := address.NewFromString(from) if err != nil { return err } - fromAddr = defaddr - } else { - addr, err := address.NewFromString(from) + params.From = addr + } + + if cctx.IsSet("gas-premium") { + gp, err := types.BigFromString(cctx.String("gas-premium")) if err != nil { 
return err } - - fromAddr = addr + params.GasPremium = &gp } - gp, err := types.BigFromString(cctx.String("gas-premium")) - if err != nil { - return err + if cctx.IsSet("gas-feecap") { + gfc, err := types.BigFromString(cctx.String("gas-feecap")) + if err != nil { + return err + } + params.GasFeeCap = &gfc } - gfc, err := types.BigFromString(cctx.String("gas-feecap")) - if err != nil { - return err + + if cctx.IsSet("gas-limit") { + limit := cctx.Int64("gas-limit") + params.GasLimit = &limit } - method := abi.MethodNum(cctx.Uint64("method")) + params.Method = abi.MethodNum(cctx.Uint64("method")) - var params []byte if cctx.IsSet("params-json") { - decparams, err := decodeTypedParams(ctx, api, toAddr, method, cctx.String("params-json")) + decparams, err := srv.DecodeTypedParamsFromJSON(ctx, params.To, params.Method, cctx.String("params-json")) if err != nil { return fmt.Errorf("failed to decode json params: %w", err) } - params = decparams + params.Params = decparams } if cctx.IsSet("params-hex") { - if params != nil { + if params.Params != nil { return fmt.Errorf("can only specify one of 'params-json' and 'params-hex'") } decparams, err := hex.DecodeString(cctx.String("params-hex")) if err != nil { return fmt.Errorf("failed to decode hex params: %w", err) } - params = decparams + params.Params = decparams } - msg := &types.Message{ - From: fromAddr, - To: toAddr, - Value: types.BigInt(val), - GasPremium: gp, - GasFeeCap: gfc, - GasLimit: cctx.Int64("gas-limit"), - Method: method, - Params: params, + if cctx.IsSet("nonce") { + n := cctx.Uint64("nonce") + params.Nonce = &n } - if cctx.Int64("nonce") > 0 { - msg.Nonce = uint64(cctx.Int64("nonce")) - sm, err := api.WalletSignMessage(ctx, fromAddr, msg) - if err != nil { - return err - } + proto, err := srv.MessageForSend(ctx, params) + if err != nil { + return xerrors.Errorf("creating message prototype: %w", err) + } - _, err = api.MpoolPush(ctx, sm) - if err != nil { - return err - } - fmt.Println(sm.Cid()) - } else { 
- sm, err := api.MpoolPushMessage(ctx, msg, nil) - if err != nil { - return err - } - fmt.Println(sm.Cid()) + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err } + fmt.Fprintf(cctx.App.Writer, "%s\n", sm.Cid()) return nil }, } - -func decodeTypedParams(ctx context.Context, fapi api.FullNode, to address.Address, method abi.MethodNum, paramstr string) ([]byte, error) { - act, err := fapi.StateGetActor(ctx, to, types.EmptyTSK) - if err != nil { - return nil, err - } - - methodMeta, found := stmgr.MethodsMap[act.Code][method] - if !found { - return nil, fmt.Errorf("method %d not found on actor %s", method, act.Code) - } - - p := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORMarshaler) - - if err := json.Unmarshal([]byte(paramstr), p); err != nil { - return nil, fmt.Errorf("unmarshaling input into params type: %w", err) - } - - buf := new(bytes.Buffer) - if err := p.MarshalCBOR(buf); err != nil { - return nil, err - } - return buf.Bytes(), nil -} diff --git a/cli/send_test.go b/cli/send_test.go new file mode 100644 index 00000000000..52eafda67a7 --- /dev/null +++ b/cli/send_test.go @@ -0,0 +1,67 @@ +package cli + +import ( + "bytes" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + types "github.com/filecoin-project/lotus/chain/types" + gomock "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + ucli "github.com/urfave/cli/v2" +) + +func mustAddr(a address.Address, err error) address.Address { + if err != nil { + panic(err) + } + return a +} + +func newMockApp(t *testing.T, cmd *ucli.Command) (*ucli.App, *MockServicesAPI, *bytes.Buffer, func()) { + app := ucli.NewApp() + app.Commands = ucli.Commands{cmd} + app.Setup() + + mockCtrl := gomock.NewController(t) + mockSrvcs := NewMockServicesAPI(mockCtrl) + app.Metadata["test-services"] = mockSrvcs + + buf := &bytes.Buffer{} + app.Writer = buf + + return app, 
mockSrvcs, buf, mockCtrl.Finish +} + +func TestSendCLI(t *testing.T) { + oneFil := abi.TokenAmount(types.MustParseFIL("1")) + + t.Run("simple", func(t *testing.T) { + app, mockSrvcs, buf, done := newMockApp(t, sendCmd) + defer done() + + arbtProto := &api.MessagePrototype{ + Message: types.Message{ + From: mustAddr(address.NewIDAddress(1)), + To: mustAddr(address.NewIDAddress(1)), + Value: oneFil, + }, + } + sigMsg := fakeSign(&arbtProto.Message) + + gomock.InOrder( + mockSrvcs.EXPECT().MessageForSend(gomock.Any(), SendParams{ + To: mustAddr(address.NewIDAddress(1)), + Val: oneFil, + }).Return(arbtProto, nil), + mockSrvcs.EXPECT().PublishMessage(gomock.Any(), arbtProto, false). + Return(sigMsg, nil, nil), + mockSrvcs.EXPECT().Close(), + ) + err := app.Run([]string{"lotus", "send", "t01", "1"}) + assert.NoError(t, err) + assert.EqualValues(t, sigMsg.Cid().String()+"\n", buf.String()) + }) +} diff --git a/cli/sending_ui.go b/cli/sending_ui.go new file mode 100644 index 00000000000..a70abefb906 --- /dev/null +++ b/cli/sending_ui.go @@ -0,0 +1,264 @@ +package cli + +import ( + "context" + "errors" + "fmt" + "io" + "strings" + + "github.com/Kubuxu/imtui" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + types "github.com/filecoin-project/lotus/chain/types" + "github.com/gdamore/tcell/v2" + cid "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +func InteractiveSend(ctx context.Context, cctx *cli.Context, srv ServicesAPI, + proto *api.MessagePrototype) (*types.SignedMessage, error) { + + msg, checks, err := srv.PublishMessage(ctx, proto, cctx.Bool("force") || cctx.Bool("force-send")) + printer := cctx.App.Writer + if xerrors.Is(err, ErrCheckFailed) { + if !cctx.Bool("interactive") { + fmt.Fprintf(printer, "Following checks have failed:\n") + printChecks(printer, checks, proto.Message.Cid()) + } else { 
+ proto, err = resolveChecks(ctx, srv, cctx.App.Writer, proto, checks) + if err != nil { + return nil, xerrors.Errorf("from UI: %w", err) + } + + msg, _, err = srv.PublishMessage(ctx, proto, true) + } + } + if err != nil { + return nil, xerrors.Errorf("publishing message: %w", err) + } + + return msg, nil +} + +var interactiveSolves = map[api.CheckStatusCode]bool{ + api.CheckStatusMessageMinBaseFee: true, + api.CheckStatusMessageBaseFee: true, + api.CheckStatusMessageBaseFeeLowerBound: true, + api.CheckStatusMessageBaseFeeUpperBound: true, +} + +func baseFeeFromHints(hint map[string]interface{}) big.Int { + bHint, ok := hint["baseFee"] + if !ok { + return big.Zero() + } + bHintS, ok := bHint.(string) + if !ok { + return big.Zero() + } + + var err error + baseFee, err := big.FromString(bHintS) + if err != nil { + return big.Zero() + } + return baseFee +} + +func resolveChecks(ctx context.Context, s ServicesAPI, printer io.Writer, + proto *api.MessagePrototype, checkGroups [][]api.MessageCheckStatus, +) (*api.MessagePrototype, error) { + + fmt.Fprintf(printer, "Following checks have failed:\n") + printChecks(printer, checkGroups, proto.Message.Cid()) + + if feeCapBad, baseFee := isFeeCapProblem(checkGroups, proto.Message.Cid()); feeCapBad { + fmt.Fprintf(printer, "Fee of the message can be adjusted\n") + if askUser(printer, "Do you wish to do that? [Yes/no]: ", true) { + var err error + proto, err = runFeeCapAdjustmentUI(proto, baseFee) + if err != nil { + return nil, err + } + } + checks, err := s.RunChecksForPrototype(ctx, proto) + if err != nil { + return nil, err + } + fmt.Fprintf(printer, "Following checks still failed:\n") + printChecks(printer, checks, proto.Message.Cid()) + } + + if !askUser(printer, "Do you wish to send this message? 
[yes/No]: ", false) { + return nil, ErrAbortedByUser + } + return proto, nil +} + +var ErrAbortedByUser = errors.New("aborted by user") + +func printChecks(printer io.Writer, checkGroups [][]api.MessageCheckStatus, protoCid cid.Cid) { + for _, checks := range checkGroups { + for _, c := range checks { + if c.OK { + continue + } + aboutProto := c.Cid.Equals(protoCid) + msgName := "current" + if !aboutProto { + msgName = c.Cid.String() + } + fmt.Fprintf(printer, "%s message failed a check %s: %s\n", msgName, c.Code, c.Err) + } + } +} + +func askUser(printer io.Writer, q string, def bool) bool { + var resp string + fmt.Fprint(printer, q) + fmt.Scanln(&resp) + resp = strings.ToLower(resp) + if len(resp) == 0 { + return def + } + return resp[0] == 'y' +} + +func isFeeCapProblem(checkGroups [][]api.MessageCheckStatus, protoCid cid.Cid) (bool, big.Int) { + baseFee := big.Zero() + yes := false + for _, checks := range checkGroups { + for _, c := range checks { + if c.OK { + continue + } + aboutProto := c.Cid.Equals(protoCid) + if aboutProto && interactiveSolves[c.Code] { + yes = true + if baseFee.IsZero() { + baseFee = baseFeeFromHints(c.Hint) + } + } + } + } + if baseFee.IsZero() { + // this will only be the case if failing check is: MessageMinBaseFee + baseFee = big.NewInt(build.MinimumBaseFee) + } + + return yes, baseFee +} + +func runFeeCapAdjustmentUI(proto *api.MessagePrototype, baseFee abi.TokenAmount) (*api.MessagePrototype, error) { + t, err := imtui.NewTui() + if err != nil { + return nil, err + } + + maxFee := big.Mul(proto.Message.GasFeeCap, big.NewInt(proto.Message.GasLimit)) + send := false + t.PushScene(feeUI(baseFee, proto.Message.GasLimit, &maxFee, &send)) + + err = t.Run() + if err != nil { + return nil, err + } + if !send { + return nil, fmt.Errorf("aborted by user") + } + + proto.Message.GasFeeCap = big.Div(maxFee, big.NewInt(proto.Message.GasLimit)) + + return proto, nil +} + +func feeUI(baseFee abi.TokenAmount, gasLimit int64, maxFee *abi.TokenAmount, 
send *bool) func(*imtui.Tui) error { + orignalMaxFee := *maxFee + required := big.Mul(baseFee, big.NewInt(gasLimit)) + safe := big.Mul(required, big.NewInt(10)) + + price := fmt.Sprintf("%s", types.FIL(*maxFee).Unitless()) + + return func(t *imtui.Tui) error { + if t.CurrentKey != nil { + if t.CurrentKey.Key() == tcell.KeyRune { + pF, err := types.ParseFIL(price) + switch t.CurrentKey.Rune() { + case 's', 'S': + price = types.FIL(safe).Unitless() + case '+': + if err == nil { + p := big.Mul(big.Int(pF), types.NewInt(11)) + p = big.Div(p, types.NewInt(10)) + price = fmt.Sprintf("%s", types.FIL(p).Unitless()) + } + case '-': + if err == nil { + p := big.Mul(big.Int(pF), types.NewInt(10)) + p = big.Div(p, types.NewInt(11)) + price = fmt.Sprintf("%s", types.FIL(p).Unitless()) + } + default: + } + } + + if t.CurrentKey.Key() == tcell.KeyEnter { + *send = true + t.PopScene() + return nil + } + } + + defS := tcell.StyleDefault + + row := 0 + t.Label(0, row, "Fee of the message is too low.", defS) + row++ + + t.Label(0, row, fmt.Sprintf("Your configured maximum fee is: %s FIL", + types.FIL(orignalMaxFee).Unitless()), defS) + row++ + t.Label(0, row, fmt.Sprintf("Required maximum fee for the message: %s FIL", + types.FIL(required).Unitless()), defS) + row++ + w := t.Label(0, row, fmt.Sprintf("Safe maximum fee for the message: %s FIL", + types.FIL(safe).Unitless()), defS) + t.Label(w, row, " Press S to use it", defS) + row++ + + w = t.Label(0, row, "Current Maximum Fee: ", defS) + + w += t.EditFieldFiltered(w, row, 14, &price, imtui.FilterDecimal, defS.Foreground(tcell.ColorWhite).Background(tcell.ColorBlack)) + + w += t.Label(w, row, " FIL", defS) + + pF, err := types.ParseFIL(price) + *maxFee = abi.TokenAmount(pF) + if err != nil { + w += t.Label(w, row, " invalid price", defS.Foreground(tcell.ColorMaroon).Bold(true)) + } else if maxFee.GreaterThanEqual(safe) { + w += t.Label(w, row, " SAFE", defS.Foreground(tcell.ColorDarkGreen).Bold(true)) + } else if 
maxFee.GreaterThanEqual(required) { + w += t.Label(w, row, " low", defS.Foreground(tcell.ColorYellow).Bold(true)) + over := big.Div(big.Mul(*maxFee, big.NewInt(100)), required) + w += t.Label(w, row, + fmt.Sprintf(" %.1fx over the minimum", float64(over.Int64())/100.0), defS) + } else { + w += t.Label(w, row, " too low", defS.Foreground(tcell.ColorRed).Bold(true)) + } + row += 2 + + t.Label(0, row, fmt.Sprintf("Current Base Fee is: %s", types.FIL(baseFee).Nano()), defS) + row++ + t.Label(0, row, fmt.Sprintf("Resulting FeeCap is: %s", + types.FIL(big.Div(*maxFee, big.NewInt(gasLimit))).Nano()), defS) + row++ + t.Label(0, row, "You can use '+' and '-' to adjust the fee.", defS) + + return nil + } +} diff --git a/cli/services.go b/cli/services.go new file mode 100644 index 00000000000..0923680aa08 --- /dev/null +++ b/cli/services.go @@ -0,0 +1,276 @@ +package cli + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "reflect" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/stmgr" + types "github.com/filecoin-project/lotus/chain/types" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +//go:generate go run github.com/golang/mock/mockgen -destination=servicesmock_test.go -package=cli -self_package github.com/filecoin-project/lotus/cli . 
ServicesAPI + +type ServicesAPI interface { + FullNodeAPI() api.FullNode + + GetBaseFee(ctx context.Context) (abi.TokenAmount, error) + + // MessageForSend creates a prototype of a message based on SendParams + MessageForSend(ctx context.Context, params SendParams) (*api.MessagePrototype, error) + + // DecodeTypedParamsFromJSON takes in information needed to identify a method and converts JSON + // parameters to bytes of their CBOR encoding + DecodeTypedParamsFromJSON(ctx context.Context, to address.Address, method abi.MethodNum, paramstr string) ([]byte, error) + + RunChecksForPrototype(ctx context.Context, prototype *api.MessagePrototype) ([][]api.MessageCheckStatus, error) + + // PublishMessage takes in a message prototype and publishes it + // before publishing the message, it runs checks on the node, message and mpool to verify that + // message is valid and won't be stuck. + // if `force` is true, it skips the checks + PublishMessage(ctx context.Context, prototype *api.MessagePrototype, force bool) (*types.SignedMessage, [][]api.MessageCheckStatus, error) + + LocalAddresses(ctx context.Context) (address.Address, []address.Address, error) + + MpoolPendingFilter(ctx context.Context, filter func(*types.SignedMessage) bool, tsk types.TipSetKey) ([]*types.SignedMessage, error) + MpoolCheckPendingMessages(ctx context.Context, a address.Address) ([][]api.MessageCheckStatus, error) + + // Close ends the session of services and disconnects from RPC, using Services after Close is called + // most likely will result in an error + // Should not be called concurrently + Close() error +} + +type ServicesImpl struct { + api api.FullNode + closer jsonrpc.ClientCloser +} + +func (s *ServicesImpl) FullNodeAPI() api.FullNode { + return s.api +} + +func (s *ServicesImpl) Close() error { + if s.closer == nil { + return xerrors.Errorf("Services already closed") + } + s.closer() + s.closer = nil + return nil +} + +func (s *ServicesImpl) GetBaseFee(ctx context.Context) 
(abi.TokenAmount, error) { + // not used but useful + + ts, err := s.api.ChainHead(ctx) + if err != nil { + return big.Zero(), xerrors.Errorf("getting head: %w", err) + } + return ts.MinTicketBlock().ParentBaseFee, nil +} + +func (s *ServicesImpl) DecodeTypedParamsFromJSON(ctx context.Context, to address.Address, method abi.MethodNum, paramstr string) ([]byte, error) { + act, err := s.api.StateGetActor(ctx, to, types.EmptyTSK) + if err != nil { + return nil, err + } + + methodMeta, found := stmgr.MethodsMap[act.Code][method] + if !found { + return nil, fmt.Errorf("method %d not found on actor %s", method, act.Code) + } + + p := reflect.New(methodMeta.Params.Elem()).Interface().(cbg.CBORMarshaler) + + if err := json.Unmarshal([]byte(paramstr), p); err != nil { + return nil, fmt.Errorf("unmarshaling input into params type: %w", err) + } + + buf := new(bytes.Buffer) + if err := p.MarshalCBOR(buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +type CheckInfo struct { + MessageTie cid.Cid + CurrentMessageTie bool + + Check api.MessageCheckStatus +} + +var ErrCheckFailed = fmt.Errorf("check has failed") + +func (s *ServicesImpl) RunChecksForPrototype(ctx context.Context, prototype *api.MessagePrototype) ([][]api.MessageCheckStatus, error) { + var outChecks [][]api.MessageCheckStatus + checks, err := s.api.MpoolCheckMessages(ctx, []*api.MessagePrototype{prototype}) + if err != nil { + return nil, xerrors.Errorf("message check: %w", err) + } + outChecks = append(outChecks, checks...) + + checks, err = s.api.MpoolCheckPendingMessages(ctx, prototype.Message.From) + if err != nil { + return nil, xerrors.Errorf("pending mpool check: %w", err) + } + outChecks = append(outChecks, checks...) 
+ + return outChecks, nil +} + +// PublishMessage modifies prototype to include gas estimation +// Errors with ErrCheckFailed if any of the checks fail +// First group of checks is related to the message prototype +func (s *ServicesImpl) PublishMessage(ctx context.Context, + prototype *api.MessagePrototype, force bool) (*types.SignedMessage, [][]api.MessageCheckStatus, error) { + + gasedMsg, err := s.api.GasEstimateMessageGas(ctx, &prototype.Message, nil, types.EmptyTSK) + if err != nil { + return nil, nil, xerrors.Errorf("estimating gas: %w", err) + } + prototype.Message = *gasedMsg + + if !force { + checks, err := s.RunChecksForPrototype(ctx, prototype) + if err != nil { + return nil, nil, xerrors.Errorf("running checks: %w", err) + } + for _, chks := range checks { + for _, c := range chks { + if !c.OK { + return nil, checks, ErrCheckFailed + } + } + } + } + + if prototype.ValidNonce { + sm, err := s.api.WalletSignMessage(ctx, prototype.Message.From, &prototype.Message) + if err != nil { + return nil, nil, err + } + + _, err = s.api.MpoolPush(ctx, sm) + if err != nil { + return nil, nil, err + } + return sm, nil, nil + } + + sm, err := s.api.MpoolPushMessage(ctx, &prototype.Message, nil) + if err != nil { + return nil, nil, err + } + + return sm, nil, nil +} + +type SendParams struct { + To address.Address + From address.Address + Val abi.TokenAmount + + GasPremium *abi.TokenAmount + GasFeeCap *abi.TokenAmount + GasLimit *int64 + + Nonce *uint64 + Method abi.MethodNum + Params []byte +} + +func (s *ServicesImpl) MessageForSend(ctx context.Context, params SendParams) (*api.MessagePrototype, error) { + if params.From == address.Undef { + defaddr, err := s.api.WalletDefaultAddress(ctx) + if err != nil { + return nil, err + } + params.From = defaddr + } + + msg := types.Message{ + From: params.From, + To: params.To, + Value: params.Val, + + Method: params.Method, + Params: params.Params, + } + + if params.GasPremium != nil { + msg.GasPremium = *params.GasPremium + } 
else { + msg.GasPremium = types.NewInt(0) + } + if params.GasFeeCap != nil { + msg.GasFeeCap = *params.GasFeeCap + } else { + msg.GasFeeCap = types.NewInt(0) + } + if params.GasLimit != nil { + msg.GasLimit = *params.GasLimit + } else { + msg.GasLimit = 0 + } + validNonce := false + if params.Nonce != nil { + msg.Nonce = *params.Nonce + validNonce = true + } + + prototype := &api.MessagePrototype{ + Message: msg, + ValidNonce: validNonce, + } + return prototype, nil +} + +func (s *ServicesImpl) MpoolPendingFilter(ctx context.Context, filter func(*types.SignedMessage) bool, + tsk types.TipSetKey) ([]*types.SignedMessage, error) { + msgs, err := s.api.MpoolPending(ctx, types.EmptyTSK) + if err != nil { + return nil, xerrors.Errorf("getting pending messages: %w", err) + } + out := []*types.SignedMessage{} + for _, sm := range msgs { + if filter(sm) { + out = append(out, sm) + } + } + + return out, nil +} + +func (s *ServicesImpl) LocalAddresses(ctx context.Context) (address.Address, []address.Address, error) { + def, err := s.api.WalletDefaultAddress(ctx) + if err != nil { + return address.Undef, nil, xerrors.Errorf("getting default addr: %w", err) + } + + all, err := s.api.WalletList(ctx) + if err != nil { + return address.Undef, nil, xerrors.Errorf("getting list of addrs: %w", err) + } + + return def, all, nil +} + +func (s *ServicesImpl) MpoolCheckPendingMessages(ctx context.Context, a address.Address) ([][]api.MessageCheckStatus, error) { + checks, err := s.api.MpoolCheckPendingMessages(ctx, a) + if err != nil { + return nil, xerrors.Errorf("pending mpool check: %w", err) + } + return checks, nil +} diff --git a/cli/services_send_test.go b/cli/services_send_test.go new file mode 100644 index 00000000000..b7ed78f80db --- /dev/null +++ b/cli/services_send_test.go @@ -0,0 +1,215 @@ +package cli + +import ( + "context" + "fmt" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + 
"github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/api" + mocks "github.com/filecoin-project/lotus/api/mocks" + types "github.com/filecoin-project/lotus/chain/types" + gomock "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +type markerKeyType struct{} + +var markerKey = markerKeyType{} + +type contextMatcher struct { + marker *int +} + +// Matches returns whether x is a match. +func (cm contextMatcher) Matches(x interface{}) bool { + ctx, ok := x.(context.Context) + if !ok { + return false + } + maybeMarker, ok := ctx.Value(markerKey).(*int) + if !ok { + return false + } + + return cm.marker == maybeMarker +} + +func (cm contextMatcher) String() string { + return fmt.Sprintf("Context with Value(%v/%T, %p)", markerKey, markerKey, cm.marker) +} + +func ContextWithMarker(ctx context.Context) (context.Context, gomock.Matcher) { + marker := new(int) + outCtx := context.WithValue(ctx, markerKey, marker) + return outCtx, contextMatcher{marker: marker} + +} + +func setupMockSrvcs(t *testing.T) (*ServicesImpl, *mocks.MockFullNode) { + mockCtrl := gomock.NewController(t) + + mockApi := mocks.NewMockFullNode(mockCtrl) + + srvcs := &ServicesImpl{ + api: mockApi, + closer: mockCtrl.Finish, + } + return srvcs, mockApi +} + +// linter doesn't like dead code, so these are commented out. +func fakeSign(msg *types.Message) *types.SignedMessage { + return &types.SignedMessage{ + Message: *msg, + Signature: crypto.Signature{Type: crypto.SigTypeSecp256k1, Data: make([]byte, 32)}, + } +} + +//func makeMessageSigner() (*cid.Cid, interface{}) { +//smCid := cid.Undef +//return &smCid, +//func(_ context.Context, msg *types.Message, _ *api.MessageSendSpec) (*types.SignedMessage, error) { +//sm := fakeSign(msg) +//smCid = sm.Cid() +//return sm, nil +//} +//} + +type MessageMatcher SendParams + +var _ gomock.Matcher = MessageMatcher{} + +// Matches returns whether x is a match. 
+func (mm MessageMatcher) Matches(x interface{}) bool { + proto, ok := x.(*api.MessagePrototype) + if !ok { + return false + } + + m := &proto.Message + + if mm.From != address.Undef && mm.From != m.From { + return false + } + if mm.To != address.Undef && mm.To != m.To { + return false + } + + if types.BigCmp(mm.Val, m.Value) != 0 { + return false + } + + if mm.Nonce != nil && *mm.Nonce != m.Nonce { + return false + } + + if mm.GasPremium != nil && big.Cmp(*mm.GasPremium, m.GasPremium) != 0 { + return false + } + if mm.GasPremium == nil && m.GasPremium.Sign() != 0 { + return false + } + + if mm.GasFeeCap != nil && big.Cmp(*mm.GasFeeCap, m.GasFeeCap) != 0 { + return false + } + if mm.GasFeeCap == nil && m.GasFeeCap.Sign() != 0 { + return false + } + + if mm.GasLimit != nil && *mm.GasLimit != m.GasLimit { + return false + } + + if mm.GasLimit == nil && m.GasLimit != 0 { + return false + } + // handle rest of options + return true +} + +// String describes what the matcher matches. +func (mm MessageMatcher) String() string { + return fmt.Sprintf("%#v", SendParams(mm)) +} + +func TestSendService(t *testing.T) { + addrGen := address.NewForTestGetter() + a1 := addrGen() + a2 := addrGen() + + const balance = 10000 + + params := SendParams{ + From: a1, + To: a2, + Val: types.NewInt(balance - 100), + } + + ctx, ctxM := ContextWithMarker(context.Background()) + + t.Run("happy", func(t *testing.T) { + params := params + srvcs, _ := setupMockSrvcs(t) + defer srvcs.Close() //nolint:errcheck + + proto, err := srvcs.MessageForSend(ctx, params) + assert.NoError(t, err) + assert.True(t, MessageMatcher(params).Matches(proto)) + }) + + t.Run("default-from", func(t *testing.T) { + params := params + params.From = address.Undef + mm := MessageMatcher(params) + mm.From = a1 + + srvcs, mockApi := setupMockSrvcs(t) + defer srvcs.Close() //nolint:errcheck + + gomock.InOrder( + mockApi.EXPECT().WalletDefaultAddress(ctxM).Return(a1, nil), + ) + + proto, err := srvcs.MessageForSend(ctx, 
params) + assert.NoError(t, err) + assert.True(t, mm.Matches(proto)) + }) + + t.Run("set-nonce", func(t *testing.T) { + params := params + n := uint64(5) + params.Nonce = &n + mm := MessageMatcher(params) + + srvcs, _ := setupMockSrvcs(t) + defer srvcs.Close() //nolint:errcheck + + proto, err := srvcs.MessageForSend(ctx, params) + assert.NoError(t, err) + assert.True(t, mm.Matches(proto)) + }) + + t.Run("gas-params", func(t *testing.T) { + params := params + limit := int64(1) + params.GasLimit = &limit + gfc := big.NewInt(100) + params.GasFeeCap = &gfc + gp := big.NewInt(10) + params.GasPremium = &gp + + mm := MessageMatcher(params) + + srvcs, _ := setupMockSrvcs(t) + defer srvcs.Close() //nolint:errcheck + + proto, err := srvcs.MessageForSend(ctx, params) + assert.NoError(t, err) + assert.True(t, mm.Matches(proto)) + + }) +} diff --git a/cli/servicesmock_test.go b/cli/servicesmock_test.go new file mode 100644 index 00000000000..5bae52a5ebc --- /dev/null +++ b/cli/servicesmock_test.go @@ -0,0 +1,190 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/lotus/cli (interfaces: ServicesAPI) + +// Package cli is a generated GoMock package. +package cli + +import ( + context "context" + reflect "reflect" + + go_address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + api "github.com/filecoin-project/lotus/api" + types "github.com/filecoin-project/lotus/chain/types" + gomock "github.com/golang/mock/gomock" +) + +// MockServicesAPI is a mock of ServicesAPI interface. +type MockServicesAPI struct { + ctrl *gomock.Controller + recorder *MockServicesAPIMockRecorder +} + +// MockServicesAPIMockRecorder is the mock recorder for MockServicesAPI. +type MockServicesAPIMockRecorder struct { + mock *MockServicesAPI +} + +// NewMockServicesAPI creates a new mock instance. 
+func NewMockServicesAPI(ctrl *gomock.Controller) *MockServicesAPI { + mock := &MockServicesAPI{ctrl: ctrl} + mock.recorder = &MockServicesAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockServicesAPI) EXPECT() *MockServicesAPIMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockServicesAPI) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockServicesAPIMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockServicesAPI)(nil).Close)) +} + +// DecodeTypedParamsFromJSON mocks base method. +func (m *MockServicesAPI) DecodeTypedParamsFromJSON(arg0 context.Context, arg1 go_address.Address, arg2 abi.MethodNum, arg3 string) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DecodeTypedParamsFromJSON", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DecodeTypedParamsFromJSON indicates an expected call of DecodeTypedParamsFromJSON. +func (mr *MockServicesAPIMockRecorder) DecodeTypedParamsFromJSON(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DecodeTypedParamsFromJSON", reflect.TypeOf((*MockServicesAPI)(nil).DecodeTypedParamsFromJSON), arg0, arg1, arg2, arg3) +} + +// FullNodeAPI mocks base method. +func (m *MockServicesAPI) FullNodeAPI() api.FullNode { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FullNodeAPI") + ret0, _ := ret[0].(api.FullNode) + return ret0 +} + +// FullNodeAPI indicates an expected call of FullNodeAPI. 
+func (mr *MockServicesAPIMockRecorder) FullNodeAPI() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FullNodeAPI", reflect.TypeOf((*MockServicesAPI)(nil).FullNodeAPI)) +} + +// GetBaseFee mocks base method. +func (m *MockServicesAPI) GetBaseFee(arg0 context.Context) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBaseFee", arg0) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBaseFee indicates an expected call of GetBaseFee. +func (mr *MockServicesAPIMockRecorder) GetBaseFee(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBaseFee", reflect.TypeOf((*MockServicesAPI)(nil).GetBaseFee), arg0) +} + +// LocalAddresses mocks base method. +func (m *MockServicesAPI) LocalAddresses(arg0 context.Context) (go_address.Address, []go_address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LocalAddresses", arg0) + ret0, _ := ret[0].(go_address.Address) + ret1, _ := ret[1].([]go_address.Address) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// LocalAddresses indicates an expected call of LocalAddresses. +func (mr *MockServicesAPIMockRecorder) LocalAddresses(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LocalAddresses", reflect.TypeOf((*MockServicesAPI)(nil).LocalAddresses), arg0) +} + +// MessageForSend mocks base method. +func (m *MockServicesAPI) MessageForSend(arg0 context.Context, arg1 SendParams) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MessageForSend", arg0, arg1) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MessageForSend indicates an expected call of MessageForSend. 
+func (mr *MockServicesAPIMockRecorder) MessageForSend(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessageForSend", reflect.TypeOf((*MockServicesAPI)(nil).MessageForSend), arg0, arg1) +} + +// MpoolCheckPendingMessages mocks base method. +func (m *MockServicesAPI) MpoolCheckPendingMessages(arg0 context.Context, arg1 go_address.Address) ([][]api.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolCheckPendingMessages", arg0, arg1) + ret0, _ := ret[0].([][]api.MessageCheckStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolCheckPendingMessages indicates an expected call of MpoolCheckPendingMessages. +func (mr *MockServicesAPIMockRecorder) MpoolCheckPendingMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckPendingMessages", reflect.TypeOf((*MockServicesAPI)(nil).MpoolCheckPendingMessages), arg0, arg1) +} + +// MpoolPendingFilter mocks base method. +func (m *MockServicesAPI) MpoolPendingFilter(arg0 context.Context, arg1 func(*types.SignedMessage) bool, arg2 types.TipSetKey) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPendingFilter", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPendingFilter indicates an expected call of MpoolPendingFilter. +func (mr *MockServicesAPIMockRecorder) MpoolPendingFilter(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPendingFilter", reflect.TypeOf((*MockServicesAPI)(nil).MpoolPendingFilter), arg0, arg1, arg2) +} + +// PublishMessage mocks base method. 
+func (m *MockServicesAPI) PublishMessage(arg0 context.Context, arg1 *api.MessagePrototype, arg2 bool) (*types.SignedMessage, [][]api.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublishMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.SignedMessage) + ret1, _ := ret[1].([][]api.MessageCheckStatus) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// PublishMessage indicates an expected call of PublishMessage. +func (mr *MockServicesAPIMockRecorder) PublishMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishMessage", reflect.TypeOf((*MockServicesAPI)(nil).PublishMessage), arg0, arg1, arg2) +} + +// RunChecksForPrototype mocks base method. +func (m *MockServicesAPI) RunChecksForPrototype(arg0 context.Context, arg1 *api.MessagePrototype) ([][]api.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RunChecksForPrototype", arg0, arg1) + ret0, _ := ret[0].([][]api.MessageCheckStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RunChecksForPrototype indicates an expected call of RunChecksForPrototype. 
+func (mr *MockServicesAPIMockRecorder) RunChecksForPrototype(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunChecksForPrototype", reflect.TypeOf((*MockServicesAPI)(nil).RunChecksForPrototype), arg0, arg1) +} diff --git a/cli/state.go b/cli/state.go index 13aa5c39b8c..d5251fb8595 100644 --- a/cli/state.go +++ b/cli/state.go @@ -3,6 +3,8 @@ package cli import ( "bytes" "context" + "encoding/base64" + "encoding/hex" "encoding/json" "fmt" "html/template" @@ -15,13 +17,16 @@ import ( "strings" "time" - "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/go-state-types/big" - "github.com/multiformats/go-multiaddr" + "github.com/filecoin-project/lotus/api/v0api" + + "github.com/fatih/color" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" - "github.com/libp2p/go-libp2p-core/peer" + "github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multihash" "github.com/urfave/cli/v2" cbg "github.com/whyrusleeping/cbor-gen" @@ -29,19 +34,18 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/lotus/api" lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" ) -var stateCmd = &cli.Command{ +var StateCmd = &cli.Command{ Name: "state", Usage: "Interact with and query filecoin chain state", Flags: []cli.Flag{ @@ -51,31 +55,76 @@ var stateCmd = &cli.Command{ }, }, Subcommands: []*cli.Command{ - statePowerCmd, - stateSectorsCmd, - 
stateActiveSectorsCmd, - stateListActorsCmd, - stateListMinersCmd, - stateCircSupplyCmd, - stateSectorCmd, - stateGetActorCmd, - stateLookupIDCmd, - stateReplayCmd, - stateSectorSizeCmd, - stateReadStateCmd, - stateListMessagesCmd, - stateComputeStateCmd, - stateCallCmd, - stateGetDealSetCmd, - stateWaitMsgCmd, - stateSearchMsgCmd, - stateMinerInfo, - stateMarketCmd, - stateExecTraceCmd, + StatePowerCmd, + StateSectorsCmd, + StateActiveSectorsCmd, + StateListActorsCmd, + StateListMinersCmd, + StateCircSupplyCmd, + StateSectorCmd, + StateGetActorCmd, + StateLookupIDCmd, + StateReplayCmd, + StateSectorSizeCmd, + StateReadStateCmd, + StateListMessagesCmd, + StateComputeStateCmd, + StateCallCmd, + StateGetDealSetCmd, + StateWaitMsgCmd, + StateSearchMsgCmd, + StateMinerInfo, + StateMarketCmd, + StateExecTraceCmd, + StateNtwkVersionCmd, + StateMinerProvingDeadlineCmd, + }, +} + +var StateMinerProvingDeadlineCmd = &cli.Command{ + Name: "miner-proving-deadline", + Usage: "Retrieve information about a given miner's proving deadline", + ArgsUsage: "[minerAddress]", + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := ReqContext(cctx) + + if !cctx.Args().Present() { + return fmt.Errorf("must specify miner to get information for") + } + + addr, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + ts, err := LoadTipSet(ctx, cctx, api) + if err != nil { + return err + } + + cd, err := api.StateMinerProvingDeadline(ctx, addr, ts.Key()) + if err != nil { + return xerrors.Errorf("getting miner info: %w", err) + } + + fmt.Printf("Period Start:\t%s\n", cd.PeriodStart) + fmt.Printf("Index:\t\t%d\n", cd.Index) + fmt.Printf("Open:\t\t%s\n", cd.Open) + fmt.Printf("Close:\t\t%s\n", cd.Close) + fmt.Printf("Challenge:\t%s\n", cd.Challenge) + fmt.Printf("FaultCutoff:\t%s\n", cd.FaultCutoff) + + return nil }, } -var stateMinerInfo = &cli.Command{ +var 
StateMinerInfo = &cli.Command{ Name: "miner-info", Usage: "Retrieve miner information", ArgsUsage: "[minerAddress]", @@ -107,14 +156,19 @@ var stateMinerInfo = &cli.Command{ return err } + availableBalance, err := api.StateMinerAvailableBalance(ctx, addr, ts.Key()) + if err != nil { + return xerrors.Errorf("getting miner available balance: %w", err) + } + fmt.Printf("Available Balance: %s\n", types.FIL(availableBalance)) fmt.Printf("Owner:\t%s\n", mi.Owner) fmt.Printf("Worker:\t%s\n", mi.Worker) for i, controlAddress := range mi.ControlAddresses { fmt.Printf("Control %d: \t%s\n", i, controlAddress) } + fmt.Printf("PeerID:\t%s\n", mi.PeerId) - fmt.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize) - fmt.Printf("Multiaddrs: \t") + fmt.Printf("Multiaddrs:\t") for _, addr := range mi.Multiaddrs { a, err := multiaddr.NewMultiaddrBytes(addr) if err != nil { @@ -122,6 +176,33 @@ var stateMinerInfo = &cli.Command{ } fmt.Printf("%s ", a) } + fmt.Println() + fmt.Printf("Consensus Fault End:\t%d\n", mi.ConsensusFaultElapsed) + + fmt.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize) + pow, err := api.StateMinerPower(ctx, addr, ts.Key()) + if err != nil { + return err + } + + fmt.Printf("Byte Power: %s / %s (%0.4f%%)\n", + color.BlueString(types.SizeStr(pow.MinerPower.RawBytePower)), + types.SizeStr(pow.TotalPower.RawBytePower), + types.BigDivFloat( + types.BigMul(pow.MinerPower.RawBytePower, big.NewInt(100)), + pow.TotalPower.RawBytePower, + ), + ) + + fmt.Printf("Actual Power: %s / %s (%0.4f%%)\n", + color.GreenString(types.DeciStr(pow.MinerPower.QualityAdjPower)), + types.DeciStr(pow.TotalPower.QualityAdjPower), + types.BigDivFloat( + types.BigMul(pow.MinerPower.QualityAdjPower, big.NewInt(100)), + pow.TotalPower.QualityAdjPower, + ), + ) + fmt.Println() cd, err := api.StateMinerProvingDeadline(ctx, addr, ts.Key()) @@ -150,16 +231,19 @@ func ParseTipSetString(ts string) 
([]cid.Cid, error) { return cids, nil } -func LoadTipSet(ctx context.Context, cctx *cli.Context, api api.FullNode) (*types.TipSet, error) { +// LoadTipSet gets the tipset from the context, or the head from the API. +// +// It always gets the head from the API so commands use a consistent tipset even if time pases. +func LoadTipSet(ctx context.Context, cctx *cli.Context, api v0api.FullNode) (*types.TipSet, error) { tss := cctx.String("tipset") if tss == "" { - return nil, nil + return api.ChainHead(ctx) } return ParseTipSetRef(ctx, api, tss) } -func ParseTipSetRef(ctx context.Context, api api.FullNode, tss string) (*types.TipSet, error) { +func ParseTipSetRef(ctx context.Context, api v0api.FullNode, tss string) (*types.TipSet, error) { if tss[0] == '@' { if tss == "@head" { return api.ChainHead(ctx) @@ -191,7 +275,7 @@ func ParseTipSetRef(ctx context.Context, api api.FullNode, tss string) (*types.T return ts, nil } -var statePowerCmd = &cli.Command{ +var StatePowerCmd = &cli.Command{ Name: "power", Usage: "Query network or miner power", ArgsUsage: "[ (optional)]", @@ -204,17 +288,26 @@ var statePowerCmd = &cli.Command{ ctx := ReqContext(cctx) + ts, err := LoadTipSet(ctx, cctx, api) + if err != nil { + return err + } + var maddr address.Address if cctx.Args().Present() { maddr, err = address.NewFromString(cctx.Args().First()) if err != nil { return err } - } - ts, err := LoadTipSet(ctx, cctx, api) - if err != nil { - return err + ma, err := api.StateGetActor(ctx, maddr, ts.Key()) + if err != nil { + return err + } + + if !builtin.IsStorageMinerActor(ma.Code) { + return xerrors.New("provided address does not correspond to a miner actor") + } } power, err := api.StateMinerPower(ctx, maddr, ts.Key()) @@ -225,8 +318,15 @@ var statePowerCmd = &cli.Command{ tp := power.TotalPower if cctx.Args().Present() { mp := power.MinerPower - percI := types.BigDiv(types.BigMul(mp.QualityAdjPower, types.NewInt(1000000)), tp.QualityAdjPower) - fmt.Printf("%s(%s) / %s(%s) ~= %0.4f%%\n", 
mp.QualityAdjPower.String(), types.SizeStr(mp.QualityAdjPower), tp.QualityAdjPower.String(), types.SizeStr(tp.QualityAdjPower), float64(percI.Int64())/10000) + fmt.Printf( + "%s(%s) / %s(%s) ~= %0.4f%%\n", + mp.QualityAdjPower.String(), types.SizeStr(mp.QualityAdjPower), + tp.QualityAdjPower.String(), types.SizeStr(tp.QualityAdjPower), + types.BigDivFloat( + types.BigMul(mp.QualityAdjPower, big.NewInt(100)), + tp.QualityAdjPower, + ), + ) } else { fmt.Printf("%s(%s)\n", tp.QualityAdjPower.String(), types.SizeStr(tp.QualityAdjPower)) } @@ -235,7 +335,7 @@ var statePowerCmd = &cli.Command{ }, } -var stateSectorsCmd = &cli.Command{ +var StateSectorsCmd = &cli.Command{ Name: "sectors", Usage: "Query the sector set of a miner", ArgsUsage: "[minerAddress]", @@ -268,14 +368,14 @@ var stateSectorsCmd = &cli.Command{ } for _, s := range sectors { - fmt.Printf("%d: %x\n", s.SectorNumber, s.SealedCID) + fmt.Printf("%d: %s\n", s.SectorNumber, s.SealedCID) } return nil }, } -var stateActiveSectorsCmd = &cli.Command{ +var StateActiveSectorsCmd = &cli.Command{ Name: "active-sectors", Usage: "Query the active sector set of a miner", ArgsUsage: "[minerAddress]", @@ -308,14 +408,14 @@ var stateActiveSectorsCmd = &cli.Command{ } for _, s := range sectors { - fmt.Printf("%d: %x\n", s.SectorNumber, s.SealedCID) + fmt.Printf("%d: %s\n", s.SectorNumber, s.SealedCID) } return nil }, } -var stateExecTraceCmd = &cli.Command{ +var StateExecTraceCmd = &cli.Command{ Name: "exec-trace", Usage: "Get the execution trace of a given message", ArgsUsage: "", @@ -346,6 +446,9 @@ var stateExecTraceCmd = &cli.Command{ if err != nil { return err } + if lookup == nil { + return fmt.Errorf("failed to find message: %s", mcid) + } ts, err := capi.ChainGetTipSet(ctx, lookup.TipSet) if err != nil { @@ -383,7 +486,7 @@ var stateExecTraceCmd = &cli.Command{ }, } -var stateReplayCmd = &cli.Command{ +var StateReplayCmd = &cli.Command{ Name: "replay", Usage: "Replay a particular message", ArgsUsage: "", @@ -448,7 
+551,7 @@ var stateReplayCmd = &cli.Command{ }, } -var stateGetDealSetCmd = &cli.Command{ +var StateGetDealSetCmd = &cli.Command{ Name: "get-deal", Usage: "View on-chain deal info", ArgsUsage: "[dealId]", @@ -490,7 +593,7 @@ var stateGetDealSetCmd = &cli.Command{ }, } -var stateListMinersCmd = &cli.Command{ +var StateListMinersCmd = &cli.Command{ Name: "list-miners", Usage: "list all miners in the network", Flags: []cli.Flag{ @@ -546,7 +649,7 @@ var stateListMinersCmd = &cli.Command{ }, } -func getDealsCounts(ctx context.Context, lapi api.FullNode) (map[address.Address]int, error) { +func getDealsCounts(ctx context.Context, lapi v0api.FullNode) (map[address.Address]int, error) { allDeals, err := lapi.StateMarketDeals(ctx, types.EmptyTSK) if err != nil { return nil, err @@ -562,7 +665,7 @@ func getDealsCounts(ctx context.Context, lapi api.FullNode) (map[address.Address return out, nil } -var stateListActorsCmd = &cli.Command{ +var StateListActorsCmd = &cli.Command{ Name: "list-actors", Usage: "list all actors in the network", Action: func(cctx *cli.Context) error { @@ -592,10 +695,10 @@ var stateListActorsCmd = &cli.Command{ }, } -var stateGetActorCmd = &cli.Command{ +var StateGetActorCmd = &cli.Command{ Name: "get-actor", Usage: "Print actor information", - ArgsUsage: "[actorrAddress]", + ArgsUsage: "[actorAddress]", Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -636,7 +739,7 @@ var stateGetActorCmd = &cli.Command{ }, } -var stateLookupIDCmd = &cli.Command{ +var StateLookupIDCmd = &cli.Command{ Name: "lookup", Usage: "Find corresponding ID address", ArgsUsage: "[address]", @@ -687,7 +790,7 @@ var stateLookupIDCmd = &cli.Command{ }, } -var stateSectorSizeCmd = &cli.Command{ +var StateSectorSizeCmd = &cli.Command{ Name: "sector-size", Usage: "Look up miners sector size", ArgsUsage: "[minerAddress]", @@ -724,7 +827,7 @@ var stateSectorSizeCmd = &cli.Command{ }, } -var stateReadStateCmd = &cli.Command{ +var 
StateReadStateCmd = &cli.Command{ Name: "read-state", Usage: "View a json representation of an actors state", ArgsUsage: "[actorAddress]", @@ -766,7 +869,7 @@ var stateReadStateCmd = &cli.Command{ }, } -var stateListMessagesCmd = &cli.Command{ +var StateListMessagesCmd = &cli.Command{ Name: "list-messages", Usage: "list messages on chain matching given criteria", Flags: []cli.Flag{ @@ -820,14 +923,6 @@ var stateListMessagesCmd = &cli.Command{ return err } - if ts == nil { - head, err := api.ChainHead(ctx) - if err != nil { - return err - } - ts = head - } - windowSize := abi.ChainEpoch(100) cur := ts @@ -879,7 +974,7 @@ var stateListMessagesCmd = &cli.Command{ }, } -var stateComputeStateCmd = &cli.Command{ +var StateComputeStateCmd = &cli.Command{ Name: "compute-state", Usage: "Perform state computations", Flags: []cli.Flag{ @@ -907,6 +1002,10 @@ var stateComputeStateCmd = &cli.Command{ Name: "compute-state-output", Usage: "a json file containing pre-existing compute-state output, to generate html reports without rerunning state changes", }, + &cli.BoolFlag{ + Name: "no-timing", + Usage: "don't show timing information in html traces", + }, }, Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) @@ -923,13 +1022,6 @@ var stateComputeStateCmd = &cli.Command{ } h := abi.ChainEpoch(cctx.Uint64("vm-height")) - if ts == nil { - head, err := api.ChainHead(ctx) - if err != nil { - return err - } - ts = head - } if h == 0 { h = ts.Height() } @@ -978,7 +1070,7 @@ var stateComputeStateCmd = &cli.Command{ } if cctx.Bool("html") { - st, err := state.LoadStateTree(cbor.NewCborStore(apibstore.NewAPIBlockstore(api)), stout.Root) + st, err := state.LoadStateTree(cbor.NewCborStore(blockstore.NewAPIBlockstore(api)), stout.Root) if err != nil { return xerrors.Errorf("loading state tree: %w", err) } @@ -998,7 +1090,9 @@ var stateComputeStateCmd = &cli.Command{ return c.Code, nil } - return ComputeStateHTMLTempl(os.Stdout, ts, stout, getCode) + _, _ = 
fmt.Fprintln(os.Stderr, "computed state cid: ", stout.Root) + + return ComputeStateHTMLTempl(os.Stdout, ts, stout, !cctx.Bool("no-timing"), getCode) } fmt.Println("computed state cid: ", stout.Root) @@ -1119,8 +1213,11 @@ var compStateMsg = ` {{if gt (len .Msg.Params) 0}}
{{JsonParams ($code) (.Msg.Method) (.Msg.Params) | html}}
{{end}} -
Took {{.Duration}}, Exit: {{.MsgRct.ExitCode}}{{if gt (len .MsgRct.Return) 0}}, Return{{end}}
- + {{if PrintTiming}} +
Took {{.Duration}}, Exit: {{.MsgRct.ExitCode}}{{if gt (len .MsgRct.Return) 0}}, Return{{end}}
+ {{else}} +
Exit: {{.MsgRct.ExitCode}}{{if gt (len .MsgRct.Return) 0}}, Return{{end}}
+ {{end}} {{if gt (len .MsgRct.Return) 0}}
{{JsonReturn ($code) (.Msg.Method) (.MsgRct.Return) | html}}
{{end}} @@ -1146,7 +1243,7 @@ var compStateMsg = ` {{range .GasCharges}} {{.Name}}{{if .Extra}}:{{.Extra}}{{end}} {{template "gasC" .}} - {{.TimeTaken}} + {{if PrintTiming}}{{.TimeTaken}}{{end}} {{ $fImp := FirstImportant .Location }} {{ if $fImp }} @@ -1185,7 +1282,7 @@ var compStateMsg = ` {{with SumGas .GasCharges}} Sum {{template "gasC" .}} - {{.TimeTaken}} + {{if PrintTiming}}{{.TimeTaken}}{{end}} {{end}} @@ -1206,19 +1303,20 @@ type compStateHTMLIn struct { Comp *api.ComputeStateOutput } -func ComputeStateHTMLTempl(w io.Writer, ts *types.TipSet, o *api.ComputeStateOutput, getCode func(addr address.Address) (cid.Cid, error)) error { +func ComputeStateHTMLTempl(w io.Writer, ts *types.TipSet, o *api.ComputeStateOutput, printTiming bool, getCode func(addr address.Address) (cid.Cid, error)) error { t, err := template.New("compute_state").Funcs(map[string]interface{}{ - "GetCode": getCode, - "GetMethod": getMethod, - "ToFil": toFil, - "JsonParams": JsonParams, - "JsonReturn": jsonReturn, - "IsSlow": isSlow, - "IsVerySlow": isVerySlow, - "IntExit": func(i exitcode.ExitCode) int64 { return int64(i) }, - "SumGas": sumGas, - "CodeStr": codeStr, - "Call": call, + "GetCode": getCode, + "GetMethod": getMethod, + "ToFil": toFil, + "JsonParams": JsonParams, + "JsonReturn": jsonReturn, + "IsSlow": isSlow, + "IsVerySlow": isVerySlow, + "IntExit": func(i exitcode.ExitCode) int64 { return int64(i) }, + "SumGas": sumGas, + "CodeStr": codeStr, + "Call": call, + "PrintTiming": func() bool { return printTiming }, "FirstImportant": func(locs []types.Loc) *types.Loc { if len(locs) != 0 { for _, l := range locs { @@ -1299,12 +1397,11 @@ func sumGas(changes []*types.GasTrace) types.GasTrace { } func JsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) { - methodMeta, found := stmgr.MethodsMap[code][method] - if !found { - return "", fmt.Errorf("method %d not found on actor %s", method, code) + p, err := stmgr.GetParamType(code, method) + if err != nil { + 
return "", err } - re := reflect.New(methodMeta.Params.Elem()) - p := re.Interface().(cbg.CBORUnmarshaler) + if err := p.UnmarshalCBOR(bytes.NewReader(params)); err != nil { return "", err } @@ -1328,7 +1425,7 @@ func jsonReturn(code cid.Cid, method abi.MethodNum, ret []byte) (string, error) return string(b), err } -var stateWaitMsgCmd = &cli.Command{ +var StateWaitMsgCmd = &cli.Command{ Name: "wait-msg", Usage: "Wait for a message to appear on chain", ArgsUsage: "[messageCid]", @@ -1366,35 +1463,11 @@ var stateWaitMsgCmd = &cli.Command{ return err } - fmt.Printf("message was executed in tipset: %s\n", mw.TipSet.Cids()) - fmt.Printf("Exit Code: %d\n", mw.Receipt.ExitCode) - fmt.Printf("Gas Used: %d\n", mw.Receipt.GasUsed) - fmt.Printf("Return: %x\n", mw.Receipt.Return) - if err := printReceiptReturn(ctx, api, m, mw.Receipt); err != nil { - return err - } - - return nil + return printMsg(ctx, api, msg, mw, m) }, } -func printReceiptReturn(ctx context.Context, api api.FullNode, m *types.Message, r types.MessageReceipt) error { - act, err := api.StateGetActor(ctx, m.To, types.EmptyTSK) - if err != nil { - return err - } - - jret, err := jsonReturn(act.Code, m.Method, r.Return) - if err != nil { - return err - } - - fmt.Println(jret) - - return nil -} - -var stateSearchMsgCmd = &cli.Command{ +var StateSearchMsgCmd = &cli.Command{ Name: "search-msg", Usage: "Search to see whether a message has appeared on chain", ArgsUsage: "[messageCid]", @@ -1421,22 +1494,64 @@ var stateSearchMsgCmd = &cli.Command{ return err } - if mw != nil { - fmt.Printf("message was executed in tipset: %s", mw.TipSet.Cids()) - fmt.Printf("\nExit Code: %d", mw.Receipt.ExitCode) - fmt.Printf("\nGas Used: %d", mw.Receipt.GasUsed) - fmt.Printf("\nReturn: %x", mw.Receipt.Return) - } else { - fmt.Print("message was not found on chain") + if mw == nil { + return fmt.Errorf("failed to find message: %s", msg) } - return nil + + m, err := api.ChainGetMessage(ctx, msg) + if err != nil { + return err + } + + 
return printMsg(ctx, api, msg, mw, m) }, } -var stateCallCmd = &cli.Command{ +func printReceiptReturn(ctx context.Context, api v0api.FullNode, m *types.Message, r types.MessageReceipt) error { + if len(r.Return) == 0 { + return nil + } + + act, err := api.StateGetActor(ctx, m.To, types.EmptyTSK) + if err != nil { + return err + } + + jret, err := jsonReturn(act.Code, m.Method, r.Return) + if err != nil { + return err + } + + fmt.Println("Decoded return value: ", jret) + + return nil +} + +func printMsg(ctx context.Context, api v0api.FullNode, msg cid.Cid, mw *lapi.MsgLookup, m *types.Message) error { + if mw == nil { + fmt.Println("message was not found on chain") + return nil + } + + if mw.Message != msg { + fmt.Printf("Message was replaced: %s\n", mw.Message) + } + + fmt.Printf("Executed in tipset: %s\n", mw.TipSet.Cids()) + fmt.Printf("Exit Code: %d\n", mw.Receipt.ExitCode) + fmt.Printf("Gas Used: %d\n", mw.Receipt.GasUsed) + fmt.Printf("Return: %x\n", mw.Receipt.Return) + if err := printReceiptReturn(ctx, api, m, mw.Receipt); err != nil { + return err + } + + return nil +} + +var StateCallCmd = &cli.Command{ Name: "call", Usage: "Invoke a method on an actor locally", - ArgsUsage: "[toAddress methodId (optional)]", + ArgsUsage: "[toAddress methodId params (optional)]", Flags: []cli.Flag{ &cli.StringFlag{ Name: "from", @@ -1450,8 +1565,13 @@ var stateCallCmd = &cli.Command{ }, &cli.StringFlag{ Name: "ret", - Usage: "specify how to parse output (auto, raw, addr, big)", - Value: "auto", + Usage: "specify how to parse output (raw, decoded, base64, hex)", + Value: "decoded", + }, + &cli.StringFlag{ + Name: "encoding", + Value: "base64", + Usage: "specify params encoding to parse (base64, hex)", }, }, Action: func(cctx *cli.Context) error { @@ -1492,14 +1612,23 @@ var stateCallCmd = &cli.Command{ return fmt.Errorf("failed to parse 'value': %s", err) } - act, err := api.StateGetActor(ctx, toa, ts.Key()) - if err != nil { - return fmt.Errorf("failed to lookup target 
actor: %s", err) - } - - params, err := parseParamsForMethod(act.Code, method, cctx.Args().Slice()[2:]) - if err != nil { - return fmt.Errorf("failed to parse params: %s", err) + var params []byte + // If params were passed in, decode them + if cctx.Args().Len() > 2 { + switch cctx.String("encoding") { + case "base64": + params, err = base64.StdEncoding.DecodeString(cctx.Args().Get(2)) + if err != nil { + return xerrors.Errorf("decoding base64 value: %w", err) + } + case "hex": + params, err = hex.DecodeString(cctx.Args().Get(2)) + if err != nil { + return xerrors.Errorf("decoding hex value: %w", err) + } + default: + return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding")) + } } ret, err := api.StateCall(ctx, &types.Message{ @@ -1510,138 +1639,43 @@ var stateCallCmd = &cli.Command{ Params: params, }, ts.Key()) if err != nil { - return fmt.Errorf("state call failed: %s", err) + return fmt.Errorf("state call failed: %w", err) } if ret.MsgRct.ExitCode != 0 { return fmt.Errorf("invocation failed (exit: %d, gasUsed: %d): %s", ret.MsgRct.ExitCode, ret.MsgRct.GasUsed, ret.Error) } - s, err := formatOutput(cctx.String("ret"), ret.MsgRct.Return) - if err != nil { - return fmt.Errorf("failed to format output: %s", err) - } - - fmt.Printf("gas used: %d\n", ret.MsgRct.GasUsed) - fmt.Printf("return: %s\n", s) - - return nil - }, -} - -func formatOutput(t string, val []byte) (string, error) { - switch t { - case "raw", "hex": - return fmt.Sprintf("%x", val), nil - case "address", "addr", "a": - a, err := address.NewFromBytes(val) - if err != nil { - return "", err - } - return a.String(), nil - case "big", "int", "bigint": - bi := types.BigFromBytes(val) - return bi.String(), nil - case "fil": - bi := types.FIL(types.BigFromBytes(val)) - return bi.String(), nil - case "pid", "peerid", "peer": - pid, err := peer.IDFromBytes(val) - if err != nil { - return "", err - } - - return pid.Pretty(), nil - case "auto": - if len(val) == 0 { - return "", nil - } - - a, 
err := address.NewFromBytes(val) - if err == nil { - return "address: " + a.String(), nil - } - - pid, err := peer.IDFromBytes(val) - if err == nil { - return "peerID: " + pid.Pretty(), nil - } - - bi := types.BigFromBytes(val) - return "bigint: " + bi.String(), nil - default: - return "", fmt.Errorf("unrecognized output type: %q", t) - } -} + fmt.Println("Call receipt:") + fmt.Printf("Exit code: %d\n", ret.MsgRct.ExitCode) + fmt.Printf("Gas Used: %d\n", ret.MsgRct.GasUsed) -func parseParamsForMethod(act cid.Cid, method uint64, args []string) ([]byte, error) { - if len(args) == 0 { - return nil, nil - } - - // TODO: consider moving this to a dedicated helper - actMeta, ok := stmgr.MethodsMap[act] - if !ok { - return nil, fmt.Errorf("unknown actor %s", act) - } - - methodMeta, ok := actMeta[abi.MethodNum(method)] - if !ok { - return nil, fmt.Errorf("unknown method %d for actor %s", method, act) - } - - paramObj := methodMeta.Params - if paramObj.NumField() != len(args) { - return nil, fmt.Errorf("not enough arguments given to call that method (expecting %d)", paramObj.NumField()) - } - - p := reflect.New(paramObj) - for i := 0; i < len(args); i++ { - switch paramObj.Field(i).Type { - case reflect.TypeOf(address.Address{}): - a, err := address.NewFromString(args[i]) - if err != nil { - return nil, fmt.Errorf("failed to parse address: %s", err) - } - p.Elem().Field(i).Set(reflect.ValueOf(a)) - case reflect.TypeOf(uint64(0)): - val, err := strconv.ParseUint(args[i], 10, 64) + switch cctx.String("ret") { + case "decoded": + act, err := api.StateGetActor(ctx, toa, ts.Key()) if err != nil { - return nil, err + return xerrors.Errorf("getting actor: %w", err) } - p.Elem().Field(i).Set(reflect.ValueOf(val)) - case reflect.TypeOf(abi.ChainEpoch(0)): - val, err := strconv.ParseInt(args[i], 10, 64) - if err != nil { - return nil, err - } - p.Elem().Field(i).Set(reflect.ValueOf(abi.ChainEpoch(val))) - case reflect.TypeOf(big.Int{}): - val, err := big.FromString(args[i]) - if err 
!= nil { - return nil, err - } - p.Elem().Field(i).Set(reflect.ValueOf(val)) - case reflect.TypeOf(peer.ID("")): - pid, err := peer.Decode(args[i]) + + retStr, err := jsonReturn(act.Code, abi.MethodNum(method), ret.MsgRct.Return) if err != nil { - return nil, fmt.Errorf("failed to parse peer ID: %s", err) + return xerrors.Errorf("decoding return: %w", err) } - p.Elem().Field(i).Set(reflect.ValueOf(pid)) - default: - return nil, fmt.Errorf("unsupported type for call (TODO): %s", paramObj.Field(i).Type) + + fmt.Printf("Return:\n%s\n", retStr) + case "raw": + fmt.Printf("Return: \n%s\n", ret.MsgRct.Return) + case "hex": + fmt.Printf("Return: \n%x\n", ret.MsgRct.Return) + case "base64": + fmt.Printf("Return: \n%s\n", base64.StdEncoding.EncodeToString(ret.MsgRct.Return)) } - } - m := p.Interface().(cbg.CBORMarshaler) - buf := new(bytes.Buffer) - if err := m.MarshalCBOR(buf); err != nil { - return nil, fmt.Errorf("failed to marshal param object: %s", err) - } - return buf.Bytes(), nil + return nil + }, } -var stateCircSupplyCmd = &cli.Command{ +var StateCircSupplyCmd = &cli.Command{ Name: "circulating-supply", Usage: "Get the exact current circulating supply of Filecoin", Flags: []cli.Flag{ @@ -1690,10 +1724,10 @@ var stateCircSupplyCmd = &cli.Command{ }, } -var stateSectorCmd = &cli.Command{ +var StateSectorCmd = &cli.Command{ Name: "sector", Usage: "Get miner sector info", - ArgsUsage: "[miner address] [sector number]", + ArgsUsage: "[minerAddress] [sectorNumber]", Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -1704,7 +1738,7 @@ var stateSectorCmd = &cli.Command{ ctx := ReqContext(cctx) if cctx.Args().Len() != 2 { - return xerrors.Errorf("expected 2 params") + return xerrors.Errorf("expected 2 params: minerAddress and sectorNumber") } ts, err := LoadTipSet(ctx, cctx, api) @@ -1712,13 +1746,6 @@ var stateSectorCmd = &cli.Command{ return err } - if ts == nil { - ts, err = api.ChainHead(ctx) - if err != nil { - return 
err - } - } - maddr, err := address.NewFromString(cctx.Args().Get(0)) if err != nil { return err @@ -1733,6 +1760,9 @@ var stateSectorCmd = &cli.Command{ if err != nil { return err } + if si == nil { + return xerrors.Errorf("sector %d for miner %s not found", sid, maddr) + } fmt.Println("SectorNumber: ", si.SectorNumber) fmt.Println("SealProof: ", si.SealProof) @@ -1761,7 +1791,7 @@ var stateSectorCmd = &cli.Command{ }, } -var stateMarketCmd = &cli.Command{ +var StateMarketCmd = &cli.Command{ Name: "market", Usage: "Inspect the storage market actor", Subcommands: []*cli.Command{ @@ -1806,3 +1836,35 @@ var stateMarketBalanceCmd = &cli.Command{ return nil }, } + +var StateNtwkVersionCmd = &cli.Command{ + Name: "network-version", + Usage: "Returns the network version", + Action: func(cctx *cli.Context) error { + if cctx.Args().Present() { + return ShowHelp(cctx, fmt.Errorf("doesn't expect any arguments")) + } + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := ReqContext(cctx) + + ts, err := LoadTipSet(ctx, cctx, api) + if err != nil { + return err + } + + nv, err := api.StateNetworkVersion(ctx, ts.Key()) + if err != nil { + return err + } + + fmt.Printf("Network Version: %d\n", nv) + + return nil + }, +} diff --git a/cli/status.go b/cli/status.go new file mode 100644 index 00000000000..75f91196a1c --- /dev/null +++ b/cli/status.go @@ -0,0 +1,60 @@ +package cli + +import ( + "fmt" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/lotus/build" +) + +var StatusCmd = &cli.Command{ + Name: "status", + Usage: "Check node status", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "chain", + Usage: "include chain health status", + }, + }, + + Action: func(cctx *cli.Context) error { + apic, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + inclChainStatus := cctx.Bool("chain") + + status, err := apic.NodeStatus(ctx, inclChainStatus) + if err 
!= nil { + return err + } + + fmt.Printf("Sync Epoch: %d\n", status.SyncStatus.Epoch) + fmt.Printf("Epochs Behind: %d\n", status.SyncStatus.Behind) + fmt.Printf("Peers to Publish Messages: %d\n", status.PeerStatus.PeersToPublishMsgs) + fmt.Printf("Peers to Publish Blocks: %d\n", status.PeerStatus.PeersToPublishBlocks) + + if inclChainStatus && status.SyncStatus.Epoch > uint64(build.Finality) { + var ok100, okFin string + if status.ChainStatus.BlocksPerTipsetLast100 >= 4.75 { + ok100 = "[OK]" + } else { + ok100 = "[UNHEALTHY]" + } + if status.ChainStatus.BlocksPerTipsetLastFinality >= 4.75 { + okFin = "[OK]" + } else { + okFin = "[UNHEALTHY]" + } + + fmt.Printf("Blocks per TipSet in last 100 epochs: %f %s\n", status.ChainStatus.BlocksPerTipsetLast100, ok100) + fmt.Printf("Blocks per TipSet in last finality: %f %s\n", status.ChainStatus.BlocksPerTipsetLastFinality, okFin) + } + + return nil + }, +} diff --git a/cli/sync.go b/cli/sync.go index c3f25eb1d56..c7b010111c3 100644 --- a/cli/sync.go +++ b/cli/sync.go @@ -12,23 +12,24 @@ import ( "github.com/urfave/cli/v2" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" ) -var syncCmd = &cli.Command{ +var SyncCmd = &cli.Command{ Name: "sync", Usage: "Inspect or interact with the chain syncer", Subcommands: []*cli.Command{ - syncStatusCmd, - syncWaitCmd, - syncMarkBadCmd, - syncUnmarkBadCmd, - syncCheckBadCmd, - syncCheckpointCmd, + SyncStatusCmd, + SyncWaitCmd, + SyncMarkBadCmd, + SyncUnmarkBadCmd, + SyncCheckBadCmd, + SyncCheckpointCmd, }, } -var syncStatusCmd = &cli.Command{ +var SyncStatusCmd = &cli.Command{ Name: "status", Usage: "check sync status", Action: func(cctx *cli.Context) error { @@ -45,8 +46,8 @@ var syncStatusCmd = &cli.Command{ } fmt.Println("sync status:") - for i, ss := range state.ActiveSyncs { - fmt.Printf("worker %d:\n", i) + for _, ss := range state.ActiveSyncs { + fmt.Printf("worker %d:\n", ss.WorkerID) var base, target 
[]cid.Cid var heightDiff int64 var theight abi.ChainEpoch @@ -81,7 +82,7 @@ var syncStatusCmd = &cli.Command{ }, } -var syncWaitCmd = &cli.Command{ +var SyncWaitCmd = &cli.Command{ Name: "wait", Usage: "Wait for sync to be complete", Flags: []cli.Flag{ @@ -102,7 +103,7 @@ var syncWaitCmd = &cli.Command{ }, } -var syncMarkBadCmd = &cli.Command{ +var SyncMarkBadCmd = &cli.Command{ Name: "mark-bad", Usage: "Mark the given block as bad, will prevent syncing to a chain that contains it", ArgsUsage: "[blockCid]", @@ -127,7 +128,7 @@ var syncMarkBadCmd = &cli.Command{ }, } -var syncUnmarkBadCmd = &cli.Command{ +var SyncUnmarkBadCmd = &cli.Command{ Name: "unmark-bad", Usage: "Unmark the given block as bad, makes it possible to sync to a chain containing it", Flags: []cli.Flag{ @@ -162,7 +163,7 @@ var syncUnmarkBadCmd = &cli.Command{ }, } -var syncCheckBadCmd = &cli.Command{ +var SyncCheckBadCmd = &cli.Command{ Name: "check-bad", Usage: "check if the given block was marked bad, and for what reason", ArgsUsage: "[blockCid]", @@ -198,7 +199,7 @@ var syncCheckBadCmd = &cli.Command{ }, } -var syncCheckpointCmd = &cli.Command{ +var SyncCheckpointCmd = &cli.Command{ Name: "checkpoint", Usage: "mark a certain tipset as checkpointed; the node will never fork away from this tipset", ArgsUsage: "[tipsetKey]", @@ -240,7 +241,7 @@ var syncCheckpointCmd = &cli.Command{ }, } -func SyncWait(ctx context.Context, napi api.FullNode, watch bool) error { +func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error { tick := time.Second / 4 lastLines := 0 @@ -263,12 +264,17 @@ func SyncWait(ctx context.Context, napi api.FullNode, watch bool) error { return err } + if len(state.ActiveSyncs) == 0 { + time.Sleep(time.Second) + continue + } + head, err := napi.ChainHead(ctx) if err != nil { return err } - working := 0 + working := -1 for i, ss := range state.ActiveSyncs { switch ss.Stage { case api.StageSyncComplete: @@ -279,7 +285,12 @@ func SyncWait(ctx context.Context, napi 
api.FullNode, watch bool) error { } } + if working == -1 { + working = len(state.ActiveSyncs) - 1 + } + ss := state.ActiveSyncs[working] + workerID := ss.WorkerID var baseHeight abi.ChainEpoch var target []cid.Cid @@ -302,7 +313,7 @@ func SyncWait(ctx context.Context, napi api.FullNode, watch bool) error { fmt.Print("\r\x1b[2K\x1b[A") } - fmt.Printf("Worker: %d; Base: %d; Target: %d (diff: %d)\n", working, baseHeight, theight, heightDiff) + fmt.Printf("Worker: %d; Base: %d; Target: %d (diff: %d)\n", workerID, baseHeight, theight, heightDiff) fmt.Printf("State: %s; Current Epoch: %d; Todo: %d\n", ss.Stage, ss.Height, theight-ss.Height) lastLines = 2 diff --git a/cli/test/net.go b/cli/test/net.go deleted file mode 100644 index 836b81a8f78..00000000000 --- a/cli/test/net.go +++ /dev/null @@ -1,87 +0,0 @@ -package test - -import ( - "context" - "testing" - "time" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/types" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/api/test" - test2 "github.com/filecoin-project/lotus/node/test" -) - -func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) (test.TestNode, address.Address) { - n, sn := test2.RPCMockSbBuilder(t, test.OneFull, test.OneMiner) - - full := n[0] - miner := sn[0] - - // Get everyone connected - addrs, err := full.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } - - // Start mining blocks - bm := test.NewBlockMiner(ctx, t, miner, blocktime) - bm.MineBlocks() - - // Get the full node's wallet address - fullAddr, err := full.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - - // Create mock CLI - return full, fullAddr -} - -func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]test.TestNode, []address.Address) { - n, sn := test2.RPCMockSbBuilder(t, test.TwoFull, 
test.OneMiner) - - fullNode1 := n[0] - fullNode2 := n[1] - miner := sn[0] - - // Get everyone connected - addrs, err := fullNode1.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := fullNode2.NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } - - // Start mining blocks - bm := test.NewBlockMiner(ctx, t, miner, blocktime) - bm.MineBlocks() - - // Send some funds to register the second node - fullNodeAddr2, err := fullNode2.WalletNew(ctx, types.KTSecp256k1) - if err != nil { - t.Fatal(err) - } - - test.SendFunds(ctx, t, fullNode1, fullNodeAddr2, abi.NewTokenAmount(1e18)) - - // Get the first node's address - fullNodeAddr1, err := fullNode1.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - - // Create mock CLI - return n, []address.Address{fullNodeAddr1, fullNodeAddr2} -} diff --git a/cli/test/util.go b/cli/test/util.go deleted file mode 100644 index e3930dc832a..00000000000 --- a/cli/test/util.go +++ /dev/null @@ -1,12 +0,0 @@ -package test - -import "github.com/ipfs/go-log/v2" - -func QuietMiningLogs() { - _ = log.SetLogLevel("miner", "ERROR") - _ = log.SetLogLevel("chainstore", "ERROR") - _ = log.SetLogLevel("chain", "ERROR") - _ = log.SetLogLevel("sub", "ERROR") - _ = log.SetLogLevel("storageminer", "ERROR") - _ = log.SetLogLevel("pubsub", "ERROR") -} diff --git a/cli/util.go b/cli/util.go index fb555e320f0..73668742def 100644 --- a/cli/util.go +++ b/cli/util.go @@ -3,19 +3,29 @@ package cli import ( "context" "fmt" + "os" "time" + "github.com/fatih/color" "github.com/hako/durafmt" "github.com/ipfs/go-cid" + "github.com/mattn/go-isatty" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" ) -func parseTipSet(ctx context.Context, api api.FullNode, vals []string) (*types.TipSet, 
error) { +// Set the global default, to be overridden by individual cli flags in order +func init() { + color.NoColor = os.Getenv("GOLOG_LOG_FMT") != "color" && + !isatty.IsTerminal(os.Stdout.Fd()) && + !isatty.IsCygwinTerminal(os.Stdout.Fd()) +} + +func parseTipSet(ctx context.Context, api v0api.FullNode, vals []string) (*types.TipSet, error) { var headers []*types.BlockHeader for _, c := range vals { blkc, err := cid.Decode(c) diff --git a/cli/util/api.go b/cli/util/api.go new file mode 100644 index 00000000000..ecd2e927f82 --- /dev/null +++ b/cli/util/api.go @@ -0,0 +1,298 @@ +package cliutil + +import ( + "context" + "fmt" + "net/http" + "net/url" + "os" + "os/signal" + "strings" + "syscall" + + "github.com/mitchellh/go-homedir" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-jsonrpc" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/client" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/node/repo" +) + +const ( + metadataTraceContext = "traceContext" +) + +// The flag passed on the command line with the listen address of the API +// server (only used by the tests) +func flagForAPI(t repo.RepoType) string { + switch t { + case repo.FullNode: + return "api-url" + case repo.StorageMiner: + return "miner-api-url" + case repo.Worker: + return "worker-api-url" + default: + panic(fmt.Sprintf("Unknown repo type: %v", t)) + } +} + +func flagForRepo(t repo.RepoType) string { + switch t { + case repo.FullNode: + return "repo" + case repo.StorageMiner: + return "miner-repo" + case repo.Worker: + return "worker-repo" + default: + panic(fmt.Sprintf("Unknown repo type: %v", t)) + } +} + +func EnvForRepo(t repo.RepoType) string { + switch t { + case repo.FullNode: + return "FULLNODE_API_INFO" + case repo.StorageMiner: + return "MINER_API_INFO" + case repo.Worker: + return "WORKER_API_INFO" + default: + 
panic(fmt.Sprintf("Unknown repo type: %v", t)) + } +} + +// TODO remove after deprecation period +func envForRepoDeprecation(t repo.RepoType) string { + switch t { + case repo.FullNode: + return "FULLNODE_API_INFO" + case repo.StorageMiner: + return "STORAGE_API_INFO" + case repo.Worker: + return "WORKER_API_INFO" + default: + panic(fmt.Sprintf("Unknown repo type: %v", t)) + } +} + +func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) { + // Check if there was a flag passed with the listen address of the API + // server (only used by the tests) + apiFlag := flagForAPI(t) + if ctx.IsSet(apiFlag) { + strma := ctx.String(apiFlag) + strma = strings.TrimSpace(strma) + + return APIInfo{Addr: strma}, nil + } + + envKey := EnvForRepo(t) + env, ok := os.LookupEnv(envKey) + if !ok { + // TODO remove after deprecation period + envKey = envForRepoDeprecation(t) + env, ok = os.LookupEnv(envKey) + if ok { + log.Warnf("Use deprecation env(%s) value, please use env(%s) instead.", envKey, EnvForRepo(t)) + } + } + if ok { + return ParseApiInfo(env), nil + } + + repoFlag := flagForRepo(t) + + p, err := homedir.Expand(ctx.String(repoFlag)) + if err != nil { + return APIInfo{}, xerrors.Errorf("could not expand home dir (%s): %w", repoFlag, err) + } + + r, err := repo.NewFS(p) + if err != nil { + return APIInfo{}, xerrors.Errorf("could not open repo at path: %s; %w", p, err) + } + + ma, err := r.APIEndpoint() + if err != nil { + return APIInfo{}, xerrors.Errorf("could not get api endpoint: %w", err) + } + + token, err := r.APIToken() + if err != nil { + log.Warnf("Couldn't load CLI token, capabilities may be limited: %v", err) + } + + return APIInfo{ + Addr: ma.String(), + Token: token, + }, nil +} + +func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http.Header, error) { + ainfo, err := GetAPIInfo(ctx, t) + if err != nil { + return "", nil, xerrors.Errorf("could not get API info: %w", err) + } + + addr, err := ainfo.DialArgs(version) + if err != 
nil { + return "", nil, xerrors.Errorf("could not get DialArgs: %w", err) + } + + return addr, ainfo.AuthHeader(), nil +} + +func GetAPI(ctx *cli.Context) (api.CommonNet, jsonrpc.ClientCloser, error) { + ti, ok := ctx.App.Metadata["repoType"] + if !ok { + log.Errorf("unknown repo type, are you sure you want to use GetAPI?") + ti = repo.FullNode + } + t, ok := ti.(repo.RepoType) + if !ok { + log.Errorf("repoType type does not match the type of repo.RepoType") + } + + if tn, ok := ctx.App.Metadata["testnode-storage"]; ok { + return tn.(api.StorageMiner), func() {}, nil + } + if tn, ok := ctx.App.Metadata["testnode-full"]; ok { + return tn.(api.FullNode), func() {}, nil + } + + addr, headers, err := GetRawAPI(ctx, t, "v0") + if err != nil { + return nil, nil, err + } + + return client.NewCommonRPCV0(ctx.Context, addr, headers) +} + +func GetFullNodeAPI(ctx *cli.Context) (v0api.FullNode, jsonrpc.ClientCloser, error) { + if tn, ok := ctx.App.Metadata["testnode-full"]; ok { + return &v0api.WrapperV1Full{FullNode: tn.(v1api.FullNode)}, func() {}, nil + } + + addr, headers, err := GetRawAPI(ctx, repo.FullNode, "v0") + if err != nil { + return nil, nil, err + } + + return client.NewFullNodeRPCV0(ctx.Context, addr, headers) +} + +func GetFullNodeAPIV1(ctx *cli.Context) (v1api.FullNode, jsonrpc.ClientCloser, error) { + if tn, ok := ctx.App.Metadata["testnode-full"]; ok { + return tn.(v1api.FullNode), func() {}, nil + } + + addr, headers, err := GetRawAPI(ctx, repo.FullNode, "v1") + if err != nil { + return nil, nil, err + } + + return client.NewFullNodeRPCV1(ctx.Context, addr, headers) +} + +type GetStorageMinerOptions struct { + PreferHttp bool +} + +type GetStorageMinerOption func(*GetStorageMinerOptions) + +func StorageMinerUseHttp(opts *GetStorageMinerOptions) { + opts.PreferHttp = true +} + +func GetStorageMinerAPI(ctx *cli.Context, opts ...GetStorageMinerOption) (api.StorageMiner, jsonrpc.ClientCloser, error) { + var options GetStorageMinerOptions + for _, opt := range 
opts { + opt(&options) + } + + if tn, ok := ctx.App.Metadata["testnode-storage"]; ok { + return tn.(api.StorageMiner), func() {}, nil + } + + addr, headers, err := GetRawAPI(ctx, repo.StorageMiner, "v0") + if err != nil { + return nil, nil, err + } + + if options.PreferHttp { + u, err := url.Parse(addr) + if err != nil { + return nil, nil, xerrors.Errorf("parsing miner api URL: %w", err) + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + } + + addr = u.String() + } + + return client.NewStorageMinerRPCV0(ctx.Context, addr, headers) +} + +func GetWorkerAPI(ctx *cli.Context) (api.Worker, jsonrpc.ClientCloser, error) { + addr, headers, err := GetRawAPI(ctx, repo.Worker, "v0") + if err != nil { + return nil, nil, err + } + + return client.NewWorkerRPCV0(ctx.Context, addr, headers) +} + +func GetGatewayAPI(ctx *cli.Context) (api.Gateway, jsonrpc.ClientCloser, error) { + addr, headers, err := GetRawAPI(ctx, repo.FullNode, "v1") + if err != nil { + return nil, nil, err + } + + return client.NewGatewayRPCV1(ctx.Context, addr, headers) +} + +func GetGatewayAPIV0(ctx *cli.Context) (v0api.Gateway, jsonrpc.ClientCloser, error) { + addr, headers, err := GetRawAPI(ctx, repo.FullNode, "v0") + if err != nil { + return nil, nil, err + } + + return client.NewGatewayRPCV0(ctx.Context, addr, headers) +} + +func DaemonContext(cctx *cli.Context) context.Context { + if mtCtx, ok := cctx.App.Metadata[metadataTraceContext]; ok { + return mtCtx.(context.Context) + } + + return context.Background() +} + +// ReqContext returns context for cli execution. Calling it for the first time +// installs SIGTERM handler that will close returned context. +// Not safe for concurrent execution. 
+func ReqContext(cctx *cli.Context) context.Context { + tCtx := DaemonContext(cctx) + + ctx, done := context.WithCancel(tCtx) + sigChan := make(chan os.Signal, 2) + go func() { + <-sigChan + done() + }() + signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP) + + return ctx +} diff --git a/cli/util/apiinfo.go b/cli/util/apiinfo.go index 1f9a837697a..41ca18c6104 100644 --- a/cli/util/apiinfo.go +++ b/cli/util/apiinfo.go @@ -36,7 +36,7 @@ func ParseApiInfo(s string) APIInfo { } } -func (a APIInfo) DialArgs() (string, error) { +func (a APIInfo) DialArgs(version string) (string, error) { ma, err := multiaddr.NewMultiaddr(a.Addr) if err == nil { _, addr, err := manet.DialArgs(ma) @@ -44,14 +44,14 @@ func (a APIInfo) DialArgs() (string, error) { return "", err } - return "ws://" + addr + "/rpc/v0", nil + return "ws://" + addr + "/rpc/" + version, nil } _, err = url.Parse(a.Addr) if err != nil { return "", err } - return a.Addr + "/rpc/v0", nil + return a.Addr + "/rpc/" + version, nil } func (a APIInfo) Host() (string, error) { diff --git a/cli/wait.go b/cli/wait.go index ca8cdce3f56..ea897d5adb3 100644 --- a/cli/wait.go +++ b/cli/wait.go @@ -7,12 +7,12 @@ import ( "github.com/urfave/cli/v2" ) -var waitApiCmd = &cli.Command{ +var WaitApiCmd = &cli.Command{ Name: "wait-api", Usage: "Wait for lotus api to come online", Action: func(cctx *cli.Context) error { for i := 0; i < 30; i++ { - api, closer, err := GetFullNodeAPI(cctx) + api, closer, err := GetAPI(cctx) if err != nil { fmt.Printf("Not online yet... 
(%s)\n", err) time.Sleep(time.Second) diff --git a/cli/wallet.go b/cli/wallet.go index f6368cbfa44..802d85702e8 100644 --- a/cli/wallet.go +++ b/cli/wallet.go @@ -16,11 +16,8 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/v2/actors/builtin" - "github.com/filecoin-project/lotus/chain/actors" - types "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/tablewriter" ) @@ -512,6 +509,7 @@ var walletMarket = &cli.Command{ Usage: "Interact with market balances", Subcommands: []*cli.Command{ walletMarketWithdraw, + walletMarketAdd, }, } @@ -521,13 +519,13 @@ var walletMarketWithdraw = &cli.Command{ ArgsUsage: "[amount (FIL) optional, otherwise will withdraw max available]", Flags: []cli.Flag{ &cli.StringFlag{ - Name: "from", - Usage: "Specify address to withdraw funds from, otherwise it will use the default wallet address", - Aliases: []string{"f"}, + Name: "wallet", + Usage: "Specify address to withdraw funds to, otherwise it will use the default wallet address", + Aliases: []string{"w"}, }, &cli.StringFlag{ Name: "address", - Usage: "Market address to withdraw from (account or miner actor address, defaults to --from address)", + Usage: "Market address to withdraw from (account or miner actor address, defaults to --wallet address)", Aliases: []string{"a"}, }, }, @@ -539,20 +537,20 @@ var walletMarketWithdraw = &cli.Command{ defer closer() ctx := ReqContext(cctx) - var from address.Address - if cctx.String("from") != "" { - from, err = address.NewFromString(cctx.String("from")) + var wallet address.Address + if cctx.String("wallet") != "" { + wallet, err = address.NewFromString(cctx.String("wallet")) if err != nil { return xerrors.Errorf("parsing from address: %w", 
err) } } else { - from, err = api.WalletDefaultAddress(ctx) + wallet, err = api.WalletDefaultAddress(ctx) if err != nil { return xerrors.Errorf("getting default wallet address: %w", err) } } - addr := from + addr := wallet if cctx.String("address") != "" { addr, err = address.NewFromString(cctx.String("address")) if err != nil { @@ -560,14 +558,34 @@ var walletMarketWithdraw = &cli.Command{ } } + // Work out if there are enough unreserved, unlocked funds to withdraw bal, err := api.StateMarketBalance(ctx, addr, types.EmptyTSK) if err != nil { return xerrors.Errorf("getting market balance for address %s: %w", addr.String(), err) } - avail := big.Subtract(bal.Escrow, bal.Locked) + reserved, err := api.MarketGetReserved(ctx, addr) + if err != nil { + return xerrors.Errorf("getting market reserved amount for address %s: %w", addr.String(), err) + } + + avail := big.Subtract(big.Subtract(bal.Escrow, bal.Locked), reserved) + + notEnoughErr := func(msg string) error { + return xerrors.Errorf("%s; "+ + "available (%s) = escrow (%s) - locked (%s) - reserved (%s)", + msg, types.FIL(avail), types.FIL(bal.Escrow), types.FIL(bal.Locked), types.FIL(reserved)) + } + + if avail.IsZero() || avail.LessThan(big.Zero()) { + avail = big.Zero() + return notEnoughErr("no funds available to withdraw") + } + + // Default to withdrawing all available funds amt := avail + // If there was an amount argument, only withdraw that amount if cctx.Args().Present() { f, err := types.ParseFIL(cctx.Args().First()) if err != nil { @@ -577,35 +595,95 @@ var walletMarketWithdraw = &cli.Command{ amt = abi.TokenAmount(f) } + // Check the amount is positive + if amt.IsZero() || amt.LessThan(big.Zero()) { + return xerrors.Errorf("amount must be > 0") + } + + // Check there are enough available funds if amt.GreaterThan(avail) { - return xerrors.Errorf("can't withdraw more funds than available; requested: %s; available: %s", types.FIL(amt), types.FIL(avail)) + msg := fmt.Sprintf("can't withdraw more funds than 
available; requested: %s", types.FIL(amt)) + return notEnoughErr(msg) } - if avail.IsZero() { - return xerrors.Errorf("zero unlocked funds available to withdraw") + fmt.Printf("Submitting WithdrawBalance message for amount %s for address %s\n", types.FIL(amt), wallet.String()) + smsg, err := api.MarketWithdraw(ctx, wallet, addr, amt) + if err != nil { + return xerrors.Errorf("fund manager withdraw error: %w", err) } - params, err := actors.SerializeParams(&market.WithdrawBalanceParams{ - ProviderOrClientAddress: addr, - Amount: amt, - }) + fmt.Printf("WithdrawBalance message cid: %s\n", smsg) + + return nil + }, +} + +var walletMarketAdd = &cli.Command{ + Name: "add", + Usage: "Add funds to the Storage Market Actor", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "Specify address to move funds from, otherwise it will use the default wallet address", + Aliases: []string{"f"}, + }, + &cli.StringFlag{ + Name: "address", + Usage: "Market address to move funds to (account or miner actor address, defaults to --from address)", + Aliases: []string{"a"}, + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return xerrors.Errorf("getting node API: %w", err) + } + defer closer() + ctx := ReqContext(cctx) + + // Get amount param + if !cctx.Args().Present() { + return fmt.Errorf("must pass amount to add") + } + f, err := types.ParseFIL(cctx.Args().First()) if err != nil { - return xerrors.Errorf("serializing params: %w", err) + return xerrors.Errorf("parsing 'amount' argument: %w", err) + } + + amt := abi.TokenAmount(f) + + // Get from param + var from address.Address + if cctx.String("from") != "" { + from, err = address.NewFromString(cctx.String("from")) + if err != nil { + return xerrors.Errorf("parsing from address: %w", err) + } + } else { + from, err = api.WalletDefaultAddress(ctx) + if err != nil { + return xerrors.Errorf("getting default wallet address: %w", err) + } + } + + 
// Get address param + addr := from + if cctx.String("address") != "" { + addr, err = address.NewFromString(cctx.String("address")) + if err != nil { + return xerrors.Errorf("parsing market address: %w", err) + } } - fmt.Printf("Submitting WithdrawBalance message for amount %s for address %s\n", types.FIL(amt), from.String()) - smsg, err := api.MpoolPushMessage(ctx, &types.Message{ - To: builtin.StorageMarketActorAddr, - From: from, - Value: types.NewInt(0), - Method: builtin.MethodsMarket.WithdrawBalance, - Params: params, - }, nil) + // Add balance to market actor + fmt.Printf("Submitting Add Balance message for amount %s for address %s\n", types.FIL(amt), addr) + smsg, err := api.MarketAddBalance(ctx, from, addr, amt) if err != nil { - return xerrors.Errorf("submitting WithdrawBalance message: %w", err) + return xerrors.Errorf("add balance error: %w", err) } - fmt.Printf("WithdrawBalance message cid: %s\n", smsg.Cid()) + fmt.Printf("AddBalance message cid: %s\n", smsg) return nil }, diff --git a/cmd/chain-noise/main.go b/cmd/chain-noise/main.go index 81586e1b206..8106ce592b2 100644 --- a/cmd/chain-noise/main.go +++ b/cmd/chain-noise/main.go @@ -8,7 +8,7 @@ import ( "time" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" @@ -27,6 +27,16 @@ func main() { Hidden: true, Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME }, + &cli.IntFlag{ + Name: "limit", + Usage: "spam transaction count limit, <= 0 is no limit", + Value: 0, + }, + &cli.IntFlag{ + Name: "rate", + Usage: "spam transaction rate, count per second", + Value: 5, + }, }, Commands: []*cli.Command{runCmd}, } @@ -52,11 +62,17 @@ var runCmd = &cli.Command{ defer closer() ctx := lcli.ReqContext(cctx) - return sendSmallFundsTxs(ctx, api, addr, 5) + rate := cctx.Int("rate") + if rate <= 0 { + rate = 
5 + } + limit := cctx.Int("limit") + + return sendSmallFundsTxs(ctx, api, addr, rate, limit) }, } -func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Address, rate int) error { +func sendSmallFundsTxs(ctx context.Context, api v0api.FullNode, from address.Address, rate, limit int) error { var sendSet []address.Address for i := 0; i < 20; i++ { naddr, err := api.WalletNew(ctx, types.KTSecp256k1) @@ -66,9 +82,14 @@ func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Addre sendSet = append(sendSet, naddr) } + count := limit tick := build.Clock.Ticker(time.Second / time.Duration(rate)) for { + if count <= 0 && limit > 0 { + fmt.Printf("%d messages sent.\n", limit) + return nil + } select { case <-tick.C: msg := &types.Message{ @@ -81,6 +102,7 @@ func sendSmallFundsTxs(ctx context.Context, api api.FullNode, from address.Addre if err != nil { return err } + count-- fmt.Println("Message sent: ", smsg.Cid()) case <-ctx.Done(): return nil diff --git a/cmd/lotus-bench/caching_verifier.go b/cmd/lotus-bench/caching_verifier.go index 5b434c762a3..f4cc0f83741 100644 --- a/cmd/lotus-bench/caching_verifier.go +++ b/cmd/lotus-bench/caching_verifier.go @@ -8,6 +8,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-datastore" "github.com/minio/blake2b-simd" cbg "github.com/whyrusleeping/cbor-gen" @@ -96,4 +97,8 @@ func (cv *cachingVerifier) GenerateWinningPoStSectorChallenge(ctx context.Contex return cv.backend.GenerateWinningPoStSectorChallenge(ctx, proofType, a, rnd, u) } +func (cv cachingVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + return cv.backend.VerifyAggregateSeals(aggregate) +} + var _ ffiwrapper.Verifier = (*cachingVerifier)(nil) 
diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index acbf9ebdcbf..4b464bebeb1 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -16,21 +16,29 @@ import ( "sort" "time" + ocprom "contrib.go.opencensus.io/exporter/prometheus" "github.com/cockroachdb/pebble" "github.com/cockroachdb/pebble/bloom" + "github.com/ipfs/go-cid" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/blockstore" + badgerbs "github.com/filecoin-project/lotus/blockstore/badger" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/lib/blockstore" + lcli "github.com/filecoin-project/lotus/cli" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" + "github.com/filecoin-project/lotus/node/repo" + + "github.com/filecoin-project/go-state-types/abi" metricsprometheus "github.com/ipfs/go-metrics-prometheus" "github.com/ipld/go-car" - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" bdg "github.com/dgraph-io/badger/v2" @@ -51,14 +59,30 @@ type TipSetExec struct { var importBenchCmd = &cli.Command{ Name: "import", - Usage: "benchmark chain import and validation", + Usage: "Benchmark chain import and validation", Subcommands: []*cli.Command{ importAnalyzeCmd, }, Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "start-tipset", + Usage: "start validation at the given tipset key; in format cid1,cid2,cid3...", + }, + &cli.StringFlag{ + Name: "end-tipset", + Usage: "halt validation at the given tipset key; in format cid1,cid2,cid3...", + }, + &cli.StringFlag{ + Name: "genesis-tipset", + 
Usage: "genesis tipset key; in format cid1,cid2,cid3...", + }, + &cli.Int64Flag{ + Name: "start-height", + Usage: "start validation at given height; beware that chain traversal by height is very slow", + }, &cli.Int64Flag{ - Name: "height", - Usage: "halt validation after given height", + Name: "end-height", + Usage: "halt validation after given height; beware that chain traversal by height is very slow", }, &cli.IntFlag{ Name: "batch-seal-verify-threads", @@ -86,32 +110,52 @@ var importBenchCmd = &cli.Command{ Name: "global-profile", Value: true, }, - &cli.Int64Flag{ - Name: "start-at", - }, &cli.BoolFlag{ Name: "only-import", }, &cli.BoolFlag{ Name: "use-pebble", }, + &cli.BoolFlag{ + Name: "use-native-badger", + }, + &cli.StringFlag{ + Name: "car", + Usage: "path to CAR file; required for import; on validation, either " + + "a CAR path or the --head flag are required", + }, + &cli.StringFlag{ + Name: "head", + Usage: "tipset key of the head, useful when benchmarking validation " + + "on an existing chain store, where a CAR is not available; " + + "if both --car and --head are provided, --head takes precedence " + + "over the CAR root; the format is cid1,cid2,cid3...", + }, }, Action: func(cctx *cli.Context) error { metricsprometheus.Inject() //nolint:errcheck vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads") - if !cctx.Args().Present() { - fmt.Println("must pass car file of chain to benchmark importing") - return nil - } - - cfi, err := os.Open(cctx.Args().First()) - if err != nil { - return err - } - defer cfi.Close() //nolint:errcheck // read only file go func() { - http.Handle("/debug/metrics/prometheus", promhttp.Handler()) + // Prometheus globals are exposed as interfaces, but the prometheus + // OpenCensus exporter expects a concrete *Registry. The concrete type of + // the globals are actually *Registry, so we downcast them, staying + // defensive in case things change under the hood. 
+ registry, ok := prometheus.DefaultRegisterer.(*prometheus.Registry) + if !ok { + log.Warnf("failed to export default prometheus registry; some metrics will be unavailable; unexpected type: %T", prometheus.DefaultRegisterer) + return + } + exporter, err := ocprom.NewExporter(ocprom.Options{ + Registry: registry, + Namespace: "lotus", + }) + if err != nil { + log.Fatalf("could not create the prometheus stats exporter: %v", err) + } + + http.Handle("/debug/metrics", exporter) + http.ListenAndServe("localhost:6060", nil) //nolint:errcheck }() @@ -126,17 +170,17 @@ var importBenchCmd = &cli.Command{ tdir = tmp } - bdgOpt := badger.DefaultOptions - bdgOpt.GcInterval = 0 - bdgOpt.Options = bdg.DefaultOptions("") - bdgOpt.Options.SyncWrites = false - bdgOpt.Options.Truncate = true - bdgOpt.Options.DetectConflicts = false + var ( + ds datastore.Batching + bs blockstore.Blockstore + err error + ) - var bds datastore.Batching - if cctx.Bool("use-pebble") { + switch { + case cctx.Bool("use-pebble"): + log.Info("using pebble") cache := 512 - bds, err = pebbleds.NewDatastore(tdir, &pebble.Options{ + ds, err = pebbleds.NewDatastore(tdir, &pebble.Options{ // Pebble has a single combined cache area and the write // buffers are taken from this too. Assign all available // memory allowance for cache. @@ -155,30 +199,45 @@ var importBenchCmd = &cli.Command{ }, Logger: log, }) - } else { - bds, err = badger.NewDatastore(tdir, &bdgOpt) + + case cctx.Bool("use-native-badger"): + log.Info("using native badger") + var opts badgerbs.Options + if opts, err = repo.BadgerBlockstoreOptions(repo.UniversalBlockstore, tdir, false); err != nil { + return err + } + opts.SyncWrites = false + bs, err = badgerbs.Open(opts) + + default: // legacy badger via datastore. 
+ log.Info("using legacy badger") + bdgOpt := badger.DefaultOptions + bdgOpt.GcInterval = 0 + bdgOpt.Options = bdg.DefaultOptions("") + bdgOpt.Options.SyncWrites = false + bdgOpt.Options.Truncate = true + bdgOpt.Options.DetectConflicts = false + + ds, err = badger.NewDatastore(tdir, &bdgOpt) } + if err != nil { return err } - defer bds.Close() //nolint:errcheck - bds = measure.New("dsbench", bds) - - bs := blockstore.NewBlockstore(bds) - cacheOpts := blockstore.DefaultCacheOpts() - cacheOpts.HasBloomFilterSize = 0 + if ds != nil { + ds = measure.New("dsbench", ds) + defer ds.Close() //nolint:errcheck + bs = blockstore.FromDatastore(ds) + } - cbs, err := blockstore.CachedBlockstore(context.TODO(), bs, cacheOpts) - if err != nil { - return err + if c, ok := bs.(io.Closer); ok { + defer c.Close() //nolint:errcheck } - bs = cbs - ds := datastore.NewMapDatastore() var verifier ffiwrapper.Verifier = ffiwrapper.ProofVerifier if cctx.IsSet("syscall-cache") { - scds, err := badger.NewDatastore(cctx.String("syscall-cache"), &bdgOpt) + scds, err := badger.NewDatastore(cctx.String("syscall-cache"), &badger.DefaultOptions) if err != nil { return xerrors.Errorf("opening syscall-cache datastore: %w", err) } @@ -193,29 +252,111 @@ var importBenchCmd = &cli.Command{ return nil } - cs := store.NewChainStore(bs, ds, vm.Syscalls(verifier), nil) + metadataDs := datastore.NewMapDatastore() + cs := store.NewChainStore(bs, bs, metadataDs, vm.Syscalls(verifier), nil) + defer cs.Close() //nolint:errcheck + stm := stmgr.NewStateManager(cs) - if cctx.Bool("global-profile") { - prof, err := os.Create("import-bench.prof") + var carFile *os.File + // open the CAR file if one is provided. + if path := cctx.String("car"); path != "" { + var err error + if carFile, err = os.Open(path); err != nil { + return xerrors.Errorf("failed to open provided CAR file: %w", err) + } + } + + startTime := time.Now() + + // register a gauge that reports how long since the measurable + // operation began. 
+ promauto.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "lotus_bench_time_taken_secs", + }, func() float64 { + return time.Since(startTime).Seconds() + }) + + defer func() { + end := time.Now().Format(time.RFC3339) + + resp, err := http.Get("http://localhost:6060/debug/metrics") if err != nil { - return err + log.Warnf("failed to scrape prometheus: %s", err) } - defer prof.Close() //nolint:errcheck - if err := pprof.StartCPUProfile(prof); err != nil { - return err + metricsfi, err := os.Create("bench.metrics") + if err != nil { + log.Warnf("failed to write prometheus data: %s", err) } - } + + _, _ = io.Copy(metricsfi, resp.Body) //nolint:errcheck + _ = metricsfi.Close() //nolint:errcheck + + writeProfile := func(name string) { + if file, err := os.Create(fmt.Sprintf("%s.%s.%s.pprof", name, startTime.Format(time.RFC3339), end)); err == nil { + if err := pprof.Lookup(name).WriteTo(file, 0); err != nil { + log.Warnf("failed to write %s pprof: %s", name, err) + } + _ = file.Close() + } else { + log.Warnf("failed to create %s pprof file: %s", name, err) + } + } + + writeProfile("heap") + writeProfile("allocs") + }() var head *types.TipSet + // --- IMPORT --- if !cctx.Bool("no-import") { - head, err = cs.Import(cfi) + if cctx.Bool("global-profile") { + prof, err := os.Create("bench.import.pprof") + if err != nil { + return err + } + defer prof.Close() //nolint:errcheck + + if err := pprof.StartCPUProfile(prof); err != nil { + return err + } + } + + // import is NOT suppressed; do it. + if carFile == nil { // a CAR is compulsory for the import. + return fmt.Errorf("no CAR file provided for import") + } + + head, err = cs.Import(carFile) if err != nil { return err } - } else { - cr, err := car.NewCarReader(cfi) + + pprof.StopCPUProfile() + } + + if cctx.Bool("only-import") { + return nil + } + + // --- VALIDATION --- + // + // we are now preparing for the validation benchmark. 
+ // a HEAD needs to be set; --head takes precedence over the root + // of the CAR, if both are provided. + if h := cctx.String("head"); h != "" { + cids, err := lcli.ParseTipSetString(h) + if err != nil { + return xerrors.Errorf("failed to parse head tipset key: %w", err) + } + + head, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + if err != nil { + return err + } + } else if carFile != nil && head == nil { + cr, err := car.NewCarReader(carFile) if err != nil { return err } @@ -223,59 +364,99 @@ var importBenchCmd = &cli.Command{ if err != nil { return err } + } else if h == "" && carFile == nil { + return xerrors.Errorf("neither --car nor --head flags supplied") } - if cctx.Bool("only-import") { - return nil + log.Infof("chain head is tipset: %s", head.Key()) + + var genesis *types.TipSet + log.Infof("getting genesis block") + if tsk := cctx.String("genesis-tipset"); tsk != "" { + var cids []cid.Cid + if cids, err = lcli.ParseTipSetString(tsk); err != nil { + return xerrors.Errorf("failed to parse genesis tipset key: %w", err) + } + genesis, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + } else { + log.Warnf("getting genesis by height; this will be slow; pass in the genesis tipset through --genesis-tipset") + // fallback to the slow path of walking the chain. + genesis, err = cs.GetTipsetByHeight(context.TODO(), 0, head, true) } - gb, err := cs.GetTipsetByHeight(context.TODO(), 0, head, true) if err != nil { return err } - err = cs.SetGenesis(gb.Blocks()[0]) - if err != nil { + if err = cs.SetGenesis(genesis.Blocks()[0]); err != nil { return err } - startEpoch := abi.ChainEpoch(1) - if cctx.IsSet("start-at") { - startEpoch = abi.ChainEpoch(cctx.Int64("start-at")) - start, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(cctx.Int64("start-at")), head, true) - if err != nil { - return err + // Resolve the end tipset, falling back to head if not provided. 
+ end := head + if tsk := cctx.String("end-tipset"); tsk != "" { + var cids []cid.Cid + if cids, err = lcli.ParseTipSetString(tsk); err != nil { + return xerrors.Errorf("failed to parse end tipset key: %w", err) } + end, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + } else if h := cctx.Int64("end-height"); h != 0 { + log.Infof("getting end tipset at height %d...", h) + end, err = cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), head, true) + } - err = cs.SetHead(start) - if err != nil { - return err + if err != nil { + return err + } + + // Resolve the start tipset, if provided; otherwise, fallback to + // height 1 for a start point. + var ( + startEpoch = abi.ChainEpoch(1) + start *types.TipSet + ) + + if tsk := cctx.String("start-tipset"); tsk != "" { + var cids []cid.Cid + if cids, err = lcli.ParseTipSetString(tsk); err != nil { + return xerrors.Errorf("failed to parse start tipset key: %w", err) } + start, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + } else if h := cctx.Int64("start-height"); h != 0 { + log.Infof("getting start tipset at height %d...", h) + // lookback from the end tipset (which falls back to head if not supplied). 
+ start, err = cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), end, true) } - if h := cctx.Int64("height"); h != 0 { - tsh, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), head, true) - if err != nil { + if err != nil { + return err + } + + if start != nil { + startEpoch = start.Height() + if err := cs.ForceHeadSilent(context.Background(), start); err != nil { + // if err := cs.SetHead(start); err != nil { return err } - head = tsh } - ts := head - tschain := []*types.TipSet{ts} - for ts.Height() > startEpoch { + inverseChain := append(make([]*types.TipSet, 0, end.Height()), end) + for ts := end; ts.Height() > startEpoch; { + if h := ts.Height(); h%100 == 0 { + log.Infof("walking back the chain; loaded tipset at height %d...", h) + } next, err := cs.LoadTipSet(ts.Parents()) if err != nil { return err } - tschain = append(tschain, next) + inverseChain = append(inverseChain, next) ts = next } var enc *json.Encoder if cctx.Bool("export-traces") { - ibj, err := os.Create("import-bench.json") + ibj, err := os.Create("bench.json") if err != nil { return err } @@ -284,8 +465,20 @@ var importBenchCmd = &cli.Command{ enc = json.NewEncoder(ibj) } - for i := len(tschain) - 1; i >= 1; i-- { - cur := tschain[i] + if cctx.Bool("global-profile") { + prof, err := os.Create("bench.validation.pprof") + if err != nil { + return err + } + defer prof.Close() //nolint:errcheck + + if err := pprof.StartCPUProfile(prof); err != nil { + return err + } + } + + for i := len(inverseChain) - 1; i >= 1; i-- { + cur := inverseChain[i] start := time.Now() log.Infof("computing state (height: %d, ts=%s)", cur.Height(), cur.Cids()) st, trace, err := stm.ExecutionTrace(context.TODO(), cur) @@ -304,7 +497,7 @@ var importBenchCmd = &cli.Command{ return xerrors.Errorf("failed to write out tipsetexec: %w", err) } } - if tschain[i-1].ParentState() != st { + if inverseChain[i-1].ParentState() != st { stripCallers(tse.Trace) lastTrace := tse.Trace d, err := 
json.MarshalIndent(lastTrace, "", " ") @@ -320,23 +513,7 @@ var importBenchCmd = &cli.Command{ pprof.StopCPUProfile() - if true { - resp, err := http.Get("http://localhost:6060/debug/metrics/prometheus") - if err != nil { - return err - } - - metricsfi, err := os.Create("import-bench.metrics") - if err != nil { - return err - } - - io.Copy(metricsfi, resp.Body) //nolint:errcheck - metricsfi.Close() //nolint:errcheck - } - return nil - }, } diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index 1af5c1c62f5..0b8ec6fe3fc 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -31,7 +31,7 @@ import ( lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/genesis" ) @@ -39,8 +39,12 @@ import ( var log = logging.Logger("lotus-bench") type BenchResults struct { - SectorSize abi.SectorSize + EnvVar map[string]string + SectorSize abi.SectorSize + SectorNumber int + + SealingSum SealingResult SealingResults []SealingResult PostGenerateCandidates time.Duration @@ -55,6 +59,26 @@ type BenchResults struct { VerifyWindowPostHot time.Duration } +func (bo *BenchResults) SumSealingTime() error { + if len(bo.SealingResults) <= 0 { + return xerrors.Errorf("BenchResults SealingResults len <= 0") + } + if len(bo.SealingResults) != bo.SectorNumber { + return xerrors.Errorf("BenchResults SealingResults len(%d) != bo.SectorNumber(%d)", len(bo.SealingResults), bo.SectorNumber) + } + + for _, sealing := range bo.SealingResults { + bo.SealingSum.AddPiece += sealing.AddPiece + bo.SealingSum.PreCommit1 += sealing.PreCommit1 + bo.SealingSum.PreCommit2 += sealing.PreCommit2 + bo.SealingSum.Commit1 += sealing.Commit1 + bo.SealingSum.Commit2 += sealing.Commit2 + bo.SealingSum.Verify += sealing.Verify + bo.SealingSum.Unseal += 
sealing.Unseal + } + return nil +} + type SealingResult struct { AddPiece time.Duration PreCommit1 time.Duration @@ -94,12 +118,13 @@ func main() { } var sealBenchCmd = &cli.Command{ - Name: "sealing", + Name: "sealing", + Usage: "Benchmark seal and winning post and window post", Flags: []cli.Flag{ &cli.StringFlag{ Name: "storage-dir", Value: "~/.lotus-bench", - Usage: "Path to the storage directory that will store sectors long term", + Usage: "path to the storage directory that will store sectors long term", }, &cli.StringFlag{ Name: "sector-size", @@ -131,22 +156,26 @@ var sealBenchCmd = &cli.Command{ Name: "skip-unseal", Usage: "skip the unseal portion of the benchmark", }, + &cli.StringFlag{ + Name: "ticket-preimage", + Usage: "ticket random", + }, &cli.StringFlag{ Name: "save-commit2-input", - Usage: "Save commit2 input to a file", + Usage: "save commit2 input to a file", }, &cli.IntFlag{ Name: "num-sectors", + Usage: "select number of sectors to seal", Value: 1, }, &cli.IntFlag{ Name: "parallel", + Usage: "num run in parallel", Value: 1, }, }, Action: func(c *cli.Context) error { - policy.AddSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) - if c.Bool("no-gpu") { err := os.Setenv("BELLMAN_NO_GPU", "1") if err != nil { @@ -211,18 +240,10 @@ var sealBenchCmd = &cli.Command{ } sectorSize := abi.SectorSize(sectorSizeInt) - spt, err := ffiwrapper.SealProofTypeFromSectorSize(sectorSize) - if err != nil { - return err - } - - cfg := &ffiwrapper.Config{ - SealProofType: spt, - } - // Only fetch parameters if actually needed - if !c.Bool("skip-commit2") { - if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), uint64(sectorSize)); err != nil { + skipc2 := c.Bool("skip-commit2") + if !skipc2 { + if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), build.SrsJSON(), uint64(sectorSize)); err != nil { return xerrors.Errorf("getting params: %w", err) } } @@ -231,11 +252,13 @@ var sealBenchCmd = &cli.Command{ Root: 
sbdir, } - sb, err := ffiwrapper.New(sbfs, cfg) + sb, err := ffiwrapper.New(sbfs) if err != nil { return err } + sectorNumber := c.Int("num-sectors") + var sealTimings []SealingResult var sealedSectors []saproof2.SectorInfo @@ -246,18 +269,11 @@ var sealBenchCmd = &cli.Command{ PreCommit2: 1, Commit: 1, } - sealTimings, sealedSectors, err = runSeals(sb, sbfs, c.Int("num-sectors"), parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), c.Bool("skip-commit2"), c.Bool("skip-unseal")) + sealTimings, sealedSectors, err = runSeals(sb, sbfs, sectorNumber, parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), skipc2, c.Bool("skip-unseal")) if err != nil { return xerrors.Errorf("failed to run seals: %w", err) } - } - - beforePost := time.Now() - - var challenge [32]byte - rand.Read(challenge[:]) - - if robench != "" { + } else { // TODO: implement sbfs.List() and use that for all cases (preexisting sectorbuilder or not) // TODO: this assumes we only ever benchmark a preseal @@ -290,12 +306,21 @@ var sealBenchCmd = &cli.Command{ bo := BenchResults{ SectorSize: sectorSize, + SectorNumber: sectorNumber, SealingResults: sealTimings, } + if err := bo.SumSealingTime(); err != nil { + return err + } - if !c.Bool("skip-commit2") { + var challenge [32]byte + rand.Read(challenge[:]) + + beforePost := time.Now() + + if !skipc2 { log.Info("generating winning post candidates") - wipt, err := spt.RegisteredWinningPoStProof() + wipt, err := spt(sectorSize).RegisteredWinningPoStProof() if err != nil { return err } @@ -420,6 +445,15 @@ var sealBenchCmd = &cli.Command{ bo.VerifyWindowPostHot = verifyWindowpost2.Sub(verifyWindowpost1) } + bo.EnvVar = make(map[string]string) + for _, envKey := range []string{"BELLMAN_NO_GPU", "FIL_PROOFS_MAXIMIZE_CACHING", "FIL_PROOFS_USE_GPU_COLUMN_BUILDER", + "FIL_PROOFS_USE_GPU_TREE_BUILDER", "FIL_PROOFS_USE_MULTICORE_SDR", "BELLMAN_CUSTOM_GPU"} { + envValue, found := 
os.LookupEnv(envKey) + if found { + bo.EnvVar[envKey] = envValue + } + } + if c.Bool("json-out") { data, err := json.MarshalIndent(bo, "", " ") if err != nil { @@ -428,21 +462,25 @@ var sealBenchCmd = &cli.Command{ fmt.Println(string(data)) } else { - fmt.Printf("----\nresults (v27) (%d)\n", sectorSize) + fmt.Println("environment variable list:") + for envKey, envValue := range bo.EnvVar { + fmt.Printf("%s=%s\n", envKey, envValue) + } + fmt.Printf("----\nresults (v28) SectorSize:(%d), SectorNumber:(%d)\n", sectorSize, sectorNumber) if robench == "" { - fmt.Printf("seal: addPiece: %s (%s)\n", bo.SealingResults[0].AddPiece, bps(bo.SectorSize, bo.SealingResults[0].AddPiece)) // TODO: average across multiple sealings - fmt.Printf("seal: preCommit phase 1: %s (%s)\n", bo.SealingResults[0].PreCommit1, bps(bo.SectorSize, bo.SealingResults[0].PreCommit1)) - fmt.Printf("seal: preCommit phase 2: %s (%s)\n", bo.SealingResults[0].PreCommit2, bps(bo.SectorSize, bo.SealingResults[0].PreCommit2)) - fmt.Printf("seal: commit phase 1: %s (%s)\n", bo.SealingResults[0].Commit1, bps(bo.SectorSize, bo.SealingResults[0].Commit1)) - fmt.Printf("seal: commit phase 2: %s (%s)\n", bo.SealingResults[0].Commit2, bps(bo.SectorSize, bo.SealingResults[0].Commit2)) - fmt.Printf("seal: verify: %s\n", bo.SealingResults[0].Verify) + fmt.Printf("seal: addPiece: %s (%s)\n", bo.SealingSum.AddPiece, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.AddPiece)) + fmt.Printf("seal: preCommit phase 1: %s (%s)\n", bo.SealingSum.PreCommit1, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.PreCommit1)) + fmt.Printf("seal: preCommit phase 2: %s (%s)\n", bo.SealingSum.PreCommit2, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.PreCommit2)) + fmt.Printf("seal: commit phase 1: %s (%s)\n", bo.SealingSum.Commit1, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.Commit1)) + fmt.Printf("seal: commit phase 2: %s (%s)\n", bo.SealingSum.Commit2, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.Commit2)) + 
fmt.Printf("seal: verify: %s\n", bo.SealingSum.Verify) if !c.Bool("skip-unseal") { - fmt.Printf("unseal: %s (%s)\n", bo.SealingResults[0].Unseal, bps(bo.SectorSize, bo.SealingResults[0].Unseal)) + fmt.Printf("unseal: %s (%s)\n", bo.SealingSum.Unseal, bps(bo.SectorSize, bo.SectorNumber, bo.SealingSum.Unseal)) } fmt.Println("") } - if !c.Bool("skip-commit2") { - fmt.Printf("generate candidates: %s (%s)\n", bo.PostGenerateCandidates, bps(bo.SectorSize*abi.SectorSize(len(bo.SealingResults)), bo.PostGenerateCandidates)) + if !skipc2 { + fmt.Printf("generate candidates: %s (%s)\n", bo.PostGenerateCandidates, bps(bo.SectorSize, len(bo.SealingResults), bo.PostGenerateCandidates)) fmt.Printf("compute winning post proof (cold): %s\n", bo.PostWinningProofCold) fmt.Printf("compute winning post proof (hot): %s\n", bo.PostWinningProofHot) fmt.Printf("verify winning post proof (cold): %s\n", bo.VerifyWinningPostCold) @@ -475,11 +513,13 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par if numSectors%par.PreCommit1 != 0 { return nil, nil, fmt.Errorf("parallelism factor must cleanly divide numSectors") } - - for i := abi.SectorNumber(1); i <= abi.SectorNumber(numSectors); i++ { - sid := abi.SectorID{ - Miner: mid, - Number: i, + for i := abi.SectorNumber(0); i < abi.SectorNumber(numSectors); i++ { + sid := storage.SectorRef{ + ID: abi.SectorID{ + Miner: mid, + Number: i, + }, + ProofType: spt(sectorSize), } start := time.Now() @@ -494,7 +534,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par pieces = append(pieces, pi) - sealTimings[i-1].AddPiece = time.Since(start) + sealTimings[i].AddPiece = time.Since(start) } sectorsPerWorker := numSectors / par.PreCommit1 @@ -503,13 +543,15 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par for wid := 0; wid < par.PreCommit1; wid++ { go func(worker int) { sealerr := func() error { - start := 1 + (worker * sectorsPerWorker) + start := worker * 
sectorsPerWorker end := start + sectorsPerWorker for i := abi.SectorNumber(start); i < abi.SectorNumber(end); i++ { - ix := int(i - 1) - sid := abi.SectorID{ - Miner: mid, - Number: i, + sid := storage.SectorRef{ + ID: abi.SectorID{ + Miner: mid, + Number: i, + }, + ProofType: spt(sectorSize), } start := time.Now() @@ -518,8 +560,8 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ticket := abi.SealRandomness(trand[:]) log.Infof("[%d] Running replication(1)...", i) - pieces := []abi.PieceInfo{pieces[ix]} - pc1o, err := sb.SealPreCommit1(context.TODO(), sid, ticket, pieces) + piece := []abi.PieceInfo{pieces[i]} + pc1o, err := sb.SealPreCommit1(context.TODO(), sid, ticket, piece) if err != nil { return xerrors.Errorf("commit: %w", err) } @@ -537,8 +579,8 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par precommit2 := time.Now() <-preCommit2Sema - sealedSectors[ix] = saproof2.SectorInfo{ - SealProof: sb.SealProofType(), + sealedSectors[i] = saproof2.SectorInfo{ + SealProof: sid.ProofType, SectorNumber: i, SealedCID: cids.Sealed, } @@ -551,7 +593,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par commitSema <- struct{}{} commitStart := time.Now() log.Infof("[%d] Generating PoRep for sector (1)", i) - c1o, err := sb.SealCommit1(context.TODO(), sid, ticket, seed.Value, pieces, cids) + c1o, err := sb.SealCommit1(context.TODO(), sid, ticket, seed.Value, piece, cids) if err != nil { return err } @@ -592,7 +634,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par svi := saproof2.SealVerifyInfo{ SectorID: abi.SectorID{Miner: mid, Number: i}, SealedCID: cids.Sealed, - SealProof: sb.SealProofType(), + SealProof: sid.ProofType, Proof: proof, DealIDs: nil, Randomness: ticket, @@ -614,7 +656,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par if !skipunseal { log.Infof("[%d] Unsealing sector", i) { - p, done, err 
:= sbfs.AcquireSector(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing) + p, done, err := sbfs.AcquireSector(context.TODO(), sid, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing) if err != nil { return xerrors.Errorf("acquire unsealed sector for removing: %w", err) } @@ -625,19 +667,19 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par } } - err := sb.UnsealPiece(context.TODO(), abi.SectorID{Miner: mid, Number: 1}, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), ticket, cids.Unsealed) + err := sb.UnsealPiece(context.TODO(), sid, 0, abi.PaddedPieceSize(sectorSize).Unpadded(), ticket, cids.Unsealed) if err != nil { return err } } unseal := time.Now() - sealTimings[ix].PreCommit1 = precommit1.Sub(start) - sealTimings[ix].PreCommit2 = precommit2.Sub(pc2Start) - sealTimings[ix].Commit1 = sealcommit1.Sub(commitStart) - sealTimings[ix].Commit2 = sealcommit2.Sub(sealcommit1) - sealTimings[ix].Verify = verifySeal.Sub(sealcommit2) - sealTimings[ix].Unseal = unseal.Sub(verifySeal) + sealTimings[i].PreCommit1 = precommit1.Sub(start) + sealTimings[i].PreCommit2 = precommit2.Sub(pc2Start) + sealTimings[i].Commit1 = sealcommit1.Sub(commitStart) + sealTimings[i].Commit2 = sealcommit2.Sub(sealcommit1) + sealTimings[i].Verify = verifySeal.Sub(sealcommit2) + sealTimings[i].Unseal = unseal.Sub(verifySeal) } return nil }() @@ -660,8 +702,9 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par } var proveCmd = &cli.Command{ - Name: "prove", - Usage: "Benchmark a proof computation", + Name: "prove", + Usage: "Benchmark a proof computation", + ArgsUsage: "[input.json]", Flags: []cli.Flag{ &cli.BoolFlag{ Name: "no-gpu", @@ -695,7 +738,7 @@ var proveCmd = &cli.Command{ return xerrors.Errorf("unmarshalling input file: %w", err) } - if err := paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), c2in.SectorSize); err != nil { + if err := 
paramfetch.GetParams(lcli.ReqContext(c), build.ParametersJSON(), build.SrsJSON(), c2in.SectorSize); err != nil { return xerrors.Errorf("getting params: %w", err) } @@ -708,23 +751,23 @@ var proveCmd = &cli.Command{ return err } - spt, err := ffiwrapper.SealProofTypeFromSectorSize(abi.SectorSize(c2in.SectorSize)) + sb, err := ffiwrapper.New(nil) if err != nil { return err } - cfg := &ffiwrapper.Config{ - SealProofType: spt, - } - - sb, err := ffiwrapper.New(nil, cfg) - if err != nil { - return err + ref := storage.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: abi.SectorNumber(c2in.SectorNum), + }, + ProofType: spt(abi.SectorSize(c2in.SectorSize)), } + fmt.Printf("----\nstart proof computation\n") start := time.Now() - proof, err := sb.SealCommit2(context.TODO(), abi.SectorID{Miner: abi.ActorID(mid), Number: abi.SectorNumber(c2in.SectorNum)}, c2in.Phase1Out) + proof, err := sb.SealCommit2(context.TODO(), ref, c2in.Phase1Out) if err != nil { return err } @@ -733,17 +776,27 @@ var proveCmd = &cli.Command{ fmt.Printf("proof: %x\n", proof) - fmt.Printf("----\nresults (v27) (%d)\n", c2in.SectorSize) + fmt.Printf("----\nresults (v28) (%d)\n", c2in.SectorSize) dur := sealCommit2.Sub(start) - fmt.Printf("seal: commit phase 2: %s (%s)\n", dur, bps(abi.SectorSize(c2in.SectorSize), dur)) + fmt.Printf("seal: commit phase 2: %s (%s)\n", dur, bps(abi.SectorSize(c2in.SectorSize), 1, dur)) return nil }, } -func bps(data abi.SectorSize, d time.Duration) string { - bdata := new(big.Int).SetUint64(uint64(data)) +func bps(sectorSize abi.SectorSize, sectorNum int, d time.Duration) string { + bdata := new(big.Int).SetUint64(uint64(sectorSize)) + bdata = bdata.Mul(bdata, big.NewInt(int64(sectorNum))) bdata = bdata.Mul(bdata, big.NewInt(time.Second.Nanoseconds())) bps := bdata.Div(bdata, big.NewInt(d.Nanoseconds())) return types.SizeStr(types.BigInt{Int: bps}) + "/s" } + +func spt(ssize abi.SectorSize) abi.RegisteredSealProof { + spt, err := 
miner.SealProofTypeFromSectorSize(ssize, build.NewestNetworkVersion) + if err != nil { + panic(err) + } + + return spt +} diff --git a/cmd/lotus-chainwatch/processor/miner.go b/cmd/lotus-chainwatch/processor/miner.go index 3a37a82f800..f3514df88ce 100644 --- a/cmd/lotus-chainwatch/processor/miner.go +++ b/cmd/lotus-chainwatch/processor/miner.go @@ -14,8 +14,8 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/events/state" @@ -202,7 +202,7 @@ func (p *Processor) processMiners(ctx context.Context, minerTips map[types.TipSe log.Debugw("Processed Miners", "duration", time.Since(start).String()) }() - stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(p.node)) + stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(p.node)) var out []minerActorInfo // TODO add parallel calls if this becomes slow @@ -649,7 +649,7 @@ func (p *Processor) getMinerStateAt(ctx context.Context, maddr address.Address, if err != nil { return nil, err } - return miner.Load(store.ActorStore(ctx, apibstore.NewAPIBlockstore(p.node)), prevActor) + return miner.Load(store.ActorStore(ctx, blockstore.NewAPIBlockstore(p.node)), prevActor) } func (p *Processor) getMinerPreCommitChanges(ctx context.Context, m minerActorInfo) (*miner.PreCommitChanges, error) { @@ -1026,7 +1026,7 @@ func (p *Processor) storeMinersPower(miners []minerActorInfo) error { } // load the power actor state clam as an adt.Map at the tipset `ts`. 
-func getPowerActorState(ctx context.Context, api api.FullNode, ts types.TipSetKey) (power.State, error) { +func getPowerActorState(ctx context.Context, api v0api.FullNode, ts types.TipSetKey) (power.State, error) { powerActor, err := api.StateGetActor(ctx, power.Address, ts) if err != nil { return nil, err diff --git a/cmd/lotus-chainwatch/processor/processor.go b/cmd/lotus-chainwatch/processor/processor.go index 1f8b246ed29..af5935d4795 100644 --- a/cmd/lotus-chainwatch/processor/processor.go +++ b/cmd/lotus-chainwatch/processor/processor.go @@ -17,7 +17,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/chain/types" cw_util "github.com/filecoin-project/lotus/cmd/lotus-chainwatch/util" "github.com/filecoin-project/lotus/lib/parmap" @@ -28,7 +28,7 @@ var log = logging.Logger("processor") type Processor struct { db *sql.DB - node api.FullNode + node v0api.FullNode ctxStore *cw_util.APIIpldStore genesisTs *types.TipSet @@ -52,7 +52,7 @@ type actorInfo struct { state string } -func NewProcessor(ctx context.Context, db *sql.DB, node api.FullNode, batch int) *Processor { +func NewProcessor(ctx context.Context, db *sql.DB, node v0api.FullNode, batch int) *Processor { ctxStore := cw_util.NewAPIIpldStore(ctx, node) return &Processor{ db: db, @@ -146,7 +146,7 @@ func (p *Processor) Start(ctx context.Context) { go func() { defer grp.Done() if err := p.HandleMarketChanges(ctx, actorChanges[builtin2.StorageMarketActorCodeID]); err != nil { - log.Errorf("Failed to handle market changes: %w", err) + log.Errorf("Failed to handle market changes: %v", err) return } }() @@ -155,7 +155,7 @@ func (p *Processor) Start(ctx context.Context) { go func() { defer grp.Done() if err := p.HandleMinerChanges(ctx, actorChanges[builtin2.StorageMinerActorCodeID]); err != nil { - 
log.Errorf("Failed to handle miner changes: %w", err) + log.Errorf("Failed to handle miner changes: %v", err) return } }() @@ -164,7 +164,7 @@ func (p *Processor) Start(ctx context.Context) { go func() { defer grp.Done() if err := p.HandleRewardChanges(ctx, actorChanges[builtin2.RewardActorCodeID], nullRounds); err != nil { - log.Errorf("Failed to handle reward changes: %w", err) + log.Errorf("Failed to handle reward changes: %v", err) return } }() @@ -173,7 +173,7 @@ func (p *Processor) Start(ctx context.Context) { go func() { defer grp.Done() if err := p.HandlePowerChanges(ctx, actorChanges[builtin2.StoragePowerActorCodeID]); err != nil { - log.Errorf("Failed to handle power actor changes: %w", err) + log.Errorf("Failed to handle power actor changes: %v", err) return } }() @@ -182,7 +182,7 @@ func (p *Processor) Start(ctx context.Context) { go func() { defer grp.Done() if err := p.HandleMessageChanges(ctx, toProcess); err != nil { - log.Errorf("Failed to handle message changes: %w", err) + log.Errorf("Failed to handle message changes: %v", err) return } }() @@ -191,7 +191,7 @@ func (p *Processor) Start(ctx context.Context) { go func() { defer grp.Done() if err := p.HandleCommonActorsChanges(ctx, actorChanges); err != nil { - log.Errorf("Failed to handle common actor changes: %w", err) + log.Errorf("Failed to handle common actor changes: %v", err) return } }() diff --git a/cmd/lotus-chainwatch/run.go b/cmd/lotus-chainwatch/run.go index 64f242755ab..6e47a100d79 100644 --- a/cmd/lotus-chainwatch/run.go +++ b/cmd/lotus-chainwatch/run.go @@ -8,6 +8,8 @@ import ( "os" "strings" + "github.com/filecoin-project/lotus/api/v0api" + _ "github.com/lib/pq" "github.com/filecoin-project/go-jsonrpc" @@ -15,7 +17,6 @@ import ( "github.com/urfave/cli/v2" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/api" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/cmd/lotus-chainwatch/processor" 
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/scheduler" @@ -44,7 +45,7 @@ var runCmd = &cli.Command{ return err } - var api api.FullNode + var api v0api.FullNode var closer jsonrpc.ClientCloser var err error if tokenMaddr := cctx.String("api"); tokenMaddr != "" { diff --git a/cmd/lotus-chainwatch/syncer/sync.go b/cmd/lotus-chainwatch/syncer/sync.go index 37af9cce08c..b5e9c73d6f4 100644 --- a/cmd/lotus-chainwatch/syncer/sync.go +++ b/cmd/lotus-chainwatch/syncer/sync.go @@ -13,7 +13,7 @@ import ( "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" - "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" ) @@ -26,10 +26,10 @@ type Syncer struct { lookbackLimit uint64 headerLk sync.Mutex - node api.FullNode + node v0api.FullNode } -func NewSyncer(db *sql.DB, node api.FullNode, lookbackLimit uint64) *Syncer { +func NewSyncer(db *sql.DB, node v0api.FullNode, lookbackLimit uint64) *Syncer { return &Syncer{ db: db, node: node, diff --git a/cmd/lotus-chainwatch/util/api.go b/cmd/lotus-chainwatch/util/api.go index cfda833e023..f8f22cbbf67 100644 --- a/cmd/lotus-chainwatch/util/api.go +++ b/cmd/lotus-chainwatch/util/api.go @@ -5,13 +5,13 @@ import ( "net/http" "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/client" + "github.com/filecoin-project/lotus/api/v0api" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" ) -func GetFullNodeAPIUsingCredentials(ctx context.Context, listenAddr, token string) (api.FullNode, jsonrpc.ClientCloser, error) { +func GetFullNodeAPIUsingCredentials(ctx context.Context, listenAddr, token string) (v0api.FullNode, jsonrpc.ClientCloser, error) { parsedAddr, err := ma.NewMultiaddr(listenAddr) if err != nil { return nil, nil, err @@ -22,7 +22,7 @@ func GetFullNodeAPIUsingCredentials(ctx 
context.Context, listenAddr, token strin return nil, nil, err } - return client.NewFullNodeRPC(ctx, apiURI(addr), apiHeaders(token)) + return client.NewFullNodeRPCV0(ctx, apiURI(addr), apiHeaders(token)) } func apiURI(addr string) string { return "ws://" + addr + "/rpc/v0" diff --git a/cmd/lotus-chainwatch/util/contextStore.go b/cmd/lotus-chainwatch/util/contextStore.go index bd812581b13..c93f87f9b66 100644 --- a/cmd/lotus-chainwatch/util/contextStore.go +++ b/cmd/lotus-chainwatch/util/contextStore.go @@ -8,7 +8,7 @@ import ( "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" - "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" ) // TODO extract this to a common location in lotus and reuse the code @@ -16,10 +16,10 @@ import ( // APIIpldStore is required for AMT and HAMT access. type APIIpldStore struct { ctx context.Context - api api.FullNode + api v0api.FullNode } -func NewAPIIpldStore(ctx context.Context, api api.FullNode) *APIIpldStore { +func NewAPIIpldStore(ctx context.Context, api v0api.FullNode) *APIIpldStore { return &APIIpldStore{ ctx: ctx, api: api, diff --git a/cmd/lotus-fountain/main.go b/cmd/lotus-fountain/main.go index ea7190e8364..7ac598d8e9a 100644 --- a/cmd/lotus-fountain/main.go +++ b/cmd/lotus-fountain/main.go @@ -3,6 +3,7 @@ package main import ( "context" "fmt" + "html/template" "net" "net/http" "os" @@ -14,7 +15,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" @@ -68,6 +69,10 @@ var runCmd = &cli.Command{ EnvVars: []string{"LOTUS_FOUNTAIN_AMOUNT"}, Value: "50", }, + &cli.Float64Flag{ + Name: "captcha-threshold", + Value: 0.5, + }, }, Action: func(cctx *cli.Context) error { sendPerRequest, err := types.ParseFIL(cctx.String("amount")) @@ -87,7 
+92,7 @@ var runCmd = &cli.Command{ return err } - log.Info("Remote version: %s", v.Version) + log.Infof("Remote version: %s", v.Version) from, err := address.NewFromString(cctx.String("from")) if err != nil { @@ -107,11 +112,13 @@ var runCmd = &cli.Command{ WalletRate: 15 * time.Minute, WalletBurst: 2, }), + recapThreshold: cctx.Float64("captcha-threshold"), } - http.Handle("/", http.FileServer(rice.MustFindBox("site").HTTPBox())) - http.HandleFunc("/send", h.send) - + box := rice.MustFindBox("site") + http.Handle("/", http.FileServer(box.HTTPBox())) + http.HandleFunc("/funds.html", prepFundsHtml(box)) + http.Handle("/send", h) fmt.Printf("Open http://%s\n", cctx.String("front")) go func() { @@ -123,22 +130,63 @@ var runCmd = &cli.Command{ }, } +func prepFundsHtml(box *rice.Box) http.HandlerFunc { + tmpl := template.Must(template.New("funds").Parse(box.MustString("funds.html"))) + return func(w http.ResponseWriter, r *http.Request) { + err := tmpl.Execute(w, os.Getenv("RECAPTCHA_SITE_KEY")) + if err != nil { + http.Error(w, err.Error(), http.StatusBadGateway) + return + } + } +} + type handler struct { ctx context.Context - api api.FullNode + api v0api.FullNode from address.Address sendPerRequest types.FIL - limiter *Limiter + limiter *Limiter + recapThreshold float64 } -func (h *handler) send(w http.ResponseWriter, r *http.Request) { +func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "only POST is allowed", http.StatusBadRequest) + return + } + + reqIP := r.Header.Get("X-Real-IP") + if reqIP == "" { + h, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + log.Errorf("could not get ip from: %s, err: %s", r.RemoteAddr, err) + } + reqIP = h + } + + capResp, err := VerifyToken(r.FormValue("g-recaptcha-response"), reqIP) + if err != nil { + http.Error(w, err.Error(), http.StatusBadGateway) + return + } + if !capResp.Success || capResp.Score < h.recapThreshold { + log.Infow("spam", 
"capResp", capResp) + http.Error(w, "spam protection", http.StatusUnprocessableEntity) + return + } + to, err := address.NewFromString(r.FormValue("address")) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } + if to == address.Undef { + http.Error(w, "empty address", http.StatusBadRequest) + return + } // Limit based on wallet address limiter := h.limiter.GetWalletLimiter(to.String()) @@ -148,15 +196,6 @@ func (h *handler) send(w http.ResponseWriter, r *http.Request) { } // Limit based on IP - - reqIP := r.Header.Get("X-Real-IP") - if reqIP == "" { - h, _, err := net.SplitHostPort(r.RemoteAddr) - if err != nil { - log.Errorf("could not get ip from: %s, err: %s", r.RemoteAddr, err) - } - reqIP = h - } if i := net.ParseIP(reqIP); i != nil && i.IsLoopback() { log.Errorf("rate limiting localhost: %s", reqIP) } diff --git a/cmd/lotus-fountain/recaptcha.go b/cmd/lotus-fountain/recaptcha.go new file mode 100644 index 00000000000..69359faa3bc --- /dev/null +++ b/cmd/lotus-fountain/recaptcha.go @@ -0,0 +1,73 @@ +// From https://github.com/lukasaron/recaptcha +// BLS-3 Licensed +// Copyright (c) 2020, Lukas Aron +// Modified by Kubuxu +package main + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "os" + "time" +) + +// content type for communication with the verification server. +const ( + contentType = "application/json" +) + +// VerifyURL defines the endpoint which is called when a token needs to be verified. +var ( + VerifyURL, _ = url.Parse("https://www.google.com/recaptcha/api/siteverify") +) + +// Response defines the response format from the verification endpoint. 
+type Response struct { + Success bool `json:"success"` // status of the verification + TimeStamp time.Time `json:"challenge_ts"` // timestamp of the challenge load (ISO format) + HostName string `json:"hostname"` // the hostname of the site where the reCAPTCHA was solved + Score float64 `json:"score"` // the score for this request (0.0 - 1.0) + Action string `json:"action"` // the action name for this request + ErrorCodes []string `json:"error-codes"` // error codes + AndroidPackageName string `json:"apk_package_name"` // android related only +} + +// VerifyToken function implements the basic logic of verification of ReCaptcha token that is usually created +// on the user site (front-end) and then sent to verify on the server side (back-end). +// To provide a successful verification process the secret key is required. Based on the security recommendations +// the key has to be passed as an environmental variable SECRET_KEY. +// +// Token parameter is required, however remoteIP is optional. +func VerifyToken(token, remoteIP string) (Response, error) { + resp := Response{} + if len(token) == 0 { + resp.ErrorCodes = []string{"no-token"} + return resp, nil + } + + q := url.Values{} + q.Add("secret", os.Getenv("RECAPTCHA_SECRET_KEY")) + q.Add("response", token) + q.Add("remoteip", remoteIP) + + var u *url.URL + { + verifyCopy := *VerifyURL + u = &verifyCopy + } + u.RawQuery = q.Encode() + r, err := http.Post(u.String(), contentType, nil) + if err != nil { + return resp, err + } + + b, err := ioutil.ReadAll(r.Body) + _ = r.Body.Close() // close immediately after reading finished + if err != nil { + return resp, err + } + + return resp, json.Unmarshal(b, &resp) +} diff --git a/cmd/lotus-fountain/site/funds.html b/cmd/lotus-fountain/site/funds.html index cd26032f3a4..c6916239fcd 100644 --- a/cmd/lotus-fountain/site/funds.html +++ b/cmd/lotus-fountain/site/funds.html @@ -3,6 +3,13 @@ Sending Funds - Lotus Fountain + + +
@@ -11,10 +18,13 @@ [SENDING FUNDS]
-
+ Enter destination address: - - + +
diff --git a/cmd/lotus-gateway/api.go b/cmd/lotus-gateway/api.go deleted file mode 100644 index 875eaac7d6c..00000000000 --- a/cmd/lotus-gateway/api.go +++ /dev/null @@ -1,390 +0,0 @@ -package main - -import ( - "context" - "fmt" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/sigs" - _ "github.com/filecoin-project/lotus/lib/sigs/bls" - _ "github.com/filecoin-project/lotus/lib/sigs/secp" - "github.com/filecoin-project/lotus/node/impl/full" - "github.com/ipfs/go-cid" -) - -const ( - LookbackCap = time.Hour * 24 - StateWaitLookbackLimit = abi.ChainEpoch(20) -) - -var ( - ErrLookbackTooLong = fmt.Errorf("lookbacks of more than %s are disallowed", LookbackCap) -) - -// gatewayDepsAPI defines the API methods that the GatewayAPI depends on -// (to make it easy to mock for tests) -type gatewayDepsAPI interface { - Version(context.Context) (api.Version, error) - ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) - ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) - ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) - ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) - ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) - ChainHasObj(context.Context, cid.Cid) (bool, error) - ChainHead(ctx context.Context) (*types.TipSet, error) - ChainNotify(context.Context) (<-chan []*api.HeadChange, error) - ChainReadObj(context.Context, cid.Cid) 
([]byte, error) - GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) - MpoolPushUntrusted(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) - MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) - MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) - StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) - StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) - StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) - StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) - StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) - StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) - StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) - StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) - StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) - StateWaitMsgLimited(ctx context.Context, msg cid.Cid, confidence uint64, h abi.ChainEpoch) (*api.MsgLookup, error) - StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) - StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) - StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) - StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) - StateMinerInfo(context.Context, address.Address, 
types.TipSetKey) (miner.MinerInfo, error) - StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) - StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) - StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) - StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) - StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) - StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (api.CirculatingSupply, error) -} - -type GatewayAPI struct { - api gatewayDepsAPI - lookbackCap time.Duration - stateWaitLookbackLimit abi.ChainEpoch -} - -// NewGatewayAPI creates a new GatewayAPI with the default lookback cap -func NewGatewayAPI(api gatewayDepsAPI) *GatewayAPI { - return newGatewayAPI(api, LookbackCap, StateWaitLookbackLimit) -} - -// used by the tests -func newGatewayAPI(api gatewayDepsAPI, lookbackCap time.Duration, stateWaitLookbackLimit abi.ChainEpoch) *GatewayAPI { - return &GatewayAPI{api: api, lookbackCap: lookbackCap, stateWaitLookbackLimit: stateWaitLookbackLimit} -} - -func (a *GatewayAPI) checkTipsetKey(ctx context.Context, tsk types.TipSetKey) error { - if tsk.IsEmpty() { - return nil - } - - ts, err := a.api.ChainGetTipSet(ctx, tsk) - if err != nil { - return err - } - - return a.checkTipset(ts) -} - -func (a *GatewayAPI) checkTipset(ts *types.TipSet) error { - at := time.Unix(int64(ts.Blocks()[0].Timestamp), 0) - if err := a.checkTimestamp(at); err != nil { - return fmt.Errorf("bad tipset: %w", err) - } - return nil -} - -func (a *GatewayAPI) checkTipsetHeight(ts *types.TipSet, h abi.ChainEpoch) error { - tsBlock := ts.Blocks()[0] - heightDelta := time.Duration(uint64(tsBlock.Height-h)*build.BlockDelaySecs) * time.Second - timeAtHeight := time.Unix(int64(tsBlock.Timestamp), 0).Add(-heightDelta) - - if err := 
a.checkTimestamp(timeAtHeight); err != nil { - return fmt.Errorf("bad tipset height: %w", err) - } - return nil -} - -func (a *GatewayAPI) checkTimestamp(at time.Time) error { - if time.Since(at) > a.lookbackCap { - return ErrLookbackTooLong - } - - return nil -} - -func (a *GatewayAPI) Version(ctx context.Context) (api.Version, error) { - return a.api.Version(ctx) -} - -func (a *GatewayAPI) ChainGetBlockMessages(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) { - return a.api.ChainGetBlockMessages(ctx, c) -} - -func (a *GatewayAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { - return a.api.ChainHasObj(ctx, c) -} - -func (a *GatewayAPI) ChainHead(ctx context.Context) (*types.TipSet, error) { - // TODO: cache and invalidate cache when timestamp is up (or have internal ChainNotify) - - return a.api.ChainHead(ctx) -} - -func (a *GatewayAPI) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { - return a.api.ChainGetMessage(ctx, mc) -} - -func (a *GatewayAPI) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { - return a.api.ChainGetTipSet(ctx, tsk) -} - -func (a *GatewayAPI) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { - var ts *types.TipSet - if tsk.IsEmpty() { - head, err := a.api.ChainHead(ctx) - if err != nil { - return nil, err - } - ts = head - } else { - gts, err := a.api.ChainGetTipSet(ctx, tsk) - if err != nil { - return nil, err - } - ts = gts - } - - // Check if the tipset key refers to a tipset that's too far in the past - if err := a.checkTipset(ts); err != nil { - return nil, err - } - - // Check if the height is too far in the past - if err := a.checkTipsetHeight(ts, h); err != nil { - return nil, err - } - - return a.api.ChainGetTipSetByHeight(ctx, h, tsk) -} - -func (a *GatewayAPI) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) { - return a.api.ChainGetNode(ctx, p) -} - -func (a 
*GatewayAPI) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { - return a.api.ChainNotify(ctx) -} - -func (a *GatewayAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { - return a.api.ChainReadObj(ctx, c) -} - -func (a *GatewayAPI) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - - return a.api.GasEstimateMessageGas(ctx, msg, spec, tsk) -} - -func (a *GatewayAPI) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) { - // TODO: additional anti-spam checks - return a.api.MpoolPushUntrusted(ctx, sm) -} - -func (a *GatewayAPI) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return types.NewInt(0), err - } - - return a.api.MsigGetAvailableBalance(ctx, addr, tsk) -} - -func (a *GatewayAPI) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) { - if err := a.checkTipsetKey(ctx, start); err != nil { - return types.NewInt(0), err - } - if err := a.checkTipsetKey(ctx, end); err != nil { - return types.NewInt(0), err - } - - return a.api.MsigGetVested(ctx, addr, start, end) -} - -func (a *GatewayAPI) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return address.Undef, err - } - - return a.api.StateAccountKey(ctx, addr, tsk) -} - -func (a *GatewayAPI) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return api.DealCollateralBounds{}, err - } - - return a.api.StateDealProviderCollateralBounds(ctx, size, 
verified, tsk) -} - -func (a *GatewayAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - - return a.api.StateGetActor(ctx, actor, tsk) -} - -func (a *GatewayAPI) StateGetReceipt(ctx context.Context, c cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - - return a.api.StateGetReceipt(ctx, c, tsk) -} - -func (a *GatewayAPI) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - - return a.api.StateListMiners(ctx, tsk) -} - -func (a *GatewayAPI) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return address.Undef, err - } - - return a.api.StateLookupID(ctx, addr, tsk) -} - -func (a *GatewayAPI) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return api.MarketBalance{}, err - } - - return a.api.StateMarketBalance(ctx, addr, tsk) -} - -func (a *GatewayAPI) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - - return a.api.StateMarketStorageDeal(ctx, dealId, tsk) -} - -func (a *GatewayAPI) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return network.VersionMax, err - } - - return a.api.StateNetworkVersion(ctx, tsk) -} - -func (a *GatewayAPI) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) { - return a.api.StateWaitMsgLimited(ctx, msg, confidence, 
a.stateWaitLookbackLimit) -} - -func (a *GatewayAPI) StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - return a.api.StateReadState(ctx, actor, tsk) -} - -func (a *GatewayAPI) StateMinerPower(ctx context.Context, m address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - return a.api.StateMinerPower(ctx, m, tsk) -} - -func (a *GatewayAPI) StateMinerFaults(ctx context.Context, m address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return bitfield.BitField{}, err - } - return a.api.StateMinerFaults(ctx, m, tsk) -} -func (a *GatewayAPI) StateMinerRecoveries(ctx context.Context, m address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return bitfield.BitField{}, err - } - return a.api.StateMinerRecoveries(ctx, m, tsk) -} - -func (a *GatewayAPI) StateMinerInfo(ctx context.Context, m address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return miner.MinerInfo{}, err - } - return a.api.StateMinerInfo(ctx, m, tsk) -} - -func (a *GatewayAPI) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]api.Deadline, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - return a.api.StateMinerDeadlines(ctx, m, tsk) -} - -func (a *GatewayAPI) StateMinerAvailableBalance(ctx context.Context, m address.Address, tsk types.TipSetKey) (types.BigInt, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return types.BigInt{}, err - } - return a.api.StateMinerAvailableBalance(ctx, m, tsk) -} - -func (a *GatewayAPI) StateMinerProvingDeadline(ctx context.Context, m address.Address, tsk types.TipSetKey) 
(*dline.Info, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - return a.api.StateMinerProvingDeadline(ctx, m, tsk) -} - -func (a *GatewayAPI) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return types.BigInt{}, err - } - return a.api.StateCirculatingSupply(ctx, tsk) - -} - -func (a *GatewayAPI) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - return a.api.StateVerifiedClientStatus(ctx, addr, tsk) -} - -func (a *GatewayAPI) StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return api.CirculatingSupply{}, err - } - return a.api.StateVMCirculatingSupplyInternal(ctx, tsk) -} - -func (a *GatewayAPI) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) (bool, error) { - return sigs.Verify(sig, k, msg) == nil, nil -} - -var _ api.GatewayAPI = (*GatewayAPI)(nil) -var _ full.ChainModuleAPI = (*GatewayAPI)(nil) -var _ full.GasModuleAPI = (*GatewayAPI)(nil) -var _ full.MpoolModuleAPI = (*GatewayAPI)(nil) -var _ full.StateModuleAPI = (*GatewayAPI)(nil) diff --git a/cmd/lotus-gateway/main.go b/cmd/lotus-gateway/main.go index 3fed88468ba..cfda02d86d8 100644 --- a/cmd/lotus-gateway/main.go +++ b/cmd/lotus-gateway/main.go @@ -2,23 +2,31 @@ package main import ( "context" + "fmt" "net" - "net/http" "os" + "github.com/urfave/cli/v2" + "go.opencensus.io/stats/view" + "golang.org/x/xerrors" + + logging "github.com/ipfs/go-log/v2" + "github.com/filecoin-project/go-jsonrpc" - "go.opencensus.io/tag" + "github.com/filecoin-project/go-state-types/abi" + manet "github.com/multiformats/go-multiaddr/net" + + "github.com/filecoin-project/go-address" + 
"github.com/filecoin-project/lotus/api/client" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + cliutil "github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/gateway" "github.com/filecoin-project/lotus/lib/lotuslog" "github.com/filecoin-project/lotus/metrics" - - logging "github.com/ipfs/go-log" - "go.opencensus.io/stats/view" - - "github.com/gorilla/mux" - "github.com/urfave/cli/v2" + "github.com/filecoin-project/lotus/node" ) var log = logging.Logger("gateway") @@ -28,6 +36,7 @@ func main() { local := []*cli.Command{ runCmd, + checkCmd, } app := &cli.App{ @@ -47,11 +56,60 @@ func main() { app.Setup() if err := app.Run(os.Args); err != nil { - log.Warnf("%+v", err) + log.Errorf("%+v", err) + os.Exit(1) return } } +var checkCmd = &cli.Command{ + Name: "check", + Usage: "performs a simple check to verify that a connection can be made to a gateway", + ArgsUsage: "[apiInfo]", + Description: `Any valid value for FULLNODE_API_INFO is a valid argument to the check command. 
+ + Examples + - ws://127.0.0.1:2346 + - http://127.0.0.1:2346 + - /ip4/127.0.0.1/tcp/2346`, + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + ctx := lcli.ReqContext(cctx) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + ainfo := cliutil.ParseApiInfo(cctx.Args().First()) + + darg, err := ainfo.DialArgs("v1") + if err != nil { + return err + } + + api, closer, err := client.NewFullNodeRPCV1(ctx, darg, nil) + if err != nil { + return err + } + + defer closer() + + addr, err := address.NewIDAddress(100) + if err != nil { + return err + } + + laddr, err := api.StateLookupID(ctx, addr, types.EmptyTSK) + if err != nil { + return err + } + + if laddr != addr { + return fmt.Errorf("looked up addresses does not match returned address, %s != %s", addr, laddr) + } + + return nil + }, +} + var runCmd = &cli.Command{ Name: "run", Usage: "Start api server", @@ -61,65 +119,75 @@ var runCmd = &cli.Command{ Usage: "host address and port the api server will listen on", Value: "0.0.0.0:2346", }, + &cli.IntFlag{ + Name: "api-max-req-size", + Usage: "maximum API request size accepted by the JSON RPC server", + }, + &cli.DurationFlag{ + Name: "api-max-lookback", + Usage: "maximum duration allowable for tipset lookbacks", + Value: gateway.DefaultLookbackCap, + }, + &cli.Int64Flag{ + Name: "api-wait-lookback-limit", + Usage: "maximum number of blocks to search back through for message inclusion", + Value: int64(gateway.DefaultStateWaitLookbackLimit), + }, }, Action: func(cctx *cli.Context) error { log.Info("Starting lotus gateway") - ctx := lcli.ReqContext(cctx) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - // Register all metric views if err := view.Register( - metrics.DefaultViews..., + metrics.ChainNodeViews..., ); err != nil { log.Fatalf("Cannot register the view: %v", err) } - api, closer, err := lcli.GetFullNodeAPI(cctx) + api, closer, err := lcli.GetFullNodeAPIV1(cctx) if err != nil { return err } defer closer() - address := 
cctx.String("listen") - mux := mux.NewRouter() + var ( + lookbackCap = cctx.Duration("api-max-lookback") + address = cctx.String("listen") + waitLookback = abi.ChainEpoch(cctx.Int64("api-wait-lookback-limit")) + ) - log.Info("Setting up API endpoint at " + address) - - rpcServer := jsonrpc.NewServer() - rpcServer.Register("Filecoin", metrics.MetricedGatewayAPI(NewGatewayAPI(api))) + serverOptions := make([]jsonrpc.ServerOption, 0) + if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 { + serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize))) + } - mux.Handle("/rpc/v0", rpcServer) - mux.PathPrefix("/").Handler(http.DefaultServeMux) + log.Info("setting up API endpoint at " + address) - /*ah := &auth.Handler{ - Verify: nodeApi.AuthVerify, - Next: mux.ServeHTTP, - }*/ + addr, err := net.ResolveTCPAddr("tcp", address) + if err != nil { + return xerrors.Errorf("failed to resolve endpoint address: %w", err) + } - srv := &http.Server{ - Handler: mux, - BaseContext: func(listener net.Listener) context.Context { - ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-gateway")) - return ctx - }, + maddr, err := manet.FromNetAddr(addr) + if err != nil { + return xerrors.Errorf("failed to convert endpoint address to multiaddr: %w", err) } - go func() { - <-ctx.Done() - log.Warn("Shutting down...") - if err := srv.Shutdown(context.TODO()); err != nil { - log.Errorf("shutting down RPC server failed: %s", err) - } - log.Warn("Graceful shutdown successful") - }() + gwapi := gateway.NewNode(api, lookbackCap, waitLookback) + h, err := gateway.Handler(gwapi, serverOptions...) 
+ if err != nil { + return xerrors.Errorf("failed to set up gateway HTTP handler") + } - nl, err := net.Listen("tcp", address) + stopFunc, err := node.ServeRPC(h, "lotus-gateway", maddr) if err != nil { - return err + return xerrors.Errorf("failed to serve rpc endpoint: %w", err) } - return srv.Serve(nl) + <-node.MonitorShutdown(nil, node.ShutdownHandler{ + Component: "rpc", + StopFunc: stopFunc, + }) + return nil }, } diff --git a/cmd/lotus-health/main.go b/cmd/lotus-health/main.go index e8a32a71946..da90242c888 100644 --- a/cmd/lotus-health/main.go +++ b/cmd/lotus-health/main.go @@ -8,13 +8,14 @@ import ( "syscall" "time" + "github.com/filecoin-project/lotus/api/v0api" + cid "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" @@ -180,7 +181,7 @@ func checkWindow(window CidWindow, t int) bool { * returns a slice of slices of Cids * len of slice <= `t` - threshold */ -func updateWindow(ctx context.Context, a api.FullNode, w CidWindow, t int, r int, to time.Duration) (CidWindow, error) { +func updateWindow(ctx context.Context, a v0api.FullNode, w CidWindow, t int, r int, to time.Duration) (CidWindow, error) { head, err := getHead(ctx, a, r, to) if err != nil { return nil, err @@ -194,7 +195,7 @@ func updateWindow(ctx context.Context, a api.FullNode, w CidWindow, t int, r int * retries if API no available * returns tipset */ -func getHead(ctx context.Context, a api.FullNode, r int, t time.Duration) (*types.TipSet, error) { +func getHead(ctx context.Context, a v0api.FullNode, r int, t time.Duration) (*types.TipSet, error) { for i := 0; i < r; i++ { head, err := a.ChainHead(ctx) if err != nil && i == (r-1) { @@ -226,7 +227,7 @@ func appendCIDsToWindow(w CidWindow, c []cid.Cid, t int) 
CidWindow { /* * wait for node to sync */ -func waitForSyncComplete(ctx context.Context, a api.FullNode, r int, t time.Duration) error { +func waitForSyncComplete(ctx context.Context, a v0api.FullNode, r int, t time.Duration) error { for { select { case <-ctx.Done(): @@ -248,7 +249,7 @@ func waitForSyncComplete(ctx context.Context, a api.FullNode, r int, t time.Dura * A thin wrapper around lotus cli GetFullNodeAPI * Adds retry logic */ -func getFullNodeAPI(ctx *cli.Context, r int, t time.Duration) (api.FullNode, jsonrpc.ClientCloser, error) { +func getFullNodeAPI(ctx *cli.Context, r int, t time.Duration) (v0api.FullNode, jsonrpc.ClientCloser, error) { for i := 0; i < r; i++ { api, closer, err := lcli.GetFullNodeAPI(ctx) if err != nil && i == (r-1) { diff --git a/cmd/lotus-keygen/main.go b/cmd/lotus-keygen/main.go index d296cb5da70..ebf981e8b7a 100644 --- a/cmd/lotus-keygen/main.go +++ b/cmd/lotus-keygen/main.go @@ -22,6 +22,11 @@ func main() { Value: "bls", Usage: "specify key type to generate (bls or secp256k1)", }, + &cli.StringFlag{ + Name: "out", + Aliases: []string{"o"}, + Usage: "specify key file name to generate", + }, } app.Action = func(cctx *cli.Context) error { memks := wallet.NewMemKeyStore() @@ -50,7 +55,11 @@ func main() { return err } - fi, err := os.Create(fmt.Sprintf("%s.key", kaddr)) + outFile := fmt.Sprintf("%s.key", kaddr) + if cctx.IsSet("out") { + outFile = fmt.Sprintf("%s.key", cctx.String("out")) + } + fi, err := os.Create(outFile) if err != nil { return err } diff --git a/cmd/lotus-seal-worker/cli.go b/cmd/lotus-seal-worker/cli.go new file mode 100644 index 00000000000..b1501fca745 --- /dev/null +++ b/cmd/lotus-seal-worker/cli.go @@ -0,0 +1,51 @@ +package main + +import ( + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + lcli "github.com/filecoin-project/lotus/cli" +) + +var setCmd = &cli.Command{ + Name: "set", + Usage: "Manage worker settings", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "enabled", + Usage: "enable/disable new 
task processing", + Value: true, + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetWorkerAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + if err := api.SetEnabled(ctx, cctx.Bool("enabled")); err != nil { + return xerrors.Errorf("SetEnabled: %w", err) + } + + return nil + }, +} + +var waitQuietCmd = &cli.Command{ + Name: "wait-quiet", + Usage: "Block until all running tasks exit", + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetWorkerAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + return api.WaitQuiet(ctx) + }, +} diff --git a/cmd/lotus-seal-worker/info.go b/cmd/lotus-seal-worker/info.go index 9b08a0c800e..6d5c2d64ebc 100644 --- a/cmd/lotus-seal-worker/info.go +++ b/cmd/lotus-seal-worker/info.go @@ -2,12 +2,14 @@ package main import ( "fmt" + "sort" "github.com/urfave/cli/v2" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" ) var infoCmd = &cli.Command{ @@ -32,15 +34,39 @@ var infoCmd = &cli.Command{ cli.VersionPrinter(cctx) fmt.Println() + sess, err := api.ProcessSession(ctx) + if err != nil { + return xerrors.Errorf("getting session: %w", err) + } + fmt.Printf("Session: %s\n", sess) + + enabled, err := api.Enabled(ctx) + if err != nil { + return xerrors.Errorf("checking worker status: %w", err) + } + fmt.Printf("Enabled: %t\n", enabled) + info, err := api.Info(ctx) if err != nil { return xerrors.Errorf("getting info: %w", err) } + tt, err := api.TaskTypes(ctx) + if err != nil { + return xerrors.Errorf("getting task types: %w", err) + } + fmt.Printf("Hostname: %s\n", info.Hostname) fmt.Printf("CPUs: %d; GPUs: %v\n", info.Resources.CPUs, info.Resources.GPUs) fmt.Printf("RAM: %s; Swap: %s\n", types.SizeStr(types.NewInt(info.Resources.MemPhysical)), 
types.SizeStr(types.NewInt(info.Resources.MemSwap))) fmt.Printf("Reserved memory: %s\n", types.SizeStr(types.NewInt(info.Resources.MemReserved))) + + fmt.Printf("Task types: ") + for _, t := range ttList(tt) { + fmt.Printf("%s ", t.Short()) + } + fmt.Println() + fmt.Println() paths, err := api.Paths(ctx) @@ -52,7 +78,6 @@ var infoCmd = &cli.Command{ fmt.Printf("%s:\n", path.ID) fmt.Printf("\tWeight: %d; Use: ", path.Weight) if path.CanSeal || path.CanStore { - fmt.Printf("Weight: %d; Use: ", path.Weight) if path.CanSeal { fmt.Print("Seal ") } @@ -69,3 +94,14 @@ var infoCmd = &cli.Command{ return nil }, } + +func ttList(tt map[sealtasks.TaskType]struct{}) []sealtasks.TaskType { + tasks := make([]sealtasks.TaskType, 0, len(tt)) + for taskType := range tt { + tasks = append(tasks, taskType) + } + sort.Slice(tasks, func(i, j int) bool { + return tasks[i].Less(tasks[j]) + }) + return tasks +} diff --git a/cmd/lotus-seal-worker/main.go b/cmd/lotus-seal-worker/main.go index 36c9d5effc1..adcf0f86934 100644 --- a/cmd/lotus-seal-worker/main.go +++ b/cmd/lotus-seal-worker/main.go @@ -28,11 +28,10 @@ import ( "github.com/filecoin-project/go-statestore" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apistruct" "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" + cliutil "github.com/filecoin-project/lotus/cli/util" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/lib/lotuslog" @@ -50,7 +49,7 @@ const FlagWorkerRepo = "worker-repo" const FlagWorkerRepoDeprecation = "workerrepo" func main() { - build.RunningNodeType = build.NodeWorker + api.RunningNodeType = api.NodeWorker lotuslog.SetupLogLevels() @@ -58,12 +57,16 @@ func main() { runCmd, infoCmd, 
storageCmd, + setCmd, + waitQuietCmd, + tasksCmd, } app := &cli.App{ - Name: "lotus-worker", - Usage: "Remote miner worker", - Version: build.UserVersion(), + Name: "lotus-worker", + Usage: "Remote miner worker", + Version: build.UserVersion(), + EnableBashCompletion: true, Flags: []cli.Flag{ &cli.StringFlag{ Name: FlagWorkerRepo, @@ -181,7 +184,7 @@ var runCmd = &cli.Command{ var closer func() var err error for { - nodeApi, closer, err = lcli.GetStorageMinerAPI(cctx, lcli.StorageMinerUseHttp) + nodeApi, closer, err = lcli.GetStorageMinerAPI(cctx, cliutil.StorageMinerUseHttp) if err == nil { _, err = nodeApi.Version(ctx) if err == nil { @@ -208,8 +211,8 @@ var runCmd = &cli.Command{ if err != nil { return err } - if v.APIVersion != build.MinerAPIVersion { - return xerrors.Errorf("lotus-miner API version doesn't match: expected: %s", api.Version{APIVersion: build.MinerAPIVersion}) + if v.APIVersion != api.MinerAPIVersion0 { + return xerrors.Errorf("lotus-miner API version doesn't match: expected: %s", api.APIVersion{APIVersion: api.MinerAPIVersion0}) } log.Infof("Remote version %s", v) @@ -225,7 +228,7 @@ var runCmd = &cli.Command{ } if cctx.Bool("commit") { - if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(ssize)); err != nil { + if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil { return xerrors.Errorf("get params: %w", err) } } @@ -306,7 +309,7 @@ var runCmd = &cli.Command{ { // init datastore for r.Exists - _, err := lr.Datastore("/metadata") + _, err := lr.Datastore(context.Background(), "/metadata") if err != nil { return err } @@ -325,7 +328,7 @@ var runCmd = &cli.Command{ log.Error("closing repo", err) } }() - ds, err := lr.Datastore("/metadata") + ds, err := lr.Datastore(context.Background(), "/metadata") if err != nil { return err } @@ -354,17 +357,24 @@ var runCmd = &cli.Command{ } // Setup remote sector store - spt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize) - if err != nil 
{ - return xerrors.Errorf("getting proof type: %w", err) - } - sminfo, err := lcli.GetAPIInfo(cctx, repo.StorageMiner) if err != nil { return xerrors.Errorf("could not get api info: %w", err) } - remote := stores.NewRemote(localStore, nodeApi, sminfo.AuthHeader(), cctx.Int("parallel-fetch-limit")) + remote := stores.NewRemote(localStore, nodeApi, sminfo.AuthHeader(), cctx.Int("parallel-fetch-limit"), + &stores.DefaultPartialFileHandler{}) + + fh := &stores.FetchHandler{Local: localStore, PfHandler: &stores.DefaultPartialFileHandler{}} + remoteHandler := func(w http.ResponseWriter, r *http.Request) { + if !auth.HasPerm(r.Context(), nil, api.PermAdmin) { + w.WriteHeader(401) + _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing admin permission"}) + return + } + + fh.ServeHTTP(w, r) + } // Create / expose the worker @@ -372,7 +382,6 @@ var runCmd = &cli.Command{ workerApi := &worker{ LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{ - SealProof: spt, TaskTypes: taskTypes, NoSwap: cctx.Bool("no-swap"), }, remote, localStore, nodeApi, nodeApi, wsts), @@ -386,11 +395,11 @@ var runCmd = &cli.Command{ readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder() rpcServer := jsonrpc.NewServer(readerServerOpt) - rpcServer.Register("Filecoin", apistruct.PermissionedWorkerAPI(metrics.MetricedWorkerAPI(workerApi))) + rpcServer.Register("Filecoin", api.PermissionedWorkerAPI(metrics.MetricedWorkerAPI(workerApi))) mux.Handle("/rpc/v0", rpcServer) mux.Handle("/rpc/streams/v0/push/{uuid}", readerHandler) - mux.PathPrefix("/remote").HandlerFunc((&stores.FetchHandler{Local: localStore}).ServeHTTP) + mux.PathPrefix("/remote").HandlerFunc(remoteHandler) mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof ah := &auth.Handler{ @@ -451,14 +460,24 @@ var runCmd = &cli.Command{ return xerrors.Errorf("getting miner session: %w", err) } + waitQuietCh := func() chan struct{} { + out := make(chan struct{}) + go func() { + 
workerApi.LocalWorker.WaitQuiet() + close(out) + }() + return out + } + go func() { heartbeats := time.NewTicker(stores.HeartbeatInterval) defer heartbeats.Stop() - var connected, reconnect bool + var redeclareStorage bool + var readyCh chan struct{} for { // If we're reconnecting, redeclare storage first - if reconnect { + if redeclareStorage { log.Info("Redeclaring local storage") if err := localStore.Redeclare(ctx); err != nil { @@ -471,14 +490,13 @@ var runCmd = &cli.Command{ } continue } - - connected = false } - log.Info("Making sure no local tasks are running") - // TODO: we could get rid of this, but that requires tracking resources for restarted tasks correctly - workerApi.LocalWorker.WaitQuiet() + if readyCh == nil { + log.Info("Making sure no local tasks are running") + readyCh = waitQuietCh() + } for { curSession, err := nodeApi.Session(ctx) @@ -489,29 +507,28 @@ var runCmd = &cli.Command{ minerSession = curSession break } - - if !connected { - if err := nodeApi.WorkerConnect(ctx, "http://"+address+"/rpc/v0"); err != nil { - log.Errorf("Registering worker failed: %+v", err) - cancel() - return - } - - log.Info("Worker registered successfully, waiting for tasks") - connected = true - } } select { + case <-readyCh: + if err := nodeApi.WorkerConnect(ctx, "http://"+address+"/rpc/v0"); err != nil { + log.Errorf("Registering worker failed: %+v", err) + cancel() + return + } + + log.Info("Worker registered successfully, waiting for tasks") + + readyCh = nil + case <-heartbeats.C: case <-ctx.Done(): return // graceful shutdown - case <-heartbeats.C: } } log.Errorf("LOTUS-MINER CONNECTION LOST") - reconnect = true + redeclareStorage = true } }() diff --git a/cmd/lotus-seal-worker/rpc.go b/cmd/lotus-seal-worker/rpc.go index b543babbf7d..6a6263671bd 100644 --- a/cmd/lotus-seal-worker/rpc.go +++ b/cmd/lotus-seal-worker/rpc.go @@ -2,10 +2,14 @@ package main import ( "context" + "sync/atomic" + "github.com/google/uuid" "github.com/mitchellh/go-homedir" 
"golang.org/x/xerrors" + "github.com/filecoin-project/lotus/api" + apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/build" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/stores" @@ -17,10 +21,12 @@ type worker struct { localStore *stores.Local ls stores.LocalStorage + + disabled int64 } -func (w *worker) Version(context.Context) (build.Version, error) { - return build.WorkerAPIVersion, nil +func (w *worker) Version(context.Context) (api.Version, error) { + return api.WorkerAPIVersion0, nil } func (w *worker) StorageAddLocal(ctx context.Context, path string) error { @@ -42,4 +48,38 @@ func (w *worker) StorageAddLocal(ctx context.Context, path string) error { return nil } +func (w *worker) SetEnabled(ctx context.Context, enabled bool) error { + disabled := int64(1) + if enabled { + disabled = 0 + } + atomic.StoreInt64(&w.disabled, disabled) + return nil +} + +func (w *worker) Enabled(ctx context.Context) (bool, error) { + return atomic.LoadInt64(&w.disabled) == 0, nil +} + +func (w *worker) WaitQuiet(ctx context.Context) error { + w.LocalWorker.WaitQuiet() // uses WaitGroup under the hood so no ctx :/ + return nil +} + +func (w *worker) ProcessSession(ctx context.Context) (uuid.UUID, error) { + return w.LocalWorker.Session(ctx) +} + +func (w *worker) Session(ctx context.Context) (uuid.UUID, error) { + if atomic.LoadInt64(&w.disabled) == 1 { + return uuid.UUID{}, xerrors.Errorf("worker disabled") + } + + return w.LocalWorker.Session(ctx) +} + +func (w *worker) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) { + return build.OpenRPCDiscoverJSON_Worker(), nil +} + var _ storiface.WorkerCalls = &worker{} diff --git a/cmd/lotus-seal-worker/storage.go b/cmd/lotus-seal-worker/storage.go index 39cd3ad5afb..be662a6c36b 100644 --- a/cmd/lotus-seal-worker/storage.go +++ b/cmd/lotus-seal-worker/storage.go @@ -6,6 +6,7 @@ import ( "os" 
"path/filepath" + "github.com/docker/go-units" "github.com/google/uuid" "github.com/mitchellh/go-homedir" "github.com/urfave/cli/v2" @@ -46,6 +47,10 @@ var storageAttachCmd = &cli.Command{ Name: "store", Usage: "(for init) use path for long-term storage", }, + &cli.StringFlag{ + Name: "max-storage", + Usage: "(for init) limit storage space for sectors (expensive for very large paths!)", + }, }, Action: func(cctx *cli.Context) error { nodeApi, closer, err := lcli.GetWorkerAPI(cctx) @@ -79,15 +84,24 @@ var storageAttachCmd = &cli.Command{ return err } + var maxStor int64 + if cctx.IsSet("max-storage") { + maxStor, err = units.RAMInBytes(cctx.String("max-storage")) + if err != nil { + return xerrors.Errorf("parsing max-storage: %w", err) + } + } + cfg := &stores.LocalStorageMeta{ - ID: stores.ID(uuid.New().String()), - Weight: cctx.Uint64("weight"), - CanSeal: cctx.Bool("seal"), - CanStore: cctx.Bool("store"), + ID: stores.ID(uuid.New().String()), + Weight: cctx.Uint64("weight"), + CanSeal: cctx.Bool("seal"), + CanStore: cctx.Bool("store"), + MaxStorage: uint64(maxStor), } if !(cfg.CanStore || cfg.CanSeal) { - return xerrors.Errorf("must specify at least one of --store of --seal") + return xerrors.Errorf("must specify at least one of --store or --seal") } b, err := json.MarshalIndent(cfg, "", " ") diff --git a/cmd/lotus-seal-worker/tasks.go b/cmd/lotus-seal-worker/tasks.go new file mode 100644 index 00000000000..02e5d6cfd8e --- /dev/null +++ b/cmd/lotus-seal-worker/tasks.go @@ -0,0 +1,82 @@ +package main + +import ( + "context" + "strings" + + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/api" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" +) + +var tasksCmd = &cli.Command{ + Name: "tasks", + Usage: "Manage task processing", + Subcommands: []*cli.Command{ + tasksEnableCmd, + tasksDisableCmd, + }, +} + +var allowSetting = map[sealtasks.TaskType]struct{}{ + 
sealtasks.TTAddPiece: {}, + sealtasks.TTPreCommit1: {}, + sealtasks.TTPreCommit2: {}, + sealtasks.TTCommit2: {}, + sealtasks.TTUnseal: {}, +} + +var settableStr = func() string { + var s []string + for _, tt := range ttList(allowSetting) { + s = append(s, tt.Short()) + } + return strings.Join(s, "|") +}() + +var tasksEnableCmd = &cli.Command{ + Name: "enable", + Usage: "Enable a task type", + ArgsUsage: "[" + settableStr + "]", + Action: taskAction(api.Worker.TaskEnable), +} + +var tasksDisableCmd = &cli.Command{ + Name: "disable", + Usage: "Disable a task type", + ArgsUsage: "[" + settableStr + "]", + Action: taskAction(api.Worker.TaskDisable), +} + +func taskAction(tf func(a api.Worker, ctx context.Context, tt sealtasks.TaskType) error) func(cctx *cli.Context) error { + return func(cctx *cli.Context) error { + if cctx.NArg() != 1 { + return xerrors.Errorf("expected 1 argument") + } + + var tt sealtasks.TaskType + for taskType := range allowSetting { + if taskType.Short() == cctx.Args().First() { + tt = taskType + break + } + } + + if tt == "" { + return xerrors.Errorf("unknown task type '%s'", cctx.Args().First()) + } + + api, closer, err := lcli.GetWorkerAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + return tf(api, ctx, tt) + } +} diff --git a/cmd/lotus-seed/genesis.go b/cmd/lotus-seed/genesis.go index bbaea6969a9..a27cc0a2f7c 100644 --- a/cmd/lotus-seed/genesis.go +++ b/cmd/lotus-seed/genesis.go @@ -9,6 +9,13 @@ import ( "strconv" "strings" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/journal" + "github.com/filecoin-project/lotus/node/modules/testing" "github.com/google/uuid" "github.com/mitchellh/go-homedir" "github.com/urfave/cli/v2" @@ -32,6 +39,10 @@ var genesisCmd = &cli.Command{ 
genesisNewCmd, genesisAddMinerCmd, genesisAddMsigsCmd, + genesisSetVRKCmd, + genesisSetRemainderCmd, + genesisSetActorVersionCmd, + genesisCarCmd, }, } @@ -48,6 +59,7 @@ var genesisNewCmd = &cli.Command{ return xerrors.New("seed genesis new [genesis.json]") } out := genesis.Template{ + NetworkVersion: build.NewestNetworkVersion, Accounts: []genesis.Actor{}, Miners: []genesis.Miner{}, VerifregRootKey: gen.DefaultVerifregRootkeyActor, @@ -302,3 +314,267 @@ func parseMultisigCsv(csvf string) ([]GenAccountEntry, error) { return entries, nil } + +var genesisSetVRKCmd = &cli.Command{ + Name: "set-vrk", + Usage: "Set the verified registry's root key", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "multisig", + Usage: "CSV file to parse the multisig that will be set as the root key", + }, + &cli.StringFlag{ + Name: "account", + Usage: "pubkey address that will be set as the root key (must NOT be declared anywhere else, since it must be given ID 80)", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 1 { + return fmt.Errorf("must specify template file") + } + + genf, err := homedir.Expand(cctx.Args().First()) + if err != nil { + return err + } + + var template genesis.Template + b, err := ioutil.ReadFile(genf) + if err != nil { + return xerrors.Errorf("read genesis template: %w", err) + } + + if err := json.Unmarshal(b, &template); err != nil { + return xerrors.Errorf("unmarshal genesis template: %w", err) + } + + if cctx.IsSet("account") { + addr, err := address.NewFromString(cctx.String("account")) + if err != nil { + return err + } + + am := genesis.AccountMeta{Owner: addr} + + template.VerifregRootKey = genesis.Actor{ + Type: genesis.TAccount, + Balance: big.Zero(), + Meta: am.ActorMeta(), + } + } else if cctx.IsSet("multisig") { + csvf, err := homedir.Expand(cctx.String("multisig")) + if err != nil { + return err + } + + entries, err := parseMultisigCsv(csvf) + if err != nil { + return xerrors.Errorf("parsing multisig csv file: %w", err) + 
} + + if len(entries) == 0 { + return xerrors.Errorf("no msig entries in csv file: %w", err) + } + + e := entries[0] + if len(e.Addresses) != e.N { + return fmt.Errorf("entry had mismatch between 'N' and number of addresses") + } + + msig := &genesis.MultisigMeta{ + Signers: e.Addresses, + Threshold: e.M, + VestingDuration: monthsToBlocks(e.VestingMonths), + VestingStart: 0, + } + + act := genesis.Actor{ + Type: genesis.TMultisig, + Balance: abi.TokenAmount(e.Amount), + Meta: msig.ActorMeta(), + } + + template.VerifregRootKey = act + } else { + return xerrors.Errorf("must include either --account or --multisig flag") + } + + b, err = json.MarshalIndent(&template, "", " ") + if err != nil { + return err + } + + if err := ioutil.WriteFile(genf, b, 0644); err != nil { + return err + } + return nil + }, +} + +var genesisSetRemainderCmd = &cli.Command{ + Name: "set-remainder", + Usage: "Set the remainder actor", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "multisig", + Usage: "CSV file to parse the multisig that will be set as the remainder actor", + }, + &cli.StringFlag{ + Name: "account", + Usage: "pubkey address that will be set as the remainder key (must NOT be declared anywhere else, since it must be given ID 90)", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 1 { + return fmt.Errorf("must specify template file") + } + + genf, err := homedir.Expand(cctx.Args().First()) + if err != nil { + return err + } + + var template genesis.Template + b, err := ioutil.ReadFile(genf) + if err != nil { + return xerrors.Errorf("read genesis template: %w", err) + } + + if err := json.Unmarshal(b, &template); err != nil { + return xerrors.Errorf("unmarshal genesis template: %w", err) + } + + if cctx.IsSet("account") { + addr, err := address.NewFromString(cctx.String("account")) + if err != nil { + return err + } + + am := genesis.AccountMeta{Owner: addr} + + template.RemainderAccount = genesis.Actor{ + Type: genesis.TAccount, + Balance: 
big.Zero(), + Meta: am.ActorMeta(), + } + } else if cctx.IsSet("multisig") { + csvf, err := homedir.Expand(cctx.String("multisig")) + if err != nil { + return err + } + + entries, err := parseMultisigCsv(csvf) + if err != nil { + return xerrors.Errorf("parsing multisig csv file: %w", err) + } + + if len(entries) == 0 { + return xerrors.Errorf("no msig entries in csv file: %w", err) + } + + e := entries[0] + if len(e.Addresses) != e.N { + return fmt.Errorf("entry had mismatch between 'N' and number of addresses") + } + + msig := &genesis.MultisigMeta{ + Signers: e.Addresses, + Threshold: e.M, + VestingDuration: monthsToBlocks(e.VestingMonths), + VestingStart: 0, + } + + act := genesis.Actor{ + Type: genesis.TMultisig, + Balance: abi.TokenAmount(e.Amount), + Meta: msig.ActorMeta(), + } + + template.RemainderAccount = act + } else { + return xerrors.Errorf("must include either --account or --multisig flag") + } + + b, err = json.MarshalIndent(&template, "", " ") + if err != nil { + return err + } + + if err := ioutil.WriteFile(genf, b, 0644); err != nil { + return err + } + return nil + }, +} + +var genesisSetActorVersionCmd = &cli.Command{ + Name: "set-network-version", + Usage: "Set the version that this network will start from", + ArgsUsage: " ", + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 2 { + return fmt.Errorf("must specify genesis file and network version (e.g. 
'0'") + } + + genf, err := homedir.Expand(cctx.Args().First()) + if err != nil { + return err + } + + var template genesis.Template + b, err := ioutil.ReadFile(genf) + if err != nil { + return xerrors.Errorf("read genesis template: %w", err) + } + + if err := json.Unmarshal(b, &template); err != nil { + return xerrors.Errorf("unmarshal genesis template: %w", err) + } + + nv, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64) + if err != nil { + return xerrors.Errorf("parsing network version: %w", err) + } + + if nv > uint64(build.NewestNetworkVersion) { + return xerrors.Errorf("invalid network version: %d", nv) + } + + template.NetworkVersion = network.Version(nv) + + b, err = json.MarshalIndent(&template, "", " ") + if err != nil { + return err + } + + if err := ioutil.WriteFile(genf, b, 0644); err != nil { + return err + } + return nil + }, +} + +var genesisCarCmd = &cli.Command{ + Name: "car", + Description: "write genesis car file", + ArgsUsage: "genesis template `FILE`", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "out", + Aliases: []string{"o"}, + Value: "genesis.car", + Usage: "write output to `FILE`", + }, + }, + Action: func(c *cli.Context) error { + if c.Args().Len() != 1 { + return xerrors.Errorf("Please specify a genesis template. 
(i.e, the one created with `genesis new`)") + } + ofile := c.String("out") + jrnl := journal.NilJournal() + bstor := blockstore.WrapIDStore(blockstore.NewMemorySync()) + sbldr := vm.Syscalls(ffiwrapper.ProofVerifier) + _, err := testing.MakeGenesis(ofile, c.Args().First())(bstor, sbldr, jrnl)() + return err + }, +} diff --git a/cmd/lotus-seed/main.go b/cmd/lotus-seed/main.go index d365f6493e1..42f4b74e4d9 100644 --- a/cmd/lotus-seed/main.go +++ b/cmd/lotus-seed/main.go @@ -7,9 +7,9 @@ import ( "io/ioutil" "os" - "github.com/docker/go-units" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/go-state-types/network" + "github.com/docker/go-units" logging "github.com/ipfs/go-log/v2" "github.com/mitchellh/go-homedir" "github.com/urfave/cli/v2" @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" "github.com/filecoin-project/lotus/genesis" @@ -93,6 +94,10 @@ var preSealCmd = &cli.Command{ Name: "fake-sectors", Value: false, }, + &cli.IntFlag{ + Name: "network-version", + Usage: "specify network version", + }, }, Action: func(c *cli.Context) error { sdir := c.String("sector-dir") @@ -128,12 +133,17 @@ var preSealCmd = &cli.Command{ } sectorSize := abi.SectorSize(sectorSizeInt) - rp, err := ffiwrapper.SealProofTypeFromSectorSize(sectorSize) + nv := build.NewestNetworkVersion + if c.IsSet("network-version") { + nv = network.Version(c.Uint64("network-version")) + } + + spt, err := miner.SealProofTypeFromSectorSize(sectorSize, nv) if err != nil { return err } - gm, key, err := seed.PreSeal(maddr, rp, abi.SectorNumber(c.Uint64("sector-offset")), c.Int("num-sectors"), sbroot, []byte(c.String("ticket-preimage")), k, c.Bool("fake-sectors")) + gm, key, err := seed.PreSeal(maddr, spt, 
abi.SectorNumber(c.Uint64("sector-offset")), c.Int("num-sectors"), sbroot, []byte(c.String("ticket-preimage")), k, c.Bool("fake-sectors")) if err != nil { return err } diff --git a/cmd/lotus-seed/seed/seed.go b/cmd/lotus-seed/seed/seed.go index ab8e5a52a2b..48183690db7 100644 --- a/cmd/lotus-seed/seed/seed.go +++ b/cmd/lotus-seed/seed/seed.go @@ -19,9 +19,10 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" + "github.com/filecoin-project/specs-storage/storage" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" @@ -42,10 +43,6 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect return nil, nil, err } - cfg := &ffiwrapper.Config{ - SealProofType: spt, - } - if err := os.MkdirAll(sbroot, 0775); err != nil { //nolint:gosec return nil, nil, err } @@ -56,7 +53,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect Root: sbroot, } - sb, err := ffiwrapper.New(sbfs, cfg) + sb, err := ffiwrapper.New(sbfs) if err != nil { return nil, nil, err } @@ -69,16 +66,17 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect var sealedSectors []*genesis.PreSeal for i := 0; i < sectors; i++ { sid := abi.SectorID{Miner: abi.ActorID(mid), Number: next} + ref := storage.SectorRef{ID: sid, ProofType: spt} next++ var preseal *genesis.PreSeal if !fakeSectors { - preseal, err = presealSector(sb, sbfs, sid, spt, ssize, preimage) + preseal, err = presealSector(sb, sbfs, ref, ssize, preimage) if err != nil { return nil, nil, err } } else { - preseal, err = presealSectorFake(sbfs, sid, spt, ssize) + preseal, err = presealSectorFake(sbfs, ref, ssize) if err != nil { return nil, nil, err } @@ -148,7 +146,7 @@ func 
PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect return miner, &minerAddr.KeyInfo, nil } -func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize, preimage []byte) (*genesis.PreSeal, error) { +func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid storage.SectorRef, ssize abi.SectorSize, preimage []byte) (*genesis.PreSeal, error) { pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(ssize).Unpadded(), rand.Reader) if err != nil { return nil, err @@ -182,12 +180,12 @@ func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.Sector return &genesis.PreSeal{ CommR: cids.Sealed, CommD: cids.Unsealed, - SectorID: sid.Number, - ProofType: spt, + SectorID: sid.ID.Number, + ProofType: sid.ProofType, }, nil } -func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize) (*genesis.PreSeal, error) { +func presealSectorFake(sbfs *basicfs.Provider, sid storage.SectorRef, ssize abi.SectorSize) (*genesis.PreSeal, error) { paths, done, err := sbfs.AcquireSector(context.TODO(), sid, 0, storiface.FTSealed|storiface.FTCache, storiface.PathSealing) if err != nil { return nil, xerrors.Errorf("acquire unsealed sector: %w", err) @@ -198,7 +196,7 @@ func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.Registe return nil, xerrors.Errorf("mkdir cache: %w", err) } - commr, err := ffi.FauxRep(spt, paths.Cache, paths.Sealed) + commr, err := ffi.FauxRep(sid.ProofType, paths.Cache, paths.Sealed) if err != nil { return nil, xerrors.Errorf("fauxrep: %w", err) } @@ -206,13 +204,13 @@ func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.Registe return &genesis.PreSeal{ CommR: commr, CommD: zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()), - SectorID: sid.Number, - ProofType: spt, + SectorID: sid.ID.Number, + ProofType: sid.ProofType, }, 
nil } -func cleanupUnsealed(sbfs *basicfs.Provider, sid abi.SectorID) error { - paths, done, err := sbfs.AcquireSector(context.TODO(), sid, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing) +func cleanupUnsealed(sbfs *basicfs.Provider, ref storage.SectorRef) error { + paths, done, err := sbfs.AcquireSector(context.TODO(), ref, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing) if err != nil { return err } diff --git a/cmd/lotus-shed/actor.go b/cmd/lotus-shed/actor.go new file mode 100644 index 00000000000..b78f283497f --- /dev/null +++ b/cmd/lotus-shed/actor.go @@ -0,0 +1,740 @@ +package main + +import ( + "fmt" + "os" + + "github.com/fatih/color" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/lib/tablewriter" +) + +var actorCmd = &cli.Command{ + Name: "actor", + Usage: "manipulate the miner actor", + Subcommands: []*cli.Command{ + actorWithdrawCmd, + actorSetOwnerCmd, + actorControl, + actorProposeChangeWorker, + actorConfirmChangeWorker, + }, +} + +var actorWithdrawCmd = &cli.Command{ + Name: "withdraw", + Usage: "withdraw available balance", + ArgsUsage: "[amount (FIL)]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + }, + Action: func(cctx *cli.Context) error { + var maddr address.Address + if act := cctx.String("actor"); act != "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + 
return fmt.Errorf("parsing address %s: %w", act, err) + } + } + + nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + if maddr.Empty() { + minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + maddr, err = minerAPI.ActorAddress(ctx) + if err != nil { + return err + } + } + + mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + available, err := nodeAPI.StateMinerAvailableBalance(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + amount := available + if cctx.Args().Present() { + f, err := types.ParseFIL(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("parsing 'amount' argument: %w", err) + } + + amount = abi.TokenAmount(f) + + if amount.GreaterThan(available) { + return xerrors.Errorf("can't withdraw more funds than available; requested: %s; available: %s", amount, available) + } + } + + params, err := actors.SerializeParams(&miner2.WithdrawBalanceParams{ + AmountRequested: amount, // Default to attempting to withdraw all the extra funds in the miner actor + }) + if err != nil { + return err + } + + smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{ + To: maddr, + From: mi.Owner, + Value: types.NewInt(0), + Method: miner.Methods.WithdrawBalance, + Params: params, + }, &api.MessageSendSpec{MaxFee: abi.TokenAmount(types.MustParseFIL("0.1"))}) + if err != nil { + return err + } + + fmt.Printf("Requested rewards withdrawal in message %s\n", smsg.Cid()) + + return nil + }, +} + +var actorSetOwnerCmd = &cli.Command{ + Name: "set-owner", + Usage: "Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner)", + ArgsUsage: "[newOwnerAddress senderAddress]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + 
&cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Bool("really-do-it") { + fmt.Println("Pass --really-do-it to actually execute this action") + return nil + } + + if cctx.NArg() != 2 { + return fmt.Errorf("must pass new owner address and sender address") + } + + var maddr address.Address + if act := cctx.String("actor"); act != "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + return fmt.Errorf("parsing address %s: %w", act, err) + } + } + + nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddrId, err := nodeAPI.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + fa, err := address.NewFromString(cctx.Args().Get(1)) + if err != nil { + return err + } + + fromAddrId, err := nodeAPI.StateLookupID(ctx, fa, types.EmptyTSK) + if err != nil { + return err + } + + if maddr.Empty() { + minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + maddr, err = minerAPI.ActorAddress(ctx) + if err != nil { + return err + } + } + + mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + if fromAddrId != mi.Owner && fromAddrId != newAddrId { + return xerrors.New("from address must either be the old owner or the new owner") + } + + sp, err := actors.SerializeParams(&newAddrId) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{ + From: fromAddrId, + To: maddr, + Method: miner.Methods.ChangeOwnerAddress, + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + 
fmt.Println("Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := nodeAPI.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Println("owner change failed!") + return err + } + + fmt.Println("message succeeded!") + + return nil + }, +} + +var actorControl = &cli.Command{ + Name: "control", + Usage: "Manage control addresses", + Subcommands: []*cli.Command{ + actorControlList, + actorControlSet, + }, +} + +var actorControlList = &cli.Command{ + Name: "list", + Usage: "Get currently set control addresses", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + &cli.BoolFlag{ + Name: "verbose", + }, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + + var maddr address.Address + if act := cctx.String("actor"); act != "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + return fmt.Errorf("parsing address %s: %w", act, err) + } + } + + nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + if maddr.Empty() { + minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + maddr, err = minerAPI.ActorAddress(ctx) + if err != nil { + return err + } + } + + mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + tw := tablewriter.New( + tablewriter.Col("name"), + tablewriter.Col("ID"), + tablewriter.Col("key"), + tablewriter.Col("balance"), + ) + + printKey := func(name string, a address.Address) { + b, err := nodeAPI.WalletBalance(ctx, a) + if err != nil { + 
fmt.Printf("%s\t%s: error getting balance: %s\n", name, a, err) + return + } + + k, err := nodeAPI.StateAccountKey(ctx, a, types.EmptyTSK) + if err != nil { + fmt.Printf("%s\t%s: error getting account key: %s\n", name, a, err) + return + } + + kstr := k.String() + if !cctx.Bool("verbose") { + kstr = kstr[:9] + "..." + } + + bstr := types.FIL(b).String() + switch { + case b.LessThan(types.FromFil(10)): + bstr = color.RedString(bstr) + case b.LessThan(types.FromFil(50)): + bstr = color.YellowString(bstr) + default: + bstr = color.GreenString(bstr) + } + + tw.Write(map[string]interface{}{ + "name": name, + "ID": a, + "key": kstr, + "balance": bstr, + }) + } + + printKey("owner", mi.Owner) + printKey("worker", mi.Worker) + for i, ca := range mi.ControlAddresses { + printKey(fmt.Sprintf("control-%d", i), ca) + } + + return tw.Flush(os.Stdout) + }, +} + +var actorControlSet = &cli.Command{ + Name: "set", + Usage: "Set control address(-es)", + ArgsUsage: "[...address]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Bool("really-do-it") { + fmt.Println("Pass --really-do-it to actually execute this action") + return nil + } + + var maddr address.Address + if act := cctx.String("actor"); act != "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + return fmt.Errorf("parsing address %s: %w", act, err) + } + } + + nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + if maddr.Empty() { + minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + maddr, err = minerAPI.ActorAddress(ctx) + if err != nil { + return err + } + } + + mi, err := nodeAPI.StateMinerInfo(ctx, maddr, 
types.EmptyTSK) + if err != nil { + return err + } + + del := map[address.Address]struct{}{} + existing := map[address.Address]struct{}{} + for _, controlAddress := range mi.ControlAddresses { + ka, err := nodeAPI.StateAccountKey(ctx, controlAddress, types.EmptyTSK) + if err != nil { + return err + } + + del[ka] = struct{}{} + existing[ka] = struct{}{} + } + + var toSet []address.Address + + for i, as := range cctx.Args().Slice() { + a, err := address.NewFromString(as) + if err != nil { + return xerrors.Errorf("parsing address %d: %w", i, err) + } + + ka, err := nodeAPI.StateAccountKey(ctx, a, types.EmptyTSK) + if err != nil { + return err + } + + // make sure the address exists on chain + _, err = nodeAPI.StateLookupID(ctx, ka, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("looking up %s: %w", ka, err) + } + + delete(del, ka) + toSet = append(toSet, ka) + } + + for a := range del { + fmt.Println("Remove", a) + } + for _, a := range toSet { + if _, exists := existing[a]; !exists { + fmt.Println("Add", a) + } + } + + cwp := &miner2.ChangeWorkerAddressParams{ + NewWorker: mi.Worker, + NewControlAddrs: toSet, + } + + sp, err := actors.SerializeParams(cwp) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: miner.Methods.ChangeWorkerAddress, + + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Println("Message CID:", smsg.Cid()) + + return nil + }, +} + +var actorProposeChangeWorker = &cli.Command{ + Name: "propose-change-worker", + Usage: "Propose a worker address change", + ArgsUsage: "[address]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx 
*cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass address of new worker address") + } + + if !cctx.Bool("really-do-it") { + fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") + return nil + } + + var maddr address.Address + if act := cctx.String("actor"); act != "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + return fmt.Errorf("parsing address %s: %w", act, err) + } + } + + nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddr, err := nodeAPI.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + if maddr.Empty() { + minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + maddr, err = minerAPI.ActorAddress(ctx) + if err != nil { + return err + } + } + + mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + if mi.NewWorker.Empty() { + if mi.Worker == newAddr { + return fmt.Errorf("worker address already set to %s", na) + } + } else { + if mi.NewWorker == newAddr { + return fmt.Errorf("change to worker address %s already pending", na) + } + } + + cwp := &miner2.ChangeWorkerAddressParams{ + NewWorker: newAddr, + NewControlAddrs: mi.ControlAddresses, + } + + sp, err := actors.SerializeParams(cwp) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: miner.Methods.ChangeWorkerAddress, + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := 
nodeAPI.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Propose worker change failed!") + return err + } + + mi, err = nodeAPI.StateMinerInfo(ctx, maddr, wait.TipSet) + if err != nil { + return err + } + if mi.NewWorker != newAddr { + return fmt.Errorf("Proposed worker address change not reflected on chain: expected '%s', found '%s'", na, mi.NewWorker) + } + + fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully proposed.\n", na) + fmt.Fprintf(cctx.App.Writer, "Call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch) + + return nil + }, +} + +var actorConfirmChangeWorker = &cli.Command{ + Name: "confirm-change-worker", + Usage: "Confirm a worker address change", + ArgsUsage: "[address]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass address of new worker address") + } + + if !cctx.Bool("really-do-it") { + fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") + return nil + } + + var maddr address.Address + if act := cctx.String("actor"); act != "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + return fmt.Errorf("parsing address %s: %w", act, err) + } + } + + nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddr, err := nodeAPI.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + if maddr.Empty() { + minerAPI, 
closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + maddr, err = minerAPI.ActorAddress(ctx) + if err != nil { + return err + } + } + + mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + if mi.NewWorker.Empty() { + return xerrors.Errorf("no worker key change proposed") + } else if mi.NewWorker != newAddr { + return xerrors.Errorf("worker key %s does not match current worker key proposal %s", newAddr, mi.NewWorker) + } + + if head, err := nodeAPI.ChainHead(ctx); err != nil { + return xerrors.Errorf("failed to get the chain head: %w", err) + } else if head.Height() < mi.WorkerChangeEpoch { + return xerrors.Errorf("worker key change cannot be confirmed until %d, current height is %d", mi.WorkerChangeEpoch, head.Height()) + } + + smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: miner.Methods.ConfirmUpdateWorkerKey, + Value: big.Zero(), + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Confirm Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := nodeAPI.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Worker change failed!") + return err + } + + mi, err = nodeAPI.StateMinerInfo(ctx, maddr, wait.TipSet) + if err != nil { + return err + } + if mi.Worker != newAddr { + return fmt.Errorf("Confirmed worker address change not reflected on chain: expected '%s', found '%s'", newAddr, mi.Worker) + } + + return nil + }, +} diff --git a/cmd/lotus-shed/balances.go b/cmd/lotus-shed/balances.go index b12c069f573..87530c666ee 100644 --- a/cmd/lotus-shed/balances.go +++ b/cmd/lotus-shed/balances.go @@ -2,14 +2,25 @@ package main import ( "context" + "encoding/csv" + "encoding/json" "fmt" + "io" + 
"os" + "runtime" "strconv" + "strings" + "sync" + "time" + + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/gen/genesis" _init "github.com/filecoin-project/lotus/chain/actors/builtin/init" "github.com/docker/go-units" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" "github.com/filecoin-project/lotus/chain/actors/builtin/power" @@ -24,6 +35,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/state" @@ -33,7 +45,6 @@ import ( "github.com/filecoin-project/lotus/chain/vm" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/node/repo" ) @@ -58,8 +69,321 @@ var auditsCmd = &cli.Command{ Description: "a collection of utilities for auditing the filecoin chain", Subcommands: []*cli.Command{ chainBalanceCmd, + chainBalanceSanityCheckCmd, chainBalanceStateCmd, chainPledgeCmd, + fillBalancesCmd, + duplicatedMessagesCmd, + }, +} + +var duplicatedMessagesCmd = &cli.Command{ + Name: "duplicate-messages", + Usage: "Check for duplicate messages included in a tipset.", + UsageText: `Check for duplicate messages included in a tipset. + +Due to Filecoin's expected consensus, a tipset may include the same message multiple times in +different blocks. The message will only be executed once. + +This command will find such duplicate messages and print them to standard out as newline-delimited +JSON. Status messages in the form of "H: $HEIGHT ($PROGRESS%)" will be printed to standard error for +every day of chain processed. 
+`, + Flags: []cli.Flag{ + &cli.IntFlag{ + Name: "parallel", + Usage: "the number of parallel threads for block processing", + DefaultText: "half the number of cores", + }, + &cli.IntFlag{ + Name: "start", + Usage: "the first epoch to check", + DefaultText: "genesis", + }, + &cli.IntFlag{ + Name: "end", + Usage: "the last epoch to check", + DefaultText: "the current head", + }, + &cli.IntSliceFlag{ + Name: "method", + Usage: "filter results by method number", + DefaultText: "all methods", + }, + &cli.StringSliceFlag{ + Name: "include-to", + Usage: "include only messages to the given address (does not perform address resolution)", + DefaultText: "all recipients", + }, + &cli.StringSliceFlag{ + Name: "include-from", + Usage: "include only messages from the given address (does not perform address resolution)", + DefaultText: "all senders", + }, + &cli.StringSliceFlag{ + Name: "exclude-to", + Usage: "exclude messages to the given address (does not perform address resolution)", + }, + &cli.StringSliceFlag{ + Name: "exclude-from", + Usage: "exclude messages from the given address (does not perform address resolution)", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + var head *types.TipSet + if cctx.IsSet("end") { + epoch := abi.ChainEpoch(cctx.Int("end")) + head, err = api.ChainGetTipSetByHeight(ctx, epoch, types.EmptyTSK) + } else { + head, err = api.ChainHead(ctx) + } + if err != nil { + return err + } + + var printLk sync.Mutex + + threads := runtime.NumCPU() / 2 + if cctx.IsSet("parallel") { + threads = cctx.Int("int") + if threads <= 0 { + return fmt.Errorf("parallelism needs to be at least 1") + } + } else if threads == 0 { + threads = 1 // if we have one core, but who are we kidding... 
+ } + + throttle := make(chan struct{}, threads) + + methods := map[abi.MethodNum]bool{} + for _, m := range cctx.IntSlice("method") { + if m < 0 { + return fmt.Errorf("expected method numbers to be non-negative") + } + methods[abi.MethodNum(m)] = true + } + + addressSet := func(flag string) (map[address.Address]bool, error) { + if !cctx.IsSet(flag) { + return nil, nil + } + addrs := cctx.StringSlice(flag) + set := make(map[address.Address]bool, len(addrs)) + for _, addrStr := range addrs { + addr, err := address.NewFromString(addrStr) + if err != nil { + return nil, fmt.Errorf("failed to parse address %s: %w", addrStr, err) + } + set[addr] = true + } + return set, nil + } + + onlyFrom, err := addressSet("include-from") + if err != nil { + return err + } + onlyTo, err := addressSet("include-to") + if err != nil { + return err + } + excludeFrom, err := addressSet("exclude-from") + if err != nil { + return err + } + excludeTo, err := addressSet("exclude-to") + if err != nil { + return err + } + + target := abi.ChainEpoch(cctx.Int("start")) + if target < 0 || target > head.Height() { + return fmt.Errorf("start height must be greater than 0 and less than the end height") + } + totalEpochs := head.Height() - target + + for target <= head.Height() { + select { + case throttle <- struct{}{}: + case <-ctx.Done(): + return ctx.Err() + } + + go func(ts *types.TipSet) { + defer func() { + <-throttle + }() + + type addrNonce struct { + s address.Address + n uint64 + } + anonce := func(m *types.Message) addrNonce { + return addrNonce{ + s: m.From, + n: m.Nonce, + } + } + + msgs := map[addrNonce]map[cid.Cid]*types.Message{} + + processMessage := func(c cid.Cid, m *types.Message) { + // Filter + if len(methods) > 0 && !methods[m.Method] { + return + } + if len(onlyFrom) > 0 && !onlyFrom[m.From] { + return + } + if len(onlyTo) > 0 && !onlyTo[m.To] { + return + } + if excludeFrom[m.From] || excludeTo[m.To] { + return + } + + // Record + msgSet, ok := msgs[anonce(m)] + if !ok { + 
msgSet = make(map[cid.Cid]*types.Message, 1) + msgs[anonce(m)] = msgSet + } + msgSet[c] = m + } + + encoder := json.NewEncoder(os.Stdout) + + for _, bh := range ts.Blocks() { + bms, err := api.ChainGetBlockMessages(ctx, bh.Cid()) + if err != nil { + fmt.Fprintln(os.Stderr, "ERROR: ", err) + return + } + + for i, m := range bms.BlsMessages { + processMessage(bms.Cids[i], m) + } + + for i, m := range bms.SecpkMessages { + processMessage(bms.Cids[len(bms.BlsMessages)+i], &m.Message) + } + } + for _, ms := range msgs { + if len(ms) == 1 { + continue + } + type Msg struct { + Cid string + Value string + Method uint64 + } + grouped := map[string][]Msg{} + for c, m := range ms { + addr := m.To.String() + grouped[addr] = append(grouped[addr], Msg{ + Cid: c.String(), + Value: types.FIL(m.Value).String(), + Method: uint64(m.Method), + }) + } + printLk.Lock() + err := encoder.Encode(grouped) + if err != nil { + fmt.Fprintln(os.Stderr, "ERROR: ", err) + } + printLk.Unlock() + } + }(head) + + if head.Parents().IsEmpty() { + break + } + + head, err = api.ChainGetTipSet(ctx, head.Parents()) + if err != nil { + return err + } + + if head.Height()%2880 == 0 { + printLk.Lock() + fmt.Fprintf(os.Stderr, "H: %s (%d%%)\n", head.Height(), (100*(head.Height()-target))/totalEpochs) + printLk.Unlock() + } + } + + for i := 0; i < threads; i++ { + select { + case throttle <- struct{}{}: + case <-ctx.Done(): + return ctx.Err() + } + + } + + printLk.Lock() + fmt.Fprintf(os.Stderr, "H: %s (100%%)\n", head.Height()) + printLk.Unlock() + + return nil + }, +} + +var chainBalanceSanityCheckCmd = &cli.Command{ + Name: "chain-balance-sanity", + Description: "Confirms that the total balance of every actor in state is still 2 billion", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "tipset", + Usage: "specify tipset to start from", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + ctx := 
lcli.ReqContext(cctx) + + ts, err := lcli.LoadTipSet(ctx, cctx, api) + if err != nil { + return err + } + + tsk := ts.Key() + actors, err := api.StateListActors(ctx, tsk) + if err != nil { + return err + } + + bal := big.Zero() + for _, addr := range actors { + act, err := api.StateGetActor(ctx, addr, tsk) + if err != nil { + return err + } + + bal = big.Add(bal, act.Balance) + } + + attoBase := big.Mul(big.NewInt(int64(build.FilBase)), big.NewInt(int64(build.FilecoinPrecision))) + + if big.Cmp(attoBase, bal) != 0 { + return xerrors.Errorf("sanity check failed (expected %s, actual %s)", attoBase, bal) + } + + fmt.Println("sanity check successful") + + return nil }, } @@ -168,19 +492,26 @@ var chainBalanceStateCmd = &cli.Command{ defer lkrepo.Close() //nolint:errcheck - ds, err := lkrepo.Datastore("/chain") + bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { - return err + return fmt.Errorf("failed to open blockstore: %w", err) } - mds, err := lkrepo.Datastore("/metadata") + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + mds, err := lkrepo.Datastore(context.Background(), "/metadata") if err != nil { return err } - bs := blockstore.NewBlockstore(ds) - - cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil) + cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil) + defer cs.Close() //nolint:errcheck cst := cbor.NewCborStore(bs) store := adt.WrapStore(ctx, cst) @@ -382,19 +713,26 @@ var chainPledgeCmd = &cli.Command{ defer lkrepo.Close() //nolint:errcheck - ds, err := lkrepo.Datastore("/chain") + bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { - return err + return xerrors.Errorf("failed to open blockstore: %w", err) } - mds, err := lkrepo.Datastore("/metadata") + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + 
log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + mds, err := lkrepo.Datastore(context.Background(), "/metadata") if err != nil { return err } - bs := blockstore.NewBlockstore(ds) - - cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil) + cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil) + defer cs.Close() //nolint:errcheck cst := cbor.NewCborStore(bs) store := adt.WrapStore(ctx, cst) @@ -471,3 +809,119 @@ var chainPledgeCmd = &cli.Command{ return nil }, } + +const dateFmt = "1/02/06" + +func parseCsv(inp string) ([]time.Time, []address.Address, error) { + fi, err := os.Open(inp) + if err != nil { + return nil, nil, err + } + + r := csv.NewReader(fi) + recs, err := r.ReadAll() + if err != nil { + return nil, nil, err + } + + var addrs []address.Address + for _, rec := range recs[1:] { + a, err := address.NewFromString(rec[0]) + if err != nil { + return nil, nil, err + } + addrs = append(addrs, a) + } + + var dates []time.Time + for _, d := range recs[0][1:] { + if len(d) == 0 { + continue + } + p := strings.Split(d, " ") + t, err := time.Parse(dateFmt, p[len(p)-1]) + if err != nil { + return nil, nil, err + } + + dates = append(dates, t) + } + + return dates, addrs, nil +} + +func heightForDate(d time.Time, ts *types.TipSet) abi.ChainEpoch { + secs := d.Unix() + gents := ts.Blocks()[0].Timestamp + gents -= uint64(30 * ts.Height()) + return abi.ChainEpoch((secs - int64(gents)) / 30) +} + +var fillBalancesCmd = &cli.Command{ + Name: "fill-balances", + Description: "fill out balances for addresses on dates in given spreadsheet", + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + dates, addrs, err := parseCsv(cctx.Args().First()) + if err != nil { + return err + } + + ts, err := api.ChainHead(ctx) + if err != nil { + return err + } + + var 
tipsets []*types.TipSet + for _, d := range dates { + h := heightForDate(d, ts) + hts, err := api.ChainGetTipSetByHeight(ctx, h, ts.Key()) + if err != nil { + return err + } + tipsets = append(tipsets, hts) + } + + var balances [][]abi.TokenAmount + for _, a := range addrs { + var b []abi.TokenAmount + for _, hts := range tipsets { + act, err := api.StateGetActor(ctx, a, hts.Key()) + if err != nil { + if !strings.Contains(err.Error(), "actor not found") { + return fmt.Errorf("error for %s at %s: %w", a, hts.Key(), err) + } + b = append(b, types.NewInt(0)) + continue + } + b = append(b, act.Balance) + } + balances = append(balances, b) + } + + var datestrs []string + for _, d := range dates { + datestrs = append(datestrs, "Balance at "+d.Format(dateFmt)) + } + + w := csv.NewWriter(os.Stdout) + w.Write(append([]string{"Wallet Address"}, datestrs...)) // nolint:errcheck + for i := 0; i < len(addrs); i++ { + row := []string{addrs[i].String()} + for _, b := range balances[i] { + row = append(row, types.FIL(b).String()) + } + w.Write(row) // nolint:errcheck + } + w.Flush() + return nil + }, +} diff --git a/cmd/lotus-shed/base64.go b/cmd/lotus-shed/base64.go new file mode 100644 index 00000000000..3f0469ef982 --- /dev/null +++ b/cmd/lotus-shed/base64.go @@ -0,0 +1,75 @@ +package main + +import ( + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-address" + + "github.com/urfave/cli/v2" +) + +var base64Cmd = &cli.Command{ + Name: "base64", + Description: "multiformats base64", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "decodeAddr", + Value: false, + Usage: "Decode a base64 addr", + }, + &cli.BoolFlag{ + Name: "decodeBig", + Value: false, + Usage: "Decode a base64 big", + }, + }, + Action: func(cctx *cli.Context) error { + var input io.Reader + + if cctx.Args().Len() == 0 { + input = os.Stdin + } else { + input = strings.NewReader(cctx.Args().First()) + } + + bytes, 
err := ioutil.ReadAll(input) + if err != nil { + return nil + } + + decoded, err := base64.RawStdEncoding.DecodeString(strings.TrimSpace(string(bytes))) + if err != nil { + return err + } + + if cctx.Bool("decodeAddr") { + addr, err := address.NewFromBytes(decoded) + if err != nil { + return err + } + + fmt.Println(addr) + + return nil + } + + if cctx.Bool("decodeBig") { + var val abi.TokenAmount + err = val.UnmarshalBinary(decoded) + if err != nil { + return err + } + + fmt.Println(val) + } + + return nil + }, +} diff --git a/cmd/lotus-shed/bitfield.go b/cmd/lotus-shed/bitfield.go index 442cbef4846..f0824de4f2a 100644 --- a/cmd/lotus-shed/bitfield.go +++ b/cmd/lotus-shed/bitfield.go @@ -17,6 +17,7 @@ import ( var bitFieldCmd = &cli.Command{ Name: "bitfield", + Usage: "Bitfield analyze tool", Description: "analyze bitfields", Flags: []cli.Flag{ &cli.StringFlag{ @@ -26,53 +27,24 @@ var bitFieldCmd = &cli.Command{ }, }, Subcommands: []*cli.Command{ + bitFieldEncodeCmd, + bitFieldDecodeCmd, bitFieldRunsCmd, bitFieldStatCmd, - bitFieldDecodeCmd, + bitFieldMergeCmd, bitFieldIntersectCmd, - bitFieldEncodeCmd, bitFieldSubCmd, }, } var bitFieldRunsCmd = &cli.Command{ Name: "runs", + Usage: "Bitfield bit runs", Description: "print bit runs in a bitfield", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "enc", - Value: "base64", - Usage: "specify input encoding to parse", - }, - }, Action: func(cctx *cli.Context) error { - var val string - if cctx.Args().Present() { - val = cctx.Args().Get(0) - } else { - b, err := ioutil.ReadAll(os.Stdin) - if err != nil { - return err - } - val = string(b) - } - - var dec []byte - switch cctx.String("enc") { - case "base64": - d, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return fmt.Errorf("decoding base64 value: %w", err) - } - dec = d - case "hex": - d, err := hex.DecodeString(val) - if err != nil { - return fmt.Errorf("decoding hex value: %w", err) - } - dec = d - default: - return fmt.Errorf("unrecognized encoding: 
%s", cctx.String("enc")) + dec, err := decodeToByte(cctx, 0) + if err != nil { + return err } rle, err := rlepluslazy.FromBuf(dec) @@ -98,7 +70,7 @@ var bitFieldRunsCmd = &cli.Command{ s = "FALSE" } - fmt.Printf("@%d %s * %d\n", idx, s, r.Len) + fmt.Printf("@%08d %s * %d\n", idx, s, r.Len) idx += r.Len } @@ -109,43 +81,14 @@ var bitFieldRunsCmd = &cli.Command{ var bitFieldStatCmd = &cli.Command{ Name: "stat", + Usage: "Bitfield stats", Description: "print bitfield stats", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "enc", - Value: "base64", - Usage: "specify input encoding to parse", - }, - }, Action: func(cctx *cli.Context) error { - var val string - if cctx.Args().Present() { - val = cctx.Args().Get(0) - } else { - b, err := ioutil.ReadAll(os.Stdin) - if err != nil { - return err - } - val = string(b) - } - - var dec []byte - switch cctx.String("enc") { - case "base64": - d, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return fmt.Errorf("decoding base64 value: %w", err) - } - dec = d - case "hex": - d, err := hex.DecodeString(val) - if err != nil { - return fmt.Errorf("decoding hex value: %w", err) - } - dec = d - default: - return fmt.Errorf("unrecognized encoding: %s", cctx.String("enc")) + dec, err := decodeToByte(cctx, 0) + if err != nil { + return err } + fmt.Printf("Raw length: %d bits (%d bytes)\n", len(dec)*8, len(dec)) rle, err := rlepluslazy.FromBuf(dec) if err != nil { @@ -157,10 +100,7 @@ var bitFieldStatCmd = &cli.Command{ return xerrors.Errorf("getting run iterator: %w", err) } - fmt.Printf("Raw length: %d bits (%d bytes)\n", len(dec)*8, len(dec)) - var ones, zeros, oneRuns, zeroRuns, invalid uint64 - for rit.HasNext() { r, err := rit.NextRun() if err != nil { @@ -195,14 +135,8 @@ var bitFieldStatCmd = &cli.Command{ var bitFieldDecodeCmd = &cli.Command{ Name: "decode", + Usage: "Bitfield to decimal number", Description: "decode bitfield and print all numbers in it", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "enc", - 
Value: "base64", - Usage: "specify input encoding to parse", - }, - }, Action: func(cctx *cli.Context) error { rle, err := decode(cctx, 0) if err != nil { @@ -219,43 +153,61 @@ var bitFieldDecodeCmd = &cli.Command{ }, } -var bitFieldIntersectCmd = &cli.Command{ - Name: "intersect", - Description: "intersect 2 bitfields and print the resulting bitfield as base64", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "enc", - Value: "base64", - Usage: "specify input encoding to parse", - }, - }, +var bitFieldMergeCmd = &cli.Command{ + Name: "merge", + Usage: "Merge 2 bitfields", + Description: "Merge 2 bitfields and print the resulting bitfield", Action: func(cctx *cli.Context) error { - b, err := decode(cctx, 1) + a, err := decode(cctx, 0) if err != nil { return err } - a, err := decode(cctx, 0) + b, err := decode(cctx, 1) if err != nil { return err } - o, err := bitfield.IntersectBitField(a, b) + o, err := bitfield.MergeBitFields(a, b) if err != nil { - return xerrors.Errorf("intersect: %w", err) + return xerrors.Errorf("merge: %w", err) } - s, err := o.RunIterator() + str, err := encode(cctx, o) if err != nil { return err } + fmt.Println(str) - bytes, err := rlepluslazy.EncodeRuns(s, []byte{}) + return nil + }, +} + +var bitFieldIntersectCmd = &cli.Command{ + Name: "intersect", + Usage: "Intersect 2 bitfields", + Description: "intersect 2 bitfields and print the resulting bitfield", + Action: func(cctx *cli.Context) error { + a, err := decode(cctx, 0) + if err != nil { + return err + } + + b, err := decode(cctx, 1) if err != nil { return err } - fmt.Println(base64.StdEncoding.EncodeToString(bytes)) + o, err := bitfield.IntersectBitField(a, b) + if err != nil { + return xerrors.Errorf("intersect: %w", err) + } + + str, err := encode(cctx, o) + if err != nil { + return err + } + fmt.Println(str) return nil }, @@ -263,41 +215,29 @@ var bitFieldIntersectCmd = &cli.Command{ var bitFieldSubCmd = &cli.Command{ Name: "sub", - Description: "subtract 2 bitfields and print the 
resulting bitfield as base64", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "enc", - Value: "base64", - Usage: "specify input encoding to parse", - }, - }, + Usage: "Subtract 2 bitfields", + Description: "subtract 2 bitfields and print the resulting bitfield", Action: func(cctx *cli.Context) error { - b, err := decode(cctx, 1) + a, err := decode(cctx, 0) if err != nil { return err } - a, err := decode(cctx, 0) + b, err := decode(cctx, 1) if err != nil { return err } o, err := bitfield.SubtractBitField(a, b) if err != nil { - return xerrors.Errorf("intersect: %w", err) - } - - s, err := o.RunIterator() - if err != nil { - return err + return xerrors.Errorf("subtract: %w", err) } - bytes, err := rlepluslazy.EncodeRuns(s, []byte{}) + str, err := encode(cctx, o) if err != nil { return err } - - fmt.Println(base64.StdEncoding.EncodeToString(bytes)) + fmt.Println(str) return nil }, @@ -305,15 +245,9 @@ var bitFieldSubCmd = &cli.Command{ var bitFieldEncodeCmd = &cli.Command{ Name: "encode", + Usage: "Decimal number to bitfield", Description: "encode a series of decimal numbers into a bitfield", ArgsUsage: "[infile]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "enc", - Value: "base64", - Usage: "specify input encoding to parse", - }, - }, Action: func(cctx *cli.Context) error { f, err := os.Open(cctx.Args().First()) if err != nil { @@ -331,38 +265,64 @@ var bitFieldEncodeCmd = &cli.Command{ out.Set(i) } - s, err := out.RunIterator() - if err != nil { - return err - } - - bytes, err := rlepluslazy.EncodeRuns(s, []byte{}) + str, err := encode(cctx, out) if err != nil { return err } - - fmt.Println(base64.StdEncoding.EncodeToString(bytes)) + fmt.Println(str) return nil }, } -func decode(cctx *cli.Context, a int) (bitfield.BitField, error) { +func encode(cctx *cli.Context, field bitfield.BitField) (string, error) { + s, err := field.RunIterator() + if err != nil { + return "", err + } + + bytes, err := rlepluslazy.EncodeRuns(s, []byte{}) + if err != nil { + return "", 
err + } + + var str string + switch cctx.String("enc") { + case "base64": + str = base64.StdEncoding.EncodeToString(bytes) + case "hex": + str = hex.EncodeToString(bytes) + default: + return "", fmt.Errorf("unrecognized encoding: %s", cctx.String("enc")) + } + + return str, nil + +} +func decode(cctx *cli.Context, i int) (bitfield.BitField, error) { + b, err := decodeToByte(cctx, i) + if err != nil { + return bitfield.BitField{}, err + } + return bitfield.NewFromBytes(b) +} + +func decodeToByte(cctx *cli.Context, i int) ([]byte, error) { var val string if cctx.Args().Present() { - if a >= cctx.NArg() { - return bitfield.BitField{}, xerrors.Errorf("need more than %d args", a) + if i >= cctx.NArg() { + return nil, xerrors.Errorf("need more than %d args", i) } - val = cctx.Args().Get(a) + val = cctx.Args().Get(i) } else { - if a > 0 { - return bitfield.BitField{}, xerrors.Errorf("need more than %d args", a) + if i > 0 { + return nil, xerrors.Errorf("need more than %d args", i) } - b, err := ioutil.ReadAll(os.Stdin) + r, err := ioutil.ReadAll(os.Stdin) if err != nil { - return bitfield.BitField{}, err + return nil, err } - val = string(b) + val = string(r) } var dec []byte @@ -370,18 +330,18 @@ func decode(cctx *cli.Context, a int) (bitfield.BitField, error) { case "base64": d, err := base64.StdEncoding.DecodeString(val) if err != nil { - return bitfield.BitField{}, fmt.Errorf("decoding base64 value: %w", err) + return nil, fmt.Errorf("decoding base64 value: %w", err) } dec = d case "hex": d, err := hex.DecodeString(val) if err != nil { - return bitfield.BitField{}, fmt.Errorf("decoding hex value: %w", err) + return nil, fmt.Errorf("decoding hex value: %w", err) } dec = d default: - return bitfield.BitField{}, fmt.Errorf("unrecognized encoding: %s", cctx.String("enc")) + return nil, fmt.Errorf("unrecognized encoding: %s", cctx.String("enc")) } - return bitfield.NewFromBytes(dec) + return dec, nil } diff --git a/cmd/lotus-shed/blockmsgid.go 
b/cmd/lotus-shed/blockmsgid.go new file mode 100644 index 00000000000..85b786ec0e2 --- /dev/null +++ b/cmd/lotus-shed/blockmsgid.go @@ -0,0 +1,70 @@ +package main + +import ( + "encoding/base64" + "fmt" + + blake2b "github.com/minio/blake2b-simd" + "github.com/urfave/cli/v2" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" +) + +var blockmsgidCmd = &cli.Command{ + Name: "blockmsgid", + Usage: "Print a block's pubsub message ID", + ArgsUsage: " ...", + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + for _, arg := range cctx.Args().Slice() { + blkcid, err := cid.Decode(arg) + if err != nil { + return fmt.Errorf("error decoding block cid: %w", err) + } + + blkhdr, err := api.ChainGetBlock(ctx, blkcid) + if err != nil { + return fmt.Errorf("error retrieving block header: %w", err) + } + + blkmsgs, err := api.ChainGetBlockMessages(ctx, blkcid) + if err != nil { + return fmt.Errorf("error retrieving block messages: %w", err) + } + + blkmsg := &types.BlockMsg{ + Header: blkhdr, + } + + for _, m := range blkmsgs.BlsMessages { + blkmsg.BlsMessages = append(blkmsg.BlsMessages, m.Cid()) + } + + for _, m := range blkmsgs.SecpkMessages { + blkmsg.SecpkMessages = append(blkmsg.SecpkMessages, m.Cid()) + } + + bytes, err := blkmsg.Serialize() + if err != nil { + return fmt.Errorf("error serializing BlockMsg: %w", err) + } + + msgId := blake2b.Sum256(bytes) + msgId64 := base64.StdEncoding.EncodeToString(msgId[:]) + + fmt.Println(msgId64) + } + + return nil + }, +} diff --git a/cmd/lotus-shed/cid.go b/cmd/lotus-shed/cid.go new file mode 100644 index 00000000000..d3bd2c3c9fa --- /dev/null +++ b/cmd/lotus-shed/cid.go @@ -0,0 +1,82 @@ +package main + +import ( + "encoding/base64" + "encoding/hex" + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var cidCmd = &cli.Command{ + Name: "cid", + Usage: "Cid command", + Subcommands: cli.Commands{ + cidIdCmd, + }, +} + +var cidIdCmd = &cli.Command{ + Name: "id", + Usage: "Create identity CID from hex or base64 data", + ArgsUsage: "[data]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "encoding", + Value: "base64", + Usage: "specify input encoding to parse", + }, + &cli.StringFlag{ + Name: "codec", + Value: "id", + Usage: "multicodec-packed content types: abi or id", + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must specify data") + } + + var dec []byte + switch cctx.String("encoding") { + case "base64": + data, err := base64.StdEncoding.DecodeString(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("decoding base64 value: %w", err) + } + dec = data + case "hex": + data, err := hex.DecodeString(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("decoding hex value: %w", err) + } + dec = data + default: + return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding")) + } + + switch cctx.String("codec") { + case "abi": + aCid, err := abi.CidBuilder.Sum(dec) + if err != nil { + return xerrors.Errorf("cidBuilder abi: %w", err) + } + fmt.Println(aCid) + case "id": + builder := cid.V1Builder{Codec: cid.Raw, MhType: mh.IDENTITY} + rCid, err := builder.Sum(dec) + if err != nil { + return xerrors.Errorf("cidBuilder raw: %w", err) + } + fmt.Println(rCid) + default: + return xerrors.Errorf("unrecognized codec: %s", cctx.String("codec")) + } + + return nil + }, +} diff --git a/cmd/lotus-shed/commp.go b/cmd/lotus-shed/commp.go index 9b0cab75df2..6f7923c241a 100644 --- a/cmd/lotus-shed/commp.go +++ b/cmd/lotus-shed/commp.go @@ -1,27 +1,55 @@ package main import ( + "encoding/base64" "encoding/hex" "fmt" commcid "github.com/filecoin-project/go-fil-commcid" 
"github.com/urfave/cli/v2" + "golang.org/x/xerrors" ) var commpToCidCmd = &cli.Command{ Name: "commp-to-cid", + Usage: "Convert commP to Cid", Description: "Convert a raw commP to a piece-Cid", + ArgsUsage: "[data]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "encoding", + Value: "base64", + Usage: "specify input encoding to parse", + }, + }, Action: func(cctx *cli.Context) error { if !cctx.Args().Present() { return fmt.Errorf("must specify commP to convert") } - dec, err := hex.DecodeString(cctx.Args().First()) - if err != nil { - return fmt.Errorf("failed to decode input as hex string: %w", err) + var dec []byte + switch cctx.String("encoding") { + case "base64": + data, err := base64.StdEncoding.DecodeString(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("decoding base64 value: %w", err) + } + dec = data + case "hex": + data, err := hex.DecodeString(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("decoding hex value: %w", err) + } + dec = data + default: + return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding")) } - fmt.Println(commcid.PieceCommitmentV1ToCID(dec)) + cid, err := commcid.PieceCommitmentV1ToCID(dec) + if err != nil { + return err + } + fmt.Println(cid) return nil }, } diff --git a/cmd/lotus-shed/consensus.go b/cmd/lotus-shed/consensus.go index 1fe7756c1fa..2c5df4ea59f 100644 --- a/cmd/lotus-shed/consensus.go +++ b/cmd/lotus-shed/consensus.go @@ -36,7 +36,7 @@ type consensusItem struct { targetTipset *types.TipSet headTipset *types.TipSet peerID peer.ID - version api.Version + version api.APIVersion api api.FullNode } @@ -113,12 +113,12 @@ var consensusCheckCmd = &cli.Command{ return err } ainfo := cliutil.APIInfo{Addr: apima.String()} - addr, err := ainfo.DialArgs() + addr, err := ainfo.DialArgs("v1") if err != nil { return err } - api, closer, err := client.NewFullNodeRPC(cctx.Context, addr, nil) + api, closer, err := client.NewFullNodeRPCV1(cctx.Context, addr, nil) if err != nil { return err 
} diff --git a/cmd/lotus-shed/cron-count.go b/cmd/lotus-shed/cron-count.go new file mode 100644 index 00000000000..622f38791ff --- /dev/null +++ b/cmd/lotus-shed/cron-count.go @@ -0,0 +1,99 @@ +package main + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/build" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var cronWcCmd = &cli.Command{ + Name: "cron-wc", + Description: "cron stats", + Subcommands: []*cli.Command{ + minerDeadlineCronCountCmd, + }, +} + +var minerDeadlineCronCountCmd = &cli.Command{ + Name: "deadline", + Description: "list all addresses of miners with active deadline crons", + Action: func(c *cli.Context) error { + return countDeadlineCrons(c) + }, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "tipset", + Usage: "specify tipset state to search on (pass comma separated array of cids)", + }, + }, +} + +func findDeadlineCrons(c *cli.Context) (map[address.Address]struct{}, error) { + api, acloser, err := lcli.GetFullNodeAPI(c) + if err != nil { + return nil, err + } + defer acloser() + ctx := lcli.ReqContext(c) + + ts, err := lcli.LoadTipSet(ctx, c, api) + if err != nil { + return nil, err + } + if ts == nil { + ts, err = api.ChainHead(ctx) + if err != nil { + return nil, err + } + } + + mAddrs, err := api.StateListMiners(ctx, ts.Key()) + if err != nil { + return nil, err + } + activeMiners := make(map[address.Address]struct{}) + for _, mAddr := range mAddrs { + // All miners have active cron before v4. 
+ // v4 upgrade epoch is last epoch running v3 epoch and api.StateReadState reads + // parent state, so v4 state isn't read until upgrade epoch + 2 + if ts.Height() <= build.UpgradeTurboHeight+1 { + activeMiners[mAddr] = struct{}{} + continue + } + st, err := api.StateReadState(ctx, mAddr, ts.Key()) + if err != nil { + return nil, err + } + minerState, ok := st.State.(map[string]interface{}) + if !ok { + return nil, xerrors.Errorf("internal error: failed to cast miner state to expected map type") + } + + activeDlineIface, ok := minerState["DeadlineCronActive"] + if !ok { + return nil, xerrors.Errorf("miner %s had no deadline state, is this a v3 state root?", mAddr) + } + active := activeDlineIface.(bool) + if active { + activeMiners[mAddr] = struct{}{} + } + } + + return activeMiners, nil +} + +func countDeadlineCrons(c *cli.Context) error { + activeMiners, err := findDeadlineCrons(c) + if err != nil { + return err + } + for addr := range activeMiners { + fmt.Printf("%s\n", addr) + } + + return nil +} diff --git a/cmd/lotus-shed/datastore.go b/cmd/lotus-shed/datastore.go index c6bac6815bf..c3a9e572ce4 100644 --- a/cmd/lotus-shed/datastore.go +++ b/cmd/lotus-shed/datastore.go @@ -1,17 +1,23 @@ package main import ( + "bufio" + "context" "encoding/json" "fmt" + "io" "os" "strings" + "github.com/dgraph-io/badger/v2" "github.com/docker/go-units" "github.com/ipfs/go-datastore" dsq "github.com/ipfs/go-datastore/query" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" + "github.com/mitchellh/go-homedir" "github.com/polydawn/refmt/cbor" "github.com/urfave/cli/v2" + "go.uber.org/multierr" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/lib/backupds" @@ -25,6 +31,7 @@ var datastoreCmd = &cli.Command{ datastoreBackupCmd, datastoreListCmd, datastoreGetCmd, + datastoreRewriteCmd, }, } @@ -69,7 +76,7 @@ var datastoreListCmd = &cli.Command{ } defer lr.Close() //nolint:errcheck - ds, err := 
lr.Datastore(datastore.NewKey(cctx.Args().First()).String()) + ds, err := lr.Datastore(context.Background(), datastore.NewKey(cctx.Args().First()).String()) if err != nil { return err } @@ -114,7 +121,7 @@ var datastoreGetCmd = &cli.Command{ }, ArgsUsage: "[namespace key]", Action: func(cctx *cli.Context) error { - logging.SetLogLevel("badger", "ERROR") // nolint:errchec + logging.SetLogLevel("badger", "ERROR") // nolint:errcheck r, err := repo.NewFS(cctx.String("repo")) if err != nil { @@ -135,7 +142,7 @@ var datastoreGetCmd = &cli.Command{ } defer lr.Close() //nolint:errcheck - ds, err := lr.Datastore(datastore.NewKey(cctx.Args().First()).String()) + ds, err := lr.Datastore(context.Background(), datastore.NewKey(cctx.Args().First()).String()) if err != nil { return err } @@ -173,8 +180,11 @@ var datastoreBackupStatCmd = &cli.Command{ } defer f.Close() // nolint:errcheck - var keys, kbytes, vbytes uint64 - err = backupds.ReadBackup(f, func(key datastore.Key, value []byte) error { + var keys, logs, kbytes, vbytes uint64 + clean, err := backupds.ReadBackup(f, func(key datastore.Key, value []byte, log bool) error { + if log { + logs++ + } keys++ kbytes += uint64(len(key.String())) vbytes += uint64(len(value)) @@ -184,7 +194,9 @@ var datastoreBackupStatCmd = &cli.Command{ return err } + fmt.Println("Truncated: ", !clean) fmt.Println("Keys: ", keys) + fmt.Println("Log values: ", logs) fmt.Println("Key bytes: ", units.BytesSize(float64(kbytes))) fmt.Println("Value bytes: ", units.BytesSize(float64(vbytes))) @@ -218,7 +230,7 @@ var datastoreBackupListCmd = &cli.Command{ defer f.Close() // nolint:errcheck printKv := kvPrinter(cctx.Bool("top-level"), cctx.String("get-enc")) - err = backupds.ReadBackup(f, func(key datastore.Key, value []byte) error { + _, err = backupds.ReadBackup(f, func(key datastore.Key, value []byte, _ bool) error { return printKv(key.String(), value) }) if err != nil { @@ -288,3 +300,76 @@ func printVal(enc string, val []byte) error { return nil } +
+var datastoreRewriteCmd = &cli.Command{ + Name: "rewrite", + Description: "rewrites badger datastore to compact it and possibly change params", + ArgsUsage: "source destination", + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 2 { + return xerrors.Errorf("expected 2 arguments, got %d", cctx.NArg()) + } + fromPath, err := homedir.Expand(cctx.Args().Get(0)) + if err != nil { + return xerrors.Errorf("cannot get fromPath: %w", err) + } + toPath, err := homedir.Expand(cctx.Args().Get(1)) + if err != nil { + return xerrors.Errorf("cannot get toPath: %w", err) + } + + var ( + from *badger.DB + to *badger.DB + ) + + // open the destination (to) store. + opts, err := repo.BadgerBlockstoreOptions(repo.UniversalBlockstore, toPath, false) + if err != nil { + return xerrors.Errorf("failed to get badger options: %w", err) + } + opts.SyncWrites = false + if to, err = badger.Open(opts.Options); err != nil { + return xerrors.Errorf("opening 'to' badger store: %w", err) + } + + // open the source (from) store. 
+ opts, err = repo.BadgerBlockstoreOptions(repo.UniversalBlockstore, fromPath, true) + if err != nil { + return xerrors.Errorf("failed to get badger options: %w", err) + } + if from, err = badger.Open(opts.Options); err != nil { + return xerrors.Errorf("opening 'from' datastore: %w", err) + } + + pr, pw := io.Pipe() + errCh := make(chan error) + go func() { + bw := bufio.NewWriterSize(pw, 64<<20) + _, err := from.Backup(bw, 0) + _ = bw.Flush() + _ = pw.CloseWithError(err) + errCh <- err + }() + go func() { + err := to.Load(pr, 256) + errCh <- err + }() + + err = <-errCh + if err != nil { + select { + case nerr := <-errCh: + err = multierr.Append(err, nerr) + default: + } + return err + } + + err = <-errCh + if err != nil { + return err + } + return multierr.Append(from.Close(), to.Close()) + }, +} diff --git a/cmd/lotus-shed/dealtracker.go b/cmd/lotus-shed/dealtracker.go deleted file mode 100644 index 8ded6bf4acd..00000000000 --- a/cmd/lotus-shed/dealtracker.go +++ /dev/null @@ -1,325 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "net" - "net/http" - "sync" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/ipfs/go-cid" - "github.com/urfave/cli/v2" -) - -type dealStatsServer struct { - api api.FullNode -} - -// Requested by @jbenet -// How many epochs back to look at for dealstats -var epochLookback = abi.ChainEpoch(10) - -// these lists grow continuously with the network -// TODO: need to switch this to an LRU of sorts, to ensure refreshes -var knownFiltered = new(sync.Map) -var resolvedWallets = new(sync.Map) - -func init() { - for _, a := range []string{ - "t0100", // client for genesis miner - "t0101", // client for genesis miner - "t0102", // client for genesis miner - "t0112", // client for genesis miner - "t0113", // client for genesis miner - "t0114", // client for genesis miner - 
"t1nslxql4pck5pq7hddlzym3orxlx35wkepzjkm3i", // SR1 dealbot wallet - "t1stghxhdp2w53dym2nz2jtbpk6ccd4l2lxgmezlq", // SR1 dealbot wallet - "t1mcr5xkgv4jdl3rnz77outn6xbmygb55vdejgbfi", // SR1 dealbot wallet - "t1qiqdbbmrdalbntnuapriirduvxu5ltsc5mhy7si", // SR1 dealbot wallet - } { - a, err := address.NewFromString(a) - if err != nil { - panic(err) - } - knownFiltered.Store(a, true) - } -} - -type dealCountResp struct { - Epoch int64 `json:"epoch"` - Endpoint string `json:"endpoint"` - Payload int64 `json:"payload"` -} - -func (dss *dealStatsServer) handleStorageDealCount(w http.ResponseWriter, r *http.Request) { - - epoch, deals := dss.filteredDealList() - if epoch == 0 { - w.WriteHeader(500) - return - } - - if err := json.NewEncoder(w).Encode(&dealCountResp{ - Endpoint: "COUNT_DEALS", - Payload: int64(len(deals)), - Epoch: epoch, - }); err != nil { - log.Warnf("failed to write back deal count response: %s", err) - return - } -} - -type dealAverageResp struct { - Epoch int64 `json:"epoch"` - Endpoint string `json:"endpoint"` - Payload int64 `json:"payload"` -} - -func (dss *dealStatsServer) handleStorageDealAverageSize(w http.ResponseWriter, r *http.Request) { - - epoch, deals := dss.filteredDealList() - if epoch == 0 { - w.WriteHeader(500) - return - } - - var totalBytes int64 - for _, d := range deals { - totalBytes += int64(d.deal.Proposal.PieceSize.Unpadded()) - } - - if err := json.NewEncoder(w).Encode(&dealAverageResp{ - Endpoint: "AVERAGE_DEAL_SIZE", - Payload: totalBytes / int64(len(deals)), - Epoch: epoch, - }); err != nil { - log.Warnf("failed to write back deal average response: %s", err) - return - } -} - -type dealTotalResp struct { - Epoch int64 `json:"epoch"` - Endpoint string `json:"endpoint"` - Payload int64 `json:"payload"` -} - -func (dss *dealStatsServer) handleStorageDealTotalReal(w http.ResponseWriter, r *http.Request) { - epoch, deals := dss.filteredDealList() - if epoch == 0 { - w.WriteHeader(500) - return - } - - var totalBytes int64 - for 
_, d := range deals { - totalBytes += int64(d.deal.Proposal.PieceSize.Unpadded()) - } - - if err := json.NewEncoder(w).Encode(&dealTotalResp{ - Endpoint: "DEAL_BYTES", - Payload: totalBytes, - Epoch: epoch, - }); err != nil { - log.Warnf("failed to write back deal average response: %s", err) - return - } - -} - -type clientStatsOutput struct { - Epoch int64 `json:"epoch"` - Endpoint string `json:"endpoint"` - Payload []*clientStats `json:"payload"` -} - -type clientStats struct { - Client address.Address `json:"client"` - DataSize int64 `json:"data_size"` - NumCids int `json:"num_cids"` - NumDeals int `json:"num_deals"` - NumMiners int `json:"num_miners"` - - cids map[cid.Cid]bool - providers map[address.Address]bool -} - -func (dss *dealStatsServer) handleStorageClientStats(w http.ResponseWriter, r *http.Request) { - epoch, deals := dss.filteredDealList() - if epoch == 0 { - w.WriteHeader(500) - return - } - - stats := make(map[address.Address]*clientStats) - - for _, d := range deals { - - st, ok := stats[d.deal.Proposal.Client] - if !ok { - st = &clientStats{ - Client: d.resolvedWallet, - cids: make(map[cid.Cid]bool), - providers: make(map[address.Address]bool), - } - stats[d.deal.Proposal.Client] = st - } - - st.DataSize += int64(d.deal.Proposal.PieceSize.Unpadded()) - st.cids[d.deal.Proposal.PieceCID] = true - st.providers[d.deal.Proposal.Provider] = true - st.NumDeals++ - } - - out := clientStatsOutput{ - Epoch: epoch, - Endpoint: "CLIENT_DEAL_STATS", - Payload: make([]*clientStats, 0, len(stats)), - } - for _, cs := range stats { - cs.NumCids = len(cs.cids) - cs.NumMiners = len(cs.providers) - out.Payload = append(out.Payload, cs) - } - - if err := json.NewEncoder(w).Encode(out); err != nil { - log.Warnf("failed to write back client stats response: %s", err) - return - } -} - -type dealInfo struct { - deal api.MarketDeal - resolvedWallet address.Address -} - -// filteredDealList returns the current epoch and a list of filtered deals -// on error returns an 
epoch of 0 -func (dss *dealStatsServer) filteredDealList() (int64, map[string]dealInfo) { - ctx := context.Background() - - head, err := dss.api.ChainHead(ctx) - if err != nil { - log.Warnf("failed to get chain head: %s", err) - return 0, nil - } - - head, err = dss.api.ChainGetTipSetByHeight(ctx, head.Height()-epochLookback, head.Key()) - if err != nil { - log.Warnf("failed to walk back %s epochs: %s", epochLookback, err) - return 0, nil - } - - // Disabled as per @pooja's request - // - // // Exclude any address associated with a miner - // miners, err := dss.api.StateListMiners(ctx, head.Key()) - // if err != nil { - // log.Warnf("failed to get miner list: %s", err) - // return 0, nil - // } - // for _, m := range miners { - // info, err := dss.api.StateMinerInfo(ctx, m, head.Key()) - // if err != nil { - // log.Warnf("failed to get info for known miner '%s': %s", m, err) - // continue - // } - - // knownFiltered.Store(info.Owner, true) - // knownFiltered.Store(info.Worker, true) - // for _, a := range info.ControlAddresses { - // knownFiltered.Store(a, true) - // } - // } - - deals, err := dss.api.StateMarketDeals(ctx, head.Key()) - if err != nil { - log.Warnf("failed to get market deals: %s", err) - return 0, nil - } - - ret := make(map[string]dealInfo, len(deals)) - for dealKey, d := range deals { - - // Counting no-longer-active deals as per Pooja's request - // // https://github.com/filecoin-project/specs-actors/blob/v0.9.9/actors/builtin/market/deal.go#L81-L85 - // if d.State.SectorStartEpoch < 0 { - // continue - // } - - if _, isFiltered := knownFiltered.Load(d.Proposal.Client); isFiltered { - continue - } - - if _, wasSeen := resolvedWallets.Load(d.Proposal.Client); !wasSeen { - w, err := dss.api.StateAccountKey(ctx, d.Proposal.Client, head.Key()) - if err != nil { - log.Warnf("failed to resolve id '%s' to wallet address: %s", d.Proposal.Client, err) - continue - } else { - resolvedWallets.Store(d.Proposal.Client, w) - } - } - - w, _ := 
resolvedWallets.Load(d.Proposal.Client) - if _, isFiltered := knownFiltered.Load(w); isFiltered { - continue - } - - ret[dealKey] = dealInfo{ - deal: d, - resolvedWallet: w.(address.Address), - } - } - - return int64(head.Height()), ret -} - -var serveDealStatsCmd = &cli.Command{ - Name: "serve-deal-stats", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - - defer closer() - ctx := lcli.ReqContext(cctx) - - _ = ctx - - dss := &dealStatsServer{api} - - mux := &http.ServeMux{} - mux.HandleFunc("/api/storagedeal/count", dss.handleStorageDealCount) - mux.HandleFunc("/api/storagedeal/averagesize", dss.handleStorageDealAverageSize) - mux.HandleFunc("/api/storagedeal/totalreal", dss.handleStorageDealTotalReal) - mux.HandleFunc("/api/storagedeal/clientstats", dss.handleStorageClientStats) - - s := &http.Server{ - Addr: ":7272", - Handler: mux, - } - - go func() { - <-ctx.Done() - if err := s.Shutdown(context.TODO()); err != nil { - log.Error(err) - } - }() - - list, err := net.Listen("tcp", ":7272") // nolint - if err != nil { - panic(err) - } - - log.Warnf("deal-stat server listening on %s\n== NOTE: QUERIES ARE EXPENSIVE - YOU MUST FRONT-CACHE THIS SERVICE\n", list.Addr().String()) - - return s.Serve(list) - }, -} diff --git a/cmd/lotus-shed/election.go b/cmd/lotus-shed/election.go new file mode 100644 index 00000000000..d49d5c04f4f --- /dev/null +++ b/cmd/lotus-shed/election.go @@ -0,0 +1,227 @@ +package main + +import ( + "context" + "encoding/binary" + "fmt" + "math/rand" + + "github.com/filecoin-project/lotus/api/v0api" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/gen" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + "github.com/urfave/cli/v2" + 
"golang.org/x/xerrors" +) + +var electionCmd = &cli.Command{ + Name: "election", + Usage: "Commands related to leader election", + Subcommands: []*cli.Command{ + electionRunDummy, + electionEstimate, + electionBacktest, + }, +} + +var electionRunDummy = &cli.Command{ + Name: "run-dummy", + Usage: "Runs dummy elections with given power", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "network-power", + Usage: "network storage power", + }, + &cli.StringFlag{ + Name: "miner-power", + Usage: "miner storage power", + }, + &cli.Uint64Flag{ + Name: "seed", + Usage: "rand number", + Value: 0, + }, + }, + Action: func(cctx *cli.Context) error { + ctx := lcli.ReqContext(cctx) + minerPow, err := types.BigFromString(cctx.String("miner-power")) + if err != nil { + return xerrors.Errorf("decoding miner-power: %w", err) + } + networkPow, err := types.BigFromString(cctx.String("network-power")) + if err != nil { + return xerrors.Errorf("decoding network-power: %w", err) + } + + ep := &types.ElectionProof{} + ep.VRFProof = make([]byte, 32) + seed := cctx.Uint64("seed") + if seed == 0 { + seed = rand.Uint64() + } + binary.BigEndian.PutUint64(ep.VRFProof, seed) + + i := uint64(0) + for { + if ctx.Err() != nil { + return ctx.Err() + } + binary.BigEndian.PutUint64(ep.VRFProof[8:], i) + j := ep.ComputeWinCount(minerPow, networkPow) + _, err := fmt.Printf("%t, %d\n", j != 0, j) + if err != nil { + return err + } + i++ + } + }, +} + +var electionEstimate = &cli.Command{ + Name: "estimate", + Usage: "Estimate elections with given power", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "network-power", + Usage: "network storage power", + }, + &cli.StringFlag{ + Name: "miner-power", + Usage: "miner storage power", + }, + &cli.Uint64Flag{ + Name: "seed", + Usage: "rand number", + Value: 0, + }, + }, + Action: func(cctx *cli.Context) error { + minerPow, err := types.BigFromString(cctx.String("miner-power")) + if err != nil { + return xerrors.Errorf("decoding miner-power: %w", err) + } + 
networkPow, err := types.BigFromString(cctx.String("network-power")) + if err != nil { + return xerrors.Errorf("decoding network-power: %w", err) + } + + ep := &types.ElectionProof{} + ep.VRFProof = make([]byte, 32) + seed := cctx.Uint64("seed") + if seed == 0 { + seed = rand.Uint64() + } + binary.BigEndian.PutUint64(ep.VRFProof, seed) + + winYear := int64(0) + for i := 0; i < builtin2.EpochsInYear; i++ { + binary.BigEndian.PutUint64(ep.VRFProof[8:], uint64(i)) + j := ep.ComputeWinCount(minerPow, networkPow) + winYear += j + } + winHour := winYear * builtin2.EpochsInHour / builtin2.EpochsInYear + winDay := winYear * builtin2.EpochsInDay / builtin2.EpochsInYear + winMonth := winYear * builtin2.EpochsInDay * 30 / builtin2.EpochsInYear + fmt.Println("winInHour, winInDay, winInMonth, winInYear") + fmt.Printf("%d, %d, %d, %d\n", winHour, winDay, winMonth, winYear) + return nil + }, +} + +var electionBacktest = &cli.Command{ + Name: "backtest", + Usage: "Backtest elections with given miner", + ArgsUsage: "[minerAddress]", + Flags: []cli.Flag{ + &cli.Uint64Flag{ + Name: "height", + Usage: "blockchain head height", + }, + &cli.IntFlag{ + Name: "count", + Usage: "number of won elections to look for", + Value: 120, + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return xerrors.Errorf("GetFullNodeAPI: %w", err) + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + var head *types.TipSet + if cctx.IsSet("height") { + head, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Uint64("height")), types.EmptyTSK) + if err != nil { + return xerrors.Errorf("ChainGetTipSetByHeight: %w", err) + } + } else { + head, err = api.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("ChainHead: %w", err) + } + } + + miner, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("miner address: %w", err) + } + + count := cctx.Int("count") + if count < 1 { + return 
xerrors.Errorf("count: %d", count) + } + + fmt.Println("height, winCount") + roundEnd := head.Height() + abi.ChainEpoch(1) + for i := 0; i < count; { + for round := head.Height() + abi.ChainEpoch(1); round <= roundEnd; round++ { + i++ + win, err := backTestWinner(ctx, miner, round, head, api) + if err == nil && win != nil { + fmt.Printf("%d, %d\n", round, win.WinCount) + } + } + + roundEnd = head.Height() + head, err = api.ChainGetTipSet(ctx, head.Parents()) + if err != nil { + break + } + } + return nil + }, +} + +func backTestWinner(ctx context.Context, miner address.Address, round abi.ChainEpoch, ts *types.TipSet, api v0api.FullNode) (*types.ElectionProof, error) { + mbi, err := api.MinerGetBaseInfo(ctx, miner, round, ts.Key()) + if err != nil { + return nil, xerrors.Errorf("failed to get mining base info: %w", err) + } + if mbi == nil { + return nil, nil + } + if !mbi.EligibleForMining { + return nil, nil + } + + brand := mbi.PrevBeaconEntry + bvals := mbi.BeaconEntries + if len(bvals) > 0 { + brand = bvals[len(bvals)-1] + } + + winner, err := gen.IsRoundWinner(ctx, ts, round, miner, brand, mbi, api) + if err != nil { + return nil, xerrors.Errorf("failed to check if we win next round: %w", err) + } + + return winner, nil +} diff --git a/cmd/lotus-shed/export-car.go b/cmd/lotus-shed/export-car.go new file mode 100644 index 00000000000..97e4fb6c608 --- /dev/null +++ b/cmd/lotus-shed/export-car.go @@ -0,0 +1,103 @@ +package main + +import ( + "fmt" + "io" + "os" + + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + offline "github.com/ipfs/go-ipfs-exchange-offline" + format "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-merkledag" + "github.com/ipld/go-car" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/node/repo" +) + +func carWalkFunc(nd format.Node) (out []*format.Link, err error) { + for _, link := range nd.Links() { + if link.Cid.Prefix().Codec == 
cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed { + continue + } + out = append(out, link) + } + return out, nil +} + +var exportCarCmd = &cli.Command{ + Name: "export-car", + Description: "Export a car from repo (requires node to be offline)", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Value: "~/.lotus", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 2 { + return lcli.ShowHelp(cctx, fmt.Errorf("must specify file name and object")) + } + + outfile := cctx.Args().First() + var roots []cid.Cid + for _, arg := range cctx.Args().Tail() { + c, err := cid.Decode(arg) + if err != nil { + return err + } + roots = append(roots, c) + } + + ctx := lcli.ReqContext(cctx) + + r, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return xerrors.Errorf("opening fs repo: %w", err) + } + + exists, err := r.Exists() + if err != nil { + return err + } + if !exists { + return xerrors.Errorf("lotus repo doesn't exist") + } + + lr, err := r.Lock(repo.FullNode) + if err != nil { + return err + } + defer lr.Close() //nolint:errcheck + + fi, err := os.Create(outfile) + if err != nil { + return xerrors.Errorf("opening the output file: %w", err) + } + + defer fi.Close() //nolint:errcheck + + bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore) + if err != nil { + return fmt.Errorf("failed to open blockstore: %w", err) + } + + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) + err = car.WriteCarWithWalker(ctx, dag, roots, fi, carWalkFunc) + if err != nil { + return err + } + return nil + }, +} diff --git a/cmd/lotus-shed/export.go b/cmd/lotus-shed/export.go index 3be49f0e030..e711ba2bb05 100644 --- a/cmd/lotus-shed/export.go +++ b/cmd/lotus-shed/export.go @@ -3,16 +3,17 @@ package main import ( "context" "fmt" + "io" "os" 
"github.com/urfave/cli/v2" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/node/repo" ) @@ -71,19 +72,27 @@ var exportChainCmd = &cli.Command{ defer fi.Close() //nolint:errcheck - ds, err := lr.Datastore("/chain") + bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { - return err + return fmt.Errorf("failed to open blockstore: %w", err) } - mds, err := lr.Datastore("/metadata") + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + mds, err := lr.Datastore(context.Background(), "/metadata") if err != nil { return err } - bs := blockstore.NewBlockstore(ds) + cs := store.NewChainStore(bs, bs, mds, nil, nil) + defer cs.Close() //nolint:errcheck - cs := store.NewChainStore(bs, mds, nil, nil) if err := cs.Load(); err != nil { return err } diff --git a/cmd/lotus-shed/frozen-miners.go b/cmd/lotus-shed/frozen-miners.go index 6b843f0d6ba..ed09c00c5a9 100644 --- a/cmd/lotus-shed/frozen-miners.go +++ b/cmd/lotus-shed/frozen-miners.go @@ -35,12 +35,6 @@ var frozenMinersCmd = &cli.Command{ if err != nil { return err } - if ts == nil { - ts, err = api.ChainHead(ctx) - if err != nil { - return err - } - } queryEpoch := ts.Height() diff --git a/cmd/lotus-shed/genesis-verify.go b/cmd/lotus-shed/genesis-verify.go index 4b197c58f1d..32e4e14ad0b 100644 --- a/cmd/lotus-shed/genesis-verify.go +++ b/cmd/lotus-shed/genesis-verify.go @@ -17,6 +17,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" 
"github.com/filecoin-project/lotus/chain/actors/builtin/account" @@ -26,7 +27,6 @@ import ( "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/blockstore" ) type addrInfo struct { @@ -50,9 +50,10 @@ var genesisVerifyCmd = &cli.Command{ if !cctx.Args().Present() { return fmt.Errorf("must pass genesis car file") } - bs := blockstore.NewBlockstore(datastore.NewMapDatastore()) + bs := blockstore.FromDatastore(datastore.NewMapDatastore()) - cs := store.NewChainStore(bs, datastore.NewMapDatastore(), nil, nil) + cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), nil, nil) + defer cs.Close() //nolint:errcheck cf := cctx.Args().Get(0) f, err := os.Open(cf) diff --git a/cmd/lotus-shed/import-car.go b/cmd/lotus-shed/import-car.go index 9cbff953b16..4e465029f2d 100644 --- a/cmd/lotus-shed/import-car.go +++ b/cmd/lotus-shed/import-car.go @@ -1,6 +1,7 @@ package main import ( + "context" "encoding/hex" "fmt" "io" @@ -12,7 +13,6 @@ import ( "github.com/urfave/cli/v2" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/node/repo" ) @@ -25,6 +25,8 @@ var importCarCmd = &cli.Command{ return xerrors.Errorf("opening fs repo: %w", err) } + ctx := context.TODO() + exists, err := r.Exists() if err != nil { return err @@ -45,12 +47,18 @@ var importCarCmd = &cli.Command{ return xerrors.Errorf("opening the car file: %w", err) } - ds, err := lr.Datastore("/chain") + bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { return err } - bs := blockstore.NewBlockstore(ds) + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() cr, err := car.NewCarReader(f) if err != nil { @@ -65,7 +73,7 @@ var importCarCmd = &cli.Command{ return err } fmt.Println() - return ds.Close() + return nil 
default: if err := f.Close(); err != nil { return err @@ -94,6 +102,8 @@ var importObjectCmd = &cli.Command{ return xerrors.Errorf("opening fs repo: %w", err) } + ctx := context.TODO() + exists, err := r.Exists() if err != nil { return err @@ -108,12 +118,18 @@ var importObjectCmd = &cli.Command{ } defer lr.Close() //nolint:errcheck - ds, err := lr.Datastore("/chain") + bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { - return err + return fmt.Errorf("failed to open blockstore: %w", err) } - bs := blockstore.NewBlockstore(ds) + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() c, err := cid.Decode(cctx.Args().Get(0)) if err != nil { diff --git a/cmd/lotus-shed/jwt.go b/cmd/lotus-shed/jwt.go index 7fa1a18dd6f..e8853b419b6 100644 --- a/cmd/lotus-shed/jwt.go +++ b/cmd/lotus-shed/jwt.go @@ -15,7 +15,8 @@ import ( "github.com/urfave/cli/v2" "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/lotus/api/apistruct" + + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules" ) @@ -98,19 +99,19 @@ var jwtTokenCmd = &cli.Command{ perms := []auth.Permission{} if cctx.Bool("read") { - perms = append(perms, apistruct.PermRead) + perms = append(perms, api.PermRead) } if cctx.Bool("write") { - perms = append(perms, apistruct.PermWrite) + perms = append(perms, api.PermWrite) } if cctx.Bool("sign") { - perms = append(perms, apistruct.PermSign) + perms = append(perms, api.PermSign) } if cctx.Bool("admin") { - perms = append(perms, apistruct.PermAdmin) + perms = append(perms, api.PermAdmin) } p := modules.JwtPayload{ @@ -152,7 +153,7 @@ var jwtNewCmd = &cli.Command{ } p := modules.JwtPayload{ - Allow: apistruct.AllPermissions, + Allow: api.AllPermissions, } token, err := jwt.Sign(&p, jwt.NewHS256(keyInfo.PrivateKey)) @@ -168,7 +169,7 @@ var jwtNewCmd = 
&cli.Command{ defer func() { if err := file.Close(); err != nil { - log.Warnf("failed to close output file: %w", err) + log.Warnf("failed to close output file: %v", err) } }() diff --git a/cmd/lotus-shed/keyinfo.go b/cmd/lotus-shed/keyinfo.go index 4dcd10cbfe8..3c99b5050a0 100644 --- a/cmd/lotus-shed/keyinfo.go +++ b/cmd/lotus-shed/keyinfo.go @@ -427,7 +427,7 @@ var keyinfoNewCmd = &cli.Command{ defer func() { if err := file.Close(); err != nil { - log.Warnf("failed to close output file: %w", err) + log.Warnf("failed to close output file: %v", err) } }() diff --git a/cmd/lotus-shed/ledger.go b/cmd/lotus-shed/ledger.go index ecb13ec645a..0e9c11742cc 100644 --- a/cmd/lotus-shed/ledger.go +++ b/cmd/lotus-shed/ledger.go @@ -6,12 +6,14 @@ import ( "strconv" "strings" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" "github.com/urfave/cli/v2" ledgerfil "github.com/whyrusleeping/ledger-filecoin-go" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" lcli "github.com/filecoin-project/lotus/cli" @@ -25,6 +27,7 @@ var ledgerCmd = &cli.Command{ ledgerListAddressesCmd, ledgerKeyInfoCmd, ledgerSignTestCmd, + ledgerShowCmd, }, } @@ -40,7 +43,7 @@ var ledgerListAddressesCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - var api api.FullNode + var api v0api.FullNode if cctx.Bool("print-balances") { a, closer, err := lcli.GetFullNodeAPI(cctx) if err != nil { @@ -57,6 +60,7 @@ var ledgerListAddressesCmd = &cli.Command{ if err != nil { return err } + defer fl.Close() // nolint end := 20 for i := 0; i < end; i++ { @@ -166,6 +170,7 @@ var ledgerKeyInfoCmd = &cli.Command{ if err != nil { return err } + defer fl.Close() // nolint p, err := parseHDPath(cctx.Args().First()) if err != nil { @@ -242,13 +247,46 @@ var 
ledgerSignTestCmd = &cli.Command{ if err != nil { return err } + fmt.Printf("Message: %x\n", b.RawData()) sig, err := fl.SignSECP256K1(p, b.RawData()) if err != nil { return err } - fmt.Println(sig.SignatureBytes()) + sigBytes := append([]byte{byte(crypto.SigTypeSecp256k1)}, sig.SignatureBytes()...) + + fmt.Printf("Signature: %x\n", sigBytes) + + return nil + }, +} + +var ledgerShowCmd = &cli.Command{ + Name: "show", + ArgsUsage: "[hd path]", + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return cli.ShowCommandHelp(cctx, cctx.Command.Name) + } + + fl, err := ledgerfil.FindLedgerFilecoinApp() + if err != nil { + return err + } + defer fl.Close() // nolint + + p, err := parseHDPath(cctx.Args().First()) + if err != nil { + return err + } + + _, _, a, err := fl.ShowAddressPubKeySECP256K1(p) + if err != nil { + return err + } + + fmt.Println(a) return nil }, diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index 8201ec1117d..e06b630800c 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -16,9 +16,11 @@ func main() { logging.SetLogLevel("*", "INFO") local := []*cli.Command{ + base64Cmd, base32Cmd, base16Cmd, bitFieldCmd, + cronWcCmd, frozenMinersCmd, keyinfoCmd, jwtCmd, @@ -30,22 +32,34 @@ func main() { importObjectCmd, commpToCidCmd, fetchParamCmd, + postFindCmd, proofsCmd, verifRegCmd, + marketCmd, miscCmd, mpoolCmd, genesisVerifyCmd, mathCmd, + minerCmd, mpoolStatsCmd, exportChainCmd, + exportCarCmd, consensusCmd, - serveDealStatsCmd, + storageStatsCmd, syncCmd, stateTreePruneCmd, datastoreCmd, ledgerCmd, sectorsCmd, msgCmd, + electionCmd, + rpcCmd, + cidCmd, + blockmsgidCmd, + signaturesCmd, + actorCmd, + minerTypesCmd, + minerMultisigsCmd, } app := &cli.App{ diff --git a/cmd/lotus-shed/market.go b/cmd/lotus-shed/market.go new file mode 100644 index 00000000000..e2e322784cb --- /dev/null +++ b/cmd/lotus-shed/market.go @@ -0,0 +1,102 @@ +package main + +import ( + "fmt" + + lcli 
"github.com/filecoin-project/lotus/cli" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var marketCmd = &cli.Command{ + Name: "market", + Usage: "Interact with the market actor", + Flags: []cli.Flag{}, + Subcommands: []*cli.Command{ + marketDealFeesCmd, + }, +} + +var marketDealFeesCmd = &cli.Command{ + Name: "get-deal-fees", + Usage: "View the storage fees associated with a particular deal or storage provider", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "provider", + Usage: "provider whose outstanding fees you'd like to calculate", + }, + &cli.IntFlag{ + Name: "dealId", + Usage: "deal whose outstanding fees you'd like to calculate", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + ts, err := lcli.LoadTipSet(ctx, cctx, api) + if err != nil { + return err + } + + ht := ts.Height() + + if cctx.IsSet("provider") { + p, err := address.NewFromString(cctx.String("provider")) + if err != nil { + return fmt.Errorf("failed to parse provider: %w", err) + } + + deals, err := api.StateMarketDeals(ctx, ts.Key()) + if err != nil { + return err + } + + ef := big.Zero() + pf := big.Zero() + count := 0 + + for _, deal := range deals { + if deal.Proposal.Provider == p { + e, p := deal.Proposal.GetDealFees(ht) + ef = big.Add(ef, e) + pf = big.Add(pf, p) + count++ + } + } + + fmt.Println("Total deals: ", count) + fmt.Println("Total earned fees: ", ef) + fmt.Println("Total pending fees: ", pf) + fmt.Println("Total fees: ", big.Add(ef, pf)) + + return nil + } + + if dealid := cctx.Int("dealId"); dealid != 0 { + deal, err := api.StateMarketStorageDeal(ctx, abi.DealID(dealid), ts.Key()) + if err != nil { + return err + } + + ef, pf := deal.Proposal.GetDealFees(ht) + + fmt.Println("Earned 
fees: ", ef) + fmt.Println("Pending fees: ", pf) + fmt.Println("Total fees: ", big.Add(ef, pf)) + + return nil + } + + return xerrors.New("must provide either --provider or --dealId flag") + }, +} diff --git a/cmd/lotus-shed/math.go b/cmd/lotus-shed/math.go index 434559f09a0..c6d4ed0c952 100644 --- a/cmd/lotus-shed/math.go +++ b/cmd/lotus-shed/math.go @@ -8,8 +8,10 @@ import ( "strings" "github.com/urfave/cli/v2" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/types" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" ) var mathCmd = &cli.Command{ @@ -17,6 +19,7 @@ var mathCmd = &cli.Command{ Usage: "utility commands around doing math on a list of numbers", Subcommands: []*cli.Command{ mathSumCmd, + mathAggFeesCmd, }, } @@ -101,3 +104,30 @@ var mathSumCmd = &cli.Command{ return nil }, } + +var mathAggFeesCmd = &cli.Command{ + Name: "agg-fees", + Flags: []cli.Flag{ + &cli.IntFlag{ + Name: "size", + Required: true, + }, + &cli.StringFlag{ + Name: "base-fee", + Usage: "baseFee aFIL", + Required: true, + }, + }, + Action: func(cctx *cli.Context) error { + as := cctx.Int("size") + + bf, err := types.BigFromString(cctx.String("base-fee")) + if err != nil { + return xerrors.Errorf("parsing basefee: %w", err) + } + + fmt.Println(types.FIL(miner5.AggregateNetworkFee(as, bf))) + + return nil + }, +} diff --git a/cmd/lotus-shed/mempool-stats.go b/cmd/lotus-shed/mempool-stats.go index bc4a801f0eb..597ba03936a 100644 --- a/cmd/lotus-shed/mempool-stats.go +++ b/cmd/lotus-shed/mempool-stats.go @@ -8,7 +8,7 @@ import ( "contrib.go.opencensus.io/exporter/prometheus" "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" "go.opencensus.io/stats" "go.opencensus.io/stats/view" diff --git a/cmd/lotus-shed/miner-multisig.go b/cmd/lotus-shed/miner-multisig.go new file mode 100644 index 00000000000..d9f15809021 --- /dev/null +++ b/cmd/lotus-shed/miner-multisig.go @@ -0,0 +1,388 
@@ +package main + +import ( + "bytes" + "fmt" + "strconv" + + "github.com/filecoin-project/go-state-types/abi" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + + msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var minerMultisigsCmd = &cli.Command{ + Name: "miner-multisig", + Description: "a collection of utilities for using multisigs as owner addresses of miners", + Subcommands: []*cli.Command{ + mmProposeWithdrawBalance, + mmApproveWithdrawBalance, + mmProposeChangeOwner, + mmApproveChangeOwner, + }, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "specify address to send message from", + Required: true, + }, + &cli.StringFlag{ + Name: "multisig", + Usage: "specify multisig that will receive the message", + Required: true, + }, + &cli.StringFlag{ + Name: "miner", + Usage: "specify miner being acted upon", + Required: true, + }, + }, +} + +var mmProposeWithdrawBalance = &cli.Command{ + Name: "propose-withdraw", + Usage: "Propose to withdraw FIL from the miner", + ArgsUsage: "[amount]", + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass amount to withdraw") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + multisigAddr, sender, minerAddr, err := getInputs(cctx) + if err != nil { + return err + } + + val, err := types.ParseFIL(cctx.Args().First()) + if err != nil { + return err + } + + sp, err := 
actors.SerializeParams(&miner5.WithdrawBalanceParams{ + AmountRequested: abi.TokenAmount(val), + }) + if err != nil { + return err + } + + pcid, err := api.MsigPropose(ctx, multisigAddr, minerAddr, big.Zero(), sender, uint64(miner.Methods.WithdrawBalance), sp) + if err != nil { + return xerrors.Errorf("proposing message: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Propose owner change tx failed!") + return err + } + + var retval msig5.ProposeReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return fmt.Errorf("failed to unmarshal propose return value: %w", err) + } + + fmt.Printf("Transaction ID: %d\n", retval.TxnID) + if retval.Applied { + fmt.Printf("Transaction was executed during propose\n") + fmt.Printf("Exit Code: %d\n", retval.Code) + fmt.Printf("Return Value: %x\n", retval.Ret) + } + + return nil + }, +} + +var mmApproveWithdrawBalance = &cli.Command{ + Name: "approve-withdraw", + Usage: "Approve to withdraw FIL from the miner", + ArgsUsage: "[amount txnId proposer]", + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 3 { + return fmt.Errorf("must pass amount, txn Id, and proposer address") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + multisigAddr, sender, minerAddr, err := getInputs(cctx) + if err != nil { + return err + } + + val, err := types.ParseFIL(cctx.Args().First()) + if err != nil { + return err + } + + sp, err := actors.SerializeParams(&miner5.WithdrawBalanceParams{ + AmountRequested: abi.TokenAmount(val), + }) + if err != nil { + return err + } + + txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64) + if err 
!= nil { + return err + } + + proposer, err := address.NewFromString(cctx.Args().Get(2)) + if err != nil { + return err + } + + acid, err := api.MsigApproveTxnHash(ctx, multisigAddr, txid, proposer, minerAddr, big.Zero(), sender, uint64(miner.Methods.WithdrawBalance), sp) + if err != nil { + return xerrors.Errorf("approving message: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Approve Message CID:", acid) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, acid, build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Approve owner change tx failed!") + return err + } + + var retval msig5.ApproveReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return fmt.Errorf("failed to unmarshal approve return value: %w", err) + } + + if retval.Applied { + fmt.Printf("Transaction was executed with the approve\n") + fmt.Printf("Exit Code: %d\n", retval.Code) + fmt.Printf("Return Value: %x\n", retval.Ret) + } else { + fmt.Println("Transaction was approved, but not executed") + } + return nil + }, +} + +var mmProposeChangeOwner = &cli.Command{ + Name: "propose-change-owner", + Usage: "Propose an owner address change", + ArgsUsage: "[newOwner]", + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass new owner address") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + multisigAddr, sender, minerAddr, err := getInputs(cctx) + if err != nil { + return err + } + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, minerAddr, types.EmptyTSK) + if err != nil { + return err + } + + if mi.Owner 
== newAddr { + return fmt.Errorf("owner address already set to %s", na) + } + + sp, err := actors.SerializeParams(&newAddr) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + pcid, err := api.MsigPropose(ctx, multisigAddr, minerAddr, big.Zero(), sender, uint64(miner.Methods.ChangeOwnerAddress), sp) + if err != nil { + return xerrors.Errorf("proposing message: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Propose owner change tx failed!") + return err + } + + var retval msig5.ProposeReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return fmt.Errorf("failed to unmarshal propose return value: %w", err) + } + + fmt.Printf("Transaction ID: %d\n", retval.TxnID) + if retval.Applied { + fmt.Printf("Transaction was executed during propose\n") + fmt.Printf("Exit Code: %d\n", retval.Code) + fmt.Printf("Return Value: %x\n", retval.Ret) + } + return nil + }, +} + +var mmApproveChangeOwner = &cli.Command{ + Name: "approve-change-owner", + Usage: "Approve an owner address change", + ArgsUsage: "[newOwner txnId proposer]", + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 3 { + return fmt.Errorf("must pass new owner address, txn Id, and proposer address") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + multisigAddr, sender, minerAddr, err := getInputs(cctx) + if err != nil { + return err + } + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + txid, err := 
strconv.ParseUint(cctx.Args().Get(1), 10, 64) + if err != nil { + return err + } + + proposer, err := address.NewFromString(cctx.Args().Get(2)) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, minerAddr, types.EmptyTSK) + if err != nil { + return err + } + + if mi.Owner == newAddr { + return fmt.Errorf("owner address already set to %s", na) + } + + sp, err := actors.SerializeParams(&newAddr) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + acid, err := api.MsigApproveTxnHash(ctx, multisigAddr, txid, proposer, minerAddr, big.Zero(), sender, uint64(miner.Methods.ChangeOwnerAddress), sp) + if err != nil { + return xerrors.Errorf("approving message: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Approve Message CID:", acid) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, acid, build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Approve owner change tx failed!") + return err + } + + var retval msig5.ApproveReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return fmt.Errorf("failed to unmarshal approve return value: %w", err) + } + + if retval.Applied { + fmt.Printf("Transaction was executed with the approve\n") + fmt.Printf("Exit Code: %d\n", retval.Code) + fmt.Printf("Return Value: %x\n", retval.Ret) + } else { + fmt.Println("Transaction was approved, but not executed") + } + return nil + }, +} + +func getInputs(cctx *cli.Context) (address.Address, address.Address, address.Address, error) { + multisigAddr, err := address.NewFromString(cctx.String("multisig")) + if err != nil { + return address.Undef, address.Undef, address.Undef, err + } + + sender, err := address.NewFromString(cctx.String("from")) + if err != nil { + return address.Undef, address.Undef, address.Undef, err + } + + minerAddr, err := 
address.NewFromString(cctx.String("miner")) + if err != nil { + return address.Undef, address.Undef, address.Undef, err + } + + return multisigAddr, sender, minerAddr, nil +} diff --git a/cmd/lotus-shed/miner-types.go b/cmd/lotus-shed/miner-types.go new file mode 100644 index 00000000000..19a30c4b99a --- /dev/null +++ b/cmd/lotus-shed/miner-types.go @@ -0,0 +1,154 @@ +package main + +import ( + "context" + "fmt" + "io" + "math/big" + + big2 "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/node/repo" + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + "github.com/filecoin-project/specs-actors/v4/actors/util/adt" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var minerTypesCmd = &cli.Command{ + Name: "miner-types", + Usage: "Scrape state to report on how many miners of each WindowPoStProofType exist", Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Value: "~/.lotus", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := context.TODO() + + if !cctx.Args().Present() { + return fmt.Errorf("must pass state root") + } + + sroot, err := cid.Decode(cctx.Args().First()) + if err != nil { + return fmt.Errorf("failed to parse input: %w", err) + } + + fsrepo, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return err + } + + lkrepo, err := fsrepo.Lock(repo.FullNode) + if err != nil { + return err + } + + defer lkrepo.Close() 
//nolint:errcheck + + bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) + if err != nil { + return fmt.Errorf("failed to open blockstore: %w", err) + } + + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + mds, err := lkrepo.Datastore(context.Background(), "/metadata") + if err != nil { + return err + } + + cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil) + defer cs.Close() //nolint:errcheck + + cst := cbor.NewCborStore(bs) + store := adt.WrapStore(ctx, cst) + + tree, err := state.LoadStateTree(cst, sroot) + if err != nil { + return err + } + + typeMap := make(map[abi.RegisteredPoStProof]int64) + pa, err := tree.GetActor(power.Address) + if err != nil { + return err + } + + ps, err := power.Load(store, pa) + if err != nil { + return err + } + + dc := 0 + dz := power.Claim{ + RawBytePower: abi.NewStoragePower(0), + QualityAdjPower: abi.NewStoragePower(0), + } + + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { + if act.Code == builtin4.StorageMinerActorCodeID { + ms, err := miner.Load(store, act) + if err != nil { + return err + } + + mi, err := ms.Info() + if err != nil { + return err + } + + if mi.WindowPoStProofType == abi.RegisteredPoStProof_StackedDrgWindow64GiBV1 { + mp, f, err := ps.MinerPower(addr) + if err != nil { + return err + } + + if f && mp.RawBytePower.Cmp(big.NewInt(10<<40)) >= 0 && mp.RawBytePower.Cmp(big.NewInt(20<<40)) < 0 { + dc = dc + 1 + dz.RawBytePower = big2.Add(dz.RawBytePower, mp.RawBytePower) + dz.QualityAdjPower = big2.Add(dz.QualityAdjPower, mp.QualityAdjPower) + } + } + + c, f := typeMap[mi.WindowPoStProofType] + if !f { + typeMap[mi.WindowPoStProofType] = 1 + } else { + typeMap[mi.WindowPoStProofType] = c + 1 + } + } + return nil + }) + if err != nil { + return xerrors.Errorf("failed to loop over actors: %w", err) + } + + for k, v := range typeMap { + 
fmt.Println("Type:", k, " Count: ", v) + } + + fmt.Println("Mismatched power (raw, QA): ", dz.RawBytePower, " ", dz.QualityAdjPower) + fmt.Println("Mismatched 64 GiB miner count: ", dc) + + return nil + }, +} diff --git a/cmd/lotus-shed/miner.go b/cmd/lotus-shed/miner.go new file mode 100644 index 00000000000..ec5a445f94d --- /dev/null +++ b/cmd/lotus-shed/miner.go @@ -0,0 +1,113 @@ +package main + +import ( + "bufio" + "io" + "os" + "path/filepath" + "strings" + + "github.com/mitchellh/go-homedir" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var minerCmd = &cli.Command{ + Name: "miner", + Usage: "miner-related utilities", + Subcommands: []*cli.Command{ + minerUnpackInfoCmd, + }, +} + +var minerUnpackInfoCmd = &cli.Command{ + Name: "unpack-info", + Usage: "unpack miner info all dump", + ArgsUsage: "[allinfo.txt] [dir]", + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 2 { + return xerrors.Errorf("expected 2 args") + } + + src, err := homedir.Expand(cctx.Args().Get(0)) + if err != nil { + return xerrors.Errorf("expand src: %w", err) + } + + f, err := os.Open(src) + if err != nil { + return xerrors.Errorf("open file: %w", err) + } + defer f.Close() // nolint + + dest, err := homedir.Expand(cctx.Args().Get(1)) + if err != nil { + return xerrors.Errorf("expand dest: %w", err) + } + + var outf *os.File + + r := bufio.NewReader(f) + for { + l, _, err := r.ReadLine() + if err == io.EOF { + if outf != nil { + return outf.Close() + } + } + if err != nil { + return xerrors.Errorf("read line: %w", err) + } + sl := string(l) + + if strings.HasPrefix(sl, "#") { + if strings.Contains(sl, "..") { + return xerrors.Errorf("bad name %s", sl) + } + + if strings.HasPrefix(sl, "#: ") { + if outf != nil { + if err := outf.Close(); err != nil { + return xerrors.Errorf("close out file: %w", err) + } + } + p := filepath.Join(dest, sl[len("#: "):]) + if err := os.MkdirAll(filepath.Dir(p), 0775); err != nil { + return xerrors.Errorf("mkdir: %w", err) + } + 
outf, err = os.Create(p) + if err != nil { + return xerrors.Errorf("create out file: %w", err) + } + continue + } + + if strings.HasPrefix(sl, "##: ") { + if outf != nil { + if err := outf.Close(); err != nil { + return xerrors.Errorf("close out file: %w", err) + } + } + p := filepath.Join(dest, "Per Sector Infos", sl[len("##: "):]) + if err := os.MkdirAll(filepath.Dir(p), 0775); err != nil { + return xerrors.Errorf("mkdir: %w", err) + } + outf, err = os.Create(p) + if err != nil { + return xerrors.Errorf("create out file: %w", err) + } + continue + } + } + + if outf != nil { + if _, err := outf.Write(l); err != nil { + return xerrors.Errorf("write line: %w", err) + } + if _, err := outf.Write([]byte("\n")); err != nil { + return xerrors.Errorf("write line end: %w", err) + } + } + } + }, +} diff --git a/cmd/lotus-shed/mpool.go b/cmd/lotus-shed/mpool.go index d3660db6958..004bd99a6bd 100644 --- a/cmd/lotus-shed/mpool.go +++ b/cmd/lotus-shed/mpool.go @@ -15,6 +15,7 @@ var mpoolCmd = &cli.Command{ Flags: []cli.Flag{}, Subcommands: []*cli.Command{ minerSelectMsgsCmd, + mpoolClear, }, } @@ -66,3 +67,36 @@ var minerSelectMsgsCmd = &cli.Command{ return nil }, } + +var mpoolClear = &cli.Command{ + Name: "clear", + Usage: "Clear all pending messages from the mpool (USE WITH CARE)", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "local", + Usage: "also clear local messages", + }, + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "must be specified for the action to take effect", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + really := cctx.Bool("really-do-it") + if !really { + //nolint:golint + return fmt.Errorf("--really-do-it must be specified for this action to have an effect; you have been warned") + } + + local := cctx.Bool("local") + + ctx := lcli.ReqContext(cctx) + return api.MpoolClear(ctx, local) + }, +} diff --git a/cmd/lotus-shed/params.go 
b/cmd/lotus-shed/params.go index 3f7e7b6fb7e..e45d9489c35 100644 --- a/cmd/lotus-shed/params.go +++ b/cmd/lotus-shed/params.go @@ -25,7 +25,7 @@ var fetchParamCmd = &cli.Command{ return err } sectorSize := uint64(sectorSizeInt) - err = paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), sectorSize) + err = paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), sectorSize) if err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } diff --git a/cmd/lotus-shed/postfind.go b/cmd/lotus-shed/postfind.go new file mode 100644 index 00000000000..c8a4c990769 --- /dev/null +++ b/cmd/lotus-shed/postfind.go @@ -0,0 +1,123 @@ +package main + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/specs-actors/v2/actors/builtin" + "github.com/urfave/cli/v2" +) + +var postFindCmd = &cli.Command{ + Name: "post-find", + Description: "return addresses of all miners who have over zero power and have posted in the last day", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "tipset", + Usage: "specify tipset state to search on", + }, + &cli.BoolFlag{ + Name: "verbose", + Usage: "get more frequent print updates", + }, + &cli.BoolFlag{ + Name: "withpower", + Usage: "only print addrs of miners with more than zero power", + }, + &cli.IntFlag{ + Name: "lookback", + Usage: "number of past epochs to search for post", + Value: 2880, //default 1 day + }, + }, + Action: func(c *cli.Context) error { + api, acloser, err := lcli.GetFullNodeAPI(c) + if err != nil { + return err + } + defer acloser() + ctx := lcli.ReqContext(c) + verbose := c.Bool("verbose") + withpower := c.Bool("withpower") + + startTs, err := lcli.LoadTipSet(ctx, c, api) + if err != nil { + 
return err + } + stopEpoch := startTs.Height() - abi.ChainEpoch(c.Int("lookback")) + if verbose { + fmt.Printf("Collecting messages between %d and %d\n", startTs.Height(), stopEpoch) + } + // Get all messages over the last day + ts := startTs + msgs := make([]*types.Message, 0) + for ts.Height() > stopEpoch { + // Get messages on ts parent + next, err := api.ChainGetParentMessages(ctx, ts.Cids()[0]) + if err != nil { + return err + } + msgs = append(msgs, messagesFromAPIMessages(next)...) + + // Next ts + ts, err = api.ChainGetTipSet(ctx, ts.Parents()) + if err != nil { + return err + } + if verbose && int64(ts.Height())%100 == 0 { + fmt.Printf("Collected messages back to height %d\n", ts.Height()) + } + } + fmt.Printf("Loaded messages to height %d\n", ts.Height()) + + mAddrs, err := api.StateListMiners(ctx, startTs.Key()) + if err != nil { + return err + } + + minersToCheck := make(map[address.Address]struct{}) + for _, mAddr := range mAddrs { + // if they have no power ignore. This filters out 14k inactive miners + // so we can do 100x fewer expensive message queries + if withpower { + power, err := api.StateMinerPower(ctx, mAddr, startTs.Key()) + if err != nil { + return err + } + if power.MinerPower.RawBytePower.GreaterThan(big.Zero()) { + minersToCheck[mAddr] = struct{}{} + } + } else { + minersToCheck[mAddr] = struct{}{} + } + } + fmt.Printf("Loaded %d miners to check\n", len(minersToCheck)) + + postedMiners := make(map[address.Address]struct{}) + for _, msg := range msgs { + _, shouldCheck := minersToCheck[msg.To] + _, seenBefore := postedMiners[msg.To] + + if shouldCheck && !seenBefore { + if msg.Method == builtin.MethodsMiner.SubmitWindowedPoSt { + fmt.Printf("%s\n", msg.To) + postedMiners[msg.To] = struct{}{} + } + } + } + return nil + }, +} + +func messagesFromAPIMessages(apiMessages []lapi.Message) []*types.Message { + messages := make([]*types.Message, len(apiMessages)) + for i, apiMessage := range apiMessages { + messages[i] = apiMessage.Message + } + 
return messages +} diff --git a/cmd/lotus-shed/pruning.go b/cmd/lotus-shed/pruning.go index 6cf4f8c6f91..1afe76c4d38 100644 --- a/cmd/lotus-shed/pruning.go +++ b/cmd/lotus-shed/pruning.go @@ -3,20 +3,19 @@ package main import ( "context" "fmt" + "io" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/lib/blockstore" - "github.com/filecoin-project/lotus/node/repo" "github.com/ipfs/bbloom" "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/query" - dshelp "github.com/ipfs/go-ipfs-ds-help" "github.com/urfave/cli/v2" "golang.org/x/xerrors" + + badgerbs "github.com/filecoin-project/lotus/blockstore/badger" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/node/repo" ) type cidSet interface { @@ -132,37 +131,47 @@ var stateTreePruneCmd = &cli.Command{ defer lkrepo.Close() //nolint:errcheck - ds, err := lkrepo.Datastore("/chain") + bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { - return err + return fmt.Errorf("failed to open blockstore: %w", err) } - defer ds.Close() //nolint:errcheck + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + // After migrating to native blockstores, this has been made + // database-specific. 
+ badgbs, ok := bs.(*badgerbs.Blockstore) + if !ok { + return fmt.Errorf("only badger blockstores are supported") + } - mds, err := lkrepo.Datastore("/metadata") + mds, err := lkrepo.Datastore(context.Background(), "/metadata") if err != nil { return err } defer mds.Close() //nolint:errcheck + const DiscardRatio = 0.2 if cctx.Bool("only-ds-gc") { - gcds, ok := ds.(datastore.GCDatastore) - if ok { - fmt.Println("running datastore gc....") - for i := 0; i < cctx.Int("gc-count"); i++ { - if err := gcds.CollectGarbage(); err != nil { - return xerrors.Errorf("datastore GC failed: %w", err) - } + fmt.Println("running datastore gc....") + for i := 0; i < cctx.Int("gc-count"); i++ { + if err := badgbs.DB.RunValueLogGC(DiscardRatio); err != nil { + return xerrors.Errorf("datastore GC failed: %w", err) } - fmt.Println("gc complete!") - return nil } - return fmt.Errorf("datastore doesnt support gc") + fmt.Println("gc complete!") + return nil } - bs := blockstore.NewBlockstore(ds) + cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil) + defer cs.Close() //nolint:errcheck - cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil) if err := cs.Load(); err != nil { return fmt.Errorf("loading chainstore: %w", err) } @@ -182,7 +191,7 @@ var stateTreePruneCmd = &cli.Command{ rrLb := abi.ChainEpoch(cctx.Int64("keep-from-lookback")) - if err := cs.WalkSnapshot(ctx, ts, rrLb, true, func(c cid.Cid) error { + if err := cs.WalkSnapshot(ctx, ts, rrLb, true, true, func(c cid.Cid) error { if goodSet.Len()%20 == 0 { fmt.Printf("\renumerating keep set: %d ", goodSet.Len()) } @@ -199,63 +208,30 @@ var stateTreePruneCmd = &cli.Command{ return nil } - var b datastore.Batch - var batchCount int - markForRemoval := func(c cid.Cid) error { - if b == nil { - nb, err := ds.Batch() - if err != nil { - return fmt.Errorf("opening batch: %w", err) - } + b := badgbs.DB.NewWriteBatch() + defer b.Cancel() - b = nb - } - batchCount++ - - if err := 
b.Delete(dshelp.MultihashToDsKey(c.Hash())); err != nil { - return err - } - - if batchCount > 100 { - if err := b.Commit(); err != nil { - return xerrors.Errorf("failed to commit batch deletes: %w", err) - } - b = nil - batchCount = 0 - } - return nil + markForRemoval := func(c cid.Cid) error { + return b.Delete(badgbs.StorageKey(nil, c)) } - res, err := ds.Query(query.Query{KeysOnly: true}) + keys, err := bs.AllKeysChan(context.Background()) if err != nil { - return xerrors.Errorf("failed to query datastore: %w", err) + return xerrors.Errorf("failed to query blockstore: %w", err) } dupTo := cctx.Int("delete-up-to") var deleteCount int var goodHits int - for { - v, ok := res.NextSync() - if !ok { - break - } - - bk, err := dshelp.BinaryFromDsKey(datastore.RawKey(v.Key[len("/blocks"):])) - if err != nil { - return xerrors.Errorf("failed to parse key: %w", err) - } - - if goodSet.HasRaw(bk) { + for k := range keys { + if goodSet.HasRaw(k.Bytes()) { goodHits++ continue } - nc := cid.NewCidV1(cid.Raw, bk) - - deleteCount++ - if err := markForRemoval(nc); err != nil { - return fmt.Errorf("failed to remove cid %s: %w", nc, err) + if err := markForRemoval(k); err != nil { + return fmt.Errorf("failed to remove cid %s: %w", k, err) } if deleteCount%20 == 0 { @@ -267,22 +243,17 @@ var stateTreePruneCmd = &cli.Command{ } } - if b != nil { - if err := b.Commit(); err != nil { - return xerrors.Errorf("failed to commit final batch delete: %w", err) - } + if err := b.Flush(); err != nil { + return xerrors.Errorf("failed to flush final batch delete: %w", err) } - gcds, ok := ds.(datastore.GCDatastore) - if ok { - fmt.Println("running datastore gc....") - for i := 0; i < cctx.Int("gc-count"); i++ { - if err := gcds.CollectGarbage(); err != nil { - return xerrors.Errorf("datastore GC failed: %w", err) - } + fmt.Println("running datastore gc....") + for i := 0; i < cctx.Int("gc-count"); i++ { + if err := badgbs.DB.RunValueLogGC(DiscardRatio); err != nil { + return 
xerrors.Errorf("datastore GC failed: %w", err) } - fmt.Println("gc complete!") } + fmt.Println("gc complete!") return nil }, diff --git a/cmd/lotus-shed/rpc.go b/cmd/lotus-shed/rpc.go new file mode 100644 index 00000000000..81171916e14 --- /dev/null +++ b/cmd/lotus-shed/rpc.go @@ -0,0 +1,172 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "text/scanner" + + "github.com/chzyer/readline" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/node/repo" +) + +var rpcCmd = &cli.Command{ + Name: "rpc", + Usage: "Interactive JsonPRC shell", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "miner", + }, + &cli.StringFlag{ + Name: "version", + Value: "v0", + }, + }, + Action: func(cctx *cli.Context) error { + rt := repo.FullNode + if cctx.Bool("miner") { + rt = repo.StorageMiner + } + + addr, headers, err := lcli.GetRawAPI(cctx, rt, cctx.String("version")) + if err != nil { + return err + } + + u, err := url.Parse(addr) + if err != nil { + return xerrors.Errorf("parsing api URL: %w", err) + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + } + + addr = u.String() + + ctx := lcli.ReqContext(cctx) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + afmt := lcli.NewAppFmt(cctx.App) + + cs := readline.NewCancelableStdin(afmt.Stdin) + go func() { + <-ctx.Done() + cs.Close() // nolint:errcheck + }() + + send := func(method, params string) error { + jreq, err := json.Marshal(struct { + Jsonrpc string `json:"jsonrpc"` + ID int `json:"id"` + Method string `json:"method"` + Params json.RawMessage `json:"params"` + }{ + Jsonrpc: "2.0", + Method: "Filecoin." 
+ method, + Params: json.RawMessage(params), + ID: 0, + }) + if err != nil { + return err + } + + req, err := http.NewRequest("POST", addr, bytes.NewReader(jreq)) + if err != nil { + return err + } + req.Header = headers + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + fmt.Println(string(rb)) + + if err := resp.Body.Close(); err != nil { + return err + } + + return nil + } + + if cctx.Args().Present() { + if cctx.Args().Len() > 2 { + return xerrors.Errorf("expected 1 or 2 arguments: method [params]") + } + + params := cctx.Args().Get(1) + if params == "" { + // TODO: try to be smart and use zero-values for method + params = "[]" + } + + return send(cctx.Args().Get(0), params) + } + + cctx.App.Metadata["repoType"] = repo.FullNode + if err := lcli.VersionCmd.Action(cctx); err != nil { + return err + } + fmt.Println("Usage: > Method [Param1, Param2, ...]") + + rl, err := readline.NewEx(&readline.Config{ + Stdin: cs, + HistoryFile: "/tmp/lotusrpc.tmp", + Prompt: "> ", + EOFPrompt: "exit", + HistorySearchFold: true, + + // TODO: Some basic auto completion + }) + if err != nil { + return err + } + + for { + line, err := rl.Readline() + if err == readline.ErrInterrupt { + if len(line) == 0 { + break + } else { + continue + } + } else if err == io.EOF { + break + } + + var s scanner.Scanner + s.Init(strings.NewReader(line)) + s.Scan() + method := s.TokenText() + + s.Scan() + params := line[s.Position.Offset:] + + if err := send(method, params); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "%v", err) + } + } + + return nil + }, +} diff --git a/cmd/lotus-shed/sectors.go b/cmd/lotus-shed/sectors.go index 2e78469fa3e..cf40e1152d0 100644 --- a/cmd/lotus-shed/sectors.go +++ b/cmd/lotus-shed/sectors.go @@ -6,6 +6,7 @@ import ( "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" 
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -25,6 +26,7 @@ var sectorsCmd = &cli.Command{ Flags: []cli.Flag{}, Subcommands: []*cli.Command{ terminateSectorCmd, + terminateSectorPenaltyEstimationCmd, }, } @@ -33,6 +35,10 @@ var terminateSectorCmd = &cli.Command{ Usage: "Forcefully terminate a sector (WARNING: This means losing power and pay a one-time termination penalty(including collateral) for the terminated sector)", ArgsUsage: "[sectorNum1 sectorNum2 ...]", Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, &cli.BoolFlag{ Name: "really-do-it", Usage: "pass this flag if you know what you are doing", @@ -43,6 +49,15 @@ var terminateSectorCmd = &cli.Command{ return fmt.Errorf("at least one sector must be specified") } + var maddr address.Address + if act := cctx.String("actor"); act != "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + return fmt.Errorf("parsing address %s: %w", act, err) + } + } + if !cctx.Bool("really-do-it") { return fmt.Errorf("this is a command for advanced users, only use it if you are sure of what you are doing") } @@ -53,17 +68,19 @@ var terminateSectorCmd = &cli.Command{ } defer closer() - api, acloser, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer acloser() - ctx := lcli.ReqContext(cctx) - maddr, err := api.ActorAddress(ctx) - if err != nil { - return err + if maddr.Empty() { + api, acloser, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer acloser() + + maddr, err = api.ActorAddress(ctx) + if err != nil { + return err + } } mi, err := nodeApi.StateMinerInfo(ctx, maddr, types.EmptyTSK) @@ -131,3 +148,118 @@ var terminateSectorCmd = &cli.Command{ return nil }, } + +func findPenaltyInInternalExecutions(prefix string, trace []types.ExecutionTrace) { + for _, im := range trace { + if im.Msg.To.String() == "f099" /*Burn actor*/ { + 
fmt.Printf("Estimated termination penalty: %s attoFIL\n", im.Msg.Value) + return + } + findPenaltyInInternalExecutions(prefix+"\t", im.Subcalls) + } +} + +var terminateSectorPenaltyEstimationCmd = &cli.Command{ + Name: "termination-estimate", + Usage: "Estimate the termination penalty", + ArgsUsage: "[sectorNum1 sectorNum2 ...]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() < 1 { + return fmt.Errorf("at least one sector must be specified") + } + + var maddr address.Address + if act := cctx.String("actor"); act != "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + return fmt.Errorf("parsing address %s: %w", act, err) + } + } + + nodeApi, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + if maddr.Empty() { + api, acloser, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer acloser() + + maddr, err = api.ActorAddress(ctx) + if err != nil { + return err + } + } + + mi, err := nodeApi.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + terminationDeclarationParams := []miner2.TerminationDeclaration{} + + for _, sn := range cctx.Args().Slice() { + sectorNum, err := strconv.ParseUint(sn, 10, 64) + if err != nil { + return fmt.Errorf("could not parse sector number: %w", err) + } + + sectorbit := bitfield.New() + sectorbit.Set(sectorNum) + + loca, err := nodeApi.StateSectorPartition(ctx, maddr, abi.SectorNumber(sectorNum), types.EmptyTSK) + if err != nil { + return fmt.Errorf("get state sector partition %s", err) + } + + para := miner2.TerminationDeclaration{ + Deadline: loca.Deadline, + Partition: loca.Partition, + Sectors: sectorbit, + } + + terminationDeclarationParams = append(terminationDeclarationParams, para) + } + + terminateSectorParams := 
&miner2.TerminateSectorsParams{ + Terminations: terminationDeclarationParams, + } + + sp, err := actors.SerializeParams(terminateSectorParams) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + msg := &types.Message{ + From: mi.Owner, + To: maddr, + Method: miner.Methods.TerminateSectors, + + Value: big.Zero(), + Params: sp, + } + + //TODO: 4667 add an option to give a more precise estimation with pending termination penalty excluded + + invocResult, err := nodeApi.StateCall(ctx, msg, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("fail to state call: %w", err) + } + + findPenaltyInInternalExecutions("\t", invocResult.ExecutionTrace.Subcalls) + return nil + }, +} diff --git a/cmd/lotus-shed/signatures.go b/cmd/lotus-shed/signatures.go new file mode 100644 index 00000000000..d287e0c3f18 --- /dev/null +++ b/cmd/lotus-shed/signatures.go @@ -0,0 +1,148 @@ +package main + +import ( + "encoding/hex" + "fmt" + "strconv" + + ffi "github.com/filecoin-project/filecoin-ffi" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/lib/sigs" + + "github.com/filecoin-project/go-address" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var signaturesCmd = &cli.Command{ + Name: "signatures", + Usage: "tools involving signatures", + Subcommands: []*cli.Command{ + sigsVerifyVoteCmd, + sigsVerifyBlsMsgsCmd, + }, +} + +var sigsVerifyBlsMsgsCmd = &cli.Command{ + Name: "verify-bls", + Description: "given a block, verifies the bls signature of the messages in the block", + Usage: "", + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 1 { + return xerrors.Errorf("usage: ") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + bc, err := cid.Decode(cctx.Args().First()) + if err != nil { + return err + } + + b, err := 
api.ChainGetBlock(ctx, bc) + if err != nil { + return err + } + + ms, err := api.ChainGetBlockMessages(ctx, bc) + if err != nil { + return err + } + + var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type + var pubks [][]byte + + for _, m := range ms.BlsMessages { + sigCids = append(sigCids, m.Cid()) + + if m.From.Protocol() != address.BLS { + return xerrors.Errorf("address must be BLS address") + } + + pubks = append(pubks, m.From.Payload()) + } + + msgsS := make([]ffi.Message, len(sigCids)) + pubksS := make([]ffi.PublicKey, len(sigCids)) + for i := 0; i < len(sigCids); i++ { + msgsS[i] = sigCids[i].Bytes() + copy(pubksS[i][:], pubks[i][:ffi.PublicKeyBytes]) + } + + sigS := new(ffi.Signature) + copy(sigS[:], b.BLSAggregate.Data[:ffi.SignatureBytes]) + + if len(sigCids) == 0 { + return nil + } + + valid := ffi.HashVerify(sigS, msgsS, pubksS) + if !valid { + return xerrors.New("bls aggregate signature failed to verify") + } + + fmt.Println("BLS siggys valid!") + return nil + }, +} + +var sigsVerifyVoteCmd = &cli.Command{ + Name: "verify-vote", + Description: "can be used to verify signed votes being submitted for FILPolls", + Usage: " ", + Action: func(cctx *cli.Context) error { + + if cctx.Args().Len() != 3 { + return xerrors.Errorf("usage: verify-vote ") + } + + fip, err := strconv.ParseInt(cctx.Args().First(), 10, 64) + if err != nil { + return xerrors.Errorf("couldn't parse FIP number: %w", err) + } + + addr, err := address.NewFromString(cctx.Args().Get(1)) + if err != nil { + return xerrors.Errorf("couldn't parse signing address: %w", err) + } + + sigBytes, err := hex.DecodeString(cctx.Args().Get(2)) + if err != nil { + return xerrors.Errorf("couldn't parse sig: %w", err) + } + + var sig crypto.Signature + if err := sig.UnmarshalBinary(sigBytes); err != nil { + return xerrors.Errorf("couldn't unmarshal sig: %w", err) + } + + switch fip { + case 14: + approve := []byte("7 - Approve") + + if sigs.Verify(&sig, 
addr, approve) == nil { + fmt.Println("valid vote for approving FIP-0014") + return nil + } + + reject := []byte("7 - Reject") + if sigs.Verify(&sig, addr, reject) == nil { + fmt.Println("valid vote for rejecting FIP-0014") + return nil + } + + return xerrors.Errorf("invalid vote for FIP-0014!") + default: + return xerrors.Errorf("unrecognized FIP number") + } + }, +} diff --git a/cmd/lotus-shed/stateroot-stats.go b/cmd/lotus-shed/stateroot-stats.go index 023f782bdd1..6d5d577089f 100644 --- a/cmd/lotus-shed/stateroot-stats.go +++ b/cmd/lotus-shed/stateroot-stats.go @@ -56,13 +56,6 @@ var staterootDiffsCmd = &cli.Command{ return err } - if ts == nil { - ts, err = api.ChainHead(ctx) - if err != nil { - return err - } - } - fn := func(ts *types.TipSet) (cid.Cid, []cid.Cid) { blk := ts.Blocks()[0] strt := blk.ParentStateRoot @@ -134,13 +127,6 @@ var staterootStatCmd = &cli.Command{ return err } - if ts == nil { - ts, err = api.ChainHead(ctx) - if err != nil { - return err - } - } - var addrs []address.Address for _, inp := range cctx.Args().Slice() { diff --git a/cmd/lotus-shed/storage-stats.go b/cmd/lotus-shed/storage-stats.go new file mode 100644 index 00000000000..a9a5744a6bd --- /dev/null +++ b/cmd/lotus-shed/storage-stats.go @@ -0,0 +1,131 @@ +package main + +import ( + "encoding/json" + corebig "math/big" + "os" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + filbig "github.com/filecoin-project/go-state-types/big" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" +) + +// How many epochs back to look at for dealstats +var defaultEpochLookback = abi.ChainEpoch(10) + +type networkTotalsOutput struct { + Epoch int64 `json:"epoch"` + Endpoint string `json:"endpoint"` + Payload networkTotals `json:"payload"` +} + +type networkTotals struct { + QaNetworkPower filbig.Int `json:"total_qa_power"` + RawNetworkPower filbig.Int `json:"total_raw_capacity"` + 
CapacityCarryingData float64 `json:"capacity_fraction_carrying_data"` + UniqueCids int `json:"total_unique_cids"` + UniqueProviders int `json:"total_unique_providers"` + UniqueClients int `json:"total_unique_clients"` + TotalDeals int `json:"total_num_deals"` + TotalBytes int64 `json:"total_stored_data_size"` + FilplusTotalDeals int `json:"filplus_total_num_deals"` + FilplusTotalBytes int64 `json:"filplus_total_stored_data_size"` + + seenClient map[address.Address]bool + seenProvider map[address.Address]bool + seenPieceCid map[cid.Cid]bool +} + +var storageStatsCmd = &cli.Command{ + Name: "storage-stats", + Usage: "Translates current lotus state into a json summary suitable for driving https://storage.filecoin.io/", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "height", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := lcli.ReqContext(cctx) + + api, apiCloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer apiCloser() + + head, err := api.ChainHead(ctx) + if err != nil { + return err + } + + requestedHeight := cctx.Int64("height") + if requestedHeight > 0 { + head, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(requestedHeight), head.Key()) + } else { + head, err = api.ChainGetTipSetByHeight(ctx, head.Height()-defaultEpochLookback, head.Key()) + } + if err != nil { + return err + } + + power, err := api.StateMinerPower(ctx, address.Address{}, head.Key()) + if err != nil { + return err + } + + netTotals := networkTotals{ + QaNetworkPower: power.TotalPower.QualityAdjPower, + RawNetworkPower: power.TotalPower.RawBytePower, + seenClient: make(map[address.Address]bool), + seenProvider: make(map[address.Address]bool), + seenPieceCid: make(map[cid.Cid]bool), + } + + deals, err := api.StateMarketDeals(ctx, head.Key()) + if err != nil { + return err + } + + for _, dealInfo := range deals { + + // Only count deals that have properly started, not past/future ones + // 
https://github.com/filecoin-project/specs-actors/blob/v0.9.9/actors/builtin/market/deal.go#L81-L85 + // Bail on 0 as well in case SectorStartEpoch is uninitialized due to some bug + if dealInfo.State.SectorStartEpoch <= 0 || + dealInfo.State.SectorStartEpoch > head.Height() { + continue + } + + netTotals.seenClient[dealInfo.Proposal.Client] = true + netTotals.TotalBytes += int64(dealInfo.Proposal.PieceSize) + netTotals.seenProvider[dealInfo.Proposal.Provider] = true + netTotals.seenPieceCid[dealInfo.Proposal.PieceCID] = true + netTotals.TotalDeals++ + + if dealInfo.Proposal.VerifiedDeal { + netTotals.FilplusTotalDeals++ + netTotals.FilplusTotalBytes += int64(dealInfo.Proposal.PieceSize) + } + } + + netTotals.UniqueCids = len(netTotals.seenPieceCid) + netTotals.UniqueClients = len(netTotals.seenClient) + netTotals.UniqueProviders = len(netTotals.seenProvider) + + netTotals.CapacityCarryingData, _ = new(corebig.Rat).SetFrac( + corebig.NewInt(netTotals.TotalBytes), + netTotals.RawNetworkPower.Int, + ).Float64() + + return json.NewEncoder(os.Stdout).Encode( + networkTotalsOutput{ + Epoch: int64(head.Height()), + Endpoint: "NETWORK_WIDE_TOTALS", + Payload: netTotals, + }, + ) + }, +} diff --git a/cmd/lotus-shed/sync.go b/cmd/lotus-shed/sync.go index 65d2b6d6f50..cab3bd29ead 100644 --- a/cmd/lotus-shed/sync.go +++ b/cmd/lotus-shed/sync.go @@ -172,12 +172,13 @@ var syncScrapePowerCmd = &cli.Command{ return err } - qpercI := types.BigDiv(types.BigMul(totalWonPower.QualityAdjPower, types.NewInt(1000000)), totalPower.TotalPower.QualityAdjPower) - fmt.Println("Number of winning miners: ", len(miners)) fmt.Println("QAdjPower of winning miners: ", totalWonPower.QualityAdjPower) fmt.Println("QAdjPower of all miners: ", totalPower.TotalPower.QualityAdjPower) - fmt.Println("Percentage of winning QAdjPower: ", float64(qpercI.Int64())/10000) + fmt.Println("Percentage of winning QAdjPower: ", types.BigDivFloat( + types.BigMul(totalWonPower.QualityAdjPower, big.NewInt(100)), + 
totalPower.TotalPower.QualityAdjPower, + )) return nil }, diff --git a/cmd/lotus-shed/verifreg.go b/cmd/lotus-shed/verifreg.go index df1f0d99012..7640e636a77 100644 --- a/cmd/lotus-shed/verifreg.go +++ b/cmd/lotus-shed/verifreg.go @@ -13,7 +13,7 @@ import ( verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" @@ -67,11 +67,13 @@ var verifRegAddVerifierCmd = &cli.Command{ return err } - api, closer, err := lcli.GetFullNodeAPI(cctx) + srv, err := lcli.GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := lcli.ReqContext(cctx) vrk, err := api.StateVerifiedRegistryRootKey(ctx, types.EmptyTSK) @@ -79,14 +81,21 @@ var verifRegAddVerifierCmd = &cli.Command{ return err } - smsg, err := api.MsigPropose(ctx, vrk, verifreg.Address, big.Zero(), sender, uint64(verifreg.Methods.AddVerifier), params) + proto, err := api.MsigPropose(ctx, vrk, verifreg.Address, big.Zero(), sender, uint64(verifreg.Methods.AddVerifier), params) + if err != nil { + return err + } + + sm, _, err := srv.PublishMessage(ctx, proto, false) if err != nil { return err } - fmt.Printf("message sent, now waiting on cid: %s\n", smsg) + msgCid := sm.Cid() + + fmt.Printf("message sent, now waiting on cid: %s\n", msgCid) - mwait, err := api.StateWaitMsg(ctx, smsg, build.MessageConfidence) + mwait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -102,8 +111,9 @@ var verifRegAddVerifierCmd = &cli.Command{ } var verifRegVerifyClientCmd = &cli.Command{ - Name: "verify-client", - Usage: "make a given account a verified client", + Name: "verify-client", + Usage: "make a given account a 
verified client", + Hidden: true, Flags: []cli.Flag{ &cli.StringFlag{ Name: "from", @@ -111,6 +121,7 @@ var verifRegVerifyClientCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`") froms := cctx.String("from") if froms == "" { return fmt.Errorf("must specify from address with --from") @@ -175,9 +186,11 @@ var verifRegVerifyClientCmd = &cli.Command{ } var verifRegListVerifiersCmd = &cli.Command{ - Name: "list-verifiers", - Usage: "list all verifiers", + Name: "list-verifiers", + Usage: "list all verifiers", + Hidden: true, Action: func(cctx *cli.Context) error { + fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`") api, closer, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err @@ -190,7 +203,7 @@ var verifRegListVerifiersCmd = &cli.Command{ return err } - apibs := apibstore.NewAPIBlockstore(api) + apibs := blockstore.NewAPIBlockstore(api) store := adt.WrapStore(ctx, cbor.NewCborStore(apibs)) st, err := verifreg.Load(store, act) @@ -205,9 +218,11 @@ var verifRegListVerifiersCmd = &cli.Command{ } var verifRegListClientsCmd = &cli.Command{ - Name: "list-clients", - Usage: "list all verified clients", + Name: "list-clients", + Usage: "list all verified clients", + Hidden: true, Action: func(cctx *cli.Context) error { + fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`") api, closer, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err @@ -220,7 +235,7 @@ var verifRegListClientsCmd = &cli.Command{ return err } - apibs := apibstore.NewAPIBlockstore(api) + apibs := blockstore.NewAPIBlockstore(api) store := adt.WrapStore(ctx, cbor.NewCborStore(apibs)) st, err := verifreg.Load(store, act) @@ -235,9 +250,11 @@ var verifRegListClientsCmd = &cli.Command{ } var verifRegCheckClientCmd = &cli.Command{ - Name: "check-client", - Usage: "check verified client remaining bytes", + Name: "check-client", + Usage: "check verified 
client remaining bytes", + Hidden: true, Action: func(cctx *cli.Context) error { + fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`") if !cctx.Args().Present() { return fmt.Errorf("must specify client address to check") } @@ -269,9 +286,11 @@ var verifRegCheckClientCmd = &cli.Command{ } var verifRegCheckVerifierCmd = &cli.Command{ - Name: "check-verifier", - Usage: "check verifiers remaining bytes", + Name: "check-verifier", + Usage: "check verifiers remaining bytes", + Hidden: true, Action: func(cctx *cli.Context) error { + fmt.Println("DEPRECATED: This behavior is being moved to `lotus verifreg`") if !cctx.Args().Present() { return fmt.Errorf("must specify verifier address to check") } @@ -303,7 +322,7 @@ var verifRegCheckVerifierCmd = &cli.Command{ return err } - apibs := apibstore.NewAPIBlockstore(api) + apibs := blockstore.NewAPIBlockstore(api) store := adt.WrapStore(ctx, cbor.NewCborStore(apibs)) st, err := verifreg.Load(store, act) diff --git a/cmd/lotus-sim/copy.go b/cmd/lotus-sim/copy.go new file mode 100644 index 00000000000..5faba69f21d --- /dev/null +++ b/cmd/lotus-sim/copy.go @@ -0,0 +1,28 @@ +package main + +import ( + "fmt" + + "github.com/urfave/cli/v2" +) + +var copySimCommand = &cli.Command{ + Name: "copy", + ArgsUsage: "", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + if cctx.NArg() != 1 { + return fmt.Errorf("expected 1 argument") + } + name := cctx.Args().First() + return node.CopySim(cctx.Context, cctx.String("simulation"), name) + }, +} diff --git a/cmd/lotus-sim/create.go b/cmd/lotus-sim/create.go new file mode 100644 index 00000000000..4867a5da5ec --- /dev/null +++ b/cmd/lotus-sim/create.go @@ -0,0 +1,49 @@ +package main + +import ( + "fmt" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" +) + 
+var createSimCommand = &cli.Command{ + Name: "create", + ArgsUsage: "[tipset]", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + var ts *types.TipSet + switch cctx.NArg() { + case 0: + if err := node.Chainstore.Load(); err != nil { + return err + } + ts = node.Chainstore.GetHeaviestTipSet() + case 1: + cids, err := lcli.ParseTipSetString(cctx.Args().Get(1)) + if err != nil { + return err + } + tsk := types.NewTipSetKey(cids...) + ts, err = node.Chainstore.LoadTipSet(tsk) + if err != nil { + return err + } + default: + return fmt.Errorf("expected 0 or 1 arguments") + } + _, err = node.CreateSim(cctx.Context, cctx.String("simulation"), ts) + return err + }, +} diff --git a/cmd/lotus-sim/delete.go b/cmd/lotus-sim/delete.go new file mode 100644 index 00000000000..c19b3d27d04 --- /dev/null +++ b/cmd/lotus-sim/delete.go @@ -0,0 +1,22 @@ +package main + +import ( + "github.com/urfave/cli/v2" +) + +var deleteSimCommand = &cli.Command{ + Name: "delete", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + return node.DeleteSim(cctx.Context, cctx.String("simulation")) + }, +} diff --git a/cmd/lotus-sim/info.go b/cmd/lotus-sim/info.go new file mode 100644 index 00000000000..864adb3bc9b --- /dev/null +++ b/cmd/lotus-sim/info.go @@ -0,0 +1,110 @@ +package main + +import ( + "context" + "fmt" + "io" + "text/tabwriter" + "time" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation" +) + 
+func getTotalPower(ctx context.Context, sm *stmgr.StateManager, ts *types.TipSet) (power.Claim, error) { + actor, err := sm.LoadActor(ctx, power.Address, ts) + if err != nil { + return power.Claim{}, err + } + state, err := power.Load(sm.ChainStore().ActorStore(ctx), actor) + if err != nil { + return power.Claim{}, err + } + return state.TotalPower() +} + +func printInfo(ctx context.Context, sim *simulation.Simulation, out io.Writer) error { + head := sim.GetHead() + start := sim.GetStart() + + powerNow, err := getTotalPower(ctx, sim.StateManager, head) + if err != nil { + return err + } + powerLookbackEpoch := head.Height() - builtin.EpochsInDay*2 + if powerLookbackEpoch < start.Height() { + powerLookbackEpoch = start.Height() + } + lookbackTs, err := sim.Node.Chainstore.GetTipsetByHeight(ctx, powerLookbackEpoch, head, false) + if err != nil { + return err + } + powerLookback, err := getTotalPower(ctx, sim.StateManager, lookbackTs) + if err != nil { + return err + } + // growth rate in size/day + growthRate := big.Div( + big.Mul(big.Sub(powerNow.RawBytePower, powerLookback.RawBytePower), + big.NewInt(builtin.EpochsInDay)), + big.NewInt(int64(head.Height()-lookbackTs.Height())), + ) + + tw := tabwriter.NewWriter(out, 8, 8, 1, ' ', 0) + + headEpoch := head.Height() + firstEpoch := start.Height() + 1 + + headTime := time.Unix(int64(head.MinTimestamp()), 0) + startTime := time.Unix(int64(start.MinTimestamp()), 0) + duration := headTime.Sub(startTime) + + fmt.Fprintf(tw, "Name:\t%s\n", sim.Name()) + fmt.Fprintf(tw, "Head:\t%s\n", head) + fmt.Fprintf(tw, "Start Epoch:\t%d\n", firstEpoch) + fmt.Fprintf(tw, "End Epoch:\t%d\n", headEpoch) + fmt.Fprintf(tw, "Length:\t%d\n", headEpoch-firstEpoch) + fmt.Fprintf(tw, "Start Date:\t%s\n", startTime) + fmt.Fprintf(tw, "End Date:\t%s\n", headTime) + fmt.Fprintf(tw, "Duration:\t%.2f day(s)\n", duration.Hours()/24) + fmt.Fprintf(tw, "Capacity:\t%s\n", types.SizeStr(powerNow.RawBytePower)) + fmt.Fprintf(tw, "Daily Capacity 
Growth:\t%s/day\n", types.SizeStr(growthRate)) + fmt.Fprintf(tw, "Network Version:\t%d\n", sim.GetNetworkVersion()) + return tw.Flush() +} + +var infoSimCommand = &cli.Command{ + Name: "info", + Description: "Output information about the simulation.", + Subcommands: []*cli.Command{ + infoCommitGasSimCommand, + infoMessageSizeSimCommand, + infoWindowPostBandwidthSimCommand, + infoCapacityGrowthSimCommand, + infoStateGrowthSimCommand, + }, + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + return printInfo(cctx.Context, sim, cctx.App.Writer) + }, +} diff --git a/cmd/lotus-sim/info_capacity.go b/cmd/lotus-sim/info_capacity.go new file mode 100644 index 00000000000..4372ee34afb --- /dev/null +++ b/cmd/lotus-sim/info_capacity.go @@ -0,0 +1,67 @@ +package main + +import ( + "fmt" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +) + +var infoCapacityGrowthSimCommand = &cli.Command{ + Name: "capacity-growth", + Description: "List daily capacity growth over the course of the simulation starting at the end.", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + + firstEpoch := sim.GetStart().Height() + ts := sim.GetHead() + lastPower, err := getTotalPower(cctx.Context, sim.StateManager, ts) + if err != nil { + return err + } + lastHeight := ts.Height() + + for ts.Height() > firstEpoch && cctx.Err() == nil { + ts, err = 
sim.Node.Chainstore.LoadTipSet(ts.Parents()) + if err != nil { + return err + } + newEpoch := ts.Height() + if newEpoch != firstEpoch && newEpoch+builtin.EpochsInDay > lastHeight { + continue + } + + newPower, err := getTotalPower(cctx.Context, sim.StateManager, ts) + if err != nil { + return err + } + + growthRate := big.Div( + big.Mul(big.Sub(lastPower.RawBytePower, newPower.RawBytePower), + big.NewInt(builtin.EpochsInDay)), + big.NewInt(int64(lastHeight-newEpoch)), + ) + lastPower = newPower + lastHeight = newEpoch + fmt.Fprintf(cctx.App.Writer, "%s/day\n", types.SizeStr(growthRate)) + } + return cctx.Err() + }, +} diff --git a/cmd/lotus-sim/info_commit.go b/cmd/lotus-sim/info_commit.go new file mode 100644 index 00000000000..738fcde95e5 --- /dev/null +++ b/cmd/lotus-sim/info_commit.go @@ -0,0 +1,148 @@ +package main + +import ( + "bytes" + "fmt" + "os" + "syscall" + + "github.com/streadway/quantile" + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation" + "github.com/filecoin-project/lotus/lib/stati" +) + +var infoCommitGasSimCommand = &cli.Command{ + Name: "commit-gas", + Description: "Output information about the gas for commits", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "lookback", + Value: 0, + }, + }, + Action: func(cctx *cli.Context) (err error) { + log := func(f string, i ...interface{}) { + fmt.Fprintf(os.Stderr, f, i...) 
+ } + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + go profileOnSignal(cctx, syscall.SIGUSR2) + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + + var gasAgg, proofsAgg uint64 + var gasAggMax, proofsAggMax uint64 + var gasSingle, proofsSingle uint64 + + qpoints := []struct{ q, tol float64 }{ + {0.01, 0.0005}, + {0.05, 0.001}, + {0.20, 0.01}, + {0.25, 0.01}, + {0.30, 0.01}, + {0.40, 0.01}, + {0.45, 0.01}, + {0.50, 0.01}, + {0.60, 0.01}, + {0.80, 0.01}, + {0.95, 0.001}, + {0.99, 0.0005}, + } + estims := make([]quantile.Estimate, len(qpoints)) + for i, p := range qpoints { + estims[i] = quantile.Known(p.q, p.tol) + } + qua := quantile.New(estims...) + hist, err := stati.NewHistogram([]float64{ + 1, 3, 5, 7, 15, 30, 50, 100, 200, 400, 600, 700, 819}) + if err != nil { + return err + } + + err = sim.Walk(cctx.Context, cctx.Int64("lookback"), func( + sm *stmgr.StateManager, ts *types.TipSet, stCid cid.Cid, + messages []*simulation.AppliedMessage, + ) error { + for _, m := range messages { + if m.ExitCode != exitcode.Ok { + continue + } + if m.Method == miner.Methods.ProveCommitAggregate { + param := miner.ProveCommitAggregateParams{} + err := param.UnmarshalCBOR(bytes.NewReader(m.Params)) + if err != nil { + log("failed to decode params: %+v", err) + return nil + } + c, err := param.SectorNumbers.Count() + if err != nil { + log("failed to count sectors") + return nil + } + gasAgg += uint64(m.GasUsed) + proofsAgg += c + if c == 819 { + gasAggMax += uint64(m.GasUsed) + proofsAggMax += c + } + for i := uint64(0); i < c; i++ { + qua.Add(float64(c)) + } + hist.Observe(float64(c)) + } + + if m.Method == miner.Methods.ProveCommitSector { + gasSingle += uint64(m.GasUsed) + proofsSingle++ + qua.Add(1) + hist.Observe(1) + } + } + + return nil + }) + if err != nil { + return err + } + idealGassUsed := float64(gasAggMax) / 
float64(proofsAggMax) * float64(proofsAgg+proofsSingle) + + fmt.Printf("Gas usage efficiency in comparison to all 819: %f%%\n", 100*idealGassUsed/float64(gasAgg+gasSingle)) + + fmt.Printf("Proofs in singles: %d\n", proofsSingle) + fmt.Printf("Proofs in Aggs: %d\n", proofsAgg) + fmt.Printf("Proofs in Aggs(819): %d\n", proofsAggMax) + + fmt.Println() + fmt.Println("Quantiles of proofs in given aggregate size:") + for _, p := range qpoints { + fmt.Printf("%.0f%%\t%.0f\n", p.q*100, qua.Get(p.q)) + } + fmt.Println() + fmt.Println("Histogram of messages:") + fmt.Printf("Total\t%d\n", hist.Total()) + for i, b := range hist.Buckets[1:] { + fmt.Printf("%.0f\t%d\n", b, hist.Get(i)) + } + + return nil + }, +} diff --git a/cmd/lotus-sim/info_message.go b/cmd/lotus-sim/info_message.go new file mode 100644 index 00000000000..33c45e7280f --- /dev/null +++ b/cmd/lotus-sim/info_message.go @@ -0,0 +1,95 @@ +package main + +import ( + "fmt" + "syscall" + + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation" + "github.com/filecoin-project/lotus/lib/stati" + "github.com/ipfs/go-cid" + "github.com/streadway/quantile" + "github.com/urfave/cli/v2" +) + +var infoMessageSizeSimCommand = &cli.Command{ + Name: "message-size", + Description: "Output information about message size distribution", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "lookback", + Value: 0, + }, + }, + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + go profileOnSignal(cctx, syscall.SIGUSR2) + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + + qpoints := []struct{ q, tol float64 }{ + {0.30, 0.01}, + {0.40, 0.01}, + {0.60, 0.01}, + {0.70, 0.01}, + {0.80, 0.01}, + {0.85, 0.01}, + {0.90, 0.01}, + {0.95, 0.001}, + {0.99, 
0.0005}, + {0.999, 0.0001}, + } + estims := make([]quantile.Estimate, len(qpoints)) + for i, p := range qpoints { + estims[i] = quantile.Known(p.q, p.tol) + } + qua := quantile.New(estims...) + hist, err := stati.NewHistogram([]float64{ + 1 << 8, 1 << 10, 1 << 11, 1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, + }) + if err != nil { + return err + } + + err = sim.Walk(cctx.Context, cctx.Int64("lookback"), func( + sm *stmgr.StateManager, ts *types.TipSet, stCid cid.Cid, + messages []*simulation.AppliedMessage, + ) error { + for _, m := range messages { + msgSize := float64(m.ChainLength()) + qua.Add(msgSize) + hist.Observe(msgSize) + } + + return nil + }) + if err != nil { + return err + } + fmt.Println("Quantiles of message sizes:") + for _, p := range qpoints { + fmt.Printf("%.1f%%\t%.0f\n", p.q*100, qua.Get(p.q)) + } + fmt.Println() + fmt.Println("Histogram of message sizes:") + fmt.Printf("Total\t%d\n", hist.Total()) + for i, b := range hist.Buckets[1:] { + fmt.Printf("%.0f\t%d\t%.1f%%\n", b, hist.Get(i), 100*hist.GetRatio(i)) + } + + return nil + }, +} diff --git a/cmd/lotus-sim/info_state.go b/cmd/lotus-sim/info_state.go new file mode 100644 index 00000000000..5c9541513c6 --- /dev/null +++ b/cmd/lotus-sim/info_state.go @@ -0,0 +1,141 @@ +package main + +import ( + "bytes" + "context" + "fmt" + "math" + "sync" + "sync/atomic" + + "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +) + +var infoStateGrowthSimCommand = &cli.Command{ + Name: "state-size", + Description: "List daily state size over the course of the simulation starting at the end.", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + sim, err := 
node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + + // NOTE: This code is entirely read-bound. + store := node.Chainstore.StateBlockstore() + stateSize := func(ctx context.Context, c cid.Cid) (uint64, error) { + seen := cid.NewSet() + sema := make(chan struct{}, 40) + var lock sync.Mutex + var recSize func(cid.Cid) (uint64, error) + recSize = func(c cid.Cid) (uint64, error) { + // Not a part of the chain state. + if err := ctx.Err(); err != nil { + return 0, err + } + + lock.Lock() + visit := seen.Visit(c) + lock.Unlock() + // Already seen? + if !visit { + return 0, nil + } + + var links []cid.Cid + var totalSize uint64 + if err := store.View(c, func(data []byte) error { + totalSize += uint64(len(data)) + return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { + if c.Prefix().Codec != cid.DagCBOR { + return + } + + links = append(links, c) + }) + }); err != nil { + return 0, err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + cb := func(c cid.Cid) { + size, err := recSize(c) + if err != nil { + select { + case errCh <- err: + default: + } + return + } + atomic.AddUint64(&totalSize, size) + } + asyncCb := func(c cid.Cid) { + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sema }() + cb(c) + }() + } + for _, link := range links { + select { + case sema <- struct{}{}: + asyncCb(link) + default: + cb(link) + } + + } + wg.Wait() + + select { + case err := <-errCh: + return 0, err + default: + } + + return totalSize, nil + } + return recSize(c) + } + + firstEpoch := sim.GetStart().Height() + ts := sim.GetHead() + lastHeight := abi.ChainEpoch(math.MaxInt64) + for ts.Height() > firstEpoch && cctx.Err() == nil { + if ts.Height()+builtin.EpochsInDay <= lastHeight { + lastHeight = ts.Height() + + parentStateSize, err := stateSize(cctx.Context, ts.ParentState()) + if err != nil { + return err + } + + fmt.Fprintf(cctx.App.Writer, "%d: %s\n", ts.Height(), types.SizeStr(types.NewInt(parentStateSize))) + 
} + + ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents()) + if err != nil { + return err + } + } + return cctx.Err() + }, +} diff --git a/cmd/lotus-sim/info_wdpost.go b/cmd/lotus-sim/info_wdpost.go new file mode 100644 index 00000000000..719a133b17e --- /dev/null +++ b/cmd/lotus-sim/info_wdpost.go @@ -0,0 +1,69 @@ +package main + +import ( + "fmt" + + "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/go-state-types/exitcode" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation" +) + +var infoWindowPostBandwidthSimCommand = &cli.Command{ + Name: "post-bandwidth", + Description: "List average chain bandwidth used by window posts for each day of the simulation.", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + + var postGas, totalGas int64 + printStats := func() { + fmt.Fprintf(cctx.App.Writer, "%.4f%%\n", float64(100*postGas)/float64(totalGas)) + } + idx := 0 + err = sim.Walk(cctx.Context, 0, func( + sm *stmgr.StateManager, ts *types.TipSet, stCid cid.Cid, + messages []*simulation.AppliedMessage, + ) error { + for _, m := range messages { + totalGas += m.GasUsed + if m.ExitCode != exitcode.Ok { + continue + } + if m.Method == miner.Methods.SubmitWindowedPoSt { + postGas += m.GasUsed + } + } + idx++ + idx %= builtin.EpochsInDay + if idx == 0 { + printStats() + postGas = 0 + totalGas = 0 + } + return nil + }) + if idx > 0 { + printStats() + } + return err + }, +} diff --git a/cmd/lotus-sim/list.go b/cmd/lotus-sim/list.go new file mode 100644 index 
00000000000..37e767b9ab0 --- /dev/null +++ b/cmd/lotus-sim/list.go @@ -0,0 +1,38 @@ +package main + +import ( + "fmt" + "text/tabwriter" + + "github.com/urfave/cli/v2" +) + +var listSimCommand = &cli.Command{ + Name: "list", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + list, err := node.ListSims(cctx.Context) + if err != nil { + return err + } + tw := tabwriter.NewWriter(cctx.App.Writer, 8, 8, 0, ' ', 0) + for _, name := range list { + sim, err := node.LoadSim(cctx.Context, name) + if err != nil { + return err + } + head := sim.GetHead() + fmt.Fprintf(tw, "%s\t%s\t%s\n", name, head.Height(), head.Key()) + } + return tw.Flush() + }, +} diff --git a/cmd/lotus-sim/main.go b/cmd/lotus-sim/main.go new file mode 100644 index 00000000000..e6cd5d9932b --- /dev/null +++ b/cmd/lotus-sim/main.go @@ -0,0 +1,63 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + + "github.com/urfave/cli/v2" + + logging "github.com/ipfs/go-log/v2" +) + +var root []*cli.Command = []*cli.Command{ + createSimCommand, + deleteSimCommand, + copySimCommand, + renameSimCommand, + listSimCommand, + + runSimCommand, + infoSimCommand, + upgradeCommand, +} + +func main() { + if _, set := os.LookupEnv("GOLOG_LOG_LEVEL"); !set { + _ = logging.SetLogLevel("simulation", "DEBUG") + _ = logging.SetLogLevel("simulation-mock", "DEBUG") + } + app := &cli.App{ + Name: "lotus-sim", + Usage: "A tool to simulate a network.", + Commands: root, + Writer: os.Stdout, + ErrWriter: os.Stderr, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + EnvVars: []string{"LOTUS_PATH"}, + Hidden: true, + Value: "~/.lotus", + }, + &cli.StringFlag{ + Name: "simulation", + Aliases: []string{"sim"}, + EnvVars: []string{"LOTUS_SIMULATION"}, + Value: "default", + }, + }, + } + + ctx, cancel := signal.NotifyContext(context.Background(), + syscall.SIGTERM, 
syscall.SIGINT, syscall.SIGHUP) + defer cancel() + + if err := app.RunContext(ctx, os.Args); err != nil { + fmt.Fprintf(os.Stderr, "Error: %s\n", err) + os.Exit(1) + return + } +} diff --git a/cmd/lotus-sim/profile.go b/cmd/lotus-sim/profile.go new file mode 100644 index 00000000000..63e0ef3bd86 --- /dev/null +++ b/cmd/lotus-sim/profile.go @@ -0,0 +1,94 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + "path/filepath" + "runtime/pprof" + "time" + + "github.com/urfave/cli/v2" +) + +func takeProfiles(ctx context.Context) (fname string, _err error) { + dir, err := os.MkdirTemp(".", ".profiles-temp*") + if err != nil { + return "", err + } + + if err := writeProfiles(ctx, dir); err != nil { + _ = os.RemoveAll(dir) + return "", err + } + + fname = fmt.Sprintf("pprof-simulation-%s", time.Now().Format(time.RFC3339)) + if err := os.Rename(dir, fname); err != nil { + _ = os.RemoveAll(dir) + return "", err + } + return fname, nil +} + +func writeProfiles(ctx context.Context, dir string) error { + for _, profile := range pprof.Profiles() { + file, err := os.Create(filepath.Join(dir, profile.Name()+".pprof.gz")) + if err != nil { + return err + } + if err := profile.WriteTo(file, 0); err != nil { + _ = file.Close() + return err + } + if err := file.Close(); err != nil { + return err + } + if err := ctx.Err(); err != nil { + return err + } + } + + file, err := os.Create(filepath.Join(dir, "cpu.pprof.gz")) + if err != nil { + return err + } + + if err := pprof.StartCPUProfile(file); err != nil { + _ = file.Close() + return err + } + select { + case <-time.After(30 * time.Second): + case <-ctx.Done(): + } + pprof.StopCPUProfile() + err = file.Close() + if err := ctx.Err(); err != nil { + return err + } + return err +} + +func profileOnSignal(cctx *cli.Context, signals ...os.Signal) { + ch := make(chan os.Signal, 1) + signal.Notify(ch, signals...) 
+	defer signal.Stop(ch)
+
+	for {
+		select {
+		case <-ch:
+			fname, err := takeProfiles(cctx.Context)
+			switch err {
+			case context.Canceled:
+				return
+			case nil:
+				fmt.Fprintf(cctx.App.ErrWriter, "Wrote profile to %q\n", fname)
+			default:
+				fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to write profile: %s\n", err)
+			}
+		case <-cctx.Done():
+			return
+		}
+	}
+}
diff --git a/cmd/lotus-sim/rename.go b/cmd/lotus-sim/rename.go
new file mode 100644
index 00000000000..c336717c792
--- /dev/null
+++ b/cmd/lotus-sim/rename.go
@@ -0,0 +1,29 @@
+package main
+
+import (
+	"fmt"
+
+	"github.com/urfave/cli/v2"
+)
+
+var renameSimCommand = &cli.Command{
+	Name:      "rename",
+	ArgsUsage: "<new-name>",
+	Action: func(cctx *cli.Context) (err error) {
+		node, err := open(cctx)
+		if err != nil {
+			return err
+		}
+		defer func() {
+			if cerr := node.Close(); err == nil {
+				err = cerr
+			}
+		}()
+
+		if cctx.NArg() != 1 {
+			return fmt.Errorf("expected 1 argument")
+		}
+		name := cctx.Args().First()
+		return node.RenameSim(cctx.Context, cctx.String("simulation"), name)
+	},
+}
diff --git a/cmd/lotus-sim/run.go b/cmd/lotus-sim/run.go
new file mode 100644
index 00000000000..a985fdf9ec9
--- /dev/null
+++ b/cmd/lotus-sim/run.go
@@ -0,0 +1,72 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"os/signal"
+	"syscall"
+
+	"github.com/urfave/cli/v2"
+)
+
+var runSimCommand = &cli.Command{
+	Name: "run",
+	Description: `Run the simulation.
+
+Signals:
+- SIGUSR1: Print information about the current simulation (equivalent to 'lotus-sim info').
+- SIGUSR2: Write pprof profiles to ./pprof-simulation-$DATE/`, + Flags: []cli.Flag{ + &cli.IntFlag{ + Name: "epochs", + Usage: "Advance the given number of epochs then stop.", + }, + }, + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + go profileOnSignal(cctx, syscall.SIGUSR2) + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + targetEpochs := cctx.Int("epochs") + + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGUSR1) + defer signal.Stop(ch) + + for i := 0; targetEpochs == 0 || i < targetEpochs; i++ { + ts, err := sim.Step(cctx.Context) + if err != nil { + return err + } + + fmt.Fprintf(cctx.App.Writer, "advanced to %d %s\n", ts.Height(), ts.Key()) + + // Print + select { + case <-ch: + fmt.Fprintln(cctx.App.Writer, "---------------------") + if err := printInfo(cctx.Context, sim, cctx.App.Writer); err != nil { + fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to print info: %s\n", err) + } + fmt.Fprintln(cctx.App.Writer, "---------------------") + case <-cctx.Context.Done(): + return cctx.Err() + default: + } + } + fmt.Fprintln(cctx.App.Writer, "simulation done") + return err + }, +} diff --git a/cmd/lotus-sim/simulation/block.go b/cmd/lotus-sim/simulation/block.go new file mode 100644 index 00000000000..93e6a319177 --- /dev/null +++ b/cmd/lotus-sim/simulation/block.go @@ -0,0 +1,93 @@ +package simulation + +import ( + "context" + "crypto/sha256" + "encoding/binary" + "time" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" +) + +const beaconPrefix = "mockbeacon:" + +// nextBeaconEntries returns a fake beacon entries for the next block. 
+func (sim *Simulation) nextBeaconEntries() []types.BeaconEntry { + parentBeacons := sim.head.Blocks()[0].BeaconEntries + lastBeacon := parentBeacons[len(parentBeacons)-1] + beaconRound := lastBeacon.Round + 1 + + buf := make([]byte, len(beaconPrefix)+8) + copy(buf, beaconPrefix) + binary.BigEndian.PutUint64(buf[len(beaconPrefix):], beaconRound) + beaconRand := sha256.Sum256(buf) + return []types.BeaconEntry{{ + Round: beaconRound, + Data: beaconRand[:], + }} +} + +// nextTicket returns a fake ticket for the next block. +func (sim *Simulation) nextTicket() *types.Ticket { + newProof := sha256.Sum256(sim.head.MinTicket().VRFProof) + return &types.Ticket{ + VRFProof: newProof[:], + } +} + +// makeTipSet generates and executes the next tipset from the given messages. This method: +// +// 1. Stores the given messages in the Chainstore. +// 2. Creates and persists a single block mined by the same miner as the parent. +// 3. Creates a tipset from this block and executes it. +// 4. Returns the resulting tipset. +// +// This method does _not_ mutate local state (although it does add blocks to the datastore). +func (sim *Simulation) makeTipSet(ctx context.Context, messages []*types.Message) (*types.TipSet, error) { + parentTs := sim.head + parentState, parentRec, err := sim.StateManager.TipSetState(ctx, parentTs) + if err != nil { + return nil, xerrors.Errorf("failed to compute parent tipset: %w", err) + } + msgsCid, err := sim.storeMessages(ctx, messages) + if err != nil { + return nil, xerrors.Errorf("failed to store block messages: %w", err) + } + + uts := parentTs.MinTimestamp() + build.BlockDelaySecs + + blks := []*types.BlockHeader{{ + Miner: parentTs.MinTicketBlock().Miner, // keep reusing the same miner. 
+ Ticket: sim.nextTicket(), + BeaconEntries: sim.nextBeaconEntries(), + Parents: parentTs.Cids(), + Height: parentTs.Height() + 1, + ParentStateRoot: parentState, + ParentMessageReceipts: parentRec, + Messages: msgsCid, + ParentBaseFee: abi.NewTokenAmount(0), + Timestamp: uts, + ElectionProof: &types.ElectionProof{WinCount: 1}, + }} + err = sim.Node.Chainstore.PersistBlockHeaders(blks...) + if err != nil { + return nil, xerrors.Errorf("failed to persist block headers: %w", err) + } + newTipSet, err := types.NewTipSet(blks) + if err != nil { + return nil, xerrors.Errorf("failed to create new tipset: %w", err) + } + now := time.Now() + _, _, err = sim.StateManager.TipSetState(ctx, newTipSet) + if err != nil { + return nil, xerrors.Errorf("failed to compute new tipset: %w", err) + } + duration := time.Since(now) + log.Infow("computed tipset", "duration", duration, "height", newTipSet.Height()) + + return newTipSet, nil +} diff --git a/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go new file mode 100644 index 00000000000..2ffc0bf140b --- /dev/null +++ b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go @@ -0,0 +1,280 @@ +package blockbuilder + +import ( + "context" + "math" + + "go.uber.org/zap" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/account" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" +) + +const ( + // 
0.25 is the default, but the number below is from the network. + gasOverestimation = 1.0 / 0.808 + // The number of expected blocks in a tipset. We use this to determine how much gas a tipset + // has. + // 5 per tipset, but we effectively get 4 blocks worth of messages. + expectedBlocks = 4 + // TODO: This will produce invalid blocks but it will accurately model the amount of gas + // we're willing to use per-tipset. + // A more correct approach would be to produce 5 blocks. We can do that later. + targetGas = build.BlockGasTarget * expectedBlocks +) + +type BlockBuilder struct { + ctx context.Context + logger *zap.SugaredLogger + + parentTs *types.TipSet + parentSt *state.StateTree + vm *vm.VM + sm *stmgr.StateManager + + gasTotal int64 + messages []*types.Message +} + +// NewBlockBuilder constructs a new block builder from the parent state. Use this to pack a block +// with messages. +// +// NOTE: The context applies to the life of the block builder itself (but does not need to be canceled). +func NewBlockBuilder(ctx context.Context, logger *zap.SugaredLogger, sm *stmgr.StateManager, parentTs *types.TipSet) (*BlockBuilder, error) { + parentState, _, err := sm.TipSetState(ctx, parentTs) + if err != nil { + return nil, err + } + parentSt, err := sm.StateTree(parentState) + if err != nil { + return nil, err + } + + bb := &BlockBuilder{ + ctx: ctx, + logger: logger.With("epoch", parentTs.Height()+1), + sm: sm, + parentTs: parentTs, + parentSt: parentSt, + } + + // Then we construct a VM to execute messages for gas estimation. + // + // Most parts of this VM are "real" except: + // 1. We don't charge a fee. + // 2. The runtime has "fake" proof logic. + // 3. We don't actually save any of the results. 
+	r := store.NewChainRand(sm.ChainStore(), parentTs.Cids())
+	vmopt := &vm.VMOpts{
+		StateBase:      parentState,
+		Epoch:          parentTs.Height() + 1,
+		Rand:           r,
+		Bstore:         sm.ChainStore().StateBlockstore(),
+		Syscalls:       sm.ChainStore().VMSys(),
+		CircSupplyCalc: sm.GetVMCirculatingSupply,
+		NtwkVersion:    sm.GetNtwkVersion,
+		BaseFee:        abi.NewTokenAmount(0),
+		LookbackState:  stmgr.LookbackStateGetterForTipset(sm, parentTs),
+	}
+	bb.vm, err = vm.NewVM(bb.ctx, vmopt)
+	if err != nil {
+		return nil, err
+	}
+	return bb, nil
+}
+
+// PushMessage tries to push the specified message into the block.
+//
+// 1. All messages will be executed in-order.
+// 2. Gas computation & nonce selection will be handled internally.
+// 3. The base-fee is 0 so the sender does not need funds.
+// 4. As usual, the sender must be an account (any account).
+// 5. If the message fails to execute, this method will fail.
+//
+// Returns ErrOutOfGas when out of gas. Check BlockBuilder.GasRemaining and try pushing a cheaper
+// message.
+func (bb *BlockBuilder) PushMessage(msg *types.Message) (*types.MessageReceipt, error) {
+	if bb.gasTotal >= targetGas {
+		return nil, new(ErrOutOfGas)
+	}
+
+	st := bb.StateTree()
+	store := bb.ActorStore()
+
+	// Copy the message before we start mutating it.
+	msgCpy := *msg
+	msg = &msgCpy
+
+	actor, err := st.GetActor(msg.From)
+	if err != nil {
+		return nil, err
+	}
+	if !builtin.IsAccountActor(actor.Code) {
+		return nil, xerrors.Errorf(
+			"messages may only be sent from account actors, got message from %s (%s)",
+			msg.From, builtin.ActorNameByCode(actor.Code),
+		)
+	}
+	msg.Nonce = actor.Nonce
+	if msg.From.Protocol() == address.ID {
+		state, err := account.Load(store, actor)
+		if err != nil {
+			return nil, err
+		}
+		msg.From, err = state.PubkeyAddress()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// TODO: Our gas estimation is broken for payment channels due to horrible hacks in
+	// gasEstimateGasLimit.
+ if msg.Value == types.EmptyInt { + msg.Value = abi.NewTokenAmount(0) + } + msg.GasPremium = abi.NewTokenAmount(0) + msg.GasFeeCap = abi.NewTokenAmount(0) + msg.GasLimit = build.BlockGasTarget + + // We manually snapshot so we can revert nonce changes, etc. on failure. + err = st.Snapshot(bb.ctx) + if err != nil { + return nil, xerrors.Errorf("failed to take a snapshot while estimating message gas: %w", err) + } + defer st.ClearSnapshot() + + ret, err := bb.vm.ApplyMessage(bb.ctx, msg) + if err != nil { + _ = st.Revert() + return nil, err + } + if ret.ActorErr != nil { + _ = st.Revert() + return nil, ret.ActorErr + } + + // Sometimes there are bugs. Let's catch them. + if ret.GasUsed == 0 { + _ = st.Revert() + return nil, xerrors.Errorf("used no gas %v -> %v", msg, ret) + } + + // Update the gas limit taking overestimation into account. + msg.GasLimit = int64(math.Ceil(float64(ret.GasUsed) * gasOverestimation)) + + // Did we go over? Yes, revert. + newTotal := bb.gasTotal + msg.GasLimit + if newTotal > targetGas { + _ = st.Revert() + return nil, &ErrOutOfGas{Available: targetGas - bb.gasTotal, Required: msg.GasLimit} + } + bb.gasTotal = newTotal + + bb.messages = append(bb.messages, msg) + return &ret.MessageReceipt, nil +} + +// ActorStore returns the VM's current (pending) blockstore. +func (bb *BlockBuilder) ActorStore() adt.Store { + return bb.vm.ActorStore(bb.ctx) +} + +// StateTree returns the VM's current (pending) state-tree. This includes any changes made by +// successfully pushed messages. +// +// You probably want ParentStateTree +func (bb *BlockBuilder) StateTree() *state.StateTree { + return bb.vm.StateTree().(*state.StateTree) +} + +// ParentStateTree returns the parent state-tree (not the paren't tipset's parent state-tree). +func (bb *BlockBuilder) ParentStateTree() *state.StateTree { + return bb.parentSt +} + +// StateTreeByHeight will return a state-tree up through and including the current in-progress +// epoch. 
+// +// NOTE: This will return the state after the given epoch, not the parent state for the epoch. +func (bb *BlockBuilder) StateTreeByHeight(epoch abi.ChainEpoch) (*state.StateTree, error) { + now := bb.Height() + if epoch > now { + return nil, xerrors.Errorf( + "cannot load state-tree from future: %d > %d", epoch, bb.Height(), + ) + } else if epoch <= 0 { + return nil, xerrors.Errorf( + "cannot load state-tree: epoch %d <= 0", epoch, + ) + } + + // Manually handle "now" and "previous". + switch epoch { + case now: + return bb.StateTree(), nil + case now - 1: + return bb.ParentStateTree(), nil + } + + // Get the tipset of the block _after_ the target epoch so we can use its parent state. + targetTs, err := bb.sm.ChainStore().GetTipsetByHeight(bb.ctx, epoch+1, bb.parentTs, false) + if err != nil { + return nil, err + } + + return bb.sm.StateTree(targetTs.ParentState()) +} + +// Messages returns all messages currently packed into the next block. +// 1. DO NOT modify the slice, copy it. +// 2. DO NOT retain the slice, copy it. +func (bb *BlockBuilder) Messages() []*types.Message { + return bb.messages +} + +// GasRemaining returns the amount of remaining gas in the next block. +func (bb *BlockBuilder) GasRemaining() int64 { + return targetGas - bb.gasTotal +} + +// ParentTipSet returns the parent tipset. +func (bb *BlockBuilder) ParentTipSet() *types.TipSet { + return bb.parentTs +} + +// Height returns the epoch for the target block. +func (bb *BlockBuilder) Height() abi.ChainEpoch { + return bb.parentTs.Height() + 1 +} + +// NetworkVersion returns the network version for the target block. +func (bb *BlockBuilder) NetworkVersion() network.Version { + return bb.sm.GetNtwkVersion(bb.ctx, bb.Height()) +} + +// StateManager returns the stmgr.StateManager. +func (bb *BlockBuilder) StateManager() *stmgr.StateManager { + return bb.sm +} + +// ActorsVersion returns the actors version for the target block. 
+func (bb *BlockBuilder) ActorsVersion() actors.Version {
+	return actors.VersionForNetwork(bb.NetworkVersion())
+}
+
+func (bb *BlockBuilder) L() *zap.SugaredLogger {
+	return bb.logger
+}
diff --git a/cmd/lotus-sim/simulation/blockbuilder/errors.go b/cmd/lotus-sim/simulation/blockbuilder/errors.go
new file mode 100644
index 00000000000..ddf08ea1899
--- /dev/null
+++ b/cmd/lotus-sim/simulation/blockbuilder/errors.go
@@ -0,0 +1,25 @@
+package blockbuilder
+
+import (
+	"errors"
+	"fmt"
+)
+
+// ErrOutOfGas is returned from BlockBuilder.PushMessage when the block does not have enough gas to
+// fit the given message.
+type ErrOutOfGas struct {
+	Available, Required int64
+}
+
+func (e *ErrOutOfGas) Error() string {
+	if e.Available == 0 {
+		return "out of gas: block full"
+	}
+	return fmt.Sprintf("out of gas: %d < %d", e.Available, e.Required)
+}
+
+// IsOutOfGas returns true if the error is an "out of gas" error.
+func IsOutOfGas(err error) bool {
+	var oog *ErrOutOfGas
+	return errors.As(err, &oog)
+}
diff --git a/cmd/lotus-sim/simulation/messages.go b/cmd/lotus-sim/simulation/messages.go
new file mode 100644
index 00000000000..5bed2743670
--- /dev/null
+++ b/cmd/lotus-sim/simulation/messages.go
@@ -0,0 +1,58 @@
+package simulation
+
+import (
+	"context"
+
+	"github.com/ipfs/go-cid"
+	cbg "github.com/whyrusleeping/cbor-gen"
+
+	blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
+
+	"github.com/filecoin-project/lotus/chain/types"
+)
+
+// toArray converts the given set of CIDs to an AMT. This is usually used to pack messages into blocks.
+func toArray(store blockadt.Store, cids []cid.Cid) (cid.Cid, error) {
+	arr := blockadt.MakeEmptyArray(store)
+	for i, c := range cids {
+		oc := cbg.CborCid(c)
+		if err := arr.Set(uint64(i), &oc); err != nil {
+			return cid.Undef, err
+		}
+	}
+	return arr.Root()
+}
+
+// storeMessages packs a set of messages into a types.MsgMeta and returns the resulting CID.
The +// resulting CID is valid for the BlocKHeader's Messages field. +func (sim *Simulation) storeMessages(ctx context.Context, messages []*types.Message) (cid.Cid, error) { + // We store all messages as "bls" messages so they're executed in-order. This ensures + // accurate gas accounting. It also ensures we don't, e.g., try to fund a miner after we + // fail a pre-commit... + var msgCids []cid.Cid + for _, msg := range messages { + c, err := sim.Node.Chainstore.PutMessage(msg) + if err != nil { + return cid.Undef, err + } + msgCids = append(msgCids, c) + } + adtStore := sim.Node.Chainstore.ActorStore(ctx) + blsMsgArr, err := toArray(adtStore, msgCids) + if err != nil { + return cid.Undef, err + } + sekpMsgArr, err := toArray(adtStore, nil) + if err != nil { + return cid.Undef, err + } + + msgsCid, err := adtStore.Put(adtStore.Context(), &types.MsgMeta{ + BlsMessages: blsMsgArr, + SecpkMessages: sekpMsgArr, + }) + if err != nil { + return cid.Undef, err + } + return msgsCid, nil +} diff --git a/cmd/lotus-sim/simulation/mock/mock.go b/cmd/lotus-sim/simulation/mock/mock.go new file mode 100644 index 00000000000..38648f758dc --- /dev/null +++ b/cmd/lotus-sim/simulation/mock/mock.go @@ -0,0 +1,179 @@ +package mock + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + tutils "github.com/filecoin-project/specs-actors/v5/support/testing" + + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" +) + +// Ideally, we'd use extern/sector-storage/mock. Unfortunately, those mocks are a bit _too_ accurate +// and would force us to load sector info for window post proofs. 
+ +const ( + mockSealProofPrefix = "valid seal proof:" + mockAggregateSealProofPrefix = "valid aggregate seal proof:" + mockPoStProofPrefix = "valid post proof:" +) + +var log = logging.Logger("simulation-mock") + +// mockVerifier is a simple mock for verifying "fake" proofs. +type mockVerifier struct{} + +var Verifier ffiwrapper.Verifier = mockVerifier{} + +func (mockVerifier) VerifySeal(proof proof5.SealVerifyInfo) (bool, error) { + addr, err := address.NewIDAddress(uint64(proof.Miner)) + if err != nil { + return false, err + } + mockProof, err := MockSealProof(proof.SealProof, addr) + if err != nil { + return false, err + } + if bytes.Equal(proof.Proof, mockProof) { + return true, nil + } + log.Debugw("invalid seal proof", "expected", mockProof, "actual", proof.Proof, "miner", addr) + return false, nil +} + +func (mockVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + addr, err := address.NewIDAddress(uint64(aggregate.Miner)) + if err != nil { + return false, err + } + mockProof, err := MockAggregateSealProof(aggregate.SealProof, addr, len(aggregate.Infos)) + if err != nil { + return false, err + } + if bytes.Equal(aggregate.Proof, mockProof) { + return true, nil + } + log.Debugw("invalid aggregate seal proof", + "expected", mockProof, + "actual", aggregate.Proof, + "count", len(aggregate.Infos), + "miner", addr, + ) + return false, nil +} +func (mockVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { + panic("should not be called") +} +func (mockVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { + if len(info.Proofs) != 1 { + return false, fmt.Errorf("expected exactly one proof") + } + proof := info.Proofs[0] + addr, err := address.NewIDAddress(uint64(info.Prover)) + if err != nil { + return false, err + } + mockProof, err := MockWindowPoStProof(proof.PoStProof, addr) + if err != nil { + return false, err + } + if 
bytes.Equal(proof.ProofBytes, mockProof) { + return true, nil + } + + log.Debugw("invalid window post proof", + "expected", mockProof, + "actual", info.Proofs[0], + "miner", addr, + ) + return false, nil +} + +func (mockVerifier) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) { + panic("should not be called") +} + +// MockSealProof generates a mock "seal" proof tied to the specified proof type and the given miner. +func MockSealProof(proofType abi.RegisteredSealProof, minerAddr address.Address) ([]byte, error) { + plen, err := proofType.ProofSize() + if err != nil { + return nil, err + } + proof := make([]byte, plen) + i := copy(proof, mockSealProofPrefix) + binary.BigEndian.PutUint64(proof[i:], uint64(proofType)) + i += 8 + i += copy(proof[i:], minerAddr.Bytes()) + return proof, nil +} + +// MockAggregateSealProof generates a mock "seal" aggregate proof tied to the specified proof type, +// the given miner, and the number of proven sectors. +func MockAggregateSealProof(proofType abi.RegisteredSealProof, minerAddr address.Address, count int) ([]byte, error) { + proof := make([]byte, aggProofLen(count)) + i := copy(proof, mockAggregateSealProofPrefix) + binary.BigEndian.PutUint64(proof[i:], uint64(proofType)) + i += 8 + binary.BigEndian.PutUint64(proof[i:], uint64(count)) + i += 8 + i += copy(proof[i:], minerAddr.Bytes()) + + return proof, nil +} + +// MockWindowPoStProof generates a mock "window post" proof tied to the specified proof type, and the +// given miner. +func MockWindowPoStProof(proofType abi.RegisteredPoStProof, minerAddr address.Address) ([]byte, error) { + plen, err := proofType.ProofSize() + if err != nil { + return nil, err + } + proof := make([]byte, plen) + i := copy(proof, mockPoStProofPrefix) + i += copy(proof[i:], minerAddr.Bytes()) + return proof, nil +} + +// makeCommR generates a "fake" but valid CommR for a sector. 
It is unique for the given sector/miner. +func MockCommR(minerAddr address.Address, sno abi.SectorNumber) cid.Cid { + return tutils.MakeCID(fmt.Sprintf("%s:%d", minerAddr, sno), &miner5.SealedCIDPrefix) +} + +// TODO: dedup +func aggProofLen(nproofs int) int { + switch { + case nproofs <= 8: + return 11220 + case nproofs <= 16: + return 14196 + case nproofs <= 32: + return 17172 + case nproofs <= 64: + return 20148 + case nproofs <= 128: + return 23124 + case nproofs <= 256: + return 26100 + case nproofs <= 512: + return 29076 + case nproofs <= 1024: + return 32052 + case nproofs <= 2048: + return 35028 + case nproofs <= 4096: + return 38004 + case nproofs <= 8192: + return 40980 + default: + panic("too many proofs") + } +} diff --git a/cmd/lotus-sim/simulation/node.go b/cmd/lotus-sim/simulation/node.go new file mode 100644 index 00000000000..5b8bf2bf91f --- /dev/null +++ b/cmd/lotus-sim/simulation/node.go @@ -0,0 +1,241 @@ +package simulation + +import ( + "context" + "strings" + + "go.uber.org/multierr" + "golang.org/x/xerrors" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/stages" + "github.com/filecoin-project/lotus/node/repo" +) + +// Node represents the local lotus node, or at least the part of it we care about. +type Node struct { + repo repo.LockedRepo + Blockstore blockstore.Blockstore + MetadataDS datastore.Batching + Chainstore *store.ChainStore +} + +// OpenNode opens the local lotus node for writing. This will fail if the node is online. 
+func OpenNode(ctx context.Context, path string) (*Node, error) { + r, err := repo.NewFS(path) + if err != nil { + return nil, err + } + + return NewNode(ctx, r) +} + +// NewNode constructs a new node from the given repo. +func NewNode(ctx context.Context, r repo.Repo) (nd *Node, _err error) { + lr, err := r.Lock(repo.FullNode) + if err != nil { + return nil, err + } + defer func() { + if _err != nil { + _ = lr.Close() + } + }() + + bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore) + if err != nil { + return nil, err + } + + ds, err := lr.Datastore(ctx, "/metadata") + if err != nil { + return nil, err + } + return &Node{ + repo: lr, + Chainstore: store.NewChainStore(bs, bs, ds, vm.Syscalls(mock.Verifier), nil), + MetadataDS: ds, + Blockstore: bs, + }, err +} + +// Close cleanly close the repo. Please call this on shutdown to make sure everything is flushed. +func (nd *Node) Close() error { + if nd.repo != nil { + return nd.repo.Close() + } + return nil +} + +// LoadSim loads +func (nd *Node) LoadSim(ctx context.Context, name string) (*Simulation, error) { + stages, err := stages.DefaultPipeline() + if err != nil { + return nil, err + } + sim := &Simulation{ + Node: nd, + name: name, + stages: stages, + } + + sim.head, err = sim.loadNamedTipSet("head") + if err != nil { + return nil, err + } + sim.start, err = sim.loadNamedTipSet("start") + if err != nil { + return nil, err + } + + err = sim.loadConfig() + if err != nil { + return nil, xerrors.Errorf("failed to load config for simulation %s: %w", name, err) + } + + us, err := sim.config.upgradeSchedule() + if err != nil { + return nil, xerrors.Errorf("failed to create upgrade schedule for simulation %s: %w", name, err) + } + sim.StateManager, err = stmgr.NewStateManagerWithUpgradeSchedule(nd.Chainstore, us) + if err != nil { + return nil, xerrors.Errorf("failed to create state manager for simulation %s: %w", name, err) + } + return sim, nil +} + +// Create creates a new simulation. 
+// +// - This will fail if a simulation already exists with the given name. +// - Name must not contain a '/'. +func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) (*Simulation, error) { + if strings.Contains(name, "/") { + return nil, xerrors.Errorf("simulation name %q cannot contain a '/'", name) + } + stages, err := stages.DefaultPipeline() + if err != nil { + return nil, err + } + sim := &Simulation{ + name: name, + Node: nd, + StateManager: stmgr.NewStateManager(nd.Chainstore), + stages: stages, + } + if has, err := nd.MetadataDS.Has(sim.key("head")); err != nil { + return nil, err + } else if has { + return nil, xerrors.Errorf("simulation named %s already exists", name) + } + + if err := sim.storeNamedTipSet("start", head); err != nil { + return nil, xerrors.Errorf("failed to set simulation start: %w", err) + } + + if err := sim.SetHead(head); err != nil { + return nil, err + } + + return sim, nil +} + +// ListSims lists all simulations. +func (nd *Node) ListSims(ctx context.Context) ([]string, error) { + prefix := simulationPrefix.ChildString("head").String() + items, err := nd.MetadataDS.Query(query.Query{ + Prefix: prefix, + KeysOnly: true, + Orders: []query.Order{query.OrderByKey{}}, + }) + if err != nil { + return nil, xerrors.Errorf("failed to list simulations: %w", err) + } + + defer func() { _ = items.Close() }() + + var names []string + for { + select { + case result, ok := <-items.Next(): + if !ok { + return names, nil + } + if result.Error != nil { + return nil, xerrors.Errorf("failed to retrieve next simulation: %w", result.Error) + } + names = append(names, strings.TrimPrefix(result.Key, prefix+"/")) + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +var simFields = []string{"head", "start", "config"} + +// DeleteSim deletes a simulation and all related metadata. +// +// NOTE: This function does not delete associated messages, blocks, or chain state. 
+func (nd *Node) DeleteSim(ctx context.Context, name string) error { + var err error + for _, field := range simFields { + key := simulationPrefix.ChildString(field).ChildString(name) + err = multierr.Append(err, nd.MetadataDS.Delete(key)) + } + return err +} + +// CopySim copies a simulation. +func (nd *Node) CopySim(ctx context.Context, oldName, newName string) error { + if strings.Contains(newName, "/") { + return xerrors.Errorf("simulation name %q cannot contain a '/'", newName) + } + if strings.Contains(oldName, "/") { + return xerrors.Errorf("simulation name %q cannot contain a '/'", oldName) + } + + values := make(map[string][]byte) + for _, field := range simFields { + key := simulationPrefix.ChildString(field).ChildString(oldName) + value, err := nd.MetadataDS.Get(key) + if err == datastore.ErrNotFound { + continue + } else if err != nil { + return err + } + values[field] = value + } + + if _, ok := values["head"]; !ok { + return xerrors.Errorf("simulation named %s not found", oldName) + } + + for _, field := range simFields { + key := simulationPrefix.ChildString(field).ChildString(newName) + var err error + if value, ok := values[field]; ok { + err = nd.MetadataDS.Put(key, value) + } else { + err = nd.MetadataDS.Delete(key) + } + if err != nil { + return err + } + } + return nil +} + +// RenameSim renames a simulation. 
+func (nd *Node) RenameSim(ctx context.Context, oldName, newName string) error { + if err := nd.CopySim(ctx, oldName, newName); err != nil { + return err + } + return nd.DeleteSim(ctx, oldName) +} diff --git a/cmd/lotus-sim/simulation/simulation.go b/cmd/lotus-sim/simulation/simulation.go new file mode 100644 index 00000000000..d91d30edaf2 --- /dev/null +++ b/cmd/lotus-sim/simulation/simulation.go @@ -0,0 +1,408 @@ +package simulation + +import ( + "context" + "encoding/json" + "runtime" + + "golang.org/x/sync/errgroup" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/stages" +) + +var log = logging.Logger("simulation") + +// config is the simulation's config, persisted to the local metadata store and loaded on start. +// +// See Simulation.loadConfig and Simulation.saveConfig. +type config struct { + Upgrades map[network.Version]abi.ChainEpoch +} + +// upgradeSchedule constructs an stmgr.StateManager upgrade schedule, overriding any network upgrade +// epochs as specified in the config. +func (c *config) upgradeSchedule() (stmgr.UpgradeSchedule, error) { + upgradeSchedule := stmgr.DefaultUpgradeSchedule() + expected := make(map[network.Version]struct{}, len(c.Upgrades)) + for nv := range c.Upgrades { + expected[nv] = struct{}{} + } + + // Update network upgrade epochs. 
+ newUpgradeSchedule := upgradeSchedule[:0] + for _, upgrade := range upgradeSchedule { + if height, ok := c.Upgrades[upgrade.Network]; ok { + delete(expected, upgrade.Network) + if height < 0 { + continue + } + upgrade.Height = height + } + newUpgradeSchedule = append(newUpgradeSchedule, upgrade) + } + + // Make sure we didn't try to configure an unknown network version. + if len(expected) > 0 { + missing := make([]network.Version, 0, len(expected)) + for nv := range expected { + missing = append(missing, nv) + } + return nil, xerrors.Errorf("unknown network versions %v in config", missing) + } + + // Finally, validate it. This ensures we don't change the order of the upgrade or anything + // like that. + if err := newUpgradeSchedule.Validate(); err != nil { + return nil, err + } + return newUpgradeSchedule, nil +} + +// Simulation specifies a lotus-sim simulation. +type Simulation struct { + Node *Node + StateManager *stmgr.StateManager + + name string + config config + start *types.TipSet + + // head + head *types.TipSet + + stages []stages.Stage +} + +// loadConfig loads a simulation's config from the datastore. This must be called on startup and may +// be called to restore the config from-disk. +func (sim *Simulation) loadConfig() error { + configBytes, err := sim.Node.MetadataDS.Get(sim.key("config")) + if err == nil { + err = json.Unmarshal(configBytes, &sim.config) + } + switch err { + case nil: + case datastore.ErrNotFound: + sim.config = config{} + default: + return xerrors.Errorf("failed to load config: %w", err) + } + return nil +} + +// saveConfig saves the current config to the datastore. This must be called whenever the config is +// changed. +func (sim *Simulation) saveConfig() error { + buf, err := json.Marshal(sim.config) + if err != nil { + return err + } + return sim.Node.MetadataDS.Put(sim.key("config"), buf) +} + +var simulationPrefix = datastore.NewKey("/simulation") + +// key returns the the key in the form /simulation//. 
For example, +// /simulation/head/default. +func (sim *Simulation) key(subkey string) datastore.Key { + return simulationPrefix.ChildString(subkey).ChildString(sim.name) +} + +// loadNamedTipSet the tipset with the given name (for this simulation) +func (sim *Simulation) loadNamedTipSet(name string) (*types.TipSet, error) { + tskBytes, err := sim.Node.MetadataDS.Get(sim.key(name)) + if err != nil { + return nil, xerrors.Errorf("failed to load tipset %s/%s: %w", sim.name, name, err) + } + tsk, err := types.TipSetKeyFromBytes(tskBytes) + if err != nil { + return nil, xerrors.Errorf("failed to parse tipste %v (%s/%s): %w", tskBytes, sim.name, name, err) + } + ts, err := sim.Node.Chainstore.LoadTipSet(tsk) + if err != nil { + return nil, xerrors.Errorf("failed to load tipset %s (%s/%s): %w", tsk, sim.name, name, err) + } + return ts, nil +} + +// storeNamedTipSet stores the tipset at name (relative to the simulation). +func (sim *Simulation) storeNamedTipSet(name string, ts *types.TipSet) error { + if err := sim.Node.MetadataDS.Put(sim.key(name), ts.Key().Bytes()); err != nil { + return xerrors.Errorf("failed to store tipset (%s/%s): %w", sim.name, name, err) + } + return nil +} + +// GetHead returns the current simulation head. +func (sim *Simulation) GetHead() *types.TipSet { + return sim.head +} + +// GetStart returns simulation's parent tipset. +func (sim *Simulation) GetStart() *types.TipSet { + return sim.start +} + +// GetNetworkVersion returns the current network version for the simulation. +func (sim *Simulation) GetNetworkVersion() network.Version { + return sim.StateManager.GetNtwkVersion(context.TODO(), sim.head.Height()) +} + +// SetHead updates the current head of the simulation and stores it in the metadata store. This is +// called for every Simulation.Step. 
+func (sim *Simulation) SetHead(head *types.TipSet) error { + if err := sim.storeNamedTipSet("head", head); err != nil { + return err + } + sim.head = head + return nil +} + +// Name returns the simulation's name. +func (sim *Simulation) Name() string { + return sim.name +} + +// SetUpgradeHeight sets the height of the given network version change (and saves the config). +// +// This fails if the specified epoch has already passed or the new upgrade schedule is invalid. +func (sim *Simulation) SetUpgradeHeight(nv network.Version, epoch abi.ChainEpoch) (_err error) { + if epoch <= sim.head.Height() { + return xerrors.Errorf("cannot set upgrade height in the past (%d <= %d)", epoch, sim.head.Height()) + } + + if sim.config.Upgrades == nil { + sim.config.Upgrades = make(map[network.Version]abi.ChainEpoch, 1) + } + + sim.config.Upgrades[nv] = epoch + defer func() { + if _err != nil { + // try to restore the old config on error. + _ = sim.loadConfig() + } + }() + + newUpgradeSchedule, err := sim.config.upgradeSchedule() + if err != nil { + return err + } + sm, err := stmgr.NewStateManagerWithUpgradeSchedule(sim.Node.Chainstore, newUpgradeSchedule) + if err != nil { + return err + } + err = sim.saveConfig() + if err != nil { + return err + } + + sim.StateManager = sm + return nil +} + +// ListUpgrades returns any future network upgrades. +func (sim *Simulation) ListUpgrades() (stmgr.UpgradeSchedule, error) { + upgrades, err := sim.config.upgradeSchedule() + if err != nil { + return nil, err + } + var pending stmgr.UpgradeSchedule + for _, upgrade := range upgrades { + if upgrade.Height < sim.head.Height() { + continue + } + pending = append(pending, upgrade) + } + return pending, nil +} + +type AppliedMessage struct { + types.Message + types.MessageReceipt +} + +// Walk walks the simulation's chain from the current head back to the first tipset. 
+func (sim *Simulation) Walk(
+	ctx context.Context,
+	lookback int64,
+	cb func(sm *stmgr.StateManager,
+		ts *types.TipSet,
+		stCid cid.Cid,
+		messages []*AppliedMessage) error,
+) error {
+	store := sim.Node.Chainstore.ActorStore(ctx)
+	minEpoch := sim.start.Height()
+	if lookback != 0 {
+		minEpoch = sim.head.Height() - abi.ChainEpoch(lookback)
+	}
+
+	// Given that loading messages and receipts can be a little bit slow, we do this in parallel.
+	//
+	// 1. We spin up some number of workers.
+	// 2. We hand tipsets to workers in round-robin order.
+	// 3. We pull "resolved" tipsets in the same round-robin order.
+	// 4. We serially call the callback in reverse-chain order.
+	//
+	// We have a buffer of size 1 for both resolved tipsets and unresolved tipsets. This should
+	// ensure that we never block unnecessarily.
+
+	type work struct {
+		ts     *types.TipSet
+		stCid  cid.Cid
+		recCid cid.Cid
+	}
+	type result struct {
+		ts       *types.TipSet
+		stCid    cid.Cid
+		messages []*AppliedMessage
+	}
+
+	// This is more disk bound than CPU bound, but eh...
+	workerCount := runtime.NumCPU() * 2
+
+	workQs := make([]chan *work, workerCount)
+	resultQs := make([]chan *result, workerCount)
+
+	for i := range workQs {
+		workQs[i] = make(chan *work, 1)
+	}
+
+	for i := range resultQs {
+		resultQs[i] = make(chan *result, 1)
+	}
+
+	grp, ctx := errgroup.WithContext(ctx)
+
+	// Walk the chain and fire off work items.
+ grp.Go(func() error { + ts := sim.head + stCid, recCid, err := sim.StateManager.TipSetState(ctx, ts) + if err != nil { + return err + } + i := 0 + for ts.Height() > minEpoch { + if err := ctx.Err(); err != nil { + return ctx.Err() + } + + select { + case workQs[i] <- &work{ts, stCid, recCid}: + case <-ctx.Done(): + return ctx.Err() + } + + stCid = ts.MinTicketBlock().ParentStateRoot + recCid = ts.MinTicketBlock().ParentMessageReceipts + ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents()) + if err != nil { + return xerrors.Errorf("loading parent: %w", err) + } + i = (i + 1) % workerCount + } + for _, q := range workQs { + close(q) + } + return nil + }) + + // Spin up one worker per queue pair. + for i := 0; i < workerCount; i++ { + workQ := workQs[i] + resultQ := resultQs[i] + grp.Go(func() error { + for { + if err := ctx.Err(); err != nil { + return ctx.Err() + } + + var job *work + var ok bool + select { + case job, ok = <-workQ: + case <-ctx.Done(): + return ctx.Err() + } + + if !ok { + break + } + + msgs, err := sim.Node.Chainstore.MessagesForTipset(job.ts) + if err != nil { + return err + } + + recs, err := blockadt.AsArray(store, job.recCid) + if err != nil { + return xerrors.Errorf("amt load: %w", err) + } + applied := make([]*AppliedMessage, len(msgs)) + var rec types.MessageReceipt + err = recs.ForEach(&rec, func(i int64) error { + applied[i] = &AppliedMessage{ + Message: *msgs[i].VMMessage(), + MessageReceipt: rec, + } + return nil + }) + if err != nil { + return err + } + select { + case resultQ <- &result{ + ts: job.ts, + stCid: job.stCid, + messages: applied, + }: + case <-ctx.Done(): + return ctx.Err() + } + } + close(resultQ) + return nil + }) + } + + // Process results in the same order we enqueued them. 
+ grp.Go(func() error { + qs := resultQs + for len(qs) > 0 { + newQs := qs[:0] + for _, q := range qs { + if err := ctx.Err(); err != nil { + return ctx.Err() + } + select { + case r, ok := <-q: + if !ok { + continue + } + err := cb(sim.StateManager, r.ts, r.stCid, r.messages) + if err != nil { + return err + } + case <-ctx.Done(): + return ctx.Err() + } + newQs = append(newQs, q) + } + qs = newQs + } + return nil + }) + + // Wait for everything to finish. + return grp.Wait() +} diff --git a/cmd/lotus-sim/simulation/stages/actor_iter.go b/cmd/lotus-sim/simulation/stages/actor_iter.go new file mode 100644 index 00000000000..b2c14ebdb0d --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/actor_iter.go @@ -0,0 +1,38 @@ +package stages + +import ( + "math/rand" + + "github.com/filecoin-project/go-address" +) + +// actorIter is a simple persistent iterator that loops over a set of actors. +type actorIter struct { + actors []address.Address + offset int +} + +// shuffle randomly permutes the set of actors. +func (p *actorIter) shuffle() { + rand.Shuffle(len(p.actors), func(i, j int) { + p.actors[i], p.actors[j] = p.actors[j], p.actors[i] + }) +} + +// next returns the next actor's address and advances the iterator. +func (p *actorIter) next() address.Address { + next := p.actors[p.offset] + p.offset++ + p.offset %= len(p.actors) + return next +} + +// add adds a new actor to the iterator. +func (p *actorIter) add(addr address.Address) { + p.actors = append(p.actors, addr) +} + +// len returns the number of actors in the iterator. 
+func (p *actorIter) len() int { + return len(p.actors) +} diff --git a/cmd/lotus-sim/simulation/stages/commit_queue.go b/cmd/lotus-sim/simulation/stages/commit_queue.go new file mode 100644 index 00000000000..d625dedb65f --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/commit_queue.go @@ -0,0 +1,200 @@ +package stages + +import ( + "sort" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" +) + +// pendingCommitTracker tracks pending commits per-miner for a single epoch. +type pendingCommitTracker map[address.Address]minerPendingCommits + +// minerPendingCommits tracks a miner's pending commits during a single epoch (grouped by seal proof type). +type minerPendingCommits map[abi.RegisteredSealProof][]abi.SectorNumber + +// finish marks count sectors of the given proof type as "prove-committed". +func (m minerPendingCommits) finish(proof abi.RegisteredSealProof, count int) { + snos := m[proof] + if len(snos) < count { + panic("not enough sector numbers to finish") + } else if len(snos) == count { + delete(m, proof) + } else { + m[proof] = snos[count:] + } +} + +// empty returns true if there are no pending commits. +func (m minerPendingCommits) empty() bool { + return len(m) == 0 +} + +// count returns the number of pending commits. +func (m minerPendingCommits) count() int { + count := 0 + for _, snos := range m { + count += len(snos) + } + return count +} + +// commitQueue is used to track pending prove-commits. +// +// Miners are processed in round-robin where _all_ commits from a given miner are finished before +// moving on to the next. This is designed to maximize batching. +type commitQueue struct { + minerQueue []address.Address + queue []pendingCommitTracker + offset abi.ChainEpoch +} + +// ready returns the number of prove-commits ready to be proven at the current epoch. 
Useful for logging. +func (q *commitQueue) ready() int { + if len(q.queue) == 0 { + return 0 + } + count := 0 + for _, pending := range q.queue[0] { + count += pending.count() + } + return count +} + +// nextMiner returns the next miner to be proved and the set of pending prove commits for that +// miner. When some number of sectors have successfully been proven, call "finish" so we don't try +// to prove them again. +func (q *commitQueue) nextMiner() (address.Address, minerPendingCommits, bool) { + if len(q.queue) == 0 { + return address.Undef, nil, false + } + next := q.queue[0] + + // Go through the queue and find the first non-empty batch. + for len(q.minerQueue) > 0 { + addr := q.minerQueue[0] + q.minerQueue = q.minerQueue[1:] + pending := next[addr] + if !pending.empty() { + return addr, pending, true + } + delete(next, addr) + } + + return address.Undef, nil, false +} + +// advanceEpoch will advance to the next epoch. If some sectors were left unproven in the current +// epoch, they will be "prepended" into the next epochs sector set. +func (q *commitQueue) advanceEpoch(epoch abi.ChainEpoch) { + if epoch < q.offset { + panic("cannot roll epoch backwards") + } + // Now we "roll forwards", merging each epoch we advance over with the next. + for len(q.queue) > 1 && q.offset < epoch { + curr := q.queue[0] + q.queue[0] = nil + q.queue = q.queue[1:] + q.offset++ + + next := q.queue[0] + + // Cleanup empty entries. + for addr, pending := range curr { + if pending.empty() { + delete(curr, addr) + } + } + + // If the entire level is actually empty, just skip to the next one. + if len(curr) == 0 { + continue + } + + // Otherwise, merge the next into the current. 
+		for addr, nextPending := range next {
+			currPending := curr[addr]
+			if currPending.empty() {
+				curr[addr] = nextPending
+				continue
+			}
+			for ty, nextSnos := range nextPending {
+				currSnos := currPending[ty]
+				if len(currSnos) == 0 {
+					currPending[ty] = nextSnos
+					continue
+				}
+				currPending[ty] = append(currSnos, nextSnos...)
+			}
+		}
+		// Now replace next with the merged curr.
+		q.queue[0] = curr
+	}
+	q.offset = epoch
+	if len(q.queue) == 0 {
+		return
+	}
+
+	next := q.queue[0]
+	seenMiners := make(map[address.Address]struct{}, len(q.minerQueue))
+	for _, addr := range q.minerQueue {
+		seenMiners[addr] = struct{}{}
+	}
+
+	// Find the new miners not already in the queue.
+	offset := len(q.minerQueue)
+	for addr, pending := range next {
+		if pending.empty() {
+			delete(next, addr)
+			continue
+		}
+		if _, ok := seenMiners[addr]; ok {
+			continue
+		}
+		q.minerQueue = append(q.minerQueue, addr)
+	}
+
+	// Sort the new miners only.
+	newMiners := q.minerQueue[offset:]
+	sort.Slice(newMiners, func(i, j int) bool {
+		// eh, escape analysis should be fine here...
+		return string(newMiners[i].Bytes()) < string(newMiners[j].Bytes())
+	})
+}
+
+// enqueueProveCommit enqueues prove-commit for the given pre-commit for the given miner.
+func (q *commitQueue) enqueueProveCommit(addr address.Address, preCommitEpoch abi.ChainEpoch, info miner.SectorPreCommitInfo) error {
+	// Compute the epoch at which we can start trying to commit.
+	preCommitDelay := policy.GetPreCommitChallengeDelay()
+	minCommitEpoch := preCommitEpoch + preCommitDelay + 1
+
+	// Figure out the offset in the queue.
+	i := int(minCommitEpoch - q.offset)
+	if i < 0 {
+		i = 0
+	}
+
+	// Expand capacity and insert.
+ if cap(q.queue) <= i { + pc := make([]pendingCommitTracker, i+1, preCommitDelay*2) + copy(pc, q.queue) + q.queue = pc + } else if len(q.queue) <= i { + q.queue = q.queue[:i+1] + } + tracker := q.queue[i] + if tracker == nil { + tracker = make(pendingCommitTracker) + q.queue[i] = tracker + } + minerPending := tracker[addr] + if minerPending == nil { + minerPending = make(minerPendingCommits) + tracker[addr] = minerPending + } + minerPending[info.SealProof] = append(minerPending[info.SealProof], info.SectorNumber) + return nil +} diff --git a/cmd/lotus-sim/simulation/stages/commit_queue_test.go b/cmd/lotus-sim/simulation/stages/commit_queue_test.go new file mode 100644 index 00000000000..8ab05250efb --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/commit_queue_test.go @@ -0,0 +1,128 @@ +package stages + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" +) + +func TestCommitQueue(t *testing.T) { + var q commitQueue + addr1, err := address.NewIDAddress(1000) + require.NoError(t, err) + proofType := abi.RegisteredSealProof_StackedDrg64GiBV1_1 + require.NoError(t, q.enqueueProveCommit(addr1, 0, miner.SectorPreCommitInfo{ + SealProof: proofType, + SectorNumber: 0, + })) + require.NoError(t, q.enqueueProveCommit(addr1, 0, miner.SectorPreCommitInfo{ + SealProof: proofType, + SectorNumber: 1, + })) + require.NoError(t, q.enqueueProveCommit(addr1, 1, miner.SectorPreCommitInfo{ + SealProof: proofType, + SectorNumber: 2, + })) + require.NoError(t, q.enqueueProveCommit(addr1, 1, miner.SectorPreCommitInfo{ + SealProof: proofType, + SectorNumber: 3, + })) + require.NoError(t, q.enqueueProveCommit(addr1, 3, miner.SectorPreCommitInfo{ + SealProof: proofType, + SectorNumber: 4, + })) + require.NoError(t, q.enqueueProveCommit(addr1, 4, 
miner.SectorPreCommitInfo{ + SealProof: proofType, + SectorNumber: 5, + })) + require.NoError(t, q.enqueueProveCommit(addr1, 6, miner.SectorPreCommitInfo{ + SealProof: proofType, + SectorNumber: 6, + })) + + epoch := abi.ChainEpoch(0) + q.advanceEpoch(epoch) + _, _, ok := q.nextMiner() + require.False(t, ok) + + epoch += policy.GetPreCommitChallengeDelay() + q.advanceEpoch(epoch) + _, _, ok = q.nextMiner() + require.False(t, ok) + + // 0 : empty + non-empty + epoch++ + q.advanceEpoch(epoch) + addr, sectors, ok := q.nextMiner() + require.True(t, ok) + require.Equal(t, sectors.count(), 2) + require.Equal(t, addr, addr1) + sectors.finish(proofType, 1) + require.Equal(t, sectors.count(), 1) + require.EqualValues(t, []abi.SectorNumber{1}, sectors[proofType]) + + // 1 : non-empty + non-empty + epoch++ + q.advanceEpoch(epoch) + addr, sectors, ok = q.nextMiner() + require.True(t, ok) + require.Equal(t, addr, addr1) + require.Equal(t, sectors.count(), 3) + require.EqualValues(t, []abi.SectorNumber{1, 2, 3}, sectors[proofType]) + sectors.finish(proofType, 3) + require.Equal(t, sectors.count(), 0) + + // 2 : empty + empty + epoch++ + q.advanceEpoch(epoch) + _, _, ok = q.nextMiner() + require.False(t, ok) + + // 3 : empty + non-empty + epoch++ + q.advanceEpoch(epoch) + _, sectors, ok = q.nextMiner() + require.True(t, ok) + require.Equal(t, sectors.count(), 1) + require.EqualValues(t, []abi.SectorNumber{4}, sectors[proofType]) + + // 4 : non-empty + non-empty + epoch++ + q.advanceEpoch(epoch) + _, sectors, ok = q.nextMiner() + require.True(t, ok) + require.Equal(t, sectors.count(), 2) + require.EqualValues(t, []abi.SectorNumber{4, 5}, sectors[proofType]) + + // 5 : empty + non-empty + epoch++ + q.advanceEpoch(epoch) + _, sectors, ok = q.nextMiner() + require.True(t, ok) + require.Equal(t, sectors.count(), 2) + require.EqualValues(t, []abi.SectorNumber{4, 5}, sectors[proofType]) + sectors.finish(proofType, 1) + require.EqualValues(t, []abi.SectorNumber{5}, sectors[proofType]) + 
+ // 6 + epoch++ + q.advanceEpoch(epoch) + _, sectors, ok = q.nextMiner() + require.True(t, ok) + require.Equal(t, sectors.count(), 2) + require.EqualValues(t, []abi.SectorNumber{5, 6}, sectors[proofType]) + + // 8 + epoch += 2 + q.advanceEpoch(epoch) + _, sectors, ok = q.nextMiner() + require.True(t, ok) + require.Equal(t, sectors.count(), 2) + require.EqualValues(t, []abi.SectorNumber{5, 6}, sectors[proofType]) +} diff --git a/cmd/lotus-sim/simulation/stages/funding_stage.go b/cmd/lotus-sim/simulation/stages/funding_stage.go new file mode 100644 index 00000000000..f57f852931c --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/funding_stage.go @@ -0,0 +1,318 @@ +package stages + +import ( + "bytes" + "context" + "sort" + "time" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + + "github.com/filecoin-project/lotus/chain/actors/aerrors" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder" +) + +var ( + TargetFunds = abi.TokenAmount(types.MustParseFIL("1000FIL")) + MinimumFunds = abi.TokenAmount(types.MustParseFIL("100FIL")) +) + +type FundingStage struct { + fundAccount address.Address + taxMin abi.TokenAmount + minFunds, maxFunds abi.TokenAmount +} + +func NewFundingStage() (*FundingStage, error) { + // TODO: make all this configurable. 
+	// Fund from the burnt-funds actor (ID address 100): it is convenient,
+	// always exists, and its balance only ever grows on a real chain.
+	addr, err := address.NewIDAddress(100)
+	if err != nil {
+		return nil, err
+	}
+	return &FundingStage{
+		fundAccount: addr,
+		taxMin:      abi.TokenAmount(types.MustParseFIL("1000FIL")),
+		minFunds:    abi.TokenAmount(types.MustParseFIL("1000000FIL")),
+		maxFunds:    abi.TokenAmount(types.MustParseFIL("100000000FIL")),
+	}, nil
+}
+
+func (*FundingStage) Name() string {
+	return "funding"
+}
+
+// Fund transfers the base funding amount (TargetFunds) to the target actor.
+func (fs *FundingStage) Fund(bb *blockbuilder.BlockBuilder, target address.Address) error {
+	return fs.fund(bb, target, 0)
+}
+
+// SendAndFund "packs" the given message, funding the actor if necessary. It:
+//
+// 1. Tries to send the given message.
+// 2. If that fails, it checks to see if the exit code was ErrInsufficientFunds.
+// 3. If so, it sends 1K FIL from the "burnt funds actor" (because we need to send it from
+//    somewhere) and re-tries the message.
+//
+// At most 10 attempts are made; each retry funds with a doubled amount (see fund).
+func (fs *FundingStage) SendAndFund(bb *blockbuilder.BlockBuilder, msg *types.Message) (res *types.MessageReceipt, err error) {
+	for i := 0; i < 10; i++ {
+		res, err = bb.PushMessage(msg)
+		if err == nil {
+			return res, nil
+		}
+		// Any failure other than insufficient funds is not ours to fix here.
+		aerr, ok := err.(aerrors.ActorError)
+		if !ok || aerr.RetCode() != exitcode.ErrInsufficientFunds {
+			return nil, err
+		}
+
+		// Ok, insufficient funds. Let's fund this miner and try again.
+		if err := fs.fund(bb, msg.To, i); err != nil {
+			// Out-of-gas means "block is full", not a funding failure;
+			// don't wrap it so callers can still detect it.
+			if !blockbuilder.IsOutOfGas(err) {
+				err = xerrors.Errorf("failed to fund %s: %w", msg.To, err)
+			}
+			return nil, err
+		}
+	}
+	return res, err
+}
+
+// fund funds the target actor with 'TargetFunds << shift' FIL. The "shift" parameter allows us to
+// keep doubling the amount until the intended operation succeeds.
+func (fs *FundingStage) fund(bb *blockbuilder.BlockBuilder, target address.Address, shift int) error { + amt := TargetFunds + if shift > 0 { + if shift >= 8 { + shift = 8 // cap + } + amt = big.Lsh(amt, uint(shift)) + } + _, err := bb.PushMessage(&types.Message{ + From: fs.fundAccount, + To: target, + Value: amt, + Method: builtin.MethodSend, + }) + return err +} + +func (fs *FundingStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) { + st := bb.StateTree() + fundAccActor, err := st.GetActor(fs.fundAccount) + if err != nil { + return err + } + if fs.minFunds.LessThan(fundAccActor.Balance) { + return nil + } + + // Ok, we're going to go fund this thing. + start := time.Now() + + type actor struct { + types.Actor + Address address.Address + } + + var targets []*actor + err = st.ForEach(func(addr address.Address, act *types.Actor) error { + // Don't steal from ourselves! + if addr == fs.fundAccount { + return nil + } + if act.Balance.LessThan(fs.taxMin) { + return nil + } + if !(builtin.IsAccountActor(act.Code) || builtin.IsMultisigActor(act.Code)) { + return nil + } + targets = append(targets, &actor{*act, addr}) + return nil + }) + if err != nil { + return err + } + + balance := fundAccActor.Balance.Copy() + + sort.Slice(targets, func(i, j int) bool { + return targets[i].Balance.GreaterThan(targets[j].Balance) + }) + + store := bb.ActorStore() + epoch := bb.Height() + actorsVersion := bb.ActorsVersion() + + var accounts, multisigs int + defer func() { + if _err != nil { + return + } + bb.L().Infow("finished funding the simulation", + "duration", time.Since(start), + "targets", len(targets), + "epoch", epoch, + "new-balance", types.FIL(balance), + "old-balance", types.FIL(fundAccActor.Balance), + "multisigs", multisigs, + "accounts", accounts, + ) + }() + + for _, actor := range targets { + switch { + case builtin.IsAccountActor(actor.Code): + if _, err := bb.PushMessage(&types.Message{ + From: actor.Address, + To: fs.fundAccount, + 
+				Value: actor.Balance,
+			}); blockbuilder.IsOutOfGas(err) {
+				// Block is full; stop packing, keep what we have.
+				return nil
+			} else if err != nil {
+				return err
+			}
+			accounts++
+		case builtin.IsMultisigActor(actor.Code):
+			msigState, err := multisig.Load(store, &actor.Actor)
+			if err != nil {
+				return err
+			}
+
+			threshold, err := msigState.Threshold()
+			if err != nil {
+				return err
+			}
+
+			// Approving with a huge signer set isn't worth the messages.
+			if threshold > 16 {
+				bb.L().Debugw("ignoring multisig with high threshold",
+					"multisig", actor.Address,
+					"threshold", threshold,
+					"max", 16,
+				)
+				continue
+			}
+
+			locked, err := msigState.LockedBalance(epoch)
+			if err != nil {
+				return err
+			}
+
+			if locked.LessThan(fs.taxMin) {
+				continue // not worth it.
+			}
+
+			allSigners, err := msigState.Signers()
+			if err != nil {
+				return err
+			}
+			// Collect up to `threshold` account-actor signers we can sign with.
+			signers := make([]address.Address, 0, threshold)
+			for _, signer := range allSigners {
+				actor, err := st.GetActor(signer)
+				if err != nil {
+					return err
+				}
+				if !builtin.IsAccountActor(actor.Code) {
+					// I am so not dealing with this mess.
+					continue
+				}
+				if uint64(len(signers)) >= threshold {
+					break
+				}
+				// BUG FIX: record the usable signer. Previously nothing was ever
+				// appended, so len(signers) never reached the threshold and the
+				// check below skipped every multisig.
+				signers = append(signers, signer)
+			}
+			// Ok, we're not dealing with this one.
+			if uint64(len(signers)) < threshold {
+				continue
+			}
+
+			available := big.Sub(actor.Balance, locked)
+
+			var txnId uint64
+			{
+				msg, err := multisig.Message(actorsVersion, signers[0]).Propose(
+					actor.Address, fs.fundAccount, available,
+					builtin.MethodSend, nil,
+				)
+				if err != nil {
+					return err
+				}
+				res, err := bb.PushMessage(msg)
+				if err != nil {
+					if blockbuilder.IsOutOfGas(err) {
+						err = nil
+					}
+					return err
+				}
+				var ret multisig.ProposeReturn
+				err = ret.UnmarshalCBOR(bytes.NewReader(res.Return))
+				if err != nil {
+					return err
+				}
+				if ret.Applied {
+					// Threshold of 1: the proposal executed immediately.
+					// This break exits the switch, falling through to the
+					// balance accounting below.
+					if !ret.Code.IsSuccess() {
+						bb.L().Errorw("failed to tax multisig",
+							"multisig", actor.Address,
+							"exitcode", ret.Code,
+						)
+					}
+					break
+				}
+				txnId = uint64(ret.TxnID)
+			}
+			// `ret` holds the result of the most recent approval; the post-loop
+			// check below relies on it.
+			var ret multisig.ProposeReturn
+			for _, signer := range signers[1:] {
+				msg, err := multisig.Message(actorsVersion, signer).Approve(actor.Address, txnId, nil)
+				if err != nil {
+					return err
+				}
+				res, err := bb.PushMessage(msg)
+				if err != nil {
+					if blockbuilder.IsOutOfGas(err) {
+						err = nil
+					}
+					return err
+				}
+				// BUG FIX: unmarshal into the outer `ret`. The previous
+				// `var ret` here shadowed it, so the `!ret.Applied` check after
+				// the loop always saw the zero value, logged a spurious failure,
+				// and skipped the accounting for every successfully taxed msig.
+				err = ret.UnmarshalCBOR(bytes.NewReader(res.Return))
+				if err != nil {
+					return err
+				}
+				// A bit redundant, but nice.
+				if ret.Applied {
+					break
+				}
+
+			}
+			if !ret.Applied {
+				bb.L().Errorw("failed to apply multisig transaction",
+					"multisig", actor.Address,
+					"txnid", txnId,
+					"signers", len(signers),
+					"threshold", threshold,
+				)
+				continue
+			}
+			if !ret.Code.IsSuccess() {
+				bb.L().Errorw("failed to tax multisig",
+					"multisig", actor.Address,
+					"txnid", txnId,
+					"exitcode", ret.Code,
+				)
+			} else {
+				multisigs++
+			}
+		default:
+			panic("impossible case")
+		}
+		balance = big.Int{Int: balance.Add(balance.Int, actor.Balance.Int)}
+		if balance.GreaterThanEqual(fs.maxFunds) {
+			// There's no need to get greedy.
+			// Well, really, we're trying to avoid messing with state _too_ much.
+			return nil
+		}
+	}
+	return nil
+}
diff --git a/cmd/lotus-sim/simulation/stages/interface.go b/cmd/lotus-sim/simulation/stages/interface.go
new file mode 100644
index 00000000000..0c40a9b2308
--- /dev/null
+++ b/cmd/lotus-sim/simulation/stages/interface.go
@@ -0,0 +1,27 @@
+package stages
+
+import (
+	"context"
+
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
+
+	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
+)
+
+// Stage is a stage of the simulation. It's asked to pack messages for every block.
+type Stage interface {
+	Name() string
+	PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) error
+}
+
+// Funding is the funding facility other stages use to pay for messages:
+// SendAndFund pushes a message and tops up the destination actor on
+// ErrInsufficientFunds; Fund transfers the base funding amount directly.
+// Implemented by FundingStage.
+type Funding interface {
+	SendAndFund(*blockbuilder.BlockBuilder, *types.Message) (*types.MessageReceipt, error)
+	Fund(*blockbuilder.BlockBuilder, address.Address) error
+}
+
+// Committer accepts freshly pre-committed sectors so they can be proven
+// later. Implemented by ProveCommitStage.
+type Committer interface {
+	EnqueueProveCommit(addr address.Address, preCommitEpoch abi.ChainEpoch, info miner.SectorPreCommitInfo) error
+}
diff --git a/cmd/lotus-sim/simulation/stages/pipeline.go b/cmd/lotus-sim/simulation/stages/pipeline.go
new file mode 100644
index 00000000000..317e5b5a9e0
--- /dev/null
+++ b/cmd/lotus-sim/simulation/stages/pipeline.go
@@ -0,0 +1,31 @@
+package stages
+
+// DefaultPipeline returns the default stage pipeline. This pipeline:
+//
+// 1. Funds a "funding" actor, if necessary.
+// 2. Submits any ready window posts.
+// 3. Submits any ready prove commits.
+// 4. Submits pre-commits with the remaining gas.
+func DefaultPipeline() ([]Stage, error) {
+	// TODO: make this configurable. E.g., through DI?
+	// Ideally, we'd also be able to change priority, limit throughput (by limiting gas in the
+	// block builder), etc.
+ funding, err := NewFundingStage() + if err != nil { + return nil, err + } + wdpost, err := NewWindowPoStStage() + if err != nil { + return nil, err + } + provecommit, err := NewProveCommitStage(funding) + if err != nil { + return nil, err + } + precommit, err := NewPreCommitStage(funding, provecommit) + if err != nil { + return nil, err + } + + return []Stage{funding, wdpost, provecommit, precommit}, nil +} diff --git a/cmd/lotus-sim/simulation/stages/precommit_stage.go b/cmd/lotus-sim/simulation/stages/precommit_stage.go new file mode 100644 index 00000000000..5b9fed09e2a --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/precommit_stage.go @@ -0,0 +1,347 @@ +package stages + +import ( + "context" + "sort" + "time" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/aerrors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock" +) + +const ( + minPreCommitBatchSize = 1 + maxPreCommitBatchSize = miner5.PreCommitSectorBatchMaxSize +) + +type PreCommitStage struct { + funding Funding + committer Committer + + // The tiers represent the top 1%, top 10%, and everyone else. When sealing sectors, we seal + // a group of sectors for the top 1%, a group (half that size) for the top 10%, and one + // sector for everyone else. We determine these rates by looking at two power tables. 
+ // TODO Ideally we'd "learn" this distribution from the network. But this is good enough for + // now. + top1, top10, rest actorIter + initialized bool +} + +func NewPreCommitStage(funding Funding, committer Committer) (*PreCommitStage, error) { + return &PreCommitStage{ + funding: funding, + committer: committer, + }, nil +} + +func (*PreCommitStage) Name() string { + return "pre-commit" +} + +// packPreCommits packs pre-commit messages until the block is full. +func (stage *PreCommitStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) { + if !stage.initialized { + if err := stage.load(ctx, bb); err != nil { + return err + } + } + + var ( + full bool + top1Count, top10Count, restCount int + ) + start := time.Now() + defer func() { + if _err != nil { + return + } + bb.L().Debugw("packed pre commits", + "done", top1Count+top10Count+restCount, + "top1", top1Count, + "top10", top10Count, + "rest", restCount, + "filled-block", full, + "duration", time.Since(start), + ) + }() + + var top1Miners, top10Miners, restMiners int + for i := 0; ; i++ { + var ( + minerAddr address.Address + count *int + ) + + // We pre-commit for the top 1%, 10%, and the of the network 1/3rd of the time each. + // This won't yield the most accurate distribution... but it'll give us a good + // enough distribution. + switch { + case (i%3) <= 0 && top1Miners < stage.top1.len(): + count = &top1Count + minerAddr = stage.top1.next() + top1Miners++ + case (i%3) <= 1 && top10Miners < stage.top10.len(): + count = &top10Count + minerAddr = stage.top10.next() + top10Miners++ + case (i%3) <= 2 && restMiners < stage.rest.len(): + count = &restCount + minerAddr = stage.rest.next() + restMiners++ + default: + // Well, we've run through all miners. 
+ return nil + } + + var ( + added int + err error + ) + added, full, err = stage.packMiner(ctx, bb, minerAddr, maxProveCommitBatchSize) + if err != nil { + return xerrors.Errorf("failed to pack precommits for miner %s: %w", minerAddr, err) + } + *count += added + if full { + return nil + } + } +} + +// packPreCommitsMiner packs count pre-commits for the given miner. +func (stage *PreCommitStage) packMiner( + ctx context.Context, bb *blockbuilder.BlockBuilder, + minerAddr address.Address, count int, +) (int, bool, error) { + log := bb.L().With("miner", minerAddr) + epoch := bb.Height() + nv := bb.NetworkVersion() + + minerActor, err := bb.StateTree().GetActor(minerAddr) + if err != nil { + return 0, false, err + } + minerState, err := miner.Load(bb.ActorStore(), minerActor) + if err != nil { + return 0, false, err + } + + minerInfo, err := minerState.Info() + if err != nil { + return 0, false, err + } + + // Make sure the miner is funded. + minerBalance, err := minerState.AvailableBalance(minerActor.Balance) + if err != nil { + return 0, false, err + } + + if big.Cmp(minerBalance, MinimumFunds) < 0 { + err := stage.funding.Fund(bb, minerAddr) + if err != nil { + if blockbuilder.IsOutOfGas(err) { + return 0, true, nil + } + return 0, false, err + } + } + + // Generate pre-commits. + sealType, err := miner.PreferredSealProofTypeFromWindowPoStType( + nv, minerInfo.WindowPoStProofType, + ) + if err != nil { + return 0, false, err + } + + sectorNos, err := minerState.UnallocatedSectorNumbers(count) + if err != nil { + return 0, false, err + } + + expiration := epoch + policy.GetMaxSectorExpirationExtension() + infos := make([]miner.SectorPreCommitInfo, len(sectorNos)) + for i, sno := range sectorNos { + infos[i] = miner.SectorPreCommitInfo{ + SealProof: sealType, + SectorNumber: sno, + SealedCID: mock.MockCommR(minerAddr, sno), + SealRandEpoch: epoch - 1, + Expiration: expiration, + } + } + + // Commit the pre-commits. 
+ added := 0 + if nv >= network.Version13 { + targetBatchSize := maxPreCommitBatchSize + for targetBatchSize >= minPreCommitBatchSize && len(infos) >= minPreCommitBatchSize { + batch := infos + if len(batch) > targetBatchSize { + batch = batch[:targetBatchSize] + } + params := miner5.PreCommitSectorBatchParams{ + Sectors: batch, + } + enc, err := actors.SerializeParams(¶ms) + if err != nil { + return added, false, err + } + // NOTE: just in-case, sendAndFund will "fund" and re-try for any message + // that fails due to "insufficient funds". + if _, err := stage.funding.SendAndFund(bb, &types.Message{ + To: minerAddr, + From: minerInfo.Worker, + Value: abi.NewTokenAmount(0), + Method: miner.Methods.PreCommitSectorBatch, + Params: enc, + }); blockbuilder.IsOutOfGas(err) { + // try again with a smaller batch. + targetBatchSize /= 2 + continue + } else if aerr, ok := err.(aerrors.ActorError); ok && !aerr.IsFatal() { + // Log the error and move on. No reason to stop. + log.Errorw("failed to pre-commit for unknown reasons", + "error", aerr, + "sectors", batch, + ) + return added, false, nil + } else if err != nil { + return added, false, err + } + + for _, info := range batch { + if err := stage.committer.EnqueueProveCommit(minerAddr, epoch, info); err != nil { + return added, false, err + } + added++ + } + infos = infos[len(batch):] + } + } + for _, info := range infos { + enc, err := actors.SerializeParams(&info) //nolint + if err != nil { + return 0, false, err + } + if _, err := stage.funding.SendAndFund(bb, &types.Message{ + To: minerAddr, + From: minerInfo.Worker, + Value: abi.NewTokenAmount(0), + Method: miner.Methods.PreCommitSector, + Params: enc, + }); blockbuilder.IsOutOfGas(err) { + return added, true, nil + } else if err != nil { + return added, false, err + } + + if err := stage.committer.EnqueueProveCommit(minerAddr, epoch, info); err != nil { + return added, false, err + } + added++ + } + return added, false, nil +} + +func (stage *PreCommitStage) 
load(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) { + bb.L().Infow("loading miner power for pre-commits") + start := time.Now() + defer func() { + if _err != nil { + return + } + bb.L().Infow("loaded miner power for pre-commits", + "duration", time.Since(start), + "top1", stage.top1.len(), + "top10", stage.top10.len(), + "rest", stage.rest.len(), + ) + }() + + store := bb.ActorStore() + st := bb.ParentStateTree() + powerState, err := loadPower(store, st) + if err != nil { + return xerrors.Errorf("failed to power actor: %w", err) + } + + type onboardingInfo struct { + addr address.Address + sectorCount uint64 + } + var sealList []onboardingInfo + err = powerState.ForEachClaim(func(addr address.Address, claim power.Claim) error { + if claim.RawBytePower.IsZero() { + return nil + } + + minerState, err := loadMiner(store, st, addr) + if err != nil { + return err + } + info, err := minerState.Info() + if err != nil { + return err + } + + sectorCount := sectorsFromClaim(info.SectorSize, claim) + + if sectorCount > 0 { + sealList = append(sealList, onboardingInfo{addr, uint64(sectorCount)}) + } + return nil + }) + if err != nil { + return err + } + + if len(sealList) == 0 { + return xerrors.Errorf("simulation has no miners") + } + + // Now that we have a list of sealing miners, sort them into percentiles. + sort.Slice(sealList, func(i, j int) bool { + return sealList[i].sectorCount < sealList[j].sectorCount + }) + + // reset, just in case. 
+ stage.top1 = actorIter{} + stage.top10 = actorIter{} + stage.rest = actorIter{} + + for i, oi := range sealList { + var dist *actorIter + if i < len(sealList)/100 { + dist = &stage.top1 + } else if i < len(sealList)/10 { + dist = &stage.top10 + } else { + dist = &stage.rest + } + dist.add(oi.addr) + } + + stage.top1.shuffle() + stage.top10.shuffle() + stage.rest.shuffle() + + stage.initialized = true + return nil +} diff --git a/cmd/lotus-sim/simulation/stages/provecommit_stage.go b/cmd/lotus-sim/simulation/stages/provecommit_stage.go new file mode 100644 index 00000000000..6cbca7de9fb --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/provecommit_stage.go @@ -0,0 +1,372 @@ +package stages + +import ( + "context" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + power5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/power" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/aerrors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock" +) + +const ( + minProveCommitBatchSize = 4 + maxProveCommitBatchSize = miner5.MaxAggregatedSectors +) + +type ProveCommitStage struct { + funding Funding + // We track the set of pending commits. On simulation load, and when a new pre-commit is + // added to the chain, we put the commit in this queue. 
advanceEpoch(currentEpoch) should be + // called on this queue at every epoch before using it. + commitQueue commitQueue + initialized bool +} + +func NewProveCommitStage(funding Funding) (*ProveCommitStage, error) { + return &ProveCommitStage{ + funding: funding, + }, nil +} + +func (*ProveCommitStage) Name() string { + return "prove-commit" +} + +func (stage *ProveCommitStage) EnqueueProveCommit( + minerAddr address.Address, preCommitEpoch abi.ChainEpoch, info miner.SectorPreCommitInfo, +) error { + return stage.commitQueue.enqueueProveCommit(minerAddr, preCommitEpoch, info) +} + +// packProveCommits packs all prove-commits for all "ready to be proven" sectors until it fills the +// block or runs out. +func (stage *ProveCommitStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) { + if !stage.initialized { + if err := stage.load(ctx, bb); err != nil { + return err + } + } + // Roll the commitQueue forward. + stage.commitQueue.advanceEpoch(bb.Height()) + + start := time.Now() + var failed, done, unbatched, count int + defer func() { + if _err != nil { + return + } + remaining := stage.commitQueue.ready() + bb.L().Debugw("packed prove commits", + "remaining", remaining, + "done", done, + "failed", failed, + "unbatched", unbatched, + "miners-processed", count, + "duration", time.Since(start), + ) + }() + + for { + addr, pending, ok := stage.commitQueue.nextMiner() + if !ok { + return nil + } + + res, err := stage.packProveCommitsMiner(ctx, bb, addr, pending) + if err != nil { + return err + } + failed += res.failed + done += res.done + unbatched += res.unbatched + count++ + if res.full { + return nil + } + } +} + +type proveCommitResult struct { + done, failed, unbatched int + full bool +} + +// packProveCommitsMiner enqueues a prove commits from the given miner until it runs out of +// available prove-commits, batching as much as possible. 
+// +// This function will fund as necessary from the "burnt funds actor" (look, it's convenient). +func (stage *ProveCommitStage) packProveCommitsMiner( + ctx context.Context, bb *blockbuilder.BlockBuilder, minerAddr address.Address, + pending minerPendingCommits, +) (res proveCommitResult, _err error) { + minerActor, err := bb.StateTree().GetActor(minerAddr) + if err != nil { + return res, err + } + minerState, err := miner.Load(bb.ActorStore(), minerActor) + if err != nil { + return res, err + } + info, err := minerState.Info() + if err != nil { + return res, err + } + + log := bb.L().With("miner", minerAddr) + + nv := bb.NetworkVersion() + for sealType, snos := range pending { + if nv >= network.Version13 { + for len(snos) > minProveCommitBatchSize { + batchSize := maxProveCommitBatchSize + if len(snos) < batchSize { + batchSize = len(snos) + } + batch := snos[:batchSize] + + proof, err := mock.MockAggregateSealProof(sealType, minerAddr, batchSize) + if err != nil { + return res, err + } + + params := miner5.ProveCommitAggregateParams{ + SectorNumbers: bitfield.New(), + AggregateProof: proof, + } + for _, sno := range batch { + params.SectorNumbers.Set(uint64(sno)) + } + + enc, err := actors.SerializeParams(¶ms) + if err != nil { + return res, err + } + + if _, err := stage.funding.SendAndFund(bb, &types.Message{ + From: info.Worker, + To: minerAddr, + Value: abi.NewTokenAmount(0), + Method: miner.Methods.ProveCommitAggregate, + Params: enc, + }); err == nil { + res.done += len(batch) + } else if blockbuilder.IsOutOfGas(err) { + res.full = true + return res, nil + } else if aerr, ok := err.(aerrors.ActorError); !ok || aerr.IsFatal() { + // If we get a random error, or a fatal actor error, bail. + return res, err + } else if aerr.RetCode() == exitcode.ErrNotFound || aerr.RetCode() == exitcode.ErrIllegalArgument { + // If we get a "not-found" or illegal argument error, try to + // remove any missing prove-commits and continue. 
This can + // happen either because: + // + // 1. The pre-commit failed on execution (but not when + // packing). This shouldn't happen, but we might as well + // gracefully handle it. + // 2. The pre-commit has expired. We'd have to be really + // backloged to hit this case, but we might as well handle + // it. + // First, split into "good" and "missing" + good, err := stage.filterProveCommits(ctx, bb, minerAddr, batch) + if err != nil { + log.Errorw("failed to filter prove commits", "error", err) + // fail with the original error. + return res, aerr + } + removed := len(batch) - len(good) + if removed == 0 { + log.Errorw("failed to prove-commit for unknown reasons", + "error", aerr, + "sectors", batch, + ) + res.failed += len(batch) + } else if len(good) == 0 { + log.Errorw("failed to prove commit missing pre-commits", + "error", aerr, + "discarded", removed, + ) + res.failed += len(batch) + } else { + // update the pending sector numbers in-place to remove the expired ones. + snos = snos[removed:] + copy(snos, good) + pending.finish(sealType, removed) + + log.Errorw("failed to prove commit expired/missing pre-commits", + "error", aerr, + "discarded", removed, + "kept", len(good), + ) + res.failed += removed + + // Then try again. 
+ continue + } + } else { + log.Errorw("failed to prove commit sector(s)", + "error", err, + "sectors", batch, + ) + res.failed += len(batch) + } + pending.finish(sealType, len(batch)) + snos = snos[len(batch):] + } + } + for len(snos) > 0 && res.unbatched < power5.MaxMinerProveCommitsPerEpoch { + sno := snos[0] + snos = snos[1:] + + proof, err := mock.MockSealProof(sealType, minerAddr) + if err != nil { + return res, err + } + params := miner.ProveCommitSectorParams{ + SectorNumber: sno, + Proof: proof, + } + enc, err := actors.SerializeParams(¶ms) + if err != nil { + return res, err + } + if _, err := stage.funding.SendAndFund(bb, &types.Message{ + From: info.Worker, + To: minerAddr, + Value: abi.NewTokenAmount(0), + Method: miner.Methods.ProveCommitSector, + Params: enc, + }); err == nil { + res.unbatched++ + res.done++ + } else if blockbuilder.IsOutOfGas(err) { + res.full = true + return res, nil + } else if aerr, ok := err.(aerrors.ActorError); !ok || aerr.IsFatal() { + return res, err + } else { + log.Errorw("failed to prove commit sector(s)", + "error", err, + "sectors", []abi.SectorNumber{sno}, + ) + res.failed++ + } + // mark it as "finished" regardless so we skip it. + pending.finish(sealType, 1) + } + // if we get here, we can't pre-commit anything more. + } + return res, nil +} + +// loadMiner enqueue all pending prove-commits for the given miner. This is called on load to +// populate the commitQueue and should not need to be called later. +// +// It will drop any pre-commits that have already expired. +func (stage *ProveCommitStage) loadMiner(ctx context.Context, bb *blockbuilder.BlockBuilder, addr address.Address) error { + epoch := bb.Height() + av := bb.ActorsVersion() + minerState, err := loadMiner(bb.ActorStore(), bb.ParentStateTree(), addr) + if err != nil { + return err + } + + // Find all pending prove commits and group by proof type. Really, there should never + // (except during upgrades be more than one type. 
+ var total, dropped int + err = minerState.ForEachPrecommittedSector(func(info miner.SectorPreCommitOnChainInfo) error { + total++ + msd := policy.GetMaxProveCommitDuration(av, info.Info.SealProof) + if epoch > info.PreCommitEpoch+msd { + dropped++ + return nil + } + return stage.commitQueue.enqueueProveCommit(addr, info.PreCommitEpoch, info.Info) + }) + if err != nil { + return err + } + if dropped > 0 { + bb.L().Warnw("dropped expired pre-commits on load", + "miner", addr, + "total", total, + "expired", dropped, + ) + } + return nil +} + +// filterProveCommits filters out expired and/or missing pre-commits. +func (stage *ProveCommitStage) filterProveCommits( + ctx context.Context, bb *blockbuilder.BlockBuilder, + minerAddr address.Address, snos []abi.SectorNumber, +) ([]abi.SectorNumber, error) { + act, err := bb.StateTree().GetActor(minerAddr) + if err != nil { + return nil, err + } + + minerState, err := miner.Load(bb.ActorStore(), act) + if err != nil { + return nil, err + } + + nextEpoch := bb.Height() + av := bb.ActorsVersion() + + good := make([]abi.SectorNumber, 0, len(snos)) + for _, sno := range snos { + info, err := minerState.GetPrecommittedSector(sno) + if err != nil { + return nil, err + } + if info == nil { + continue + } + msd := policy.GetMaxProveCommitDuration(av, info.Info.SealProof) + if nextEpoch > info.PreCommitEpoch+msd { + continue + } + good = append(good, sno) + } + return good, nil +} + +func (stage *ProveCommitStage) load(ctx context.Context, bb *blockbuilder.BlockBuilder) error { + stage.initialized = false // in case something failes while we're doing this. + stage.commitQueue = commitQueue{offset: bb.Height()} + powerState, err := loadPower(bb.ActorStore(), bb.ParentStateTree()) + if err != nil { + return err + } + + err = powerState.ForEachClaim(func(minerAddr address.Address, claim power.Claim) error { + // TODO: If we want to finish pre-commits for "new" miners, we'll need to change + // this. 
+ if claim.RawBytePower.IsZero() { + return nil + } + return stage.loadMiner(ctx, bb, minerAddr) + }) + if err != nil { + return err + } + + stage.initialized = true + return nil +} diff --git a/cmd/lotus-sim/simulation/stages/util.go b/cmd/lotus-sim/simulation/stages/util.go new file mode 100644 index 00000000000..97c1e57af83 --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/util.go @@ -0,0 +1,51 @@ +package stages + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder" +) + +func loadMiner(store adt.Store, st types.StateTree, addr address.Address) (miner.State, error) { + minerActor, err := st.GetActor(addr) + if err != nil { + return nil, err + } + return miner.Load(store, minerActor) +} + +func loadPower(store adt.Store, st types.StateTree) (power.State, error) { + powerActor, err := st.GetActor(power.Address) + if err != nil { + return nil, err + } + return power.Load(store, powerActor) +} + +// Compute the number of sectors a miner has from their power claim. 
+func sectorsFromClaim(sectorSize abi.SectorSize, c power.Claim) int64 {
+	// A claim with no raw-byte power recorded contributes no sectors.
+	if c.RawBytePower.Int == nil {
+		return 0
+	}
+	sectorCount := big.Div(c.RawBytePower, big.NewIntUnsigned(uint64(sectorSize)))
+	if !sectorCount.IsInt64() {
+		panic("impossible number of sectors")
+	}
+	return sectorCount.Int64()
+}
+
+// postChainCommitInfo fetches the chain randomness (PoStChainCommit domain
+// separation tag) at the given epoch, drawn from the block builder's parent
+// tipset, for use when committing window posts.
+func postChainCommitInfo(ctx context.Context, bb *blockbuilder.BlockBuilder, epoch abi.ChainEpoch) (abi.Randomness, error) {
+	cs := bb.StateManager().ChainStore()
+	ts := bb.ParentTipSet()
+	commitRand, err := cs.GetChainRandomness(ctx, ts.Cids(), crypto.DomainSeparationTag_PoStChainCommit, epoch, nil, true)
+	return commitRand, err
+}
diff --git a/cmd/lotus-sim/simulation/stages/windowpost_stage.go b/cmd/lotus-sim/simulation/stages/windowpost_stage.go
new file mode 100644
index 00000000000..68f8ea179b3
--- /dev/null
+++ b/cmd/lotus-sim/simulation/stages/windowpost_stage.go
@@ -0,0 +1,317 @@
+package stages
+
+import (
+	"context"
+	"math"
+	"time"
+
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
+
+	proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
+
+	"github.com/filecoin-project/lotus/chain/actors"
+	"github.com/filecoin-project/lotus/chain/actors/aerrors"
+	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
+	"github.com/filecoin-project/lotus/chain/actors/policy"
+	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
+	"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock"
+)
+
+type WindowPoStStage struct {
+	// We track the window post periods per miner and assume that no new miners are ever added.
+
+	// We record all pending window post messages, and the epoch up through which we've
+	// generated window post messages.
+ pendingWposts []*types.Message + wpostPeriods [][]address.Address // (epoch % (epochs in a deadline)) -> miner + nextWpostEpoch abi.ChainEpoch +} + +func NewWindowPoStStage() (*WindowPoStStage, error) { + return new(WindowPoStStage), nil +} + +func (*WindowPoStStage) Name() string { + return "window-post" +} + +// packWindowPoSts packs window posts until either the block is full or all healty sectors +// have been proven. It does not recover sectors. +func (stage *WindowPoStStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) { + // Push any new window posts into the queue. + if err := stage.tick(ctx, bb); err != nil { + return err + } + done := 0 + failed := 0 + defer func() { + if _err != nil { + return + } + + bb.L().Debugw("packed window posts", + "done", done, + "failed", failed, + "remaining", len(stage.pendingWposts), + ) + }() + // Then pack as many as we can. + for len(stage.pendingWposts) > 0 { + next := stage.pendingWposts[0] + if _, err := bb.PushMessage(next); err != nil { + if blockbuilder.IsOutOfGas(err) { + return nil + } + if aerr, ok := err.(aerrors.ActorError); !ok || aerr.IsFatal() { + return err + } + bb.L().Errorw("failed to submit windowed post", + "error", err, + "miner", next.To, + ) + failed++ + } else { + done++ + } + + stage.pendingWposts = stage.pendingWposts[1:] + } + stage.pendingWposts = nil + return nil +} + +// stepWindowPoStsMiner enqueues all missing window posts for the current epoch for the given miner. 
+func (stage *WindowPoStStage) queueMiner( + ctx context.Context, bb *blockbuilder.BlockBuilder, + addr address.Address, minerState miner.State, + commitEpoch abi.ChainEpoch, commitRand abi.Randomness, +) error { + + if active, err := minerState.DeadlineCronActive(); err != nil { + return err + } else if !active { + return nil + } + + minerInfo, err := minerState.Info() + if err != nil { + return err + } + + di, err := minerState.DeadlineInfo(bb.Height()) + if err != nil { + return err + } + di = di.NextNotElapsed() + + dl, err := minerState.LoadDeadline(di.Index) + if err != nil { + return err + } + + provenBf, err := dl.PartitionsPoSted() + if err != nil { + return err + } + proven, err := provenBf.AllMap(math.MaxUint64) + if err != nil { + return err + } + + poStBatchSize, err := policy.GetMaxPoStPartitions(bb.NetworkVersion(), minerInfo.WindowPoStProofType) + if err != nil { + return err + } + + var ( + partitions []miner.PoStPartition + partitionGroups [][]miner.PoStPartition + ) + // Only prove partitions with live sectors. + err = dl.ForEachPartition(func(idx uint64, part miner.Partition) error { + if proven[idx] { + return nil + } + // NOTE: We're mimicing the behavior of wdpost_run.go here. 
+ if len(partitions) > 0 && idx%uint64(poStBatchSize) == 0 { + partitionGroups = append(partitionGroups, partitions) + partitions = nil + + } + live, err := part.LiveSectors() + if err != nil { + return err + } + liveCount, err := live.Count() + if err != nil { + return err + } + faulty, err := part.FaultySectors() + if err != nil { + return err + } + faultyCount, err := faulty.Count() + if err != nil { + return err + } + if liveCount-faultyCount > 0 { + partitions = append(partitions, miner.PoStPartition{Index: idx}) + } + return nil + }) + if err != nil { + return err + } + if len(partitions) > 0 { + partitionGroups = append(partitionGroups, partitions) + partitions = nil + } + + proof, err := mock.MockWindowPoStProof(minerInfo.WindowPoStProofType, addr) + if err != nil { + return err + } + for _, group := range partitionGroups { + params := miner.SubmitWindowedPoStParams{ + Deadline: di.Index, + Partitions: group, + Proofs: []proof5.PoStProof{{ + PoStProof: minerInfo.WindowPoStProofType, + ProofBytes: proof, + }}, + ChainCommitEpoch: commitEpoch, + ChainCommitRand: commitRand, + } + enc, aerr := actors.SerializeParams(¶ms) + if aerr != nil { + return xerrors.Errorf("could not serialize submit window post parameters: %w", aerr) + } + msg := &types.Message{ + To: addr, + From: minerInfo.Worker, + Method: miner.Methods.SubmitWindowedPoSt, + Params: enc, + Value: types.NewInt(0), + } + stage.pendingWposts = append(stage.pendingWposts, msg) + } + return nil +} + +func (stage *WindowPoStStage) load(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) { + bb.L().Info("loading window post info") + + start := time.Now() + defer func() { + if _err != nil { + return + } + + bb.L().Infow("loaded window post info", "duration", time.Since(start)) + }() + + // reset + stage.wpostPeriods = make([][]address.Address, miner.WPoStChallengeWindow) + stage.pendingWposts = nil + stage.nextWpostEpoch = bb.Height() + 1 + + st := bb.ParentStateTree() + store := 
bb.ActorStore() + + powerState, err := loadPower(store, st) + if err != nil { + return err + } + + commitEpoch := bb.ParentTipSet().Height() + commitRand, err := postChainCommitInfo(ctx, bb, commitEpoch) + if err != nil { + return err + } + + return powerState.ForEachClaim(func(minerAddr address.Address, claim power.Claim) error { + // TODO: If we start recovering power, we'll need to change this. + if claim.RawBytePower.IsZero() { + return nil + } + + minerState, err := loadMiner(store, st, minerAddr) + if err != nil { + return err + } + + // Shouldn't be necessary if the miner has power, but we might as well be safe. + if active, err := minerState.DeadlineCronActive(); err != nil { + return err + } else if !active { + return nil + } + + // Record when we need to prove for this miner. + dinfo, err := minerState.DeadlineInfo(bb.Height()) + if err != nil { + return err + } + dinfo = dinfo.NextNotElapsed() + + ppOffset := int(dinfo.PeriodStart % miner.WPoStChallengeWindow) + stage.wpostPeriods[ppOffset] = append(stage.wpostPeriods[ppOffset], minerAddr) + + return stage.queueMiner(ctx, bb, minerAddr, minerState, commitEpoch, commitRand) + }) +} + +func (stage *WindowPoStStage) tick(ctx context.Context, bb *blockbuilder.BlockBuilder) error { + // If this is our first time, load from scratch. + if stage.wpostPeriods == nil { + return stage.load(ctx, bb) + } + + targetHeight := bb.Height() + now := time.Now() + was := len(stage.pendingWposts) + count := 0 + defer func() { + bb.L().Debugw("computed window posts", + "miners", count, + "count", len(stage.pendingWposts)-was, + "duration", time.Since(now), + ) + }() + + st := bb.ParentStateTree() + store := bb.ActorStore() + + // Perform a bit of catch up. This lets us do things like skip blocks at upgrades then catch + // up to make the simulation easier. 
+ for ; stage.nextWpostEpoch <= targetHeight; stage.nextWpostEpoch++ { + if stage.nextWpostEpoch+miner.WPoStChallengeWindow < targetHeight { + bb.L().Warnw("skipping old window post", "deadline-open", stage.nextWpostEpoch) + continue + } + commitEpoch := stage.nextWpostEpoch - 1 + commitRand, err := postChainCommitInfo(ctx, bb, commitEpoch) + if err != nil { + return err + } + + for _, addr := range stage.wpostPeriods[int(stage.nextWpostEpoch%miner.WPoStChallengeWindow)] { + minerState, err := loadMiner(store, st, addr) + if err != nil { + return err + } + + if err := stage.queueMiner(ctx, bb, addr, minerState, commitEpoch, commitRand); err != nil { + return err + } + count++ + } + + } + return nil +} diff --git a/cmd/lotus-sim/simulation/step.go b/cmd/lotus-sim/simulation/step.go new file mode 100644 index 00000000000..902f2ad6ca6 --- /dev/null +++ b/cmd/lotus-sim/simulation/step.go @@ -0,0 +1,71 @@ +package simulation + +import ( + "context" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder" +) + +// Step steps the simulation forward one step. This may move forward by more than one epoch. +func (sim *Simulation) Step(ctx context.Context) (*types.TipSet, error) { + log.Infow("step", "epoch", sim.head.Height()+1) + messages, err := sim.popNextMessages(ctx) + if err != nil { + return nil, xerrors.Errorf("failed to select messages for block: %w", err) + } + head, err := sim.makeTipSet(ctx, messages) + if err != nil { + return nil, xerrors.Errorf("failed to make tipset: %w", err) + } + if err := sim.SetHead(head); err != nil { + return nil, xerrors.Errorf("failed to update head: %w", err) + } + return head, nil +} + +// popNextMessages generates/picks a set of messages to be included in the next block. +// +// - This function is destructive and should only be called once per epoch. +// - This function does not store anything in the repo. 
+// - This function handles all gas estimation. The returned messages should all fit in a single +// block. +func (sim *Simulation) popNextMessages(ctx context.Context) ([]*types.Message, error) { + parentTs := sim.head + + // First we make sure we don't have an upgrade at this epoch. If we do, we return no + // messages so we can just create an empty block at that epoch. + // + // This isn't what the network does, but it makes things easier. Otherwise, we'd need to run + // migrations before this epoch and I'd rather not deal with that. + nextHeight := parentTs.Height() + 1 + prevVer := sim.StateManager.GetNtwkVersion(ctx, nextHeight-1) + nextVer := sim.StateManager.GetNtwkVersion(ctx, nextHeight) + if nextVer != prevVer { + log.Warnw("packing no messages for version upgrade block", + "old", prevVer, + "new", nextVer, + "epoch", nextHeight, + ) + return nil, nil + } + + bb, err := blockbuilder.NewBlockBuilder( + ctx, log.With("simulation", sim.name), + sim.StateManager, parentTs, + ) + if err != nil { + return nil, err + } + + for _, stage := range sim.stages { + // We're intentionally ignoring the "full" signal so we can try to pack a few more + // messages. 
+ if err := stage.PackMessages(ctx, bb); err != nil && !blockbuilder.IsOutOfGas(err) { + return nil, xerrors.Errorf("when packing messages with %s: %w", stage.Name(), err) + } + } + return bb.Messages(), nil +} diff --git a/cmd/lotus-sim/upgrade.go b/cmd/lotus-sim/upgrade.go new file mode 100644 index 00000000000..dfc726d6b01 --- /dev/null +++ b/cmd/lotus-sim/upgrade.go @@ -0,0 +1,109 @@ +package main + +import ( + "fmt" + "strconv" + "strings" + "text/tabwriter" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" +) + +var upgradeCommand = &cli.Command{ + Name: "upgrade", + Description: "Modifies network upgrade heights.", + Subcommands: []*cli.Command{ + upgradeSetCommand, + upgradeList, + }, +} + +var upgradeList = &cli.Command{ + Name: "list", + Description: "Lists all pending upgrades.", + Subcommands: []*cli.Command{ + upgradeSetCommand, + }, + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + upgrades, err := sim.ListUpgrades() + if err != nil { + return err + } + + tw := tabwriter.NewWriter(cctx.App.Writer, 8, 8, 0, ' ', 0) + fmt.Fprintf(tw, "version\theight\tepochs\tmigration\texpensive") + epoch := sim.GetHead().Height() + for _, upgrade := range upgrades { + fmt.Fprintf( + tw, "%d\t%d\t%+d\t%t\t%t", + upgrade.Network, upgrade.Height, upgrade.Height-epoch, + upgrade.Migration != nil, + upgrade.Expensive, + ) + } + return nil + }, +} + +var upgradeSetCommand = &cli.Command{ + Name: "set", + ArgsUsage: " [+]", + Description: "Set a network upgrade height. 
Prefix with '+' to set it relative to the last epoch.", + Action: func(cctx *cli.Context) (err error) { + args := cctx.Args() + if args.Len() != 2 { + return fmt.Errorf("expected 2 arguments") + } + nvString := args.Get(0) + networkVersion, err := strconv.ParseUint(nvString, 10, 32) + if err != nil { + return fmt.Errorf("failed to parse network version %q: %w", nvString, err) + } + heightString := args.Get(1) + relative := false + if strings.HasPrefix(heightString, "+") { + heightString = heightString[1:] + relative = true + } + height, err := strconv.ParseInt(heightString, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse height version %q: %w", heightString, err) + } + + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + if relative { + height += int64(sim.GetHead().Height()) + } + return sim.SetUpgradeHeight(network.Version(networkVersion), abi.ChainEpoch(height)) + }, +} diff --git a/cmd/lotus-sim/util.go b/cmd/lotus-sim/util.go new file mode 100644 index 00000000000..cd15cca0dd8 --- /dev/null +++ b/cmd/lotus-sim/util.go @@ -0,0 +1,18 @@ +package main + +import ( + "fmt" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation" + "github.com/filecoin-project/lotus/lib/ulimit" +) + +func open(cctx *cli.Context) (*simulation.Node, error) { + _, _, err := ulimit.ManageFdLimit() + if err != nil { + fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to raise ulimit: %s\n", err) + } + return simulation.OpenNode(cctx.Context, cctx.String("repo")) +} diff --git a/cmd/lotus-stats/chain.dashboard.json b/cmd/lotus-stats/chain.dashboard.json index 5ff7654d016..8083c96b183 100644 --- a/cmd/lotus-stats/chain.dashboard.json +++ b/cmd/lotus-stats/chain.dashboard.json @@ -1,20 +1,11 @@ { - "__inputs": [ - { - "name": "DS_INFLUXDB", - 
"label": "InfluxDB", - "description": "", - "type": "datasource", - "pluginId": "influxdb", - "pluginName": "InfluxDB" - } - ], + "__inputs": [], "__requires": [ { "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "6.5.0-pre" + "version": "7.3.0" }, { "type": "panel", @@ -36,8 +27,8 @@ }, { "type": "panel", - "id": "table", - "name": "Table", + "id": "table-old", + "name": "Table (old)", "version": "" } ], @@ -58,6 +49,7 @@ "gnetId": null, "graphTooltip": 0, "id": null, + "iteration": 1604018016916, "links": [], "panels": [ { @@ -65,8 +57,15 @@ "bars": true, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", "decimals": 2, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 3, "fillGradient": 0, "gridPos": { @@ -75,6 +74,7 @@ "x": 0, "y": 0 }, + "hiddenSeries": false, "hideTimeOverride": false, "id": 38, "interval": "", @@ -93,15 +93,25 @@ }, "lines": false, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", - "seriesOverrides": [], + "seriesOverrides": [ + { + "alias": "all", + "bars": false, + "color": "rgb(99, 99, 99)", + "fill": 1, + "lines": true, + "stack": false + } + ], "spaceLength": 10, "stack": true, "steppedLine": false, @@ -128,10 +138,11 @@ "type": "fill" } ], + "hide": false, "measurement": "chain.election", "orderByTime": "ASC", "policy": "default", - "query": "SELECT count(\"value\") FROM \"chain.election\" WHERE $timeFilter -10m GROUP BY time($__interval), \"miner\" fill(null)", + "query": "SELECT sum(\"value\") FROM \"chain.election\" WHERE $timeFilter GROUP BY time($blockInterval), \"miner\" fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -156,13 +167,52 @@ ] ], "tags": [] + }, + { + "alias": "all", + "groupBy": [ + { 
+ "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "hide": false, + "orderByTime": "ASC", + "policy": "defult", + "query": "SELECT TRIPLE_EXPONENTIAL_MOVING_AVERAGE(sum(\"value\"), 40) FROM \"chain.election\" WHERE $timeFilter -$blockInterval*40 AND time < now() - $blockInterval*3 GROUP BY time($blockInterval) fill(0)", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Blocks Won", + "title": "Blocks and Win Counts", "tooltip": { "shared": true, "sort": 2, @@ -207,7 +257,14 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -216,6 +273,7 @@ "x": 0, "y": 9 }, + "hiddenSeries": false, "id": 22, "interval": "", "legend": { @@ -232,9 +290,10 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", @@ -318,7 +377,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "s", "gauge": { "maxValue": 100, @@ -350,7 +415,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -422,7 +486,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": 
"bytes", "gauge": { "maxValue": 100, @@ -454,7 +524,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -493,7 +562,7 @@ ], "orderByTime": "ASC", "policy": "default", - "query": "SELECT sum(\"value\") FROM \"chain.miner_power\" WHERE $timeFilter GROUP BY time(45s)", + "query": "SELECT sum(\"value\") FROM \"chain.power\" WHERE $timeFilter GROUP BY time(25s)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -538,7 +607,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "none", "gauge": { "maxValue": 100, @@ -570,7 +645,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -596,7 +670,7 @@ "groupBy": [ { "params": [ - "$interval" + "$blockInterval" ], "type": "time" } @@ -616,7 +690,7 @@ }, { "params": [], - "type": "sum" + "type": "count" } ] ], @@ -648,7 +722,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "none", "gauge": { "maxValue": 100, @@ -680,7 +760,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -746,7 +825,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "s", "gauge": { "maxValue": 100, @@ -778,7 +863,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -848,7 +932,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - 
"datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "none", "gauge": { "maxValue": 100, @@ -880,7 +970,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -906,7 +995,7 @@ "groupBy": [ { "params": [ - "$__interval" + "$blockInterval" ], "type": "time" }, @@ -917,7 +1006,7 @@ "type": "fill" } ], - "measurement": "chain.message_gasprice", + "measurement": "chain.message_gaspremium", "orderByTime": "ASC", "policy": "default", "refId": "A", @@ -932,7 +1021,7 @@ }, { "params": [], - "type": "mean" + "type": "median" } ] ], @@ -942,7 +1031,7 @@ "thresholds": "", "timeFrom": null, "timeShift": null, - "title": "Avg Gas Price", + "title": "Avg Gas Premium", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -963,7 +1052,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "decbytes", "gauge": { "maxValue": 100, @@ -995,7 +1090,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -1021,7 +1115,7 @@ "groupBy": [ { "params": [ - "$__interval" + "$blockInterval" ], "type": "time" }, @@ -1078,7 +1172,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "bytes", "gauge": { "maxValue": 100, @@ -1110,7 +1210,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -1136,7 +1235,7 @@ "groupBy": [ { "params": [ - "$__interval" + "$blockInterval" ], "type": "time" }, @@ -1193,7 +1292,13 @@ "rgba(237, 129, 40, 
0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "none", "gauge": { "maxValue": 100, @@ -1225,7 +1330,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "pluginVersion": "6.4.2", "postfix": "", "postfixFontSize": "50%", @@ -1252,7 +1356,7 @@ "groupBy": [ { "params": [ - "$__interval" + "$blockInterval" ], "type": "time" }, @@ -1311,8 +1415,14 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", "decimals": 0, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "dateTimeFromNow", "gauge": { "maxValue": 100, @@ -1344,7 +1454,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "", "postfixFontSize": "50%", "prefix": "", @@ -1413,7 +1522,14 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -1422,6 +1538,7 @@ "x": 4, "y": 16 }, + "hiddenSeries": false, "id": 2, "legend": { "alignAsTable": true, @@ -1441,9 +1558,10 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", @@ -1569,7 +1687,13 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "format": "none", "gauge": { "maxValue": 100, @@ -1601,7 +1725,6 @@ "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, - "options": {}, "postfix": "FIL", "postfixFontSize": "50%", "prefix": "", @@ -1660,7 +1783,13 @@ }, { "columns": [], 
- "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fontSize": "100%", "gridPos": { "h": 21, @@ -1669,7 +1798,6 @@ "y": 19 }, "id": 28, - "options": {}, "pageSize": null, "showHeader": true, "sort": { @@ -1679,12 +1807,14 @@ "styles": [ { "alias": "Time", + "align": "auto", "dateFormat": "YYYY-MM-DD HH:mm:ss", "pattern": "Time", "type": "hidden" }, { "alias": "", + "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1701,6 +1831,7 @@ }, { "alias": "", + "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1741,7 +1872,7 @@ "timeShift": null, "title": "Top Power Table", "transform": "table", - "type": "table" + "type": "table-old" }, { "aliasColors": {}, @@ -1749,7 +1880,14 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 5, "fillGradient": 0, "gridPos": { @@ -1758,8 +1896,9 @@ "x": 4, "y": 19 }, + "hiddenSeries": false, "id": 40, - "interval": "", + "interval": "300s", "legend": { "alignAsTable": true, "avg": false, @@ -1778,11 +1917,12 @@ "lines": true, "linewidth": 1, "links": [], - "nullPointMode": "null", + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": true, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", @@ -1817,7 +1957,7 @@ "measurement": "chain.miner_power", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"value\") FROM \"chain.miner_power\" WHERE $timeFilter GROUP BY time($__interval), \"miner\" fill(previous)", + "query": "SELECT mean(\"value\") FROM \"chain.miner_power\" WHERE $timeFilter GROUP BY time($__interval), \"miner\" fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -1885,7 +2025,13 @@ }, { 
"columns": [], - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fontSize": "100%", "gridPos": { "h": 21, @@ -1894,7 +2040,6 @@ "y": 19 }, "id": 18, - "options": {}, "pageSize": null, "showHeader": true, "sort": { @@ -1904,6 +2049,7 @@ "styles": [ { "alias": "Height", + "align": "auto", "dateFormat": "YYYY-MM-DD HH:mm:ss", "link": false, "mappingType": 1, @@ -1914,6 +2060,7 @@ }, { "alias": "Tipset", + "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1930,6 +2077,7 @@ }, { "alias": "", + "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1973,74 +2121,77 @@ "timeShift": null, "title": "Chain Table", "transform": "timeseries_to_columns", - "type": "table" + "type": "table-old" }, { "aliasColors": {}, "bars": false, - "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { - "h": 6, + "h": 7, "w": 12, "x": 4, "y": 27 }, - "id": 24, + "hiddenSeries": false, + "id": 50, "legend": { - "alignAsTable": false, "avg": false, "current": false, "max": false, "min": false, - "rightSide": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, - "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/.*/", - "color": "rgb(31, 120, 193)" - } - ], + "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { + "alias": "Total GasLimit", "groupBy": [ { "params": [ - "$__interval" + "$blockInterval" ], "type": "time" }, { "params": [ - "previous" + "null" ], "type": "fill" } ], - "measurement": 
"chain.pledge_collateral", + "measurement": "chain.gas_limit_total", "orderByTime": "ASC", "policy": "default", + "query": "SELECT max(\"value\") FROM \"chain.gas_limit_total\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": false, "refId": "A", "resultFormat": "time_series", "select": [ @@ -2053,18 +2204,107 @@ }, { "params": [], - "type": "mean" + "type": "max" + } + ] + ], + "tags": [] + }, + { + "alias": "Total GasUsed", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.gas_used_total", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT max(\"value\") FROM \"chain.gas_used_total\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": false, + "refId": "B", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "max" + } + ] + ], + "tags": [] + }, + { + "alias": "Total Unique GasLimit", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.gas_limit_uniq_total", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT max(\"value\") FROM \"chain.gas_limit_total\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": false, + "refId": "C", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "max" } ] ], "tags": [] } ], - "thresholds": [], + "thresholds": [ + { + "colorMode": "custom", + "fill": false, + "fillColor": "rgba(50, 116, 217, 0.2)", + "line": true, + "lineColor": "rgba(31, 96, 196, 0.6)", + "op": "gt", + "value": 25000000000, + "yaxis": "left" + } + ], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Pledge Collateral", + "title": "Network Gas", "tooltip": { 
"shared": true, "sort": 0, @@ -2081,7 +2321,7 @@ "yaxes": [ { "format": "short", - "label": "FIL", + "label": null, "logBase": 1, "max": null, "min": null, @@ -2107,15 +2347,23 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { - "h": 7, + "h": 6, "w": 12, "x": 4, - "y": 33 + "y": 34 }, + "hiddenSeries": false, "id": 44, "legend": { "avg": false, @@ -2131,9 +2379,10 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", @@ -2146,7 +2395,7 @@ "groupBy": [ { "params": [ - "$__interval" + "$blockInterval" ], "type": "time" }, @@ -2228,7 +2477,14 @@ "bars": true, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2237,6 +2493,7 @@ "x": 0, "y": 40 }, + "hiddenSeries": false, "id": 34, "legend": { "alignAsTable": true, @@ -2251,11 +2508,12 @@ }, "lines": false, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", @@ -2269,7 +2527,7 @@ "groupBy": [ { "params": [ - "$__interval" + "$blockInterval" ], "type": "time" }, @@ -2360,7 +2618,14 @@ "bars": true, "dashLength": 10, "dashes": false, - "datasource": "${DS_INFLUXDB}", + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2369,6 +2634,7 @@ "x": 12, "y": 40 }, + "hiddenSeries": false, "id": 
36, "legend": { "alignAsTable": true, @@ -2387,11 +2653,12 @@ }, "lines": false, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.0", "pointradius": 2, "points": false, "renderer": "flot", @@ -2437,7 +2704,7 @@ "measurement": "chain.message_count", "orderByTime": "ASC", "policy": "default", - "query": "SELECT sum(\"value\") FROM \"chain.message_count\" WHERE $timeFilter GROUP BY time($__interval), \"method\", \"exitcode\", \"actor\" fill(null)", + "query": "SELECT sum(\"value\") FROM \"chain.message_count\" WHERE $timeFilter GROUP BY time($blockInterval), \"method\", \"exitcode\", \"actor\" fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -2498,14 +2765,701 @@ "align": false, "alignLevel": null } - } - ], - "refresh": "45s", - "schemaVersion": 20, - "style": "dark", - "tags": [], - "templating": { - "list": [] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 49 + }, + "hiddenSeries": false, + "id": 48, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "Transfer Fee", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.basefee", + 
"orderByTime": "ASC", + "policy": "default", + "query": "SELECT mean(\"value\")*1000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Cost of simple transfer [FIL]", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 2, + "format": "sci", + "label": "", + "logBase": 10, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 49 + }, + "hiddenSeries": false, + "id": 46, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "Transfer Fee", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + 
"measurement": "chain.basefee", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT mean(\"value\") FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Base Fee[FIL]", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$network", + "decimals": null, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 57 + }, + "hiddenSeries": false, + "id": 51, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "Precommit Transfer Fee", + "groupBy": [ + { + "params": [ + 
"$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.basefee", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT mean(\"value\")*24000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + }, + { + "alias": "Commit Transfer Fee", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.basefee", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT mean(\"value\")*56000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Message Gas fees [FIL]", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 2, + "format": "none", + "label": null, + "logBase": 10, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$network", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + 
"gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 57 + }, + "hiddenSeries": false, + "id": 52, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "10 PIB PoSt Fee", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.basefee", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT mean(\"value\")*940000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + }, + { + "alias": "750TiB miner PoSt Fee", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.basefee", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT mean(\"value\")*580000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + }, + { + "alias": "10TiB miner PoSt Fee", + "groupBy": [ + { + "params": [ + "$blockInterval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.basefee", + "orderByTime": "ASC", + "policy": 
"default", + "query": "SELECT mean(\"value\")*380000000 FROM \"chain.basefee\" WHERE $timeFilter GROUP BY time($blockInterval) fill(null)", + "rawQuery": true, + "refId": "C", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Message Gas fees [FIL]", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 2, + "format": "none", + "label": null, + "logBase": 10, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "filecoin-ntwk-testnet", + "value": "filecoin-ntwk-testnet" + }, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Network", + "multi": false, + "name": "network", + "options": [], + "query": "influxdb", + "queryValue": "", + "refresh": 1, + "regex": "/^filecoin-ntwk-/", + "skipUrlSync": false, + "type": "datasource" + }, + { + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "30s", + "value": "30s" + }, + "error": null, + "hide": 2, + "label": null, + "name": "blockInterval", + "options": [ + { + "selected": true, + "text": "30s", + "value": "30s" + } + ], + "query": "30s", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + } + ] }, "time": { "from": "now-30m", @@ -2515,6 +3469,7 @@ "refresh_intervals": [ "5s", "10s", + "25s", "30s", "45s", "1m", @@ -2527,7 
+3482,7 @@ ] }, "timezone": "", - "title": "Chain", + "title": "Filecoin Chain Stats", "uid": "z6FtI92Zz", - "version": 9 + "version": 4 } diff --git a/cmd/lotus-stats/docker-compose.yml b/cmd/lotus-stats/docker-compose.yml index 03d573b94ab..b08a2157eea 100644 --- a/cmd/lotus-stats/docker-compose.yml +++ b/cmd/lotus-stats/docker-compose.yml @@ -4,10 +4,10 @@ services: influxdb: image: influxdb:latest container_name: influxdb + ports: + - "18086:8086" environment: - INFLUXDB_DB=lotus - ports: - - "8086:8086" volumes: - influxdb:/var/lib/influxdb @@ -15,7 +15,7 @@ services: image: grafana/grafana:latest container_name: grafana ports: - - "3000:3000" + - "13000:3000" links: - influxdb volumes: diff --git a/cmd/lotus-stats/env.stats b/cmd/lotus-stats/env.stats index a76e7554aa2..ad5ec1619ee 100644 --- a/cmd/lotus-stats/env.stats +++ b/cmd/lotus-stats/env.stats @@ -1,3 +1,3 @@ -export INFLUX_ADDR="http://localhost:8086" +export INFLUX_ADDR="http://localhost:18086" export INFLUX_USER="" export INFLUX_PASS="" diff --git a/cmd/lotus-stats/main.go b/cmd/lotus-stats/main.go index 3ca139b7dc0..b4c13ea8c26 100644 --- a/cmd/lotus-stats/main.go +++ b/cmd/lotus-stats/main.go @@ -2,71 +2,160 @@ package main import ( "context" - "flag" "os" + "github.com/filecoin-project/lotus/build" + lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/tools/stats" + logging "github.com/ipfs/go-log/v2" + "github.com/urfave/cli/v2" ) var log = logging.Logger("stats") -const ( - influxAddrEnvVar = "INFLUX_ADDR" - influxUserEnvVar = "INFLUX_USER" - influxPassEnvVar = "INFLUX_PASS" -) - func main() { - var repo string = "~/.lotus" - var database string = "lotus" - var reset bool = false - var nosync bool = false - var height int64 = 0 - var headlag int = 3 - - flag.StringVar(&repo, "repo", repo, "lotus repo path") - flag.StringVar(&database, "database", database, "influx database") - flag.Int64Var(&height, "height", height, "block height to start syncing from (0 will 
resume)") - flag.IntVar(&headlag, "head-lag", headlag, "number of head events to hold to protect against small reorgs") - flag.BoolVar(&reset, "reset", reset, "truncate database before starting stats gathering") - flag.BoolVar(&nosync, "nosync", nosync, "skip waiting for sync") - - flag.Parse() - - ctx := context.Background() - - influx, err := stats.InfluxClient(os.Getenv(influxAddrEnvVar), os.Getenv(influxUserEnvVar), os.Getenv(influxPassEnvVar)) - if err != nil { - log.Fatal(err) + local := []*cli.Command{ + runCmd, + versionCmd, } - if reset { - if err := stats.ResetDatabase(influx, database); err != nil { - log.Fatal(err) - } + app := &cli.App{ + Name: "lotus-stats", + Usage: "Collect basic information about a filecoin network using lotus", + Version: build.UserVersion(), + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "lotus-path", + EnvVars: []string{"LOTUS_PATH"}, + Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME + }, + &cli.StringFlag{ + Name: "log-level", + EnvVars: []string{"LOTUS_STATS_LOG_LEVEL"}, + Value: "info", + }, + }, + Before: func(cctx *cli.Context) error { + return logging.SetLogLevel("stats", cctx.String("log-level")) + }, + Commands: local, + } + + if err := app.Run(os.Args); err != nil { + log.Errorw("exit in error", "err", err) + os.Exit(1) + return } +} + +var versionCmd = &cli.Command{ + Name: "version", + Usage: "Print version", + Action: func(cctx *cli.Context) error { + cli.VersionPrinter(cctx) + return nil + }, +} + +var runCmd = &cli.Command{ + Name: "run", + Usage: "", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "influx-database", + EnvVars: []string{"LOTUS_STATS_INFLUX_DATABASE"}, + Usage: "influx database", + Value: "", + }, + &cli.StringFlag{ + Name: "influx-hostname", + EnvVars: []string{"LOTUS_STATS_INFLUX_HOSTNAME"}, + Value: "http://localhost:8086", + Usage: "influx hostname", + }, + &cli.StringFlag{ + Name: "influx-username", + EnvVars: []string{"LOTUS_STATS_INFLUX_USERNAME"}, + Usage: "influx username", + Value: "", 
+ }, + &cli.StringFlag{ + Name: "influx-password", + EnvVars: []string{"LOTUS_STATS_INFLUX_PASSWORD"}, + Usage: "influx password", + Value: "", + }, + &cli.IntFlag{ + Name: "height", + EnvVars: []string{"LOTUS_STATS_HEIGHT"}, + Usage: "tipset height to start processing from", + Value: 0, + }, + &cli.IntFlag{ + Name: "head-lag", + EnvVars: []string{"LOTUS_STATS_HEAD_LAG"}, + Usage: "the number of tipsets to delay processing on to smooth chain reorgs", + Value: int(build.MessageConfidence), + }, + &cli.BoolFlag{ + Name: "no-sync", + EnvVars: []string{"LOTUS_STATS_NO_SYNC"}, + Usage: "do not wait for chain sync to complete", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + ctx := context.Background() + + resetFlag := cctx.Bool("reset") + noSyncFlag := cctx.Bool("no-sync") + heightFlag := cctx.Int("height") + headLagFlag := cctx.Int("head-lag") + + influxHostnameFlag := cctx.String("influx-hostname") + influxUsernameFlag := cctx.String("influx-username") + influxPasswordFlag := cctx.String("influx-password") + influxDatabaseFlag := cctx.String("influx-database") + + log.Infow("opening influx client", "hostname", influxHostnameFlag, "username", influxUsernameFlag, "database", influxDatabaseFlag) - if !reset && height == 0 { - h, err := stats.GetLastRecordedHeight(influx, database) + influx, err := stats.InfluxClient(influxHostnameFlag, influxUsernameFlag, influxPasswordFlag) if err != nil { - log.Info(err) + log.Fatal(err) } - height = h - } + if resetFlag { + if err := stats.ResetDatabase(influx, influxDatabaseFlag); err != nil { + log.Fatal(err) + } + } - api, closer, err := stats.GetFullNodeAPI(ctx, repo) - if err != nil { - log.Fatal(err) - } - defer closer() + height := int64(heightFlag) - if !nosync { - if err := stats.WaitForSyncComplete(ctx, api); err != nil { - log.Fatal(err) + if !resetFlag && height == 0 { + h, err := stats.GetLastRecordedHeight(influx, influxDatabaseFlag) + if err != nil { + log.Info(err) + } + + height = h } - } - 
stats.Collect(ctx, api, influx, database, height, headlag) + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + if !noSyncFlag { + if err := stats.WaitForSyncComplete(ctx, api); err != nil { + log.Fatal(err) + } + } + + stats.Collect(ctx, api, influx, influxDatabaseFlag, height, headLagFlag) + + return nil + }, } diff --git a/cmd/lotus-stats/setup.bash b/cmd/lotus-stats/setup.bash index e2812b93a61..6510c2fc6d7 100755 --- a/cmd/lotus-stats/setup.bash +++ b/cmd/lotus-stats/setup.bash @@ -1,10 +1,10 @@ #!/usr/bin/env bash -GRAFANA_HOST="localhost:3000" +GRAFANA_HOST="http://localhost:13000" curl -s -XPOST http://admin:admin@$GRAFANA_HOST/api/datasources -H 'Content-Type: text/json' --data-binary @- > /dev/null << EOF { - "name":"InfluxDB", + "name":"filecoin-ntwk-localstats", "type":"influxdb", "database":"lotus", "url": "http://influxdb:8086", diff --git a/cmd/lotus-storage-miner/actor.go b/cmd/lotus-storage-miner/actor.go index 0027ceb73b4..6ba57366376 100644 --- a/cmd/lotus-storage-miner/actor.go +++ b/cmd/lotus-storage-miner/actor.go @@ -21,7 +21,7 @@ import ( miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" @@ -29,7 +29,6 @@ import ( "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/tablewriter" - "github.com/filecoin-project/lotus/storage" ) var actorCmd = &cli.Command{ @@ -56,8 +55,22 @@ var actorSetAddrsCmd = &cli.Command{ Usage: "set gas limit", Value: 0, }, + &cli.BoolFlag{ + Name: "unset", + Usage: "unset address", + Value: false, + }, }, Action: func(cctx *cli.Context) error { + args := cctx.Args().Slice() + unset := cctx.Bool("unset") + if len(args) == 0 && 
!unset { + return cli.ShowSubcommandHelp(cctx) + } + if len(args) > 0 && unset { + return fmt.Errorf("unset can only be used with no arguments") + } + nodeAPI, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { return err @@ -73,7 +86,7 @@ var actorSetAddrsCmd = &cli.Command{ ctx := lcli.ReqContext(cctx) var addrs []abi.Multiaddrs - for _, a := range cctx.Args().Slice() { + for _, a := range args { maddr, err := ma.NewMultiaddr(a) if err != nil { return fmt.Errorf("failed to parse %q as a multiaddr: %w", a, err) @@ -310,7 +323,7 @@ var actorRepayDebtCmd = &cli.Command{ return err } - store := adt.WrapStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(api))) + store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api))) mst, err := miner.Load(store, mact) if err != nil { @@ -377,12 +390,15 @@ var actorControlList = &cli.Command{ Name: "verbose", }, &cli.BoolFlag{ - Name: "color", - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -416,9 +432,59 @@ var actorControlList = &cli.Command{ tablewriter.Col("balance"), ) - postAddr, err := storage.AddressFor(ctx, api, mi, storage.PoStAddr, types.FromFil(1)) + ac, err := nodeApi.ActorAddressConfig(ctx) if err != nil { - return xerrors.Errorf("getting address for post: %w", err) + return err + } + + commit := map[address.Address]struct{}{} + precommit := map[address.Address]struct{}{} + terminate := map[address.Address]struct{}{} + dealPublish := map[address.Address]struct{}{} + post := map[address.Address]struct{}{} + + for _, ca := range mi.ControlAddresses { + post[ca] = struct{}{} + } + + for _, ca := range ac.PreCommitControl { + ca, err := api.StateLookupID(ctx, ca, types.EmptyTSK) + if err != 
nil { + return err + } + + delete(post, ca) + precommit[ca] = struct{}{} + } + + for _, ca := range ac.CommitControl { + ca, err := api.StateLookupID(ctx, ca, types.EmptyTSK) + if err != nil { + return err + } + + delete(post, ca) + commit[ca] = struct{}{} + } + + for _, ca := range ac.TerminateControl { + ca, err := api.StateLookupID(ctx, ca, types.EmptyTSK) + if err != nil { + return err + } + + delete(post, ca) + terminate[ca] = struct{}{} + } + + for _, ca := range ac.DealPublishControl { + ca, err := api.StateLookupID(ctx, ca, types.EmptyTSK) + if err != nil { + return err + } + + delete(post, ca) + dealPublish[ca] = struct{}{} } printKey := func(name string, a address.Address) { @@ -453,9 +519,21 @@ var actorControlList = &cli.Command{ if a == mi.Worker { uses = append(uses, color.YellowString("other")) } - if a == postAddr { + if _, ok := post[a]; ok { uses = append(uses, color.GreenString("post")) } + if _, ok := precommit[a]; ok { + uses = append(uses, color.CyanString("precommit")) + } + if _, ok := commit[a]; ok { + uses = append(uses, color.BlueString("commit")) + } + if _, ok := terminate[a]; ok { + uses = append(uses, color.YellowString("terminate")) + } + if _, ok := dealPublish[a]; ok { + uses = append(uses, color.MagentaString("deals")) + } tw.Write(map[string]interface{}{ "name": name, @@ -591,8 +669,8 @@ var actorControlSet = &cli.Command{ var actorSetOwnerCmd = &cli.Command{ Name: "set-owner", - Usage: "Set owner address", - ArgsUsage: "[address]", + Usage: "Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner)", + ArgsUsage: "[newOwnerAddress senderAddress]", Flags: []cli.Flag{ &cli.BoolFlag{ Name: "really-do-it", @@ -606,8 +684,8 @@ var actorSetOwnerCmd = &cli.Command{ return nil } - if !cctx.Args().Present() { - return fmt.Errorf("must pass address of new owner address") + if cctx.NArg() != 2 { + return fmt.Errorf("must pass new owner address and sender address") } 
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) @@ -629,53 +707,42 @@ var actorSetOwnerCmd = &cli.Command{ return err } - newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + newAddrId, err := api.StateLookupID(ctx, na, types.EmptyTSK) if err != nil { return err } - maddr, err := nodeApi.ActorAddress(ctx) + fa, err := address.NewFromString(cctx.Args().Get(1)) if err != nil { return err } - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + fromAddrId, err := api.StateLookupID(ctx, fa, types.EmptyTSK) if err != nil { return err } - sp, err := actors.SerializeParams(&newAddr) + maddr, err := nodeApi.ActorAddress(ctx) if err != nil { - return xerrors.Errorf("serializing params: %w", err) + return err } - smsg, err := api.MpoolPushMessage(ctx, &types.Message{ - From: mi.Owner, - To: maddr, - Method: miner.Methods.ChangeOwnerAddress, - Value: big.Zero(), - Params: sp, - }, nil) + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) if err != nil { - return xerrors.Errorf("mpool push: %w", err) + return err } - fmt.Println("Propose Message CID:", smsg.Cid()) - - // wait for it to get mined into a block - wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) - if err != nil { - return err + if fromAddrId != mi.Owner && fromAddrId != newAddrId { + return xerrors.New("from address must either be the old owner or the new owner") } - // check it executed successfully - if wait.Receipt.ExitCode != 0 { - fmt.Println("Propose owner change failed!") - return err + sp, err := actors.SerializeParams(&newAddrId) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) } - smsg, err = api.MpoolPushMessage(ctx, &types.Message{ - From: newAddr, + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + From: fromAddrId, To: maddr, Method: miner.Methods.ChangeOwnerAddress, Value: big.Zero(), @@ -685,20 +752,22 @@ var actorSetOwnerCmd = &cli.Command{ return xerrors.Errorf("mpool push: %w", err) } - fmt.Println("Approve Message 
CID:", smsg.Cid()) + fmt.Println("Message CID:", smsg.Cid()) // wait for it to get mined into a block - wait, err = api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) if err != nil { return err } // check it executed successfully if wait.Receipt.ExitCode != 0 { - fmt.Println("Approve owner change failed!") + fmt.Println("owner change failed!") return err } + fmt.Println("message succeeded!") + return nil }, } diff --git a/cmd/lotus-storage-miner/actor_test.go b/cmd/lotus-storage-miner/actor_test.go index 949171699e0..073a8305988 100644 --- a/cmd/lotus-storage-miner/actor_test.go +++ b/cmd/lotus-storage-miner/actor_test.go @@ -7,23 +7,20 @@ import ( "fmt" "regexp" "strconv" - "sync/atomic" "testing" "time" - logging "github.com/ipfs/go-log/v2" + "github.com/filecoin-project/go-state-types/network" "github.com/stretchr/testify/require" "github.com/urfave/cli/v2" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api/test" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/lotuslog" + "github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/node/repo" - builder "github.com/filecoin-project/lotus/node/test" ) func TestWorkerKeyChange(t *testing.T) { @@ -34,43 +31,24 @@ func TestWorkerKeyChange(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _ = logging.SetLogLevel("*", "INFO") - - policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) - policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) - policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) - - lotuslog.SetupLogLevels() - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", 
"ERROR") - logging.SetLogLevel("pubsub", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") + kit.QuietMiningLogs() blocktime := 1 * time.Millisecond - - n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithUpgradeAt(1), test.FullNodeWithUpgradeAt(1)}, test.OneMiner) - - client1 := n[0] - client2 := n[1] - - // Connect the nodes. - addrinfo, err := client1.NetAddrsListen(ctx) - require.NoError(t, err) - err = client2.NetConnect(ctx, addrinfo) - require.NoError(t, err) + client1, client2, miner, ens := kit.EnsembleTwoOne(t, kit.MockProofs(), + kit.ConstructorOpts(kit.InstantaneousNetworkVersion(network.Version13)), + ) + ens.InterconnectAll().BeginMining(blocktime) output := bytes.NewBuffer(nil) run := func(cmd *cli.Command, args ...string) error { app := cli.NewApp() app.Metadata = map[string]interface{}{ "repoType": repo.StorageMiner, - "testnode-full": n[0], - "testnode-storage": sn[0], + "testnode-full": client1, + "testnode-storage": miner, } app.Writer = output - build.RunningNodeType = build.NodeMiner + api.RunningNodeType = api.NodeMiner fs := flag.NewFlagSet("", flag.ContinueOnError) for _, f := range cmd.Flags { @@ -84,29 +62,11 @@ func TestWorkerKeyChange(t *testing.T) { return cmd.Action(cctx) } - // setup miner - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) == 1 { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, test.MineNext); err != nil { - t.Error(err) - } - } - }() - defer func() { - atomic.AddInt64(&mine, -1) - fmt.Println("shutting down mining") - <-done - }() - newKey, err := client1.WalletNew(ctx, types.KTBLS) require.NoError(t, err) // Initialize wallet. 
- test.SendFunds(ctx, t, client1, newKey, abi.NewTokenAmount(0)) + kit.SendFunds(ctx, t, client1, newKey, abi.NewTokenAmount(0)) require.NoError(t, run(actorProposeChangeWorker, "--really-do-it", newKey.String())) @@ -126,14 +86,8 @@ func TestWorkerKeyChange(t *testing.T) { require.Error(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String())) output.Reset() - for { - head, err := client1.ChainHead(ctx) - require.NoError(t, err) - if head.Height() >= abi.ChainEpoch(targetEpoch) { - break - } - build.Clock.Sleep(10 * blocktime) - } + client1.WaitTillChain(ctx, kit.HeightAtLeast(abi.ChainEpoch(targetEpoch))) + require.NoError(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String())) output.Reset() @@ -142,23 +96,8 @@ func TestWorkerKeyChange(t *testing.T) { // Wait for finality (worker key switch). targetHeight := head.Height() + policy.ChainFinality - for { - head, err := client1.ChainHead(ctx) - require.NoError(t, err) - if head.Height() >= targetHeight { - break - } - build.Clock.Sleep(10 * blocktime) - } + client1.WaitTillChain(ctx, kit.HeightAtLeast(targetHeight)) // Make sure the other node can catch up. 
- for i := 0; i < 20; i++ { - head, err := client2.ChainHead(ctx) - require.NoError(t, err) - if head.Height() >= targetHeight { - return - } - build.Clock.Sleep(10 * blocktime) - } - t.Fatal("failed to reach target epoch on the second miner") + client2.WaitTillChain(ctx, kit.HeightAtLeast(targetHeight)) } diff --git a/cmd/lotus-storage-miner/allinfo_test.go b/cmd/lotus-storage-miner/allinfo_test.go index a458c024b55..5f30b4fec3d 100644 --- a/cmd/lotus-storage-miner/allinfo_test.go +++ b/cmd/lotus-storage-miner/allinfo_test.go @@ -1,22 +1,18 @@ package main import ( + "context" "flag" "testing" "time" - logging "github.com/ipfs/go-log/v2" + "github.com/filecoin-project/lotus/itests/kit" "github.com/stretchr/testify/require" "github.com/urfave/cli/v2" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api/test" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/policy" - "github.com/filecoin-project/lotus/lib/lotuslog" "github.com/filecoin-project/lotus/node/repo" - builder "github.com/filecoin-project/lotus/node/test" ) func TestMinerAllInfo(t *testing.T) { @@ -24,20 +20,9 @@ func TestMinerAllInfo(t *testing.T) { t.Skip("skipping test in short mode") } - _ = logging.SetLogLevel("*", "INFO") - - policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) - policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) - policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) - _test = true - lotuslog.SetupLogLevels() - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") + kit.QuietMiningLogs() oldDelay := policy.GetPreCommitChallengeDelay() policy.SetPreCommitChallengeDelay(5) @@ -45,32 +30,29 @@ func TestMinerAllInfo(t *testing.T) { policy.SetPreCommitChallengeDelay(oldDelay) }) - 
var n []test.TestNode - var sn []test.TestStorageNode + client, miner, ens := kit.EnsembleMinimal(t) + ens.InterconnectAll().BeginMining(time.Second) run := func(t *testing.T) { app := cli.NewApp() app.Metadata = map[string]interface{}{ "repoType": repo.StorageMiner, - "testnode-full": n[0], - "testnode-storage": sn[0], + "testnode-full": client, + "testnode-storage": miner, } - build.RunningNodeType = build.NodeMiner + api.RunningNodeType = api.NodeMiner cctx := cli.NewContext(app, flag.NewFlagSet("", flag.ContinueOnError), nil) require.NoError(t, infoAllCmd.Action(cctx)) } - bp := func(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) { - n, sn = builder.Builder(t, fullOpts, storage) - - t.Run("pre-info-all", run) - - return n, sn - } + t.Run("pre-info-all", run) - test.TestDealFlow(t, bp, time.Second, false, false) + dh := kit.NewDealHarness(t, client, miner, miner) + deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6}) + outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false) + kit.AssertFilesEqual(t, inPath, outPath) t.Run("post-info-all", run) } diff --git a/cmd/lotus-storage-miner/info.go b/cmd/lotus-storage-miner/info.go index 213d62e6e93..3941ce5632f 100644 --- a/cmd/lotus-storage-miner/info.go +++ b/cmd/lotus-storage-miner/info.go @@ -3,7 +3,12 @@ package main import ( "context" "fmt" + "math" + corebig "math/big" + "os" "sort" + "strings" + "text/tabwriter" "time" "github.com/fatih/color" @@ -12,19 +17,20 @@ import ( cbor "github.com/ipfs/go-ipld-cbor" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apibstore" + 
"github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/lib/blockstore" - "github.com/filecoin-project/lotus/lib/bufbstore" ) var infoCmd = &cli.Command{ @@ -43,8 +49,6 @@ var infoCmd = &cli.Command{ } func infoCmdAct(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") - nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { return err @@ -59,7 +63,39 @@ func infoCmdAct(cctx *cli.Context) error { ctx := lcli.ReqContext(cctx) - maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor")) + fmt.Print("Chain: ") + + head, err := api.ChainHead(ctx) + if err != nil { + return err + } + + switch { + case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*3/2): // within 1.5 epochs + fmt.Printf("[%s]", color.GreenString("sync ok")) + case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*5): // within 5 epochs + fmt.Printf("[%s]", color.YellowString("sync slow (%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second))) + default: + fmt.Printf("[%s]", color.RedString("sync behind! 
(%s behind)", time.Now().Sub(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second))) + } + + basefee := head.MinTicketBlock().ParentBaseFee + gasCol := []color.Attribute{color.FgBlue} + switch { + case basefee.GreaterThan(big.NewInt(7000_000_000)): // 7 nFIL + gasCol = []color.Attribute{color.BgRed, color.FgBlack} + case basefee.GreaterThan(big.NewInt(3000_000_000)): // 3 nFIL + gasCol = []color.Attribute{color.FgRed} + case basefee.GreaterThan(big.NewInt(750_000_000)): // 750 uFIL + gasCol = []color.Attribute{color.FgYellow} + case basefee.GreaterThan(big.NewInt(100_000_000)): // 100 uFIL + gasCol = []color.Attribute{color.FgGreen} + } + fmt.Printf(" [basefee %s]", color.New(gasCol...).Sprint(types.FIL(basefee).Short())) + + fmt.Println() + + maddr, err := getActorAddress(ctx, cctx) if err != nil { return err } @@ -69,40 +105,43 @@ func infoCmdAct(cctx *cli.Context) error { return err } - tbs := bufbstore.NewTieredBstore(apibstore.NewAPIBlockstore(api), blockstore.NewTemporary()) + tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(api), blockstore.NewMemory()) mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact) if err != nil { return err } - fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr)) - // Sector size mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) if err != nil { return err } - fmt.Printf("Sector Size: %s\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize)))) + ssize := types.SizeStr(types.NewInt(uint64(mi.SectorSize))) + fmt.Printf("Miner: %s (%s sectors)\n", color.BlueString("%s", maddr), ssize) pow, err := api.StateMinerPower(ctx, maddr, types.EmptyTSK) if err != nil { return err } - rpercI := types.BigDiv(types.BigMul(pow.MinerPower.RawBytePower, types.NewInt(1000000)), pow.TotalPower.RawBytePower) - qpercI := types.BigDiv(types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(1000000)), pow.TotalPower.QualityAdjPower) - - fmt.Printf("Byte Power: %s / %s (%0.4f%%)\n", - 
color.BlueString(types.SizeStr(pow.MinerPower.RawBytePower)), - types.SizeStr(pow.TotalPower.RawBytePower), - float64(rpercI.Int64())/10000) - - fmt.Printf("Actual Power: %s / %s (%0.4f%%)\n", + fmt.Printf("Power: %s / %s (%0.4f%%)\n", color.GreenString(types.DeciStr(pow.MinerPower.QualityAdjPower)), types.DeciStr(pow.TotalPower.QualityAdjPower), - float64(qpercI.Int64())/10000) + types.BigDivFloat( + types.BigMul(pow.MinerPower.QualityAdjPower, big.NewInt(100)), + pow.TotalPower.QualityAdjPower, + ), + ) + fmt.Printf("\tRaw: %s / %s (%0.4f%%)\n", + color.BlueString(types.SizeStr(pow.MinerPower.RawBytePower)), + types.SizeStr(pow.TotalPower.RawBytePower), + types.BigDivFloat( + types.BigMul(pow.MinerPower.RawBytePower, big.NewInt(100)), + pow.TotalPower.RawBytePower, + ), + ) secCounts, err := api.StateMinerSectorCount(ctx, maddr, types.EmptyTSK) if err != nil { return err @@ -116,7 +155,7 @@ func infoCmdAct(cctx *cli.Context) error { } else { var faultyPercentage float64 if secCounts.Live != 0 { - faultyPercentage = float64(10000*nfaults/secCounts.Live) / 100. 
+ faultyPercentage = float64(100*nfaults) / float64(secCounts.Live) } fmt.Printf("\tProving: %s (%s Faulty, %.2f%%)\n", types.SizeStr(types.BigMul(types.NewInt(proving), types.NewInt(uint64(mi.SectorSize)))), @@ -127,16 +166,54 @@ func infoCmdAct(cctx *cli.Context) error { if !pow.HasMinPower { fmt.Print("Below minimum power threshold, no blocks will be won") } else { - expWinChance := float64(types.BigMul(qpercI, types.NewInt(build.BlocksPerEpoch)).Int64()) / 1000000 - if expWinChance > 0 { - if expWinChance > 1 { - expWinChance = 1 + + winRatio := new(corebig.Rat).SetFrac( + types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(build.BlocksPerEpoch)).Int, + pow.TotalPower.QualityAdjPower.Int, + ) + + if winRatioFloat, _ := winRatio.Float64(); winRatioFloat > 0 { + + // if the corresponding poisson distribution isn't infinitely small then + // throw it into the mix as well, accounting for multi-wins + winRationWithPoissonFloat := -math.Expm1(-winRatioFloat) + winRationWithPoisson := new(corebig.Rat).SetFloat64(winRationWithPoissonFloat) + if winRationWithPoisson != nil { + winRatio = winRationWithPoisson + winRatioFloat = winRationWithPoissonFloat } - winRate := time.Duration(float64(time.Second*time.Duration(build.BlockDelaySecs)) / expWinChance) - winPerDay := float64(time.Hour*24) / float64(winRate) - fmt.Print("Expected block win rate: ") - color.Blue("%.4f/day (every %s)", winPerDay, winRate.Truncate(time.Second)) + weekly, _ := new(corebig.Rat).Mul( + winRatio, + new(corebig.Rat).SetInt64(7*builtin.EpochsInDay), + ).Float64() + + avgDuration, _ := new(corebig.Rat).Mul( + new(corebig.Rat).SetInt64(builtin.EpochDurationSeconds), + new(corebig.Rat).Inv(winRatio), + ).Float64() + + fmt.Print("Projected average block win rate: ") + color.Blue( + "%.02f/week (every %s)", + weekly, + (time.Second * time.Duration(avgDuration)).Truncate(time.Second).String(), + ) + + // Geometric distribution of P(Y < k) calculated as described in 
https://en.wikipedia.org/wiki/Geometric_distribution#Probability_Outcomes_Examples + // https://www.wolframalpha.com/input/?i=t+%3E+0%3B+p+%3E+0%3B+p+%3C+1%3B+c+%3E+0%3B+c+%3C1%3B+1-%281-p%29%5E%28t%29%3Dc%3B+solve+t + // t == how many dice-rolls (epochs) before win + // p == winRate == ( minerPower / netPower ) + // c == target probability of win ( 99.9% in this case ) + fmt.Print("Projected block win with ") + color.Green( + "99.9%% probability every %s", + (time.Second * time.Duration( + builtin.EpochDurationSeconds*math.Log(1-0.999)/ + math.Log(1-winRatioFloat), + )).Truncate(time.Second).String(), + ) + fmt.Println("(projections DO NOT account for future network and miner growth)") } } @@ -147,27 +224,93 @@ func infoCmdAct(cctx *cli.Context) error { return err } - var nactiveDeals, nVerifDeals, ndeals uint64 - var activeDealBytes, activeVerifDealBytes, dealBytes abi.PaddedPieceSize + type dealStat struct { + count, verifCount int + bytes, verifBytes uint64 + } + dsAdd := func(ds *dealStat, deal storagemarket.MinerDeal) { + ds.count++ + ds.bytes += uint64(deal.Proposal.PieceSize) + if deal.Proposal.VerifiedDeal { + ds.verifCount++ + ds.verifBytes += uint64(deal.Proposal.PieceSize) + } + } + + showDealStates := map[storagemarket.StorageDealStatus]struct{}{ + storagemarket.StorageDealActive: {}, + storagemarket.StorageDealTransferring: {}, + storagemarket.StorageDealStaged: {}, + storagemarket.StorageDealAwaitingPreCommit: {}, + storagemarket.StorageDealSealing: {}, + storagemarket.StorageDealPublish: {}, + storagemarket.StorageDealCheckForAcceptance: {}, + storagemarket.StorageDealPublishing: {}, + } + + var total dealStat + perState := map[storagemarket.StorageDealStatus]*dealStat{} for _, deal := range deals { - ndeals++ - dealBytes += deal.Proposal.PieceSize + if _, ok := showDealStates[deal.State]; !ok { + continue + } + if perState[deal.State] == nil { + perState[deal.State] = new(dealStat) + } - if deal.State == storagemarket.StorageDealActive { - 
nactiveDeals++ - activeDealBytes += deal.Proposal.PieceSize + dsAdd(&total, deal) + dsAdd(perState[deal.State], deal) + } - if deal.Proposal.VerifiedDeal { - nVerifDeals++ - activeVerifDealBytes += deal.Proposal.PieceSize - } + type wstr struct { + str string + status storagemarket.StorageDealStatus + } + sorted := make([]wstr, 0, len(perState)) + for status, stat := range perState { + st := strings.TrimPrefix(storagemarket.DealStates[status], "StorageDeal") + sorted = append(sorted, wstr{ + str: fmt.Sprintf(" %s:\t%d\t\t%s\t(Verified: %d\t%s)\n", st, stat.count, types.SizeStr(types.NewInt(stat.bytes)), stat.verifCount, types.SizeStr(types.NewInt(stat.verifBytes))), + status: status, + }, + ) + } + sort.Slice(sorted, func(i, j int) bool { + if sorted[i].status == storagemarket.StorageDealActive || sorted[j].status == storagemarket.StorageDealActive { + return sorted[i].status == storagemarket.StorageDealActive + } + return sorted[i].status > sorted[j].status + }) + + fmt.Printf("Storage Deals: %d, %s\n", total.count, types.SizeStr(types.NewInt(total.bytes))) + + tw := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', 0) + for _, e := range sorted { + _, _ = tw.Write([]byte(e.str)) + } + + _ = tw.Flush() + fmt.Println() + + retrievals, err := nodeApi.MarketListRetrievalDeals(ctx) + if err != nil { + return xerrors.Errorf("getting retrieval deal list: %w", err) + } + + var retrComplete dealStat + for _, retrieval := range retrievals { + if retrieval.Status == retrievalmarket.DealStatusCompleted { + retrComplete.count++ + retrComplete.bytes += retrieval.TotalSent } } - fmt.Printf("Deals: %d, %s\n", ndeals, types.SizeStr(types.NewInt(uint64(dealBytes)))) - fmt.Printf("\tActive: %d, %s (Verified: %d, %s)\n", nactiveDeals, types.SizeStr(types.NewInt(uint64(activeDealBytes))), nVerifDeals, types.SizeStr(types.NewInt(uint64(activeVerifDealBytes)))) + fmt.Printf("Retrieval Deals (complete): %d, %s\n", retrComplete.count, types.SizeStr(types.NewInt(retrComplete.bytes))) + 
fmt.Println() + spendable := big.Zero() + // NOTE: there's no need to unlock anything here. Funds only // vest on deadline boundaries, and they're unlocked by cron. lockedFunds, err := mas.LockedFunds() @@ -178,32 +321,46 @@ func infoCmdAct(cctx *cli.Context) error { if err != nil { return xerrors.Errorf("getting available balance: %w", err) } - fmt.Printf("Miner Balance: %s\n", color.YellowString("%s", types.FIL(mact.Balance))) - fmt.Printf("\tPreCommit: %s\n", types.FIL(lockedFunds.PreCommitDeposits)) - fmt.Printf("\tPledge: %s\n", types.FIL(lockedFunds.InitialPledgeRequirement)) - fmt.Printf("\tVesting: %s\n", types.FIL(lockedFunds.VestingFunds)) - color.Green("\tAvailable: %s", types.FIL(availBalance)) - wb, err := api.WalletBalance(ctx, mi.Worker) - if err != nil { - return xerrors.Errorf("getting worker balance: %w", err) - } - color.Cyan("Worker Balance: %s", types.FIL(wb)) + spendable = big.Add(spendable, availBalance) + + fmt.Printf("Miner Balance: %s\n", color.YellowString("%s", types.FIL(mact.Balance).Short())) + fmt.Printf(" PreCommit: %s\n", types.FIL(lockedFunds.PreCommitDeposits).Short()) + fmt.Printf(" Pledge: %s\n", types.FIL(lockedFunds.InitialPledgeRequirement).Short()) + fmt.Printf(" Vesting: %s\n", types.FIL(lockedFunds.VestingFunds).Short()) + colorTokenAmount(" Available: %s\n", availBalance) mb, err := api.StateMarketBalance(ctx, maddr, types.EmptyTSK) if err != nil { return xerrors.Errorf("getting market balance: %w", err) } - fmt.Printf("Market (Escrow): %s\n", types.FIL(mb.Escrow)) - fmt.Printf("Market (Locked): %s\n", types.FIL(mb.Locked)) + spendable = big.Add(spendable, big.Sub(mb.Escrow, mb.Locked)) - fmt.Println() + fmt.Printf("Market Balance: %s\n", types.FIL(mb.Escrow).Short()) + fmt.Printf(" Locked: %s\n", types.FIL(mb.Locked).Short()) + colorTokenAmount(" Available: %s\n", big.Sub(mb.Escrow, mb.Locked)) - sealdur, err := nodeApi.SectorGetExpectedSealDuration(ctx) + wb, err := api.WalletBalance(ctx, mi.Worker) if err != nil { - 
return err + return xerrors.Errorf("getting worker balance: %w", err) } + spendable = big.Add(spendable, wb) + color.Cyan("Worker Balance: %s", types.FIL(wb).Short()) + if len(mi.ControlAddresses) > 0 { + cbsum := big.Zero() + for _, ca := range mi.ControlAddresses { + b, err := api.WalletBalance(ctx, ca) + if err != nil { + return xerrors.Errorf("getting control address balance: %w", err) + } + cbsum = big.Add(cbsum, b) + } + spendable = big.Add(spendable, cbsum) + + fmt.Printf(" Control: %s\n", types.FIL(cbsum).Short()) + } + colorTokenAmount("Total Spendable: %s\n", spendable) - fmt.Printf("Expected Seal Duration: %s\n\n", sealdur) + fmt.Println() if !cctx.Bool("hide-sectors-info") { fmt.Println("Sectors:") @@ -232,28 +389,41 @@ var stateList = []stateMeta{ {col: color.FgBlue, state: sealing.Empty}, {col: color.FgBlue, state: sealing.WaitDeals}, + {col: color.FgBlue, state: sealing.AddPiece}, {col: color.FgRed, state: sealing.UndefinedSectorState}, {col: color.FgYellow, state: sealing.Packing}, + {col: color.FgYellow, state: sealing.GetTicket}, {col: color.FgYellow, state: sealing.PreCommit1}, {col: color.FgYellow, state: sealing.PreCommit2}, {col: color.FgYellow, state: sealing.PreCommitting}, {col: color.FgYellow, state: sealing.PreCommitWait}, + {col: color.FgYellow, state: sealing.SubmitPreCommitBatch}, + {col: color.FgYellow, state: sealing.PreCommitBatchWait}, {col: color.FgYellow, state: sealing.WaitSeed}, {col: color.FgYellow, state: sealing.Committing}, + {col: color.FgYellow, state: sealing.CommitFinalize}, {col: color.FgYellow, state: sealing.SubmitCommit}, {col: color.FgYellow, state: sealing.CommitWait}, + {col: color.FgYellow, state: sealing.SubmitCommitAggregate}, + {col: color.FgYellow, state: sealing.CommitAggregateWait}, {col: color.FgYellow, state: sealing.FinalizeSector}, + {col: color.FgCyan, state: sealing.Terminating}, + {col: color.FgCyan, state: sealing.TerminateWait}, + {col: color.FgCyan, state: sealing.TerminateFinality}, + {col: 
color.FgCyan, state: sealing.TerminateFailed}, {col: color.FgCyan, state: sealing.Removing}, {col: color.FgCyan, state: sealing.Removed}, {col: color.FgRed, state: sealing.FailedUnrecoverable}, + {col: color.FgRed, state: sealing.AddPieceFailed}, {col: color.FgRed, state: sealing.SealPreCommit1Failed}, {col: color.FgRed, state: sealing.SealPreCommit2Failed}, {col: color.FgRed, state: sealing.PreCommitFailed}, {col: color.FgRed, state: sealing.ComputeProofFailed}, {col: color.FgRed, state: sealing.CommitFailed}, + {col: color.FgRed, state: sealing.CommitFinalizeFailed}, {col: color.FgRed, state: sealing.PackingFailed}, {col: color.FgRed, state: sealing.FinalizeFailed}, {col: color.FgRed, state: sealing.Faulty}, @@ -274,22 +444,18 @@ func init() { } func sectorsInfo(ctx context.Context, napi api.StorageMiner) error { - sectors, err := napi.SectorsList(ctx) + summary, err := napi.SectorsSummary(ctx) if err != nil { return err } - buckets := map[sealing.SectorState]int{ - "Total": len(sectors), - } - for _, s := range sectors { - st, err := napi.SectorsStatus(ctx, s, false) - if err != nil { - return err - } - - buckets[sealing.SectorState(st.State)]++ + buckets := make(map[sealing.SectorState]int) + var total int + for s, c := range summary { + buckets[sealing.SectorState(s)] = c + total += c } + buckets["Total"] = total var sorted []stateMeta for state, i := range buckets { @@ -306,3 +472,13 @@ func sectorsInfo(ctx context.Context, napi api.StorageMiner) error { return nil } + +func colorTokenAmount(format string, amount abi.TokenAmount) { + if amount.GreaterThan(big.Zero()) { + color.Green(format, types.FIL(amount).Short()) + } else if amount.Equals(big.Zero()) { + color.Yellow(format, types.FIL(amount).Short()) + } else { + color.Red(format, types.FIL(amount).Short()) + } +} diff --git a/cmd/lotus-storage-miner/info_all.go b/cmd/lotus-storage-miner/info_all.go index 408f9b5c750..e5e08a56911 100644 --- a/cmd/lotus-storage-miner/info_all.go +++ 
b/cmd/lotus-storage-miner/info_all.go @@ -35,80 +35,80 @@ var infoAllCmd = &cli.Command{ fmt.Println("#: Version") if err := lcli.VersionCmd.Action(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } fmt.Println("\n#: Miner Info") if err := infoCmdAct(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } // Verbose info fmt.Println("\n#: Storage List") if err := storageListCmd.Action(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } fmt.Println("\n#: Worker List") if err := sealingWorkersCmd.Action(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } fmt.Println("\n#: PeerID") if err := lcli.NetId.Action(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } fmt.Println("\n#: Listen Addresses") if err := lcli.NetListen.Action(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } fmt.Println("\n#: Reachability") if err := lcli.NetReachability.Action(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } // Very Verbose info fmt.Println("\n#: Peers") if err := lcli.NetPeers.Action(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } fmt.Println("\n#: Sealing Jobs") if err := sealingJobsCmd.Action(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } fmt.Println("\n#: Sched Diag") if err := sealingSchedDiagCmd.Action(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } fmt.Println("\n#: Storage Ask") if err := getAskCmd.Action(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } fmt.Println("\n#: Storage Deals") if err := dealsListCmd.Action(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } fmt.Println("\n#: Retrieval Deals") if err := retrievalDealsListCmd.Action(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } fmt.Println("\n#: Sector List") if err := sectorsListCmd.Action(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } fmt.Println("\n#: Sector Refs") if err := sectorsRefsCmd.Action(cctx); 
err != nil { - return err + fmt.Println("ERROR: ", err) } // Very Very Verbose info @@ -116,7 +116,7 @@ var infoAllCmd = &cli.Command{ list, err := nodeApi.SectorsList(ctx) if err != nil { - return err + fmt.Println("ERROR: ", err) } sort.Slice(list, func(i, j int) bool { @@ -129,11 +129,11 @@ var infoAllCmd = &cli.Command{ fs := &flag.FlagSet{} for _, f := range sectorsStatusCmd.Flags { if err := f.Apply(fs); err != nil { - return err + fmt.Println("ERROR: ", err) } } if err := fs.Parse([]string{"--log", "--on-chain-info", fmt.Sprint(s)}); err != nil { - return err + fmt.Println("ERROR: ", err) } if err := sectorsStatusCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil { @@ -144,7 +144,7 @@ var infoAllCmd = &cli.Command{ fs = &flag.FlagSet{} if err := fs.Parse([]string{fmt.Sprint(s)}); err != nil { - return err + fmt.Println("ERROR: ", err) } if err := storageFindCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil { @@ -155,7 +155,7 @@ var infoAllCmd = &cli.Command{ if !_test { fmt.Println("\n#: Goroutines") if err := lcli.PprofGoroutines.Action(cctx); err != nil { - return err + fmt.Println("ERROR: ", err) } } diff --git a/cmd/lotus-storage-miner/init.go b/cmd/lotus-storage-miner/init.go index a7fcd722a9e..1cce52a41a1 100644 --- a/cmd/lotus-storage-miner/init.go +++ b/cmd/lotus-storage-miner/init.go @@ -8,6 +8,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "net/http" "os" "path/filepath" "strconv" @@ -37,6 +38,8 @@ import ( power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power" lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -118,7 +121,8 @@ var initCmd = &cli.Command{ }, }, Subcommands: []*cli.Command{ - initRestoreCmd, + restoreCmd, + serviceCmd, }, Action: func(cctx 
*cli.Context) error { log.Info("Initializing lotus miner") @@ -143,13 +147,17 @@ var initCmd = &cli.Command{ log.Info("Checking proof parameters") - if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(ssize)); err != nil { + if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } log.Info("Trying to connect to full node RPC") - api, closer, err := lcli.GetFullNodeAPI(cctx) // TODO: consider storing full node address in config + if err := checkV1ApiSupport(ctx, cctx); err != nil { + return err + } + + api, closer, err := lcli.GetFullNodeAPIV1(cctx) // TODO: consider storing full node address in config if err != nil { return err } @@ -158,7 +166,7 @@ var initCmd = &cli.Command{ log.Info("Checking full node sync status") if !cctx.Bool("genesis-miner") && !cctx.Bool("nosync") { - if err := lcli.SyncWait(ctx, api, false); err != nil { + if err := lcli.SyncWait(ctx, &v0api.WrapperV1Full{FullNode: api}, false); err != nil { return xerrors.Errorf("sync wait: %w", err) } } @@ -186,8 +194,8 @@ var initCmd = &cli.Command{ return err } - if !v.APIVersion.EqMajorMinor(build.FullAPIVersion) { - return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", build.FullAPIVersion, v.APIVersion) + if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion1) { + return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion1, v.APIVersion) } log.Info("Initializing repo") @@ -269,7 +277,7 @@ var initCmd = &cli.Command{ }, } -func migratePreSealMeta(ctx context.Context, api lapi.FullNode, metadata string, maddr address.Address, mds dtypes.MetadataDS) error { +func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string, maddr address.Address, mds dtypes.MetadataDS) error { metadata, err := homedir.Expand(metadata) if err != nil { return xerrors.Errorf("expanding preseal dir: %w", err) @@ 
-310,9 +318,10 @@ func migratePreSealMeta(ctx context.Context, api lapi.FullNode, metadata string, Size: abi.PaddedPieceSize(meta.SectorSize), PieceCID: commD, }, - DealInfo: &sealing.DealInfo{ - DealID: dealID, - DealSchedule: sealing.DealSchedule{ + DealInfo: &lapi.PieceDealInfo{ + DealID: dealID, + DealProposal: §or.Deal, + DealSchedule: lapi.DealSchedule{ StartEpoch: sector.Deal.StartEpoch, EndEpoch: sector.Deal.EndEpoch, }, @@ -378,7 +387,7 @@ func migratePreSealMeta(ctx context.Context, api lapi.FullNode, metadata string, return mds.Put(datastore.NewKey(modules.StorageCounterDSPrefix), buf[:size]) } -func findMarketDealID(ctx context.Context, api lapi.FullNode, deal market2.DealProposal) (abi.DealID, error) { +func findMarketDealID(ctx context.Context, api v1api.FullNode, deal market2.DealProposal) (abi.DealID, error) { // TODO: find a better way // (this is only used by genesis miners) @@ -397,7 +406,7 @@ func findMarketDealID(ctx context.Context, api lapi.FullNode, deal market2.DealP return 0, xerrors.New("deal not found") } -func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode, r repo.Repo, ssize abi.SectorSize, gasPrice types.BigInt) error { +func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode, r repo.Repo, ssize abi.SectorSize, gasPrice types.BigInt) error { lr, err := r.Lock(repo.StorageMiner) if err != nil { return err @@ -416,7 +425,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode, return xerrors.Errorf("peer ID from private key: %w", err) } - mds, err := lr.Datastore("/metadata") + mds, err := lr.Datastore(context.TODO(), "/metadata") if err != nil { return err } @@ -433,11 +442,6 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode, return err } - spt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize) - if err != nil { - return err - } - mid, err := address.IDFromAddress(a) if err != nil { return xerrors.Errorf("getting id 
address: %w", err) @@ -451,16 +455,22 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode, wsts := statestore.New(namespace.Wrap(mds, modules.WorkerCallsPrefix)) smsts := statestore.New(namespace.Wrap(mds, modules.ManagerWorkPrefix)) - smgr, err := sectorstorage.New(ctx, lr, stores.NewIndex(), &ffiwrapper.Config{ - SealProofType: spt, - }, sectorstorage.SealerConfig{ + si := stores.NewIndex() + + lstor, err := stores.NewLocal(ctx, lr, si, nil) + if err != nil { + return err + } + stor := stores.NewRemote(lstor, si, http.Header(sa), 10, &stores.DefaultPartialFileHandler{}) + + smgr, err := sectorstorage.New(ctx, lstor, stor, lr, si, sectorstorage.SealerConfig{ ParallelFetchLimit: 10, AllowAddPiece: true, AllowPreCommit1: true, AllowPreCommit2: true, AllowCommit: true, AllowUnseal: true, - }, nil, sa, wsts, smsts) + }, wsts, smsts) if err != nil { return err } @@ -568,7 +578,7 @@ func makeHostKey(lr repo.LockedRepo) (crypto.PrivKey, error) { return pk, nil } -func configureStorageMiner(ctx context.Context, api lapi.FullNode, addr address.Address, peerid peer.ID, gasPrice types.BigInt) error { +func configureStorageMiner(ctx context.Context, api v1api.FullNode, addr address.Address, peerid peer.ID, gasPrice types.BigInt) error { mi, err := api.StateMinerInfo(ctx, addr, types.EmptyTSK) if err != nil { return xerrors.Errorf("getWorkerAddr returned bad address: %w", err) @@ -594,7 +604,7 @@ func configureStorageMiner(ctx context.Context, api lapi.FullNode, addr address. } log.Info("Waiting for message: ", smsg.Cid()) - ret, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + ret, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true) if err != nil { return err } @@ -606,7 +616,7 @@ func configureStorageMiner(ctx context.Context, api lapi.FullNode, addr address. 
return nil } -func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID, gasPrice types.BigInt, cctx *cli.Context) (address.Address, error) { +func createStorageMiner(ctx context.Context, api v1api.FullNode, peerid peer.ID, gasPrice types.BigInt, cctx *cli.Context) (address.Address, error) { var err error var owner address.Address if cctx.String("owner") != "" { @@ -648,7 +658,7 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID, log.Infof("Initializing worker account %s, message: %s", worker, signed.Cid()) log.Infof("Waiting for confirmation") - mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence) + mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true) if err != nil { return address.Undef, xerrors.Errorf("waiting for worker init: %w", err) } @@ -657,9 +667,14 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID, } } - spt, err := ffiwrapper.SealProofTypeFromSectorSize(abi.SectorSize(ssize)) + nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK) if err != nil { - return address.Undef, err + return address.Undef, xerrors.Errorf("getting network version: %w", err) + } + + spt, err := miner.SealProofTypeFromSectorSize(abi.SectorSize(ssize), nv) + if err != nil { + return address.Undef, xerrors.Errorf("getting seal proof type: %w", err) } params, err := actors.SerializeParams(&power2.CreateMinerParams{ @@ -701,7 +716,7 @@ func createStorageMiner(ctx context.Context, api lapi.FullNode, peerid peer.ID, log.Infof("Pushed CreateMiner message: %s", signed.Cid()) log.Infof("Waiting for confirmation") - mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence) + mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true) if err != nil { return address.Undef, xerrors.Errorf("waiting for createMiner message: %w", err) } @@ -718,3 +733,26 @@ func createStorageMiner(ctx 
context.Context, api lapi.FullNode, peerid peer.ID, log.Infof("New miners address is: %s (%s)", retval.IDAddress, retval.RobustAddress) return retval.IDAddress, nil } + +// checkV1ApiSupport uses v0 api version to signal support for v1 API +// trying to query the v1 api on older lotus versions would get a 404, which can happen for any number of other reasons +func checkV1ApiSupport(ctx context.Context, cctx *cli.Context) error { + // check v0 api version to make sure it supports v1 api + api0, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + v, err := api0.Version(ctx) + closer() + + if err != nil { + return err + } + + if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion0) { + return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion0, v.APIVersion) + } + + return nil +} diff --git a/cmd/lotus-storage-miner/init_restore.go b/cmd/lotus-storage-miner/init_restore.go index 83a9ad87c53..3b4e2b26d2e 100644 --- a/cmd/lotus-storage-miner/init_restore.go +++ b/cmd/lotus-storage-miner/init_restore.go @@ -1,10 +1,13 @@ package main import ( + "context" "encoding/json" "io/ioutil" "os" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/docker/go-units" "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p-core/peer" @@ -17,7 +20,9 @@ import ( paramfetch "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/go-state-types/big" + lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/extern/sector-storage/stores" @@ -26,7 +31,7 @@ import ( "github.com/filecoin-project/lotus/node/repo" ) -var initRestoreCmd = &cli.Command{ +var restoreCmd = &cli.Command{ Name: "restore", Usage: "Initialize a lotus miner repo from a backup", Flags: []cli.Flag{ @@ 
-45,230 +50,248 @@ var initRestoreCmd = &cli.Command{ }, ArgsUsage: "[backupFile]", Action: func(cctx *cli.Context) error { - log.Info("Initializing lotus miner using a backup") - if cctx.Args().Len() != 1 { - return xerrors.Errorf("expected 1 argument") - } - - log.Info("Trying to connect to full node RPC") - - api, closer, err := lcli.GetFullNodeAPI(cctx) // TODO: consider storing full node address in config - if err != nil { - return err - } - defer closer() - - log.Info("Checking full node version") - ctx := lcli.ReqContext(cctx) + log.Info("Initializing lotus miner using a backup") - v, err := api.Version(ctx) - if err != nil { - return err - } - - if !v.APIVersion.EqMajorMinor(build.FullAPIVersion) { - return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", build.FullAPIVersion, v.APIVersion) - } + var storageCfg *stores.StorageConfig + if cctx.IsSet("storage-config") { + cf, err := homedir.Expand(cctx.String("storage-config")) + if err != nil { + return xerrors.Errorf("expanding storage config path: %w", err) + } - if !cctx.Bool("nosync") { - if err := lcli.SyncWait(ctx, api, false); err != nil { - return xerrors.Errorf("sync wait: %w", err) + cfb, err := ioutil.ReadFile(cf) + if err != nil { + return xerrors.Errorf("reading storage config: %w", err) } - } - bf, err := homedir.Expand(cctx.Args().First()) - if err != nil { - return xerrors.Errorf("expand backup file path: %w", err) + storageCfg = &stores.StorageConfig{} + err = json.Unmarshal(cfb, storageCfg) + if err != nil { + return xerrors.Errorf("cannot unmarshal json for storage config: %w", err) + } } - st, err := os.Stat(bf) - if err != nil { - return xerrors.Errorf("stat backup file (%s): %w", bf, err) - } + if err := restore(ctx, cctx, storageCfg, nil, func(api lapi.FullNode, maddr address.Address, peerid peer.ID, mi miner.MinerInfo) error { + log.Info("Checking proof parameters") - f, err := os.Open(bf) - if err != nil { - return xerrors.Errorf("opening backup file: %w", 
err) - } - defer f.Close() // nolint:errcheck + if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(mi.SectorSize)); err != nil { + return xerrors.Errorf("fetching proof parameters: %w", err) + } - log.Info("Checking if repo exists") + log.Info("Configuring miner actor") - repoPath := cctx.String(FlagMinerRepo) - r, err := repo.NewFS(repoPath) - if err != nil { - return err - } + if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil { + return err + } - ok, err := r.Exists() - if err != nil { + return nil + }); err != nil { return err } - if ok { - return xerrors.Errorf("repo at '%s' is already initialized", cctx.String(FlagMinerRepo)) - } - log.Info("Initializing repo") + return nil + }, +} - if err := r.Init(repo.StorageMiner); err != nil { - return err - } +func restore(ctx context.Context, cctx *cli.Context, strConfig *stores.StorageConfig, manageConfig func(*config.StorageMiner) error, after func(api lapi.FullNode, addr address.Address, peerid peer.ID, mi miner.MinerInfo) error) error { + if cctx.Args().Len() != 1 { + return xerrors.Errorf("expected 1 argument") + } - lr, err := r.Lock(repo.StorageMiner) - if err != nil { - return err - } - defer lr.Close() //nolint:errcheck + log.Info("Trying to connect to full node RPC") - if cctx.IsSet("config") { - log.Info("Restoring config") + api, closer, err := lcli.GetFullNodeAPIV1(cctx) // TODO: consider storing full node address in config + if err != nil { + return err + } + defer closer() - cf, err := homedir.Expand(cctx.String("config")) - if err != nil { - return xerrors.Errorf("expanding config path: %w", err) - } + log.Info("Checking full node version") - _, err = os.Stat(cf) - if err != nil { - return xerrors.Errorf("stat config file (%s): %w", cf, err) - } + v, err := api.Version(ctx) + if err != nil { + return err + } - var cerr error - err = lr.SetConfig(func(raw interface{}) { - rcfg, ok := raw.(*config.StorageMiner) - if !ok { - cerr = 
xerrors.New("expected miner config") - return - } - - ff, err := config.FromFile(cf, rcfg) - if err != nil { - cerr = xerrors.Errorf("loading config: %w", err) - return - } - - *rcfg = *ff.(*config.StorageMiner) - }) - if cerr != nil { - return cerr - } - if err != nil { - return xerrors.Errorf("setting config: %w", err) - } + if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion1) { + return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion1, v.APIVersion) + } - } else { - log.Warn("--config NOT SET, WILL USE DEFAULT VALUES") + if !cctx.Bool("nosync") { + if err := lcli.SyncWait(ctx, &v0api.WrapperV1Full{FullNode: api}, false); err != nil { + return xerrors.Errorf("sync wait: %w", err) + } + } + + bf, err := homedir.Expand(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("expand backup file path: %w", err) + } + + st, err := os.Stat(bf) + if err != nil { + return xerrors.Errorf("stat backup file (%s): %w", bf, err) + } + + f, err := os.Open(bf) + if err != nil { + return xerrors.Errorf("opening backup file: %w", err) + } + defer f.Close() // nolint:errcheck + + log.Info("Checking if repo exists") + + repoPath := cctx.String(FlagMinerRepo) + r, err := repo.NewFS(repoPath) + if err != nil { + return err + } + + ok, err := r.Exists() + if err != nil { + return err + } + if ok { + return xerrors.Errorf("repo at '%s' is already initialized", cctx.String(FlagMinerRepo)) + } + + log.Info("Initializing repo") + + if err := r.Init(repo.StorageMiner); err != nil { + return err + } + + lr, err := r.Lock(repo.StorageMiner) + if err != nil { + return err + } + defer lr.Close() //nolint:errcheck + + if cctx.IsSet("config") { + log.Info("Restoring config") + + cf, err := homedir.Expand(cctx.String("config")) + if err != nil { + return xerrors.Errorf("expanding config path: %w", err) } - if cctx.IsSet("storage-config") { - log.Info("Restoring storage path config") + _, err = os.Stat(cf) + if err != nil { + return 
xerrors.Errorf("stat config file (%s): %w", cf, err) + } - cf, err := homedir.Expand(cctx.String("storage-config")) - if err != nil { - return xerrors.Errorf("expanding storage config path: %w", err) + var cerr error + err = lr.SetConfig(func(raw interface{}) { + rcfg, ok := raw.(*config.StorageMiner) + if !ok { + cerr = xerrors.New("expected miner config") + return } - cfb, err := ioutil.ReadFile(cf) + ff, err := config.FromFile(cf, rcfg) if err != nil { - return xerrors.Errorf("reading storage config: %w", err) + cerr = xerrors.Errorf("loading config: %w", err) + return } - var cerr error - err = lr.SetStorage(func(scfg *stores.StorageConfig) { - cerr = json.Unmarshal(cfb, scfg) - }) - if cerr != nil { - return xerrors.Errorf("unmarshalling storage config: %w", cerr) - } - if err != nil { - return xerrors.Errorf("setting storage config: %w", err) + *rcfg = *ff.(*config.StorageMiner) + if manageConfig != nil { + cerr = manageConfig(rcfg) } - } else { - log.Warn("--storage-config NOT SET. NO SECTOR PATHS WILL BE CONFIGURED") + }) + if cerr != nil { + return cerr } - - log.Info("Restoring metadata backup") - - mds, err := lr.Datastore("/metadata") if err != nil { - return err + return xerrors.Errorf("setting config: %w", err) } - bar := pb.New64(st.Size()) - br := bar.NewProxyReader(f) - bar.ShowTimeLeft = true - bar.ShowPercent = true - bar.ShowSpeed = true - bar.Units = pb.U_BYTES + } else { + log.Warn("--config NOT SET, WILL USE DEFAULT VALUES") + } - bar.Start() - err = backupds.RestoreInto(br, mds) - bar.Finish() + if strConfig != nil { + log.Info("Restoring storage path config") + err = lr.SetStorage(func(scfg *stores.StorageConfig) { + *scfg = *strConfig + }) if err != nil { - return xerrors.Errorf("restoring metadata: %w", err) + return xerrors.Errorf("setting storage config: %w", err) } + } else { + log.Warn("--storage-config NOT SET. 
NO SECTOR PATHS WILL BE CONFIGURED") + } - log.Info("Checking actor metadata") + log.Info("Restoring metadata backup") - abytes, err := mds.Get(datastore.NewKey("miner-address")) - if err != nil { - return xerrors.Errorf("getting actor address from metadata datastore: %w", err) - } + mds, err := lr.Datastore(context.TODO(), "/metadata") + if err != nil { + return err + } - maddr, err := address.NewFromBytes(abytes) - if err != nil { - return xerrors.Errorf("parsing actor address: %w", err) - } + bar := pb.New64(st.Size()) + br := bar.NewProxyReader(f) + bar.ShowTimeLeft = true + bar.ShowPercent = true + bar.ShowSpeed = true + bar.Units = pb.U_BYTES - log.Info("ACTOR ADDRESS: ", maddr.String()) + bar.Start() + err = backupds.RestoreInto(br, mds) + bar.Finish() - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } + if err != nil { + return xerrors.Errorf("restoring metadata: %w", err) + } - log.Info("SECTOR SIZE: ", units.BytesSize(float64(mi.SectorSize))) + log.Info("Checking actor metadata") - wk, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("resolving worker key: %w", err) - } + abytes, err := mds.Get(datastore.NewKey("miner-address")) + if err != nil { + return xerrors.Errorf("getting actor address from metadata datastore: %w", err) + } - has, err := api.WalletHas(ctx, wk) - if err != nil { - return xerrors.Errorf("checking worker address: %w", err) - } + maddr, err := address.NewFromBytes(abytes) + if err != nil { + return xerrors.Errorf("parsing actor address: %w", err) + } - if !has { - return xerrors.Errorf("worker address %s for miner actor %s not present in full node wallet", mi.Worker, maddr) - } + log.Info("ACTOR ADDRESS: ", maddr.String()) - log.Info("Checking proof parameters") + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting miner info: %w", err) + } - 
if err := paramfetch.GetParams(ctx, build.ParametersJSON(), uint64(mi.SectorSize)); err != nil { - return xerrors.Errorf("fetching proof parameters: %w", err) - } + log.Info("SECTOR SIZE: ", units.BytesSize(float64(mi.SectorSize))) - log.Info("Initializing libp2p identity") + wk, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("resolving worker key: %w", err) + } - p2pSk, err := makeHostKey(lr) - if err != nil { - return xerrors.Errorf("make host key: %w", err) - } + has, err := api.WalletHas(ctx, wk) + if err != nil { + return xerrors.Errorf("checking worker address: %w", err) + } - peerid, err := peer.IDFromPrivateKey(p2pSk) - if err != nil { - return xerrors.Errorf("peer ID from private key: %w", err) - } + if !has { + return xerrors.Errorf("worker address %s for miner actor %s not present in full node wallet", mi.Worker, maddr) + } - log.Info("Configuring miner actor") + log.Info("Initializing libp2p identity") - if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil { - return err - } + p2pSk, err := makeHostKey(lr) + if err != nil { + return xerrors.Errorf("make host key: %w", err) + } - return nil - }, + peerid, err := peer.IDFromPrivateKey(p2pSk) + if err != nil { + return xerrors.Errorf("peer ID from private key: %w", err) + } + + return after(api, maddr, peerid, mi) } diff --git a/cmd/lotus-storage-miner/init_service.go b/cmd/lotus-storage-miner/init_service.go new file mode 100644 index 00000000000..ad803a83040 --- /dev/null +++ b/cmd/lotus-storage-miner/init_service.go @@ -0,0 +1,152 @@ +package main + +import ( + "context" + "strings" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/client" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + lcli "github.com/filecoin-project/lotus/cli" + cliutil 
"github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/node/config" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +const ( + MarketsService = "markets" +) + +var serviceCmd = &cli.Command{ + Name: "service", + Usage: "Initialize a lotus miner sub-service", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "config", + Usage: "config file (config.toml)", + Required: true, + }, + &cli.BoolFlag{ + Name: "nosync", + Usage: "don't check full-node sync status", + }, + &cli.StringSliceFlag{ + Name: "type", + Usage: "type of service to be enabled", + }, + &cli.StringFlag{ + Name: "api-sealer", + Usage: "sealer API info (lotus-miner auth api-info --perm=admin)", + }, + &cli.StringFlag{ + Name: "api-sector-index", + Usage: "sector Index API info (lotus-miner auth api-info --perm=admin)", + }, + }, + ArgsUsage: "[backupFile]", + Action: func(cctx *cli.Context) error { + ctx := lcli.ReqContext(cctx) + log.Info("Initializing lotus miner service") + + es := EnabledServices(cctx.StringSlice("type")) + + if len(es) == 0 { + return xerrors.Errorf("at least one module must be enabled") + } + + // we should remove this as soon as we have more service types and not just `markets` + if !es.Contains(MarketsService) { + return xerrors.Errorf("markets module must be enabled") + } + + if !cctx.IsSet("api-sealer") { + return xerrors.Errorf("--api-sealer is required without the sealer module enabled") + } + if !cctx.IsSet("api-sector-index") { + return xerrors.Errorf("--api-sector-index is required without the sector storage module enabled") + } + + if err := restore(ctx, cctx, &stores.StorageConfig{}, func(cfg *config.StorageMiner) error { + cfg.Subsystems.EnableMarkets = es.Contains(MarketsService) + cfg.Subsystems.EnableMining = false + cfg.Subsystems.EnableSealing = false + cfg.Subsystems.EnableSectorStorage = false + + if 
!cfg.Subsystems.EnableSealing { + ai, err := checkApiInfo(ctx, cctx.String("api-sealer")) + if err != nil { + return xerrors.Errorf("checking sealer API: %w", err) + } + cfg.Subsystems.SealerApiInfo = ai + } + + if !cfg.Subsystems.EnableSectorStorage { + ai, err := checkApiInfo(ctx, cctx.String("api-sector-index")) + if err != nil { + return xerrors.Errorf("checking sector index API: %w", err) + } + cfg.Subsystems.SectorIndexApiInfo = ai + } + + return nil + }, func(api lapi.FullNode, maddr address.Address, peerid peer.ID, mi miner.MinerInfo) error { + if es.Contains(MarketsService) { + log.Info("Configuring miner actor") + + if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil { + return err + } + } + + return nil + }); err != nil { + return err + } + + return nil + }, +} + +type EnabledServices []string + +func (es EnabledServices) Contains(name string) bool { + for _, s := range es { + if s == name { + return true + } + } + return false +} + +func checkApiInfo(ctx context.Context, ai string) (string, error) { + ai = strings.TrimPrefix(strings.TrimSpace(ai), "MINER_API_INFO=") + info := cliutil.ParseApiInfo(ai) + addr, err := info.DialArgs("v0") + if err != nil { + return "", xerrors.Errorf("could not get DialArgs: %w", err) + } + + log.Infof("Checking api version of %s", addr) + + api, closer, err := client.NewStorageMinerRPCV0(ctx, addr, info.AuthHeader()) + if err != nil { + return "", err + } + defer closer() + + v, err := api.Version(ctx) + if err != nil { + return "", xerrors.Errorf("checking version: %w", err) + } + + if !v.APIVersion.EqMajorMinor(lapi.MinerAPIVersion0) { + return "", xerrors.Errorf("remote service API version didn't match (expected %s, remote %s)", lapi.MinerAPIVersion0, v.APIVersion) + } + + return ai, nil +} diff --git a/cmd/lotus-storage-miner/main.go b/cmd/lotus-storage-miner/main.go index 671f75cf0fc..c555531d66a 100644 --- a/cmd/lotus-storage-miner/main.go +++ b/cmd/lotus-storage-miner/main.go @@ -4,6 
+4,7 @@ import ( "context" "fmt" + "github.com/fatih/color" logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" "go.opencensus.io/trace" @@ -26,7 +27,7 @@ const FlagMinerRepo = "miner-repo" const FlagMinerRepoDeprecation = "storagerepo" func main() { - build.RunningNodeType = build.NodeMiner + api.RunningNodeType = api.NodeMiner lotuslog.SetupLogLevels() @@ -61,9 +62,14 @@ func main() { trace.UnregisterExporter(jaeger) jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name) + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + if originBefore != nil { return originBefore(cctx) } + return nil } } @@ -81,7 +87,10 @@ func main() { Aliases: []string{"a"}, }, &cli.BoolFlag{ - Name: "color", + // examined in the Before above + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, &cli.StringFlag{ Name: "repo", @@ -106,15 +115,21 @@ func main() { lcli.RunApp(app) } -func getActorAddress(ctx context.Context, nodeAPI api.StorageMiner, overrideMaddr string) (maddr address.Address, err error) { - if overrideMaddr != "" { - maddr, err = address.NewFromString(overrideMaddr) +func getActorAddress(ctx context.Context, cctx *cli.Context) (maddr address.Address, err error) { + if cctx.IsSet("actor") { + maddr, err = address.NewFromString(cctx.String("actor")) if err != nil { return maddr, err } return } + nodeAPI, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return address.Undef, err + } + defer closer() + maddr, err = nodeAPI.ActorAddress(ctx) if err != nil { return maddr, xerrors.Errorf("getting actor address: %w", err) diff --git a/cmd/lotus-storage-miner/market.go b/cmd/lotus-storage-miner/market.go index be4a529e982..b216d24fcd4 100644 --- a/cmd/lotus-storage-miner/market.go +++ b/cmd/lotus-storage-miner/market.go @@ -2,6 +2,7 @@ package main import ( "bufio" + "context" "errors" "fmt" "io" @@ -14,7 +15,7 @@ import ( tm "github.com/buger/goterm" "github.com/docker/go-units" - 
datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/fatih/color" "github.com/ipfs/go-cid" "github.com/ipfs/go-cidutil/cidenc" "github.com/libp2p/go-libp2p-core/peer" @@ -22,6 +23,8 @@ import ( "github.com/urfave/cli/v2" "golang.org/x/xerrors" + cborutil "github.com/filecoin-project/go-cbor-util" + datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" @@ -113,6 +116,16 @@ var storageDealSelectionResetCmd = &cli.Command{ return err } + err = smapi.DealsSetConsiderVerifiedStorageDeals(lcli.DaemonContext(cctx), true) + if err != nil { + return err + } + + err = smapi.DealsSetConsiderUnverifiedStorageDeals(lcli.DaemonContext(cctx), true) + if err != nil { + return err + } + return nil }, } @@ -127,6 +140,12 @@ var storageDealSelectionRejectCmd = &cli.Command{ &cli.BoolFlag{ Name: "offline", }, + &cli.BoolFlag{ + Name: "verified", + }, + &cli.BoolFlag{ + Name: "unverified", + }, }, Action: func(cctx *cli.Context) error { smapi, closer, err := lcli.GetStorageMinerAPI(cctx) @@ -149,6 +168,20 @@ var storageDealSelectionRejectCmd = &cli.Command{ } } + if cctx.Bool("verified") { + err = smapi.DealsSetConsiderVerifiedStorageDeals(lcli.DaemonContext(cctx), false) + if err != nil { + return err + } + } + + if cctx.Bool("unverified") { + err = smapi.DealsSetConsiderUnverifiedStorageDeals(lcli.DaemonContext(cctx), false) + if err != nil { + return err + } + } + return nil }, } @@ -310,6 +343,7 @@ var storageDealsCmd = &cli.Command{ getBlocklistCmd, resetBlocklistCmd, setSealDurationCmd, + dealsPendingPublish, }, } @@ -420,7 +454,7 @@ func outputStorageDeals(out io.Writer, deals []storagemarket.MinerDeal, verbose w := tabwriter.NewWriter(out, 2, 4, 2, ' ', 0) if verbose { - _, _ = fmt.Fprintf(w, "Creation\tProposalCid\tDealId\tState\tClient\tSize\tPrice\tDuration\tMessage\n") + _, _ = fmt.Fprintf(w, 
"Creation\tVerified\tProposalCid\tDealId\tState\tClient\tSize\tPrice\tDuration\tTransferChannelID\tMessage\n") } else { _, _ = fmt.Fprintf(w, "ProposalCid\tDealId\tState\tClient\tSize\tPrice\tDuration\n") } @@ -434,11 +468,16 @@ func outputStorageDeals(out io.Writer, deals []storagemarket.MinerDeal, verbose fil := types.FIL(types.BigMul(deal.Proposal.StoragePricePerEpoch, types.NewInt(uint64(deal.Proposal.Duration())))) if verbose { - _, _ = fmt.Fprintf(w, "%s\t", deal.CreationTime.Time().Format(time.Stamp)) + _, _ = fmt.Fprintf(w, "%s\t%t\t", deal.CreationTime.Time().Format(time.Stamp), deal.Proposal.VerifiedDeal) } _, _ = fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\t%s", propcid, deal.DealID, storagemarket.DealStates[deal.State], deal.Proposal.Client, units.BytesSize(float64(deal.Proposal.PieceSize)), fil, deal.Proposal.Duration()) if verbose { + tchid := "" + if deal.TransferChannelId != nil { + tchid = deal.TransferChannelId.String() + } + _, _ = fmt.Fprintf(w, "\t%s", tchid) _, _ = fmt.Fprintf(w, "\t%s", deal.Message) } @@ -650,6 +689,11 @@ var marketCancelTransfer = &cli.Command{ Usage: "specify only transfers where peer is/is not initiator", Value: false, }, + &cli.DurationFlag{ + Name: "cancel-timeout", + Usage: "time to wait for cancel to be sent to client", + Value: 5 * time.Second, + }, }, Action: func(cctx *cli.Context) error { if !cctx.Args().Present() { @@ -693,7 +737,9 @@ var marketCancelTransfer = &cli.Command{ } } - return nodeApi.MarketCancelDataTransfer(ctx, transferID, other, initiator) + timeoutCtx, cancel := context.WithTimeout(ctx, cctx.Duration("cancel-timeout")) + defer cancel() + return nodeApi.MarketCancelDataTransfer(timeoutCtx, transferID, other, initiator) }, } @@ -702,9 +748,14 @@ var transfersListCmd = &cli.Command{ Usage: "List ongoing data transfers for this miner", Flags: []cli.Flag{ &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - Value: true, + Name: "verbose", + Aliases: []string{"v"}, + Usage: "print 
verbose transfer details", + }, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, &cli.BoolFlag{ Name: "completed", @@ -720,6 +771,10 @@ var transfersListCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + api, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { return err @@ -732,8 +787,8 @@ var transfersListCmd = &cli.Command{ return err } + verbose := cctx.Bool("verbose") completed := cctx.Bool("completed") - color := cctx.Bool("color") watch := cctx.Bool("watch") showFailed := cctx.Bool("show-failed") if watch { @@ -747,7 +802,7 @@ var transfersListCmd = &cli.Command{ tm.MoveCursor(1, 1) - lcli.OutputDataTransferChannels(tm.Screen, channels, completed, color, showFailed) + lcli.OutputDataTransferChannels(tm.Screen, channels, verbose, completed, showFailed) tm.Flush() @@ -772,7 +827,61 @@ var transfersListCmd = &cli.Command{ } } } - lcli.OutputDataTransferChannels(os.Stdout, channels, completed, color, showFailed) + lcli.OutputDataTransferChannels(os.Stdout, channels, verbose, completed, showFailed) + return nil + }, +} + +var dealsPendingPublish = &cli.Command{ + Name: "pending-publish", + Usage: "list deals waiting in publish queue", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "publish-now", + Usage: "send a publish message now", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + if cctx.Bool("publish-now") { + if err := api.MarketPublishPendingDeals(ctx); err != nil { + return xerrors.Errorf("publishing deals: %w", err) + } + fmt.Println("triggered deal publishing") + return nil + } + + pending, err := api.MarketPendingDeals(ctx) + if err != nil { + return xerrors.Errorf("getting pending deals: %w", err) + } + + if len(pending.Deals) > 0 { + endsIn := 
pending.PublishPeriodStart.Add(pending.PublishPeriod).Sub(time.Now()) + w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) + _, _ = fmt.Fprintf(w, "Publish period: %s (ends in %s)\n", pending.PublishPeriod, endsIn.Round(time.Second)) + _, _ = fmt.Fprintf(w, "First deal queued at: %s\n", pending.PublishPeriodStart) + _, _ = fmt.Fprintf(w, "Deals will be published at: %s\n", pending.PublishPeriodStart.Add(pending.PublishPeriod)) + _, _ = fmt.Fprintf(w, "%d deals queued to be published:\n", len(pending.Deals)) + _, _ = fmt.Fprintf(w, "ProposalCID\tClient\tSize\n") + for _, deal := range pending.Deals { + proposalNd, err := cborutil.AsIpld(&deal) // nolint + if err != nil { + return err + } + + _, _ = fmt.Fprintf(w, "%s\t%s\t%s\n", proposalNd.Cid(), deal.Proposal.Client, units.BytesSize(float64(deal.Proposal.PieceSize))) + } + return w.Flush() + } + + fmt.Println("No deals queued to be published") return nil }, } diff --git a/cmd/lotus-storage-miner/proving.go b/cmd/lotus-storage-miner/proving.go index 377b81d328f..5dfe5d4ceda 100644 --- a/cmd/lotus-storage-miner/proving.go +++ b/cmd/lotus-storage-miner/proving.go @@ -10,11 +10,14 @@ import ( "github.com/urfave/cli/v2" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/specs-storage/storage" ) var provingCmd = &cli.Command{ @@ -25,6 +28,7 @@ var provingCmd = &cli.Command{ provingDeadlinesCmd, provingDeadlineInfoCmd, provingFaultsCmd, + provingCheckProvableCmd, }, } @@ -32,14 +36,6 @@ var provingFaultsCmd = &cli.Command{ Name: "faults", Usage: "View the currently known proving faulty sectors information", Action: func(cctx 
*cli.Context) error { - color.NoColor = !cctx.Bool("color") - - nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - api, acloser, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err @@ -48,9 +44,9 @@ var provingFaultsCmd = &cli.Command{ ctx := lcli.ReqContext(cctx) - stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(api)) + stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(api)) - maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor")) + maddr, err := getActorAddress(ctx, cctx) if err != nil { return err } @@ -92,14 +88,6 @@ var provingInfoCmd = &cli.Command{ Name: "info", Usage: "View current state information", Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") - - nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - api, acloser, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err @@ -108,7 +96,7 @@ var provingInfoCmd = &cli.Command{ ctx := lcli.ReqContext(cctx) - maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor")) + maddr, err := getActorAddress(ctx, cctx) if err != nil { return err } @@ -123,7 +111,7 @@ var provingInfoCmd = &cli.Command{ return err } - stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(api)) + stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(api)) mas, err := miner.Load(stor, mact) if err != nil { @@ -179,7 +167,7 @@ var provingInfoCmd = &cli.Command{ var faultPerc float64 if proving > 0 { - faultPerc = float64(faults*10000/proving) / 100 + faultPerc = float64(faults * 100 / proving) } fmt.Printf("Current Epoch: %d\n", cd.CurrentEpoch) @@ -205,14 +193,6 @@ var provingDeadlinesCmd = &cli.Command{ Name: "deadlines", Usage: "View the current proving period deadlines information", Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") - - nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return 
err - } - defer closer() - api, acloser, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err @@ -221,7 +201,7 @@ var provingDeadlinesCmd = &cli.Command{ ctx := lcli.ReqContext(cctx) - maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor")) + maddr, err := getActorAddress(ctx, cctx) if err != nil { return err } @@ -297,12 +277,6 @@ var provingDeadlineInfoCmd = &cli.Command{ return xerrors.Errorf("could not parse deadline index: %w", err) } - nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - api, acloser, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err @@ -311,7 +285,7 @@ var provingDeadlineInfoCmd = &cli.Command{ ctx := lcli.ReqContext(cctx) - maddr, err := getActorAddress(ctx, nodeApi, cctx.String("actor")) + maddr, err := getActorAddress(ctx, cctx) if err != nil { return err } @@ -371,3 +345,103 @@ var provingDeadlineInfoCmd = &cli.Command{ return nil }, } + +var provingCheckProvableCmd = &cli.Command{ + Name: "check", + Usage: "Check sectors provable", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "only-bad", + Usage: "print only bad sectors", + Value: false, + }, + &cli.BoolFlag{ + Name: "slow", + Usage: "run slower checks", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 1 { + return xerrors.Errorf("must pass deadline index") + } + + dlIdx, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64) + if err != nil { + return xerrors.Errorf("could not parse deadline index: %w", err) + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + sapi, scloser, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer scloser() + + ctx := lcli.ReqContext(cctx) + + addr, err := sapi.ActorAddress(ctx) + if err != nil { + return err + } + + mid, err := address.IDFromAddress(addr) + if err != nil { + return err + } + + info, err := api.StateMinerInfo(ctx, addr, 
types.EmptyTSK) + if err != nil { + return err + } + + partitions, err := api.StateMinerPartitions(ctx, addr, dlIdx, types.EmptyTSK) + if err != nil { + return err + } + + tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) + _, _ = fmt.Fprintln(tw, "deadline\tpartition\tsector\tstatus") + + for parIdx, par := range partitions { + sectors := make(map[abi.SectorNumber]struct{}) + + sectorInfos, err := api.StateMinerSectors(ctx, addr, &par.LiveSectors, types.EmptyTSK) + if err != nil { + return err + } + + var tocheck []storage.SectorRef + for _, info := range sectorInfos { + sectors[info.SectorNumber] = struct{}{} + tocheck = append(tocheck, storage.SectorRef{ + ProofType: info.SealProof, + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: info.SectorNumber, + }, + }) + } + + bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, cctx.Bool("slow")) + if err != nil { + return err + } + + for s := range sectors { + if err, exist := bad[s]; exist { + _, _ = fmt.Fprintf(tw, "%d\t%d\t%d\t%s\n", dlIdx, parIdx, s, color.RedString("bad")+fmt.Sprintf(" (%s)", err)) + } else if !cctx.Bool("only-bad") { + _, _ = fmt.Fprintf(tw, "%d\t%d\t%d\t%s\n", dlIdx, parIdx, s, color.GreenString("good")) + } + } + } + + return tw.Flush() + }, +} diff --git a/cmd/lotus-storage-miner/retrieval-deals.go b/cmd/lotus-storage-miner/retrieval-deals.go index 03d397852d8..0411f7f130a 100644 --- a/cmd/lotus-storage-miner/retrieval-deals.go +++ b/cmd/lotus-storage-miner/retrieval-deals.go @@ -235,7 +235,7 @@ var retrievalSetAskCmd = &cli.Command{ var retrievalGetAskCmd = &cli.Command{ Name: "get-ask", - Usage: "Get the provider's current retrieval ask", + Usage: "Get the provider's current retrieval ask configured by the provider in the ask-store using the set-ask CLI command", Flags: []cli.Flag{}, Action: func(cctx *cli.Context) error { ctx := lcli.DaemonContext(cctx) diff --git a/cmd/lotus-storage-miner/run.go b/cmd/lotus-storage-miner/run.go index 0c2fba8b387..f276f319c9b 
100644 --- a/cmd/lotus-storage-miner/run.go +++ b/cmd/lotus-storage-miner/run.go @@ -1,33 +1,28 @@ package main import ( - "context" - "net" - "net/http" + "fmt" _ "net/http/pprof" "os" - "os/signal" - "syscall" - mux "github.com/gorilla/mux" + "github.com/filecoin-project/lotus/api/v1api" + + "github.com/filecoin-project/lotus/api/v0api" + "github.com/multiformats/go-multiaddr" - manet "github.com/multiformats/go-multiaddr/net" "github.com/urfave/cli/v2" + "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" "golang.org/x/xerrors" - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apistruct" "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/ulimit" "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/impl" + "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo" ) @@ -63,19 +58,29 @@ var runCmd = &cli.Command{ } } - nodeApi, ncloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return xerrors.Errorf("getting full node api: %w", err) - } - defer ncloser() - ctx := lcli.DaemonContext(cctx) - + ctx, _ := tag.New(lcli.DaemonContext(cctx), + tag.Insert(metrics.Version, build.BuildVersion), + tag.Insert(metrics.Commit, build.CurrentCommit), + tag.Insert(metrics.NodeType, "miner"), + ) // Register all metric views if err := view.Register( - metrics.DefaultViews..., + metrics.MinerNodeViews..., ); err != nil { log.Fatalf("Cannot register the view: %v", err) } + // Set the metric to one so it is published to the exporter + stats.Record(ctx, metrics.LotusInfo.M(1)) + + if err := checkV1ApiSupport(ctx, cctx); err != nil { + return err + } + + nodeApi, ncloser, err := 
lcli.GetFullNodeAPIV1(cctx) + if err != nil { + return xerrors.Errorf("getting full node api: %w", err) + } + defer ncloser() v, err := nodeApi.Version(ctx) if err != nil { @@ -88,14 +93,14 @@ var runCmd = &cli.Command{ } } - if v.APIVersion != build.FullAPIVersion { - return xerrors.Errorf("lotus-daemon API version doesn't match: expected: %s", api.Version{APIVersion: build.FullAPIVersion}) + if v.APIVersion != api.FullAPIVersion1 { + return xerrors.Errorf("lotus-daemon API version doesn't match: expected: %s", api.APIVersion{APIVersion: api.FullAPIVersion1}) } log.Info("Checking full node sync status") if !cctx.Bool("nosync") { - if err := lcli.SyncWait(ctx, nodeApi, false); err != nil { + if err := lcli.SyncWait(ctx, &v0api.WrapperV1Full{FullNode: nodeApi}, false); err != nil { return xerrors.Errorf("sync wait: %w", err) } } @@ -114,20 +119,40 @@ var runCmd = &cli.Command{ return xerrors.Errorf("repo at '%s' is not initialized, run 'lotus-miner init' to set it up", minerRepoPath) } + lr, err := r.Lock(repo.StorageMiner) + if err != nil { + return err + } + c, err := lr.Config() + if err != nil { + return err + } + cfg, ok := c.(*config.StorageMiner) + if !ok { + return xerrors.Errorf("invalid config for repo, got: %T", c) + } + + bootstrapLibP2P := cfg.Subsystems.EnableMarkets + + err = lr.Close() + if err != nil { + return err + } + shutdownChan := make(chan struct{}) var minerapi api.StorageMiner stop, err := node.New(ctx, - node.StorageMiner(&minerapi), + node.StorageMiner(&minerapi, cfg.Subsystems), node.Override(new(dtypes.ShutdownChan), shutdownChan), - node.Online(), + node.Base(), node.Repo(r), node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("miner-api") }, node.Override(new(dtypes.APIEndpoint), func() (dtypes.APIEndpoint, error) { return multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/" + cctx.String("miner-api")) })), - node.Override(new(api.FullNode), nodeApi), + node.Override(new(v1api.FullNode), nodeApi), ) if err != nil { return 
xerrors.Errorf("creating node: %w", err) @@ -138,65 +163,41 @@ var runCmd = &cli.Command{ return xerrors.Errorf("getting API endpoint: %w", err) } - // Bootstrap with full node - remoteAddrs, err := nodeApi.NetAddrsListen(ctx) - if err != nil { - return xerrors.Errorf("getting full node libp2p address: %w", err) - } + if bootstrapLibP2P { + log.Infof("Bootstrapping libp2p network with full node") + + // Bootstrap with full node + remoteAddrs, err := nodeApi.NetAddrsListen(ctx) + if err != nil { + return xerrors.Errorf("getting full node libp2p address: %w", err) + } - if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil { - return xerrors.Errorf("connecting to full node (libp2p): %w", err) + if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil { + return xerrors.Errorf("connecting to full node (libp2p): %w", err) + } } log.Infof("Remote version %s", v) - lst, err := manet.Listen(endpoint) + // Instantiate the miner node handler. + handler, err := node.MinerHandler(minerapi, true) if err != nil { - return xerrors.Errorf("could not listen: %w", err) - } - - mux := mux.NewRouter() - - rpcServer := jsonrpc.NewServer() - rpcServer.Register("Filecoin", apistruct.PermissionedStorMinerAPI(metrics.MetricedStorMinerAPI(minerapi))) - - mux.Handle("/rpc/v0", rpcServer) - mux.PathPrefix("/remote").HandlerFunc(minerapi.(*impl.StorageMinerAPI).ServeRemote) - mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof - - ah := &auth.Handler{ - Verify: minerapi.AuthVerify, - Next: mux.ServeHTTP, + return xerrors.Errorf("failed to instantiate rpc handler: %w", err) } - srv := &http.Server{ - Handler: ah, - BaseContext: func(listener net.Listener) context.Context { - ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-miner")) - return ctx - }, + // Serve the RPC. 
+ rpcStopper, err := node.ServeRPC(handler, "lotus-miner", endpoint) + if err != nil { + return fmt.Errorf("failed to start json-rpc endpoint: %s", err) } - sigChan := make(chan os.Signal, 2) - go func() { - select { - case sig := <-sigChan: - log.Warnw("received shutdown", "signal", sig) - case <-shutdownChan: - log.Warn("received shutdown") - } - - log.Warn("Shutting down...") - if err := stop(context.TODO()); err != nil { - log.Errorf("graceful shutting down failed: %s", err) - } - if err := srv.Shutdown(context.TODO()); err != nil { - log.Errorf("shutting down RPC server failed: %s", err) - } - log.Warn("Graceful shutdown successful") - }() - signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT) + // Monitor for shutdown. + finishCh := node.MonitorShutdown(shutdownChan, + node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper}, + node.ShutdownHandler{Component: "miner", StopFunc: stop}, + ) - return srv.Serve(manet.NetListener(lst)) + <-finishCh + return nil }, } diff --git a/cmd/lotus-storage-miner/sealing.go b/cmd/lotus-storage-miner/sealing.go index 440d4aaea76..3bf4c675fd7 100644 --- a/cmd/lotus-storage-miner/sealing.go +++ b/cmd/lotus-storage-miner/sealing.go @@ -28,6 +28,7 @@ var sealingCmd = &cli.Command{ sealingJobsCmd, sealingWorkersCmd, sealingSchedDiagCmd, + sealingAbortCmd, }, } @@ -35,10 +36,16 @@ var sealingWorkersCmd = &cli.Command{ Name: "workers", Usage: "list workers", Flags: []cli.Flag{ - &cli.BoolFlag{Name: "color"}, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + }, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -124,12 +131,22 @@ var sealingWorkersCmd = &cli.Command{ var sealingJobsCmd = &cli.Command{ Name: "jobs", - Usage: "list workers", + Usage: "list running jobs", Flags: 
[]cli.Flag{ - &cli.BoolFlag{Name: "color"}, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + }, + &cli.BoolFlag{ + Name: "show-ret-done", + Usage: "show returned but not consumed calls", + }, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -187,10 +204,17 @@ var sealingJobsCmd = &cli.Command{ for _, l := range lines { state := "running" - if l.RunWait > 0 { + switch { + case l.RunWait > 0: state = fmt.Sprintf("assigned(%d)", l.RunWait-1) - } - if l.RunWait == -1 { + case l.RunWait == storiface.RWRetDone: + if !cctx.Bool("show-ret-done") { + continue + } + state = "ret-done" + case l.RunWait == storiface.RWReturned: + state = "returned" + case l.RunWait == storiface.RWRetWait: state = "ret-wait" } dur := "n/a" @@ -198,11 +222,16 @@ var sealingJobsCmd = &cli.Command{ dur = time.Now().Sub(l.Start).Truncate(time.Millisecond * 100).String() } + hostname, ok := workerHostnames[l.wid] + if !ok { + hostname = l.Hostname + } + _, _ = fmt.Fprintf(tw, "%s\t%d\t%s\t%s\t%s\t%s\t%s\n", - hex.EncodeToString(l.ID.ID[10:]), + hex.EncodeToString(l.ID.ID[:4]), l.Sector.Number, - hex.EncodeToString(l.wid[5:]), - workerHostnames[l.wid], + hex.EncodeToString(l.wid[:4]), + hostname, l.Task.Short(), state, dur) @@ -215,6 +244,11 @@ var sealingJobsCmd = &cli.Command{ var sealingSchedDiagCmd = &cli.Command{ Name: "sched-diag", Usage: "Dump internal scheduler state", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "force-sched", + }, + }, Action: func(cctx *cli.Context) error { nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -224,7 +258,7 @@ var sealingSchedDiagCmd = &cli.Command{ ctx := lcli.ReqContext(cctx) - st, err := nodeApi.SealingSchedDiag(ctx) + st, err := nodeApi.SealingSchedDiag(ctx, cctx.Bool("force-sched")) if 
err != nil { return err } @@ -239,3 +273,47 @@ var sealingSchedDiagCmd = &cli.Command{ return nil }, } + +var sealingAbortCmd = &cli.Command{ + Name: "abort", + Usage: "Abort a running job", + ArgsUsage: "[callid]", + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 1 { + return xerrors.Errorf("expected 1 argument") + } + + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + jobs, err := nodeApi.WorkerJobs(ctx) + if err != nil { + return xerrors.Errorf("getting worker jobs: %w", err) + } + + var job *storiface.WorkerJob + outer: + for _, workerJobs := range jobs { + for _, j := range workerJobs { + if strings.HasPrefix(j.ID.ID.String(), cctx.Args().First()) { + j := j + job = &j + break outer + } + } + } + + if job == nil { + return xerrors.Errorf("job with specified id prefix not found") + } + + fmt.Printf("aborting job %s, task %s, sector %d, running on host %s\n", job.ID.String(), job.Task.Short(), job.Sector.Number, job.Hostname) + + return nodeApi.SealingAbort(ctx, job.ID) + }, +} diff --git a/cmd/lotus-storage-miner/sectors.go b/cmd/lotus-storage-miner/sectors.go index 967e2d413b7..5c4581bbc53 100644 --- a/cmd/lotus-storage-miner/sectors.go +++ b/cmd/lotus-storage-miner/sectors.go @@ -5,6 +5,7 @@ import ( "os" "sort" "strconv" + "strings" "time" "github.com/docker/go-units" @@ -12,10 +13,13 @@ import ( "github.com/urfave/cli/v2" "golang.org/x/xerrors" + "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" @@ -34,11 +38,14 @@ var sectorsCmd 
= &cli.Command{ sectorsRefsCmd, sectorsUpdateCmd, sectorsPledgeCmd, + sectorsExtendCmd, + sectorsTerminateCmd, sectorsRemoveCmd, sectorsMarkForUpgradeCmd, sectorsStartSealCmd, sectorsSealDelayCmd, sectorsCapacityCollateralCmd, + sectorsBatching, }, } @@ -53,7 +60,14 @@ var sectorsPledgeCmd = &cli.Command{ defer closer() ctx := lcli.ReqContext(cctx) - return nodeApi.PledgeSector(ctx) + id, err := nodeApi.PledgeSector(ctx) + if err != nil { + return err + } + + fmt.Println("Created CC sector: ", id.Number) + + return nil }, } @@ -147,17 +161,32 @@ var sectorsListCmd = &cli.Command{ Usage: "show removed sectors", }, &cli.BoolFlag{ - Name: "color", - Aliases: []string{"c"}, - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + Aliases: []string{"c"}, }, &cli.BoolFlag{ Name: "fast", Usage: "don't show on-chain info for better performance", }, + &cli.BoolFlag{ + Name: "events", + Usage: "display number of events the sector has received", + }, + &cli.BoolFlag{ + Name: "seal-time", + Usage: "display how long it took for the sector to be sealed", + }, + &cli.StringFlag{ + Name: "states", + Usage: "filter sectors by a comma-separated list of states", + }, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -173,7 +202,22 @@ var sectorsListCmd = &cli.Command{ ctx := lcli.ReqContext(cctx) - list, err := nodeApi.SectorsList(ctx) + var list []abi.SectorNumber + + showRemoved := cctx.Bool("show-removed") + states := cctx.String("states") + if len(states) == 0 { + list, err = nodeApi.SectorsList(ctx) + } else { + showRemoved = true + sList := strings.Split(states, ",") + ss := make([]api.SectorState, len(sList)) + for i := range sList { + ss[i] = api.SectorState(sList[i]) + } + list, err = nodeApi.SectorsListInStates(ctx, ss) + } + if err != nil 
{ return err } @@ -201,7 +245,7 @@ var sectorsListCmd = &cli.Command{ if err != nil { return err } - commitedIDs := make(map[abi.SectorNumber]struct{}, len(activeSet)) + commitedIDs := make(map[abi.SectorNumber]struct{}, len(sset)) for _, info := range sset { commitedIDs[info.SectorNumber] = struct{}{} } @@ -216,8 +260,11 @@ var sectorsListCmd = &cli.Command{ tablewriter.Col("OnChain"), tablewriter.Col("Active"), tablewriter.Col("Expiration"), + tablewriter.Col("SealTime"), + tablewriter.Col("Events"), tablewriter.Col("Deals"), tablewriter.Col("DealWeight"), + tablewriter.Col("VerifiedPower"), tablewriter.NewLineCol("Error"), tablewriter.NewLineCol("RecoveryTimeout")) @@ -233,13 +280,15 @@ var sectorsListCmd = &cli.Command{ continue } - if cctx.Bool("show-removed") || st.State != api.SectorState(sealing.Removed) { + if showRemoved || st.State != api.SectorState(sealing.Removed) { _, inSSet := commitedIDs[s] _, inASet := activeIDs[s] - dw := .0 + dw, vp := .0, .0 if st.Expiration-st.Activation > 0 { - dw = float64(big.Div(st.DealWeight, big.NewInt(int64(st.Expiration-st.Activation))).Uint64()) + rdw := big.Add(st.DealWeight, st.VerifiedDealWeight) + dw = float64(big.Div(rdw, big.NewInt(int64(st.Expiration-st.Activation))).Uint64()) + vp = float64(big.Div(big.Mul(st.VerifiedDealWeight, big.NewInt(9)), big.NewInt(int64(st.Expiration-st.Activation))).Uint64()) } var deals int @@ -278,6 +327,9 @@ var sectorsListCmd = &cli.Command{ if !fast && deals > 0 { m["DealWeight"] = units.BytesSize(dw) + if vp > 0 { + m["VerifiedPower"] = color.GreenString(units.BytesSize(vp)) + } } if st.Early > 0 { @@ -286,6 +338,52 @@ var sectorsListCmd = &cli.Command{ } } + if cctx.Bool("events") { + var events int + for _, sectorLog := range st.Log { + if !strings.HasPrefix(sectorLog.Kind, "event") { + continue + } + if sectorLog.Kind == "event;sealing.SectorRestart" { + continue + } + events++ + } + + pieces := len(st.Deals) + + switch { + case events < 12+pieces: + m["Events"] = 
color.GreenString("%d", events) + case events < 20+pieces: + m["Events"] = color.YellowString("%d", events) + default: + m["Events"] = color.RedString("%d", events) + } + } + + if cctx.Bool("seal-time") && len(st.Log) > 1 { + start := time.Unix(int64(st.Log[0].Timestamp), 0) + + for _, sectorLog := range st.Log { + if sectorLog.Kind == "event;sealing.SectorProving" { + end := time.Unix(int64(sectorLog.Timestamp), 0) + dur := end.Sub(start) + + switch { + case dur < 12*time.Hour: + m["SealTime"] = color.GreenString("%s", dur) + case dur < 24*time.Hour: + m["SealTime"] = color.YellowString("%s", dur) + default: + m["SealTime"] = color.RedString("%s", dur) + } + + break + } + } + } + tw.Write(m) } } @@ -320,9 +418,360 @@ var sectorsRefsCmd = &cli.Command{ }, } +var sectorsExtendCmd = &cli.Command{ + Name: "extend", + Usage: "Extend sector expiration", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "new-expiration", + Usage: "new expiration epoch", + Required: false, + }, + &cli.BoolFlag{ + Name: "v1-sectors", + Usage: "renews all v1 sectors up to the maximum possible lifetime", + Required: false, + }, + &cli.Int64Flag{ + Name: "tolerance", + Value: 20160, + Usage: "when extending v1 sectors, don't try to extend sectors by fewer than this number of epochs", + Required: false, + }, + &cli.Int64Flag{ + Name: "expiration-ignore", + Value: 120, + Usage: "when extending v1 sectors, skip sectors whose current expiration is less than epochs from now", + Required: false, + }, + &cli.Int64Flag{ + Name: "expiration-cutoff", + Usage: "when extending v1 sectors, skip sectors whose current expiration is more than epochs from now (infinity if unspecified)", + Required: false, + }, + &cli.StringFlag{}, + }, + Action: func(cctx *cli.Context) error { + + api, nCloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer nCloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := getActorAddress(ctx, cctx) + if err != nil { + return err + } + + 
var params []miner3.ExtendSectorExpirationParams + + if cctx.Bool("v1-sectors") { + + head, err := api.ChainHead(ctx) + if err != nil { + return err + } + + nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return err + } + + extensions := map[miner.SectorLocation]map[abi.ChainEpoch][]uint64{} + + // are given durations within tolerance epochs + withinTolerance := func(a, b abi.ChainEpoch) bool { + diff := a - b + if diff < 0 { + diff = b - a + } + + return diff <= abi.ChainEpoch(cctx.Int64("tolerance")) + } + + sis, err := api.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting miner sector infos: %w", err) + } + + for _, si := range sis { + if si.SealProof >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 { + continue + } + + if si.Expiration < (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-ignore"))) { + continue + } + + if cctx.IsSet("expiration-cutoff") { + if si.Expiration > (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-cutoff"))) { + continue + } + } + + ml := policy.GetSectorMaxLifetime(si.SealProof, nv) + // if the sector's missing less than "tolerance" of its maximum possible lifetime, don't bother extending it + if withinTolerance(si.Expiration-si.Activation, ml) { + continue + } + + // Set the new expiration to 48 hours less than the theoretical maximum lifetime + newExp := ml - (miner3.WPoStProvingPeriod * 2) + si.Activation + if withinTolerance(si.Expiration, newExp) || si.Expiration >= newExp { + continue + } + + p, err := api.StateSectorPartition(ctx, maddr, si.SectorNumber, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting sector location for sector %d: %w", si.SectorNumber, err) + } + + if p == nil { + return xerrors.Errorf("sector %d not found in any partition", si.SectorNumber) + } + + es, found := extensions[*p] + if !found { + ne := make(map[abi.ChainEpoch][]uint64) + ne[newExp] = []uint64{uint64(si.SectorNumber)} + extensions[*p] = ne + 
} else { + added := false + for exp := range es { + if withinTolerance(exp, newExp) && newExp >= exp && exp > si.Expiration { + es[exp] = append(es[exp], uint64(si.SectorNumber)) + added = true + break + } + } + + if !added { + es[newExp] = []uint64{uint64(si.SectorNumber)} + } + } + } + + p := miner3.ExtendSectorExpirationParams{} + scount := 0 + + for l, exts := range extensions { + for newExp, numbers := range exts { + scount += len(numbers) + if scount > policy.GetAddressedSectorsMax(nv) || len(p.Extensions) == policy.GetDeclarationsMax(nv) { + params = append(params, p) + p = miner3.ExtendSectorExpirationParams{} + scount = len(numbers) + } + + p.Extensions = append(p.Extensions, miner3.ExpirationExtension{ + Deadline: l.Deadline, + Partition: l.Partition, + Sectors: bitfield.NewFromSet(numbers), + NewExpiration: newExp, + }) + } + } + + // if we have any sectors, then one last append is needed here + if scount != 0 { + params = append(params, p) + } + + } else { + if !cctx.Args().Present() || !cctx.IsSet("new-expiration") { + return xerrors.Errorf("must pass at least one sector number and new expiration") + } + sectors := map[miner.SectorLocation][]uint64{} + + for i, s := range cctx.Args().Slice() { + id, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return xerrors.Errorf("could not parse sector %d: %w", i, err) + } + + p, err := api.StateSectorPartition(ctx, maddr, abi.SectorNumber(id), types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting sector location for sector %d: %w", id, err) + } + + if p == nil { + return xerrors.Errorf("sector %d not found in any partition", id) + } + + sectors[*p] = append(sectors[*p], id) + } + + p := miner3.ExtendSectorExpirationParams{} + for l, numbers := range sectors { + + // TODO: Dedup with above loop + p.Extensions = append(p.Extensions, miner3.ExpirationExtension{ + Deadline: l.Deadline, + Partition: l.Partition, + Sectors: bitfield.NewFromSet(numbers), + NewExpiration: 
abi.ChainEpoch(cctx.Int64("new-expiration")), + }) + } + + params = append(params, p) + } + + if len(params) == 0 { + fmt.Println("nothing to extend") + return nil + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting miner info: %w", err) + } + + for i := range params { + sp, aerr := actors.SerializeParams(¶ms[i]) + if aerr != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + From: mi.Worker, + To: maddr, + Method: miner.Methods.ExtendSectorExpiration, + + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push message: %w", err) + } + + fmt.Println(smsg.Cid()) + } + + return nil + }, +} + +var sectorsTerminateCmd = &cli.Command{ + Name: "terminate", + Usage: "Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector)", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "pass this flag if you know what you are doing", + }, + }, + Subcommands: []*cli.Command{ + sectorsTerminateFlushCmd, + sectorsTerminatePendingCmd, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Bool("really-do-it") { + return xerrors.Errorf("pass --really-do-it to confirm this action") + } + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + if cctx.Args().Len() != 1 { + return xerrors.Errorf("must pass sector number") + } + + id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64) + if err != nil { + return xerrors.Errorf("could not parse sector number: %w", err) + } + + return nodeApi.SectorTerminate(ctx, abi.SectorNumber(id)) + }, +} + +var sectorsTerminateFlushCmd = &cli.Command{ + Name: "flush", + Usage: "Send a terminate message if there are sectors queued for termination", + Action: func(cctx *cli.Context) error { + nodeApi, 
closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + mcid, err := nodeApi.SectorTerminateFlush(ctx) + if err != nil { + return err + } + + if mcid == nil { + return xerrors.New("no sectors were queued for termination") + } + + fmt.Println(mcid) + + return nil + }, +} + +var sectorsTerminatePendingCmd = &cli.Command{ + Name: "pending", + Usage: "List sector numbers of sectors pending termination", + Action: func(cctx *cli.Context) error { + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + api, nCloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer nCloser() + ctx := lcli.ReqContext(cctx) + + pending, err := nodeApi.SectorTerminatePending(ctx) + if err != nil { + return err + } + + maddr, err := nodeApi.ActorAddress(ctx) + if err != nil { + return err + } + + dl, err := api.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting proving deadline info failed: %w", err) + } + + for _, id := range pending { + loc, err := api.StateSectorPartition(ctx, maddr, id.Number, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("finding sector partition: %w", err) + } + + fmt.Print(id.Number) + + if loc.Deadline == (dl.Index+1)%miner.WPoStPeriodDeadlines || // not in next (in case the terminate message takes a while to get on chain) + loc.Deadline == dl.Index || // not in current + (loc.Deadline+1)%miner.WPoStPeriodDeadlines == dl.Index { // not in previous + fmt.Print(" (in proving window)") + } + fmt.Println() + } + + return nil + }, +} + var sectorsRemoveCmd = &cli.Command{ Name: "remove", - Usage: "Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector)", + Usage: "Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty))", ArgsUsage: 
"", Flags: []cli.Flag{ &cli.BoolFlag{ @@ -439,37 +888,58 @@ var sectorsCapacityCollateralCmd = &cli.Command{ }, Action: func(cctx *cli.Context) error { - mApi, mCloser, err := lcli.GetStorageMinerAPI(cctx) + nApi, nCloser, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err } - defer mCloser() + defer nCloser() - nApi, nCloser, err := lcli.GetFullNodeAPI(cctx) + ctx := lcli.ReqContext(cctx) + + maddr, err := getActorAddress(ctx, cctx) if err != nil { return err } - defer nCloser() - ctx := lcli.ReqContext(cctx) + mi, err := nApi.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } - maddr, err := mApi.ActorAddress(ctx) + nv, err := nApi.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return err + } + + spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, mi.WindowPoStProofType) if err != nil { return err } pci := miner.SectorPreCommitInfo{ + SealProof: spt, Expiration: abi.ChainEpoch(cctx.Uint64("expiration")), } if pci.Expiration == 0 { - pci.Expiration = policy.GetMaxSectorExpirationExtension() + h, err := nApi.ChainHead(ctx) + if err != nil { + return err + } + + pci.Expiration = policy.GetMaxSectorExpirationExtension() + h.Height() } + pc, err := nApi.StateMinerInitialPledgeCollateral(ctx, maddr, pci, types.EmptyTSK) if err != nil { return err } - fmt.Printf("Estimated collateral: %s\n", types.FIL(pc)) + pcd, err := nApi.StateMinerPreCommitDepositForPower(ctx, maddr, pci, types.EmptyTSK) + if err != nil { + return err + } + + fmt.Printf("Estimated collateral: %s\n", types.FIL(big.Max(pc, pcd))) return nil }, @@ -517,6 +987,135 @@ var sectorsUpdateCmd = &cli.Command{ }, } +var sectorsBatching = &cli.Command{ + Name: "batching", + Usage: "manage batch sector operations", + Subcommands: []*cli.Command{ + sectorsBatchingPendingCommit, + sectorsBatchingPendingPreCommit, + }, +} + +var sectorsBatchingPendingCommit = &cli.Command{ + Name: "commit", + Usage: "list sectors waiting in commit batch queue", + Flags: 
[]cli.Flag{ + &cli.BoolFlag{ + Name: "publish-now", + Usage: "send a batch now", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + if cctx.Bool("publish-now") { + res, err := api.SectorCommitFlush(ctx) + if err != nil { + return xerrors.Errorf("flush: %w", err) + } + if res == nil { + return xerrors.Errorf("no sectors to publish") + } + + for i, re := range res { + fmt.Printf("Batch %d:\n", i) + if re.Error != "" { + fmt.Printf("\tError: %s\n", re.Error) + } else { + fmt.Printf("\tMessage: %s\n", re.Msg) + } + fmt.Printf("\tSectors:\n") + for _, sector := range re.Sectors { + if e, found := re.FailedSectors[sector]; found { + fmt.Printf("\t\t%d\tERROR %s\n", sector, e) + } else { + fmt.Printf("\t\t%d\tOK\n", sector) + } + } + } + return nil + } + + pending, err := api.SectorCommitPending(ctx) + if err != nil { + return xerrors.Errorf("getting pending deals: %w", err) + } + + if len(pending) > 0 { + for _, sector := range pending { + fmt.Println(sector.Number) + } + return nil + } + + fmt.Println("No sectors queued to be committed") + return nil + }, +} + +var sectorsBatchingPendingPreCommit = &cli.Command{ + Name: "precommit", + Usage: "list sectors waiting in precommit batch queue", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "publish-now", + Usage: "send a batch now", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + if cctx.Bool("publish-now") { + res, err := api.SectorPreCommitFlush(ctx) + if err != nil { + return xerrors.Errorf("flush: %w", err) + } + if res == nil { + return xerrors.Errorf("no sectors to publish") + } + + for i, re := range res { + fmt.Printf("Batch %d:\n", i) + if re.Error != "" { + fmt.Printf("\tError: %s\n", re.Error) + } else { + fmt.Printf("\tMessage: %s\n", 
re.Msg) + } + fmt.Printf("\tSectors:\n") + for _, sector := range re.Sectors { + fmt.Printf("\t\t%d\tOK\n", sector) + } + } + return nil + } + + pending, err := api.SectorPreCommitPending(ctx) + if err != nil { + return xerrors.Errorf("getting pending deals: %w", err) + } + + if len(pending) > 0 { + for _, sector := range pending { + fmt.Println(sector.Number) + } + return nil + } + + fmt.Println("No sectors queued to be committed") + return nil + }, +} + func yesno(b bool) string { if b { return color.GreenString("YES") diff --git a/cmd/lotus-storage-miner/storage.go b/cmd/lotus-storage-miner/storage.go index 8b960a4bf50..e7508eb295c 100644 --- a/cmd/lotus-storage-miner/storage.go +++ b/cmd/lotus-storage-miner/storage.go @@ -1,6 +1,7 @@ package main import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -11,6 +12,9 @@ import ( "strings" "time" + "github.com/filecoin-project/lotus/api/v0api" + + "github.com/docker/go-units" "github.com/fatih/color" "github.com/google/uuid" "github.com/mitchellh/go-homedir" @@ -20,11 +24,14 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/lib/tablewriter" ) const metaFile = "sectorstore.json" @@ -40,6 +47,7 @@ stored while moving through the sealing pipeline (references as 'seal').`, storageAttachCmd, storageListCmd, storageFindCmd, + storageCleanupCmd, }, } @@ -83,6 +91,10 @@ over time Name: "store", Usage: "(for init) use path for long-term storage", }, + &cli.StringFlag{ + Name: "max-storage", + Usage: "(for init) limit storage space for 
sectors (expensive for very large paths!)", + }, }, Action: func(cctx *cli.Context) error { nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) @@ -116,15 +128,24 @@ over time return err } + var maxStor int64 + if cctx.IsSet("max-storage") { + maxStor, err = units.RAMInBytes(cctx.String("max-storage")) + if err != nil { + return xerrors.Errorf("parsing max-storage: %w", err) + } + } + cfg := &stores.LocalStorageMeta{ - ID: stores.ID(uuid.New().String()), - Weight: cctx.Uint64("weight"), - CanSeal: cctx.Bool("seal"), - CanStore: cctx.Bool("store"), + ID: stores.ID(uuid.New().String()), + Weight: cctx.Uint64("weight"), + CanSeal: cctx.Bool("seal"), + CanStore: cctx.Bool("store"), + MaxStorage: uint64(maxStor), } if !(cfg.CanStore || cfg.CanSeal) { - return xerrors.Errorf("must specify at least one of --store of --seal") + return xerrors.Errorf("must specify at least one of --store or --seal") } b, err := json.MarshalIndent(cfg, "", " ") @@ -145,10 +166,19 @@ var storageListCmd = &cli.Command{ Name: "list", Usage: "list local storage paths", Flags: []cli.Flag{ - &cli.BoolFlag{Name: "color"}, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + }, + }, + Subcommands: []*cli.Command{ + storageListSectorsCmd, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -212,26 +242,66 @@ var storageListCmd = &cli.Command{ } ping := time.Now().Sub(pingStart) - usedPercent := (st.Capacity - st.Available) * 100 / st.Capacity - - percCol := color.FgGreen - switch { - case usedPercent > 98: - percCol = color.FgRed - case usedPercent > 90: - percCol = color.FgYellow + safeRepeat := func(s string, count int) string { + if count < 0 { + return "" + } + return strings.Repeat(s, count) } var barCols = int64(50) - set := (st.Capacity - st.Available) * 
barCols / st.Capacity - used := (st.Capacity - (st.Available + st.Reserved)) * barCols / st.Capacity - reserved := set - used - bar := strings.Repeat("#", int(used)) + strings.Repeat("*", int(reserved)) + strings.Repeat(" ", int(barCols-set)) - - fmt.Printf("\t[%s] %s/%s %s\n", color.New(percCol).Sprint(bar), - types.SizeStr(types.NewInt(uint64(st.Capacity-st.Available))), - types.SizeStr(types.NewInt(uint64(st.Capacity))), - color.New(percCol).Sprintf("%d%%", usedPercent)) + + // filesystem use bar + { + usedPercent := (st.Capacity - st.FSAvailable) * 100 / st.Capacity + + percCol := color.FgGreen + switch { + case usedPercent > 98: + percCol = color.FgRed + case usedPercent > 90: + percCol = color.FgYellow + } + + set := (st.Capacity - st.FSAvailable) * barCols / st.Capacity + used := (st.Capacity - (st.FSAvailable + st.Reserved)) * barCols / st.Capacity + reserved := set - used + bar := safeRepeat("#", int(used)) + safeRepeat("*", int(reserved)) + safeRepeat(" ", int(barCols-set)) + + desc := "" + if st.Max > 0 { + desc = " (filesystem)" + } + + fmt.Printf("\t[%s] %s/%s %s%s\n", color.New(percCol).Sprint(bar), + types.SizeStr(types.NewInt(uint64(st.Capacity-st.FSAvailable))), + types.SizeStr(types.NewInt(uint64(st.Capacity))), + color.New(percCol).Sprintf("%d%%", usedPercent), desc) + } + + // optional configured limit bar + if st.Max > 0 { + usedPercent := st.Used * 100 / st.Max + + percCol := color.FgGreen + switch { + case usedPercent > 98: + percCol = color.FgRed + case usedPercent > 90: + percCol = color.FgYellow + } + + set := st.Used * barCols / st.Max + used := (st.Used + st.Reserved) * barCols / st.Max + reserved := set - used + bar := safeRepeat("#", int(used)) + safeRepeat("*", int(reserved)) + safeRepeat(" ", int(barCols-set)) + + fmt.Printf("\t[%s] %s/%s %s (limit)\n", color.New(percCol).Sprint(bar), + types.SizeStr(types.NewInt(uint64(st.Used))), + types.SizeStr(types.NewInt(uint64(st.Max))), + color.New(percCol).Sprintf("%d%%", usedPercent)) + } + 
fmt.Printf("\t%s; %s; %s; Reserved: %s\n", color.YellowString("Unsealed: %d", cnt[0]), color.GreenString("Sealed: %d", cnt[1]), @@ -408,3 +478,266 @@ var storageFindCmd = &cli.Command{ return nil }, } + +var storageListSectorsCmd = &cli.Command{ + Name: "sectors", + Usage: "get list of all sector files", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + napi, closer2, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer2() + + ctx := lcli.ReqContext(cctx) + + sectors, err := nodeApi.SectorsList(ctx) + if err != nil { + return xerrors.Errorf("listing sectors: %w", err) + } + + maddr, err := nodeApi.ActorAddress(ctx) + if err != nil { + return err + } + + aid, err := address.IDFromAddress(maddr) + if err != nil { + return err + } + + mi, err := napi.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + sid := func(sn abi.SectorNumber) abi.SectorID { + return abi.SectorID{ + Miner: abi.ActorID(aid), + Number: sn, + } + } + + type entry struct { + id abi.SectorNumber + storage stores.ID + ft storiface.SectorFileType + urls string + + primary, seal, store bool + + state api.SectorState + } + + var list []entry + + for _, sector := range sectors { + st, err := nodeApi.SectorsStatus(ctx, sector, false) + if err != nil { + return xerrors.Errorf("getting sector status for sector %d: %w", sector, err) + } + + for _, ft := range storiface.PathTypes { + si, err := nodeApi.StorageFindSector(ctx, sid(sector), ft, mi.SectorSize, false) + if err != nil { + return xerrors.Errorf("find sector %d: %w", sector, err) + } + + for _, info := range si { + + list = append(list, entry{ + id: sector, + storage: 
info.ID, + ft: ft, + urls: strings.Join(info.URLs, ";"), + + primary: info.Primary, + seal: info.CanSeal, + store: info.CanStore, + + state: st.State, + }) + } + } + + } + + sort.Slice(list, func(i, j int) bool { + if list[i].store != list[j].store { + return list[i].store + } + + if list[i].storage != list[j].storage { + return list[i].storage < list[j].storage + } + + if list[i].id != list[j].id { + return list[i].id < list[j].id + } + + return list[i].ft < list[j].ft + }) + + tw := tablewriter.New( + tablewriter.Col("Storage"), + tablewriter.Col("Sector"), + tablewriter.Col("Type"), + tablewriter.Col("State"), + tablewriter.Col("Primary"), + tablewriter.Col("Path use"), + tablewriter.Col("URLs"), + ) + + if len(list) == 0 { + return nil + } + + lastS := list[0].storage + sc1, sc2 := color.FgBlue, color.FgCyan + + for _, e := range list { + if e.storage != lastS { + lastS = e.storage + sc1, sc2 = sc2, sc1 + } + + m := map[string]interface{}{ + "Storage": color.New(sc1).Sprint(e.storage), + "Sector": e.id, + "Type": e.ft.String(), + "State": color.New(stateOrder[sealing.SectorState(e.state)].col).Sprint(e.state), + "Primary": maybeStr(e.seal, color.FgGreen, "primary"), + "Path use": maybeStr(e.seal, color.FgMagenta, "seal ") + maybeStr(e.store, color.FgCyan, "store"), + "URLs": e.urls, + } + tw.Write(m) + } + + return tw.Flush(os.Stdout) + }, +} + +func maybeStr(c bool, col color.Attribute, s string) string { + if !c { + return "" + } + + return color.New(col).Sprint(s) +} + +var storageCleanupCmd = &cli.Command{ + Name: "cleanup", + Usage: "trigger cleanup actions", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "removed", + Usage: "cleanup remaining files from removed sectors", + Value: true, + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + napi, closer2, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer2() + + ctx := 
lcli.ReqContext(cctx) + + if cctx.Bool("removed") { + if err := cleanupRemovedSectorData(ctx, api, napi); err != nil { + return err + } + } + + // TODO: proving sectors in sealing storage + + return nil + }, +} + +func cleanupRemovedSectorData(ctx context.Context, api api.StorageMiner, napi v0api.FullNode) error { + sectors, err := api.SectorsList(ctx) + if err != nil { + return err + } + + maddr, err := api.ActorAddress(ctx) + if err != nil { + return err + } + + aid, err := address.IDFromAddress(maddr) + if err != nil { + return err + } + + sid := func(sn abi.SectorNumber) abi.SectorID { + return abi.SectorID{ + Miner: abi.ActorID(aid), + Number: sn, + } + } + + mi, err := napi.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + toRemove := map[abi.SectorNumber]struct{}{} + + for _, sector := range sectors { + st, err := api.SectorsStatus(ctx, sector, false) + if err != nil { + return xerrors.Errorf("getting sector status for sector %d: %w", sector, err) + } + + if sealing.SectorState(st.State) != sealing.Removed { + continue + } + + for _, ft := range storiface.PathTypes { + si, err := api.StorageFindSector(ctx, sid(sector), ft, mi.SectorSize, false) + if err != nil { + return xerrors.Errorf("find sector %d: %w", sector, err) + } + + if len(si) > 0 { + toRemove[sector] = struct{}{} + } + } + } + + for sn := range toRemove { + fmt.Printf("cleaning up data for sector %d\n", sn) + err := api.SectorRemove(ctx, sn) + if err != nil { + log.Error(err) + } + } + + return nil +} diff --git a/cmd/lotus-townhall/main.go b/cmd/lotus-townhall/main.go index 7e8f6df7ff3..1e0460deee1 100644 --- a/cmd/lotus-townhall/main.go +++ b/cmd/lotus-townhall/main.go @@ -15,8 +15,8 @@ import ( "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/lib/blockstore" ) var topic = "/fil/headnotifs/" @@ -28,7 
+28,7 @@ func init() { return } - bs := blockstore.NewTemporary() + bs := blockstore.NewMemory() c, err := car.LoadCar(bs, bytes.NewReader(genBytes)) if err != nil { diff --git a/cmd/lotus-wallet/interactive.go b/cmd/lotus-wallet/interactive.go new file mode 100644 index 00000000000..e1ad2cbb292 --- /dev/null +++ b/cmd/lotus-wallet/interactive.go @@ -0,0 +1,245 @@ +package main + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" + gobig "math/big" + "strings" + "sync" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" +) + +type InteractiveWallet struct { + lk sync.Mutex + + apiGetter func() (v0api.FullNode, jsonrpc.ClientCloser, error) + under v0api.Wallet +} + +func (c *InteractiveWallet) WalletNew(ctx context.Context, typ types.KeyType) (address.Address, error) { + err := c.accept(func() error { + fmt.Println("-----") + fmt.Println("ACTION: WalletNew - Creating new wallet") + fmt.Printf("TYPE: %s\n", typ) + return nil + }) + if err != nil { + return address.Address{}, err + } + + return c.under.WalletNew(ctx, typ) +} + +func (c *InteractiveWallet) WalletHas(ctx context.Context, addr address.Address) (bool, error) { + return c.under.WalletHas(ctx, addr) +} + +func (c *InteractiveWallet) WalletList(ctx context.Context) ([]address.Address, error) { + return c.under.WalletList(ctx) +} + +func (c *InteractiveWallet) WalletSign(ctx context.Context, k address.Address, msg 
[]byte, meta api.MsgMeta) (*crypto.Signature, error) { + err := c.accept(func() error { + fmt.Println("-----") + fmt.Println("ACTION: WalletSign - Sign a message/deal") + fmt.Printf("ADDRESS: %s\n", k) + fmt.Printf("TYPE: %s\n", meta.Type) + + switch meta.Type { + case api.MTChainMsg: + var cmsg types.Message + if err := cmsg.UnmarshalCBOR(bytes.NewReader(meta.Extra)); err != nil { + return xerrors.Errorf("unmarshalling message: %w", err) + } + + _, bc, err := cid.CidFromBytes(msg) + if err != nil { + return xerrors.Errorf("getting cid from signing bytes: %w", err) + } + + if !cmsg.Cid().Equals(bc) { + return xerrors.Errorf("cid(meta.Extra).bytes() != msg") + } + + jb, err := json.MarshalIndent(&cmsg, "", " ") + if err != nil { + return xerrors.Errorf("json-marshaling the message: %w", err) + } + + fmt.Println("Message JSON:", string(jb)) + + fmt.Println("Value:", types.FIL(cmsg.Value)) + fmt.Println("Max Fees:", types.FIL(cmsg.RequiredFunds())) + fmt.Println("Max Total Cost:", types.FIL(big.Add(cmsg.RequiredFunds(), cmsg.Value))) + + if c.apiGetter != nil { + napi, closer, err := c.apiGetter() + if err != nil { + return xerrors.Errorf("getting node api: %w", err) + } + defer closer() + + toact, err := napi.StateGetActor(ctx, cmsg.To, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("looking up dest actor: %w", err) + } + + fmt.Println("Method:", stmgr.MethodsMap[toact.Code][cmsg.Method].Name) + p, err := lcli.JsonParams(toact.Code, cmsg.Method, cmsg.Params) + if err != nil { + return err + } + + fmt.Println("Params:", p) + + if builtin.IsMultisigActor(toact.Code) && cmsg.Method == multisig.Methods.Propose { + var mp multisig.ProposeParams + if err := mp.UnmarshalCBOR(bytes.NewReader(cmsg.Params)); err != nil { + return xerrors.Errorf("unmarshalling multisig propose params: %w", err) + } + + fmt.Println("\tMultiSig Proposal Value:", types.FIL(mp.Value)) + fmt.Println("\tMultiSig Proposal Hex Params:", hex.EncodeToString(mp.Params)) + + toact, err := 
napi.StateGetActor(ctx, mp.To, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("looking up msig dest actor: %w", err) + } + + fmt.Println("\tMultiSig Proposal Method:", stmgr.MethodsMap[toact.Code][mp.Method].Name) + p, err := lcli.JsonParams(toact.Code, mp.Method, mp.Params) + if err != nil { + return err + } + + fmt.Println("\tMultiSig Proposal Params:", strings.ReplaceAll(p, "\n", "\n\t")) + } + } else { + fmt.Println("Params: No chain node connection, can't decode params") + } + + case api.MTDealProposal: + return xerrors.Errorf("TODO") // TODO + default: + log.Infow("WalletSign", "address", k, "type", meta.Type) + } + + return nil + }) + if err != nil { + return nil, err + } + + return c.under.WalletSign(ctx, k, msg, meta) +} + +func (c *InteractiveWallet) WalletExport(ctx context.Context, a address.Address) (*types.KeyInfo, error) { + err := c.accept(func() error { + fmt.Println("-----") + fmt.Println("ACTION: WalletExport - Export private key") + fmt.Printf("ADDRESS: %s\n", a) + return nil + }) + if err != nil { + return nil, err + } + + return c.under.WalletExport(ctx, a) +} + +func (c *InteractiveWallet) WalletImport(ctx context.Context, ki *types.KeyInfo) (address.Address, error) { + err := c.accept(func() error { + fmt.Println("-----") + fmt.Println("ACTION: WalletImport - Import private key") + fmt.Printf("TYPE: %s\n", ki.Type) + return nil + }) + if err != nil { + return address.Undef, err + } + + return c.under.WalletImport(ctx, ki) +} + +func (c *InteractiveWallet) WalletDelete(ctx context.Context, addr address.Address) error { + err := c.accept(func() error { + fmt.Println("-----") + fmt.Println("ACTION: WalletDelete - Delete a private key") + fmt.Printf("ADDRESS: %s\n", addr) + return nil + }) + if err != nil { + return err + } + + return c.under.WalletDelete(ctx, addr) +} + +func (c *InteractiveWallet) accept(prompt func() error) error { + c.lk.Lock() + defer c.lk.Unlock() + + if err := prompt(); err != nil { + return err + } + + yes := 
randomYes() + for { + fmt.Printf("\nAccept the above? (%s/No): ", yes) + var a string + if _, err := fmt.Scanln(&a); err != nil { + return err + } + switch a { + case yes: + fmt.Println("approved") + return nil + case "No": + return xerrors.Errorf("action rejected") + } + + fmt.Printf("Type EXACTLY '%s' or 'No'\n", yes) + } +} + +var yeses = []string{ + "yes", + "Yes", + "YES", + "approve", + "Approve", + "accept", + "Accept", + "authorize", + "Authorize", + "confirm", + "Confirm", +} + +func randomYes() string { + i, err := rand.Int(rand.Reader, gobig.NewInt(int64(len(yeses)))) + if err != nil { + panic(err) + } + + return yeses[i.Int64()] +} diff --git a/cmd/lotus-wallet/logged.go b/cmd/lotus-wallet/logged.go index 272a8d10bcf..4f07d6ae46e 100644 --- a/cmd/lotus-wallet/logged.go +++ b/cmd/lotus-wallet/logged.go @@ -16,7 +16,7 @@ import ( ) type LoggedWallet struct { - under api.WalletAPI + under api.Wallet } func (c *LoggedWallet) WalletNew(ctx context.Context, typ types.KeyType) (address.Address, error) { diff --git a/cmd/lotus-wallet/main.go b/cmd/lotus-wallet/main.go index 25b89eb9d91..3e3aa1a585b 100644 --- a/cmd/lotus-wallet/main.go +++ b/cmd/lotus-wallet/main.go @@ -2,25 +2,33 @@ package main import ( "context" + "fmt" "net" "net/http" "os" + "github.com/filecoin-project/lotus/api/v0api" + + "github.com/gbrlsnchs/jwt/v3" "github.com/gorilla/mux" logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" "go.opencensus.io/stats/view" "go.opencensus.io/tag" + "golang.org/x/xerrors" "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet" ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/lotuslog" "github.com/filecoin-project/lotus/metrics" + 
"github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/repo" ) @@ -28,23 +36,45 @@ var log = logging.Logger("main") const FlagWalletRepo = "wallet-repo" +type jwtPayload struct { + Allow []auth.Permission +} + func main() { lotuslog.SetupLogLevels() local := []*cli.Command{ runCmd, + getApiKeyCmd, } app := &cli.App{ Name: "lotus-wallet", Usage: "Basic external wallet", Version: build.UserVersion(), + Description: ` +lotus-wallet provides a remote wallet service for lotus. + +To configure your lotus node to use a remote wallet: +* Run 'lotus-wallet get-api-key' to generate API key +* Start lotus-wallet using 'lotus-wallet run' (see --help for additional flags) +* Edit lotus config (~/.lotus/config.toml) + * Find the '[Wallet]' section + * Set 'RemoteBackend' to '[api key]:http://[wallet ip]:[wallet port]' + (the default port is 1777) +* Start (or restart) the lotus daemon`, Flags: []cli.Flag{ &cli.StringFlag{ Name: FlagWalletRepo, EnvVars: []string{"WALLET_PATH"}, Value: "~/.lotuswallet", // TODO: Consider XDG_DATA_HOME }, + &cli.StringFlag{ + Name: "repo", + EnvVars: []string{"LOTUS_PATH"}, + Hidden: true, + Value: "~/.lotus", + }, }, Commands: local, @@ -57,6 +87,35 @@ func main() { } } +var getApiKeyCmd = &cli.Command{ + Name: "get-api-key", + Usage: "Generate API Key", + Action: func(cctx *cli.Context) error { + lr, ks, err := openRepo(cctx) + if err != nil { + return err + } + defer lr.Close() // nolint + + p := jwtPayload{ + Allow: []auth.Permission{api.PermAdmin}, + } + + authKey, err := modules.APISecret(ks, lr) + if err != nil { + return xerrors.Errorf("setting up api secret: %w", err) + } + + k, err := jwt.Sign(&p, (*jwt.HMACSHA)(authKey)) + if err != nil { + return xerrors.Errorf("jwt sign: %w", err) + } + + fmt.Println(string(k)) + return nil + }, +} + var runCmd = &cli.Command{ Name: "run", Usage: "Start lotus wallet", @@ -70,7 +129,21 @@ var runCmd = &cli.Command{ Name: "ledger", Usage: "use a ledger device instead of 
an on-disk wallet", }, + &cli.BoolFlag{ + Name: "interactive", + Usage: "prompt before performing actions (DO NOT USE FOR MINER WORKER ADDRESS)", + }, + &cli.BoolFlag{ + Name: "offline", + Usage: "don't query chain state in interactive mode", + }, + &cli.BoolFlag{ + Name: "disable-auth", + Usage: "(insecure) disable api auth", + Hidden: true, + }, }, + Description: "For setup instructions see 'lotus-wallet --help'", Action: func(cctx *cli.Context) error { log.Info("Starting lotus wallet") @@ -85,40 +158,20 @@ var runCmd = &cli.Command{ log.Fatalf("Cannot register the view: %v", err) } - repoPath := cctx.String(FlagWalletRepo) - r, err := repo.NewFS(repoPath) - if err != nil { - return err - } - - ok, err := r.Exists() - if err != nil { - return err - } - if !ok { - if err := r.Init(repo.Worker); err != nil { - return err - } - } - - lr, err := r.Lock(repo.Wallet) - if err != nil { - return err - } - - ks, err := lr.KeyStore() + lr, ks, err := openRepo(cctx) if err != nil { return err } + defer lr.Close() // nolint lw, err := wallet.NewWallet(ks) if err != nil { return err } - var w api.WalletAPI = lw + var w api.Wallet = lw if cctx.Bool("ledger") { - ds, err := lr.Datastore("/metadata") + ds, err := lr.Datastore(context.Background(), "/metadata") if err != nil { return err } @@ -134,19 +187,60 @@ var runCmd = &cli.Command{ log.Info("Setting up API endpoint at " + address) + if cctx.Bool("interactive") { + var ag func() (v0api.FullNode, jsonrpc.ClientCloser, error) + + if !cctx.Bool("offline") { + ag = func() (v0api.FullNode, jsonrpc.ClientCloser, error) { + return lcli.GetFullNodeAPI(cctx) + } + } + + w = &InteractiveWallet{ + under: w, + apiGetter: ag, + } + } else { + w = &LoggedWallet{under: w} + } + + rpcApi := metrics.MetricedWalletAPI(w) + if !cctx.Bool("disable-auth") { + rpcApi = api.PermissionedWalletAPI(rpcApi) + } + rpcServer := jsonrpc.NewServer() - rpcServer.Register("Filecoin", &LoggedWallet{under: metrics.MetricedWalletAPI(w)}) + 
rpcServer.Register("Filecoin", rpcApi) mux.Handle("/rpc/v0", rpcServer) mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof - /*ah := &auth.Handler{ - Verify: nodeApi.AuthVerify, - Next: mux.ServeHTTP, - }*/ + var handler http.Handler = mux + + if !cctx.Bool("disable-auth") { + authKey, err := modules.APISecret(ks, lr) + if err != nil { + return xerrors.Errorf("setting up api secret: %w", err) + } + + authVerify := func(ctx context.Context, token string) ([]auth.Permission, error) { + var payload jwtPayload + if _, err := jwt.Verify([]byte(token), (*jwt.HMACSHA)(authKey), &payload); err != nil { + return nil, xerrors.Errorf("JWT Verification failed: %w", err) + } + + return payload.Allow, nil + } + + log.Info("API auth enabled, use 'lotus-wallet get-api-key' to get API key") + handler = &auth.Handler{ + Verify: authVerify, + Next: mux.ServeHTTP, + } + } srv := &http.Server{ - Handler: mux, + Handler: handler, BaseContext: func(listener net.Listener) context.Context { ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-wallet")) return ctx @@ -170,3 +264,33 @@ var runCmd = &cli.Command{ return srv.Serve(nl) }, } + +func openRepo(cctx *cli.Context) (repo.LockedRepo, types.KeyStore, error) { + repoPath := cctx.String(FlagWalletRepo) + r, err := repo.NewFS(repoPath) + if err != nil { + return nil, nil, err + } + + ok, err := r.Exists() + if err != nil { + return nil, nil, err + } + if !ok { + if err := r.Init(repo.Worker); err != nil { + return nil, nil, err + } + } + + lr, err := r.Lock(repo.Wallet) + if err != nil { + return nil, nil, err + } + + ks, err := lr.KeyStore() + if err != nil { + return nil, nil, err + } + + return lr, ks, nil +} diff --git a/cmd/lotus/backup.go b/cmd/lotus/backup.go index aec0000c90d..d41e0c098bf 100644 --- a/cmd/lotus/backup.go +++ b/cmd/lotus/backup.go @@ -1,14 +1,122 @@ package main import ( + "context" + "os" + + dstore "github.com/ipfs/go-datastore" + "github.com/mitchellh/go-homedir" 
"github.com/urfave/cli/v2" + "golang.org/x/xerrors" + "gopkg.in/cheggaaa/pb.v1" "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/lotus/chain/store" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/lib/backupds" + "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/repo" ) var backupCmd = lcli.BackupCmd("repo", repo.FullNode, func(cctx *cli.Context) (lcli.BackupAPI, jsonrpc.ClientCloser, error) { return lcli.GetFullNodeAPI(cctx) }) + +func restore(cctx *cli.Context, r repo.Repo) error { + bf, err := homedir.Expand(cctx.Path("restore")) + if err != nil { + return xerrors.Errorf("expand backup file path: %w", err) + } + + st, err := os.Stat(bf) + if err != nil { + return xerrors.Errorf("stat backup file (%s): %w", bf, err) + } + + f, err := os.Open(bf) + if err != nil { + return xerrors.Errorf("opening backup file: %w", err) + } + defer f.Close() // nolint:errcheck + + lr, err := r.Lock(repo.FullNode) + if err != nil { + return err + } + defer lr.Close() // nolint:errcheck + + if cctx.IsSet("restore-config") { + log.Info("Restoring config") + + cf, err := homedir.Expand(cctx.String("restore-config")) + if err != nil { + return xerrors.Errorf("expanding config path: %w", err) + } + + _, err = os.Stat(cf) + if err != nil { + return xerrors.Errorf("stat config file (%s): %w", cf, err) + } + + var cerr error + err = lr.SetConfig(func(raw interface{}) { + rcfg, ok := raw.(*config.FullNode) + if !ok { + cerr = xerrors.New("expected miner config") + return + } + + ff, err := config.FromFile(cf, rcfg) + if err != nil { + cerr = xerrors.Errorf("loading config: %w", err) + return + } + + *rcfg = *ff.(*config.FullNode) + }) + if cerr != nil { + return cerr + } + if err != nil { + return xerrors.Errorf("setting config: %w", err) + } + + } else { + log.Warn("--restore-config NOT SET, WILL USE DEFAULT VALUES") + } + + log.Info("Restoring metadata backup") + + mds, err := 
lr.Datastore(context.TODO(), "/metadata") + if err != nil { + return err + } + + bar := pb.New64(st.Size()) + br := bar.NewProxyReader(f) + bar.ShowTimeLeft = true + bar.ShowPercent = true + bar.ShowSpeed = true + bar.Units = pb.U_BYTES + + bar.Start() + err = backupds.RestoreInto(br, mds) + bar.Finish() + + if err != nil { + return xerrors.Errorf("restoring metadata: %w", err) + } + + log.Info("Resetting chainstore metadata") + + chainHead := dstore.NewKey("head") + if err := mds.Delete(chainHead); err != nil { + return xerrors.Errorf("clearing chain head: %w", err) + } + if err := store.FlushValidationCache(mds); err != nil { + return xerrors.Errorf("clearing chain validation cache: %w", err) + } + + return nil +} diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 4ff63be1182..0d5961aaea3 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -15,7 +15,9 @@ import ( "runtime/pprof" "strings" + "github.com/filecoin-project/go-jsonrpc" paramfetch "github.com/filecoin-project/go-paramfetch" + metricsprom "github.com/ipfs/go-metrics-prometheus" "github.com/mitchellh/go-homedir" "github.com/multiformats/go-multiaddr" "github.com/urfave/cli/v2" @@ -35,7 +37,6 @@ import ( lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/peermgr" "github.com/filecoin-project/lotus/lib/ulimit" "github.com/filecoin-project/lotus/metrics" @@ -136,6 +137,22 @@ var DaemonCmd = &cli.Command{ Name: "config", Usage: "specify path of config file to use", }, + // FIXME: This is not the correct place to put this configuration + // option. Ideally it would be part of `config.toml` but at the + // moment that only applies to the node configuration and not outside + // components like the RPC server. 
+ &cli.IntFlag{ + Name: "api-max-req-size", + Usage: "maximum API request size accepted by the JSON RPC server", + }, + &cli.PathFlag{ + Name: "restore", + Usage: "restore from backup file", + }, + &cli.PathFlag{ + Name: "restore-config", + Usage: "config file to use when restoring from backup", + }, }, Action: func(cctx *cli.Context) error { isLite := cctx.Bool("lite") @@ -176,7 +193,20 @@ var DaemonCmd = &cli.Command{ return fmt.Errorf("unrecognized profile type: %q", profile) } - ctx, _ := tag.New(context.Background(), tag.Insert(metrics.Version, build.BuildVersion), tag.Insert(metrics.Commit, build.CurrentCommit)) + ctx, _ := tag.New(context.Background(), + tag.Insert(metrics.Version, build.BuildVersion), + tag.Insert(metrics.Commit, build.CurrentCommit), + tag.Insert(metrics.NodeType, "chain"), + ) + // Register all metric views + if err = view.Register( + metrics.ChainNodeViews..., + ); err != nil { + log.Fatalf("Cannot register the view: %v", err) + } + // Set the metric to one so it is published to the exporter + stats.Record(ctx, metrics.LotusInfo.M(1)) + { dir, err := homedir.Expand(cctx.String("repo")) if err != nil { @@ -195,12 +225,14 @@ var DaemonCmd = &cli.Command{ r.SetConfigPath(cctx.String("config")) } - if err := r.Init(repo.FullNode); err != nil && err != repo.ErrRepoExists { + err = r.Init(repo.FullNode) + if err != nil && err != repo.ErrRepoExists { return xerrors.Errorf("repo init error: %w", err) } + freshRepo := err != repo.ErrRepoExists if !isLite { - if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), 0); err != nil { + if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), 0); err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } } @@ -215,6 +247,15 @@ var DaemonCmd = &cli.Command{ genBytes = build.MaybeGenesis() } + if cctx.IsSet("restore") { + if !freshRepo { + return xerrors.Errorf("restoring from backup is only possible with a fresh repo!") + } + 
if err := restore(cctx, r); err != nil { + return xerrors.Errorf("restoring from backup: %w", err) + } + } + chainfile := cctx.String("import-chain") snapshot := cctx.String("import-snapshot") if chainfile != "" || snapshot != "" { @@ -227,7 +268,7 @@ var DaemonCmd = &cli.Command{ issnapshot = true } - if err := ImportChain(r, chainfile, issnapshot); err != nil { + if err := ImportChain(ctx, r, chainfile, issnapshot); err != nil { return err } if cctx.Bool("halt-after-import") { @@ -249,7 +290,7 @@ var DaemonCmd = &cli.Command{ shutdownChan := make(chan struct{}) - // If the daemon is started in "lite mode", provide a GatewayAPI + // If the daemon is started in "lite mode", provide a Gateway // for RPC calls liteModeDeps := node.Options() if isLite { @@ -259,18 +300,25 @@ var DaemonCmd = &cli.Command{ } defer closer() - liteModeDeps = node.Override(new(api.GatewayAPI), gapi) + liteModeDeps = node.Override(new(api.Gateway), gapi) } - var api api.FullNode + // some libraries like ipfs/go-ds-measure and ipfs/go-ipfs-blockstore + // use ipfs/go-metrics-interface. This injects a Prometheus exporter + // for those. Metrics are exported to the default registry. 
+ if err := metricsprom.Inject(); err != nil { + log.Warnf("unable to inject prometheus ipfs/go-metrics exporter; some metrics will be unavailable; err: %s", err) + } + var api api.FullNode stop, err := node.New(ctx, node.FullAPI(&api, node.Lite(isLite)), + node.Base(), + node.Repo(r), + node.Override(new(dtypes.Bootstrapper), isBootstrapper), node.Override(new(dtypes.ShutdownChan), shutdownChan), - node.Online(), - node.Repo(r), genesis, liteModeDeps, @@ -299,23 +347,42 @@ var DaemonCmd = &cli.Command{ } } - // Register all metric views - if err = view.Register( - metrics.DefaultViews..., - ); err != nil { - log.Fatalf("Cannot register the view: %v", err) + endpoint, err := r.APIEndpoint() + if err != nil { + return xerrors.Errorf("getting api endpoint: %w", err) } - // Set the metric to one so it is published to the exporter - stats.Record(ctx, metrics.LotusInfo.M(1)) + // + // Instantiate JSON-RPC endpoint. + // ---- - endpoint, err := r.APIEndpoint() + // Populate JSON-RPC options. + serverOptions := make([]jsonrpc.ServerOption, 0) + if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 { + serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize))) + } + + // Instantiate the full node handler. + h, err := node.FullNodeHandler(api, true, serverOptions...) if err != nil { - return xerrors.Errorf("getting api endpoint: %w", err) + return fmt.Errorf("failed to instantiate rpc handler: %s", err) + } + + // Serve the RPC. + rpcStopper, err := node.ServeRPC(h, "lotus-daemon", endpoint) + if err != nil { + return fmt.Errorf("failed to start json-rpc endpoint: %s", err) } + // Monitor for shutdown. + finishCh := node.MonitorShutdown(shutdownChan, + node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper}, + node.ShutdownHandler{Component: "node", StopFunc: stop}, + ) + <-finishCh // fires when shutdown is complete. 
+ // TODO: properly parse api endpoint (or make it a URL) - return serveRPC(api, stop, endpoint, shutdownChan) + return nil }, Subcommands: []*cli.Command{ daemonStopCmd, @@ -352,11 +419,11 @@ func importKey(ctx context.Context, api api.FullNode, f string) error { return err } - log.Info("successfully imported key for %s", addr) + log.Infof("successfully imported key for %s", addr) return nil } -func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) { +func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) (err error) { var rd io.Reader var l int64 if strings.HasPrefix(fname, "http://") || strings.HasPrefix(fname, "https://") { @@ -367,7 +434,7 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) { defer resp.Body.Close() //nolint:errcheck if resp.StatusCode != http.StatusOK { - return xerrors.Errorf("non-200 response: %d", resp.StatusCode) + return xerrors.Errorf("fetching chain CAR failed with non-200 response: %d", resp.StatusCode) } rd = resp.Body @@ -399,23 +466,23 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) { } defer lr.Close() //nolint:errcheck - ds, err := lr.Datastore("/chain") + bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { - return err + return xerrors.Errorf("failed to open blockstore: %w", err) } - mds, err := lr.Datastore("/metadata") + mds, err := lr.Datastore(context.TODO(), "/metadata") if err != nil { return err } - bs := blockstore.NewBlockstore(ds) - j, err := journal.OpenFSJournal(lr, journal.EnvDisabledEvents()) if err != nil { return xerrors.Errorf("failed to open journal: %w", err) } - cst := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), j) + + cst := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), j) + defer cst.Close() //nolint:errcheck log.Infof("importing chain from %s...", fname) @@ -440,7 +507,7 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) { 
return xerrors.Errorf("flushing validation cache failed: %w", err) } - gb, err := cst.GetTipsetByHeight(context.TODO(), 0, ts, true) + gb, err := cst.GetTipsetByHeight(ctx, 0, ts, true) if err != nil { return err } @@ -454,13 +521,13 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) { if !snapshot { log.Infof("validating imported chain...") - if err := stm.ValidateChain(context.TODO(), ts); err != nil { + if err := stm.ValidateChain(ctx, ts); err != nil { return xerrors.Errorf("chain validation failed: %w", err) } } log.Infof("accepting %s as new head", ts.Cids()) - if err := cst.SetHead(ts); err != nil { + if err := cst.ForceHeadSilent(ctx, ts); err != nil { return err } diff --git a/cmd/lotus/main.go b/cmd/lotus/main.go index eb97045eeb1..63d01f89162 100644 --- a/cmd/lotus/main.go +++ b/cmd/lotus/main.go @@ -2,10 +2,14 @@ package main import ( "context" + "os" + logging "github.com/ipfs/go-log/v2" + "github.com/mattn/go-isatty" "github.com/urfave/cli/v2" "go.opencensus.io/trace" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/lotuslog" @@ -13,10 +17,12 @@ import ( "github.com/filecoin-project/lotus/node/repo" ) +var log = logging.Logger("main") + var AdvanceBlockCmd *cli.Command func main() { - build.RunningNodeType = build.NodeFull + api.RunningNodeType = api.NodeFull lotuslog.SetupLogLevels() @@ -51,6 +57,8 @@ func main() { ctx, span := trace.StartSpan(context.Background(), "/cli") defer span.End() + interactiveDef := isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) + app := &cli.App{ Name: "lotus", Usage: "Filecoin decentralized storage network client", @@ -63,10 +71,20 @@ func main() { Hidden: true, Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME }, + &cli.BoolFlag{ + Name: "interactive", + Usage: "setting to false will disable interactive functionality of commands", + Value: 
interactiveDef, + }, + &cli.BoolFlag{ + Name: "force-send", + Usage: "if true, will ignore pre-send checks", + }, }, Commands: append(local, lcli.Commands...), } + app.Setup() app.Metadata["traceContext"] = ctx app.Metadata["repoType"] = repo.FullNode diff --git a/cmd/lotus/rpc.go b/cmd/lotus/rpc.go deleted file mode 100644 index 4f68ac85a12..00000000000 --- a/cmd/lotus/rpc.go +++ /dev/null @@ -1,130 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "net" - "net/http" - _ "net/http/pprof" - "os" - "os/signal" - "syscall" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "github.com/multiformats/go-multiaddr" - manet "github.com/multiformats/go-multiaddr/net" - "go.opencensus.io/tag" - "golang.org/x/xerrors" - - "contrib.go.opencensus.io/exporter/prometheus" - - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-jsonrpc/auth" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apistruct" - "github.com/filecoin-project/lotus/metrics" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/impl" -) - -var log = logging.Logger("main") - -func serveRPC(a api.FullNode, stop node.StopFunc, addr multiaddr.Multiaddr, shutdownCh <-chan struct{}) error { - rpcServer := jsonrpc.NewServer() - rpcServer.Register("Filecoin", apistruct.PermissionedFullAPI(metrics.MetricedFullAPI(a))) - - ah := &auth.Handler{ - Verify: a.AuthVerify, - Next: rpcServer.ServeHTTP, - } - - http.Handle("/rpc/v0", ah) - - importAH := &auth.Handler{ - Verify: a.AuthVerify, - Next: handleImport(a.(*impl.FullNodeAPI)), - } - - http.Handle("/rest/v0/import", importAH) - - exporter, err := prometheus.NewExporter(prometheus.Options{ - Namespace: "lotus", - }) - if err != nil { - log.Fatalf("could not create the prometheus stats exporter: %v", err) - } - - http.Handle("/debug/metrics", exporter) - - lst, err := manet.Listen(addr) - if err != nil { - return xerrors.Errorf("could not listen: 
%w", err) - } - - srv := &http.Server{ - Handler: http.DefaultServeMux, - BaseContext: func(listener net.Listener) context.Context { - ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-daemon")) - return ctx - }, - } - - sigCh := make(chan os.Signal, 2) - shutdownDone := make(chan struct{}) - go func() { - select { - case sig := <-sigCh: - log.Warnw("received shutdown", "signal", sig) - case <-shutdownCh: - log.Warn("received shutdown") - } - - log.Warn("Shutting down...") - if err := srv.Shutdown(context.TODO()); err != nil { - log.Errorf("shutting down RPC server failed: %s", err) - } - if err := stop(context.TODO()); err != nil { - log.Errorf("graceful shutting down failed: %s", err) - } - log.Warn("Graceful shutdown successful") - _ = log.Sync() //nolint:errcheck - close(shutdownDone) - }() - signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT) - - err = srv.Serve(manet.NetListener(lst)) - if err == http.ErrServerClosed { - <-shutdownDone - return nil - } - return err -} - -func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != "PUT" { - w.WriteHeader(404) - return - } - if !auth.HasPerm(r.Context(), nil, apistruct.PermWrite) { - w.WriteHeader(401) - _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) - return - } - - c, err := a.ClientImportLocal(r.Context(), r.Body) - if err != nil { - w.WriteHeader(500) - _ = json.NewEncoder(w).Encode(struct{ Error string }{err.Error()}) - return - } - w.WriteHeader(200) - err = json.NewEncoder(w).Encode(struct{ Cid cid.Cid }{c}) - if err != nil { - log.Errorf("/rest/v0/import: Writing response failed: %+v", err) - return - } - } -} diff --git a/cmd/tvx/codenames.go b/cmd/tvx/codenames.go index b9f590914f1..f8da07e8d88 100644 --- a/cmd/tvx/codenames.go +++ b/cmd/tvx/codenames.go @@ -20,7 +20,7 @@ var ProtocolCodenames = []struct { 
{build.UpgradeSmokeHeight + 1, "smoke"}, {build.UpgradeIgnitionHeight + 1, "ignition"}, {build.UpgradeRefuelHeight + 1, "refuel"}, - {build.UpgradeActorsV2Height + 1, "actorsv2"}, + {build.UpgradeAssemblyHeight + 1, "actorsv2"}, {build.UpgradeTapeHeight + 1, "tape"}, {build.UpgradeLiftoffHeight + 1, "liftoff"}, {build.UpgradeKumquatHeight + 1, "postliftoff"}, diff --git a/cmd/tvx/codenames_test.go b/cmd/tvx/codenames_test.go index 00d1077072d..e7136d6ccc8 100644 --- a/cmd/tvx/codenames_test.go +++ b/cmd/tvx/codenames_test.go @@ -18,7 +18,7 @@ func TestProtocolCodenames(t *testing.T) { t.Fatal("expected breeze codename") } - if height := build.UpgradeActorsV2Height + 1; GetProtocolCodename(height) != "actorsv2" { + if height := build.UpgradeAssemblyHeight + 1; GetProtocolCodename(abi.ChainEpoch(height)) != "actorsv2" { t.Fatal("expected actorsv2 codename") } diff --git a/cmd/tvx/exec.go b/cmd/tvx/exec.go index 89ad2391351..15bb543a50e 100644 --- a/cmd/tvx/exec.go +++ b/cmd/tvx/exec.go @@ -1,63 +1,169 @@ package main import ( + "bufio" "encoding/json" "fmt" "io" "log" "os" + "path/filepath" + "strings" "github.com/fatih/color" + "github.com/filecoin-project/go-address" + cbornode "github.com/ipfs/go-ipld-cbor" "github.com/urfave/cli/v2" - "github.com/filecoin-project/lotus/conformance" - "github.com/filecoin-project/test-vectors/schema" + + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/conformance" ) var execFlags struct { - file string + file string + out string + driverOpts cli.StringSlice + fallbackBlockstore bool } +const ( + optSaveBalances = "save-balances" +) + var execCmd = &cli.Command{ Name: "exec", - Description: "execute one or many test vectors against Lotus; supplied as a single JSON file, or a ndjson stdin stream", - Action: runExecLotus, + Description: "execute one or many test vectors against Lotus; supplied as a 
single JSON file, a directory, or a ndjson stdin stream", + Action: runExec, Flags: []cli.Flag{ + &repoFlag, &cli.StringFlag{ Name: "file", - Usage: "input file; if not supplied, the vector will be read from stdin", + Usage: "input file or directory; if not supplied, the vector will be read from stdin", TakesFile: true, Destination: &execFlags.file, }, + &cli.BoolFlag{ + Name: "fallback-blockstore", + Usage: "sets the full node API as a fallback blockstore; use this if you're transplanting vectors and get block not found errors", + Destination: &execFlags.fallbackBlockstore, + }, + &cli.StringFlag{ + Name: "out", + Usage: "output directory where to save the results, only used when the input is a directory", + Destination: &execFlags.out, + }, + &cli.StringSliceFlag{ + Name: "driver-opt", + Usage: "comma-separated list of driver options (EXPERIMENTAL; will change), supported: 'save-balances=', 'pipeline-basefee' (unimplemented); only available in single-file mode", + Destination: &execFlags.driverOpts, + }, }, } -func runExecLotus(_ *cli.Context) error { - if file := execFlags.file; file != "" { - // we have a single test vector supplied as a file. - file, err := os.Open(file) - if err != nil { - return fmt.Errorf("failed to open test vector: %w", err) +func runExec(c *cli.Context) error { + if execFlags.fallbackBlockstore { + if err := initialize(c); err != nil { + return fmt.Errorf("fallback blockstore was enabled, but could not resolve lotus API endpoint: %w", err) } + defer destroy(c) //nolint:errcheck + conformance.FallbackBlockstoreGetter = FullAPI + } - var ( - dec = json.NewDecoder(file) - tv schema.TestVector - ) + path := execFlags.file + if path == "" { + return execVectorsStdin() + } + + fi, err := os.Stat(path) + if err != nil { + return err + } - if err = dec.Decode(&tv); err != nil { - return fmt.Errorf("failed to decode test vector: %w", err) + if fi.IsDir() { + // we're in directory mode; ensure the out directory exists. 
+ outdir := execFlags.out + if outdir == "" { + return fmt.Errorf("no output directory provided") } + if err := ensureDir(outdir); err != nil { + return err + } + return execVectorDir(path, outdir) + } - return executeTestVector(tv) + // process tipset vector options. + if err := processTipsetOpts(); err != nil { + return err } + _, err = execVectorFile(new(conformance.LogReporter), path) + return err +} + +func processTipsetOpts() error { + for _, opt := range execFlags.driverOpts.Value() { + switch ss := strings.Split(opt, "="); { + case ss[0] == optSaveBalances: + filename := ss[1] + log.Printf("saving balances after each tipset in: %s", filename) + balancesFile, err := os.Create(filename) + if err != nil { + return err + } + w := bufio.NewWriter(balancesFile) + cb := func(bs blockstore.Blockstore, params *conformance.ExecuteTipsetParams, res *conformance.ExecuteTipsetResult) { + cst := cbornode.NewCborStore(bs) + st, err := state.LoadStateTree(cst, res.PostStateRoot) + if err != nil { + return + } + _ = st.ForEach(func(addr address.Address, actor *types.Actor) error { + _, err := fmt.Fprintln(w, params.ExecEpoch, addr, actor.Balance) + return err + }) + _ = w.Flush() + } + conformance.TipsetVectorOpts.OnTipsetApplied = append(conformance.TipsetVectorOpts.OnTipsetApplied, cb) + + } + + } + return nil +} + +func execVectorDir(path string, outdir string) error { + files, err := filepath.Glob(filepath.Join(path, "*")) + if err != nil { + return fmt.Errorf("failed to glob input directory %s: %w", path, err) + } + for _, f := range files { + outfile := strings.TrimSuffix(filepath.Base(f), filepath.Ext(f)) + ".out" + outpath := filepath.Join(outdir, outfile) + outw, err := os.Create(outpath) + if err != nil { + return fmt.Errorf("failed to create file %s: %w", outpath, err) + } + + log.Printf("processing vector %s; sending output to %s", f, outpath) + log.SetOutput(io.MultiWriter(os.Stderr, outw)) // tee the output. 
+ _, _ = execVectorFile(new(conformance.LogReporter), f) + log.SetOutput(os.Stderr) + _ = outw.Close() + } + return nil +} + +func execVectorsStdin() error { + r := new(conformance.LogReporter) for dec := json.NewDecoder(os.Stdin); ; { var tv schema.TestVector switch err := dec.Decode(&tv); err { case nil: - if err = executeTestVector(tv); err != nil { + if _, err = executeTestVector(r, tv); err != nil { return err } case io.EOF: @@ -70,19 +176,30 @@ func runExecLotus(_ *cli.Context) error { } } -func executeTestVector(tv schema.TestVector) error { +func execVectorFile(r conformance.Reporter, path string) (diffs []string, error error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open test vector: %w", err) + } + + var tv schema.TestVector + if err = json.NewDecoder(file).Decode(&tv); err != nil { + return nil, fmt.Errorf("failed to decode test vector: %w", err) + } + return executeTestVector(r, tv) +} + +func executeTestVector(r conformance.Reporter, tv schema.TestVector) (diffs []string, err error) { log.Println("executing test vector:", tv.Meta.ID) for _, v := range tv.Pre.Variants { - r := new(conformance.LogReporter) - switch class, v := tv.Class, v; class { case "message": - conformance.ExecuteMessageVector(r, &tv, &v) + diffs, err = conformance.ExecuteMessageVector(r, &tv, &v) case "tipset": - conformance.ExecuteTipsetVector(r, &tv, &v) + diffs, err = conformance.ExecuteTipsetVector(r, &tv, &v) default: - return fmt.Errorf("test vector class %s not supported", class) + return nil, fmt.Errorf("test vector class %s not supported", class) } if r.Failed() { @@ -92,5 +209,5 @@ func executeTestVector(tv schema.TestVector) error { } } - return nil + return diffs, err } diff --git a/cmd/tvx/extract.go b/cmd/tvx/extract.go index 3dfec37d883..a3d538abd02 100644 --- a/cmd/tvx/extract.go +++ b/cmd/tvx/extract.go @@ -1,9 +1,6 @@ package main import ( - "bytes" - "compress/gzip" - "context" "encoding/json" "fmt" "io" @@ -11,19 +8,7 
@@ import ( "os" "path/filepath" - "github.com/fatih/color" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin" - init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" - "github.com/filecoin-project/lotus/chain/actors/builtin/reward" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/conformance" - "github.com/filecoin-project/test-vectors/schema" - - "github.com/ipfs/go-cid" "github.com/urfave/cli/v2" ) @@ -37,10 +22,12 @@ type extractOpts struct { block string class string cid string + tsk string file string retain string precursor string ignoreSanityChecks bool + squash bool } var extractFlags extractOpts @@ -55,7 +42,7 @@ var extractCmd = &cli.Command{ &repoFlag, &cli.StringFlag{ Name: "class", - Usage: "class of vector to extract; other required flags depend on the; values: 'message'", + Usage: "class of vector to extract; values: 'message', 'tipset'", Value: "message", Destination: &extractFlags.class, }, @@ -70,16 +57,25 @@ var extractCmd = &cli.Command{ Usage: "optionally, the block CID the message was included in, to avoid expensive chain scanning", Destination: &extractFlags.block, }, + &cli.StringFlag{ + Name: "exec-block", + Usage: "optionally, the block CID of a block where this message was executed, to avoid expensive chain scanning", + Destination: &extractFlags.block, + }, &cli.StringFlag{ Name: "cid", Usage: "message CID to generate test vector from", - Required: true, Destination: &extractFlags.cid, }, + &cli.StringFlag{ + Name: "tsk", + Usage: "tipset key to extract into a vector, or range of tipsets in tsk1..tsk2 form", + Destination: &extractFlags.tsk, + }, &cli.StringFlag{ Name: "out", Aliases: []string{"o"}, - Usage: "file to write test vector to", + Usage: "file to write test vector to, or directory to write the batch to", Destination: &extractFlags.file, }, &cli.StringFlag{ @@ -104,303 +100,29 
@@ var extractCmd = &cli.Command{ Value: false, Destination: &extractFlags.ignoreSanityChecks, }, + &cli.BoolFlag{ + Name: "squash", + Usage: "when extracting a tipset range, squash all tipsets into a single vector", + Value: false, + Destination: &extractFlags.squash, + }, }, } func runExtract(_ *cli.Context) error { - return doExtract(extractFlags) -} - -func doExtract(opts extractOpts) error { - ctx := context.Background() - - mcid, err := cid.Decode(opts.cid) - if err != nil { - return err - } - - msg, execTs, incTs, err := resolveFromChain(ctx, FullAPI, mcid, opts.block) - if err != nil { - return fmt.Errorf("failed to resolve message and tipsets from chain: %w", err) - } - - // get the circulating supply before the message was executed. - circSupplyDetail, err := FullAPI.StateVMCirculatingSupplyInternal(ctx, incTs.Key()) - if err != nil { - return fmt.Errorf("failed while fetching circulating supply: %w", err) - } - - circSupply := circSupplyDetail.FilCirculating - - log.Printf("message was executed in tipset: %s", execTs.Key()) - log.Printf("message was included in tipset: %s", incTs.Key()) - log.Printf("circulating supply at inclusion tipset: %d", circSupply) - log.Printf("finding precursor messages using mode: %s", opts.precursor) - - // Fetch messages in canonical order from inclusion tipset. 
- msgs, err := FullAPI.ChainGetParentMessages(ctx, execTs.Blocks()[0].Cid()) - if err != nil { - return fmt.Errorf("failed to fetch messages in canonical order from inclusion tipset: %w", err) - } - - related, found, err := findMsgAndPrecursors(opts.precursor, msg, msgs) - if err != nil { - return fmt.Errorf("failed while finding message and precursors: %w", err) - } - - if !found { - return fmt.Errorf("message not found; precursors found: %d", len(related)) - } - - var ( - precursors = related[:len(related)-1] - precursorsCids []cid.Cid - ) - - for _, p := range precursors { - precursorsCids = append(precursorsCids, p.Cid()) - } - - log.Println(color.GreenString("found message; precursors (count: %d): %v", len(precursors), precursorsCids)) - - var ( - // create a read-through store that uses ChainGetObject to fetch unknown CIDs. - pst = NewProxyingStores(ctx, FullAPI) - g = NewSurgeon(ctx, FullAPI, pst) - ) - - driver := conformance.NewDriver(ctx, schema.Selector{}, conformance.DriverOpts{ - DisableVMFlush: true, - }) - - // this is the root of the state tree we start with. - root := incTs.ParentState() - log.Printf("base state tree root CID: %s", root) - - basefee := incTs.Blocks()[0].ParentBaseFee - log.Printf("basefee: %s", basefee) - - // on top of that state tree, we apply all precursors. - log.Printf("number of precursors to apply: %d", len(precursors)) - for i, m := range precursors { - log.Printf("applying precursor %d, cid: %s", i, m.Cid()) - _, root, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{ - Preroot: root, - Epoch: execTs.Height(), - Message: m, - CircSupply: circSupplyDetail.FilCirculating, - BaseFee: basefee, - // recorded randomness will be discarded. 
- Rand: conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI), - }) - if err != nil { - return fmt.Errorf("failed to execute precursor message: %w", err) - } - } - - var ( - preroot cid.Cid - postroot cid.Cid - applyret *vm.ApplyRet - carWriter func(w io.Writer) error - retention = opts.retain - - // recordingRand will record randomness so we can embed it in the test vector. - recordingRand = conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI) - ) - - log.Printf("using state retention strategy: %s", retention) - switch retention { - case "accessed-cids": - tbs, ok := pst.Blockstore.(TracingBlockstore) - if !ok { - return fmt.Errorf("requested 'accessed-cids' state retention, but no tracing blockstore was present") - } - - tbs.StartTracing() - - preroot = root - applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{ - Preroot: preroot, - Epoch: execTs.Height(), - Message: msg, - CircSupply: circSupplyDetail.FilCirculating, - BaseFee: basefee, - Rand: recordingRand, - }) - if err != nil { - return fmt.Errorf("failed to execute message: %w", err) - } - accessed := tbs.FinishTracing() - carWriter = func(w io.Writer) error { - return g.WriteCARIncluding(w, accessed, preroot, postroot) - } - - case "accessed-actors": - log.Printf("calculating accessed actors") - // get actors accessed by message. - retain, err := g.GetAccessedActors(ctx, FullAPI, mcid) - if err != nil { - return fmt.Errorf("failed to calculate accessed actors: %w", err) - } - // also append the reward actor and the burnt funds actor. 
- retain = append(retain, reward.Address, builtin.BurntFundsActorAddr, init_.Address) - log.Printf("calculated accessed actors: %v", retain) - - // get the masked state tree from the root, - preroot, err = g.GetMaskedStateTree(root, retain) - if err != nil { - return err - } - applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{ - Preroot: preroot, - Epoch: execTs.Height(), - Message: msg, - CircSupply: circSupplyDetail.FilCirculating, - BaseFee: basefee, - Rand: recordingRand, - }) - if err != nil { - return fmt.Errorf("failed to execute message: %w", err) - } - carWriter = func(w io.Writer) error { - return g.WriteCAR(w, preroot, postroot) - } - + switch extractFlags.class { + case "message": + return doExtractMessage(extractFlags) + case "tipset": + return doExtractTipset(extractFlags) default: - return fmt.Errorf("unknown state retention option: %s", retention) - } - - log.Printf("message applied; preroot: %s, postroot: %s", preroot, postroot) - log.Println("performing sanity check on receipt") - - // TODO sometimes this returns a nil receipt and no error ¯\_(ツ)_/¯ - // ex: https://filfox.info/en/message/bafy2bzacebpxw3yiaxzy2bako62akig46x3imji7fewszen6fryiz6nymu2b2 - // This code is lenient and skips receipt comparison in case of a nil receipt. 
- rec, err := FullAPI.StateGetReceipt(ctx, mcid, execTs.Key()) - if err != nil { - return fmt.Errorf("failed to find receipt on chain: %w", err) + return fmt.Errorf("unsupported vector class") } - log.Printf("found receipt: %+v", rec) - - // generate the schema receipt; if we got - var receipt *schema.Receipt - if rec != nil { - receipt = &schema.Receipt{ - ExitCode: int64(rec.ExitCode), - ReturnValue: rec.Return, - GasUsed: rec.GasUsed, - } - - reporter := new(conformance.LogReporter) - conformance.AssertMsgResult(reporter, receipt, applyret, "as locally executed") - if reporter.Failed() { - if opts.ignoreSanityChecks { - log.Println(color.YellowString("receipt sanity check failed; proceeding anyway")) - } else { - log.Println(color.RedString("receipt sanity check failed; aborting")) - return fmt.Errorf("vector generation aborted") - } - } else { - log.Println(color.GreenString("receipt sanity check succeeded")) - } - - } else { - receipt = &schema.Receipt{ - ExitCode: int64(applyret.ExitCode), - ReturnValue: applyret.Return, - GasUsed: applyret.GasUsed, - } - log.Println(color.YellowString("skipping receipts comparison; we got back a nil receipt from lotus")) - } - - log.Println("generating vector") - msgBytes, err := msg.Serialize() - if err != nil { - return err - } - - var ( - out = new(bytes.Buffer) - gw = gzip.NewWriter(out) - ) - if err := carWriter(gw); err != nil { - return err - } - if err = gw.Flush(); err != nil { - return err - } - if err = gw.Close(); err != nil { - return err - } - - version, err := FullAPI.Version(ctx) - if err != nil { - return err - } - - ntwkName, err := FullAPI.StateNetworkName(ctx) - if err != nil { - return err - } - - nv, err := FullAPI.StateNetworkVersion(ctx, execTs.Key()) - if err != nil { - return err - } - - codename := GetProtocolCodename(execTs.Height()) - - // Write out the test vector. 
- vector := schema.TestVector{ - Class: schema.ClassMessage, - Meta: &schema.Metadata{ - ID: opts.id, - // TODO need to replace schema.GenerationData with a more flexible - // data structure that makes no assumption about the traceability - // data that's being recorded; a flexible map[string]string - // would do. - Gen: []schema.GenerationData{ - {Source: fmt.Sprintf("network:%s", ntwkName)}, - {Source: fmt.Sprintf("message:%s", msg.Cid().String())}, - {Source: fmt.Sprintf("inclusion_tipset:%s", incTs.Key().String())}, - {Source: fmt.Sprintf("execution_tipset:%s", execTs.Key().String())}, - {Source: "github.com/filecoin-project/lotus", Version: version.String()}}, - }, - Selector: schema.Selector{ - schema.SelectorMinProtocolVersion: codename, - }, - Randomness: recordingRand.Recorded(), - CAR: out.Bytes(), - Pre: &schema.Preconditions{ - Variants: []schema.Variant{ - {ID: codename, Epoch: int64(execTs.Height()), NetworkVersion: uint(nv)}, - }, - CircSupply: circSupply.Int, - BaseFee: basefee.Int, - StateTree: &schema.StateTree{ - RootCID: preroot, - }, - }, - ApplyMessages: []schema.Message{{Bytes: msgBytes}}, - Post: &schema.Postconditions{ - StateTree: &schema.StateTree{ - RootCID: postroot, - }, - Receipts: []*schema.Receipt{ - { - ExitCode: int64(applyret.ExitCode), - ReturnValue: applyret.Return, - GasUsed: applyret.GasUsed, - }, - }, - }, - } - - return writeVector(vector, opts.file) } -func writeVector(vector schema.TestVector, file string) (err error) { +// writeVector writes the vector into the specified file, or to stdout if +// file is empty. 
+func writeVector(vector *schema.TestVector, file string) (err error) { output := io.WriteCloser(os.Stdout) if file := file; file != "" { dir := filepath.Dir(file) @@ -420,101 +142,20 @@ func writeVector(vector schema.TestVector, file string) (err error) { return enc.Encode(&vector) } -// resolveFromChain queries the chain for the provided message, using the block CID to -// speed up the query, if provided -func resolveFromChain(ctx context.Context, api api.FullNode, mcid cid.Cid, block string) (msg *types.Message, execTs *types.TipSet, incTs *types.TipSet, err error) { - // Extract the full message. - msg, err = api.ChainGetMessage(ctx, mcid) - if err != nil { - return nil, nil, nil, err - } - - log.Printf("found message with CID %s: %+v", mcid, msg) - - if block == "" { - log.Printf("locating message in blockchain") - - // Locate the message. - msgInfo, err := api.StateSearchMsg(ctx, mcid) - if err != nil { - return nil, nil, nil, fmt.Errorf("failed to locate message: %w", err) - } - - log.Printf("located message at tipset %s (height: %d) with exit code: %s", msgInfo.TipSet, msgInfo.Height, msgInfo.Receipt.ExitCode) - - execTs, incTs, err = fetchThisAndPrevTipset(ctx, api, msgInfo.TipSet) - return msg, execTs, incTs, err - } - - bcid, err := cid.Decode(block) - if err != nil { - return nil, nil, nil, err - } - - log.Printf("message inclusion block CID was provided; scanning around it: %s", bcid) - - blk, err := api.ChainGetBlock(ctx, bcid) - if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get block: %w", err) - } - - // types.EmptyTSK hints to use the HEAD. - execTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height+1, types.EmptyTSK) - if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get message execution tipset: %w", err) - } - - // walk back from the execTs instead of HEAD, to save time. 
- incTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height, execTs.Key()) - if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get message inclusion tipset: %w", err) - } - - return msg, execTs, incTs, nil -} - -// fetchThisAndPrevTipset returns the full tipset identified by the key, as well -// as the previous tipset. In the context of vector generation, the target -// tipset is the one where a message was executed, and the previous tipset is -// the one where the message was included. -func fetchThisAndPrevTipset(ctx context.Context, api api.FullNode, target types.TipSetKey) (targetTs *types.TipSet, prevTs *types.TipSet, err error) { - // get the tipset on which this message was "executed" on. - // https://github.com/filecoin-project/lotus/issues/2847 - targetTs, err = api.ChainGetTipSet(ctx, target) - if err != nil { - return nil, nil, err - } - // get the previous tipset, on which this message was mined, - // i.e. included on-chain. - prevTs, err = api.ChainGetTipSet(ctx, targetTs.Parents()) - if err != nil { - return nil, nil, err +// writeVectors writes each vector to a different file under the specified +// directory. +func writeVectors(dir string, vectors ...*schema.TestVector) error { + // verify the output directory exists. + if err := ensureDir(dir); err != nil { + return err } - return targetTs, prevTs, nil -} - -// findMsgAndPrecursors ranges through the canonical messages slice, locating -// the target message and returning precursors in accordance to the supplied -// mode. -func findMsgAndPrecursors(mode string, target *types.Message, msgs []api.Message) (related []*types.Message, found bool, err error) { - // Range through canonicalised messages, selecting only the precursors based - // on selection mode. 
- for _, other := range msgs { - switch { - case mode == PrecursorSelectAll: - fallthrough - case mode == PrecursorSelectSender && other.Message.From == target.From: - related = append(related, other.Message) - } - - // this message is the target; we're done. - if other.Cid == target.Cid() { - return related, true, nil + // write each vector to its file. + for _, v := range vectors { + id := v.Meta.ID + path := filepath.Join(dir, fmt.Sprintf("%s.json", id)) + if err := writeVector(v, path); err != nil { + return err } } - - // this could happen because a block contained related messages, but not - // the target (that is, messages with a lower nonce, but ultimately not the - // target). - return related, false, nil + return nil } diff --git a/cmd/tvx/extract_many.go b/cmd/tvx/extract_many.go index 048271456e3..081678a1726 100644 --- a/cmd/tvx/extract_many.go +++ b/cmd/tvx/extract_many.go @@ -189,7 +189,7 @@ func runExtractMany(c *cli.Context) error { precursor: PrecursorSelectSender, } - if err := doExtract(opts); err != nil { + if err := doExtractMessage(opts); err != nil { log.Println(color.RedString("failed to extract vector for message %s: %s; queuing for 'all' precursor selection", mcid, err)) retry = append(retry, opts) continue @@ -206,7 +206,7 @@ func runExtractMany(c *cli.Context) error { log.Printf("retrying %s: %s", r.cid, r.id) r.precursor = PrecursorSelectAll - if err := doExtract(r); err != nil { + if err := doExtractMessage(r); err != nil { merr = multierror.Append(merr, fmt.Errorf("failed to extract vector for message %s: %w", r.cid, err)) continue } diff --git a/cmd/tvx/extract_message.go b/cmd/tvx/extract_message.go new file mode 100644 index 00000000000..71035867f29 --- /dev/null +++ b/cmd/tvx/extract_message.go @@ -0,0 +1,421 @@ +package main + +import ( + "bytes" + "compress/gzip" + "context" + "fmt" + "io" + "log" + + "github.com/filecoin-project/lotus/api/v0api" + + "github.com/fatih/color" + "github.com/filecoin-project/go-address" + + 
"github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/conformance" + + "github.com/filecoin-project/test-vectors/schema" + + "github.com/ipfs/go-cid" +) + +func doExtractMessage(opts extractOpts) error { + ctx := context.Background() + + if opts.cid == "" { + return fmt.Errorf("missing message CID") + } + + mcid, err := cid.Decode(opts.cid) + if err != nil { + return err + } + + msg, execTs, incTs, err := resolveFromChain(ctx, FullAPI, mcid, opts.block) + if err != nil { + return fmt.Errorf("failed to resolve message and tipsets from chain: %w", err) + } + + // get the circulating supply before the message was executed. + circSupplyDetail, err := FullAPI.StateVMCirculatingSupplyInternal(ctx, incTs.Key()) + if err != nil { + return fmt.Errorf("failed while fetching circulating supply: %w", err) + } + + circSupply := circSupplyDetail.FilCirculating + + log.Printf("message was executed in tipset: %s", execTs.Key()) + log.Printf("message was included in tipset: %s", incTs.Key()) + log.Printf("circulating supply at inclusion tipset: %d", circSupply) + log.Printf("finding precursor messages using mode: %s", opts.precursor) + + // Fetch messages in canonical order from inclusion tipset. 
+ msgs, err := FullAPI.ChainGetParentMessages(ctx, execTs.Blocks()[0].Cid()) + if err != nil { + return fmt.Errorf("failed to fetch messages in canonical order from inclusion tipset: %w", err) + } + + related, found, err := findMsgAndPrecursors(opts.precursor, mcid, msg.From, msgs) + if err != nil { + return fmt.Errorf("failed while finding message and precursors: %w", err) + } + + if !found { + return fmt.Errorf("message not found; precursors found: %d", len(related)) + } + + var ( + precursors = related[:len(related)-1] + precursorsCids []cid.Cid + ) + + for _, p := range precursors { + precursorsCids = append(precursorsCids, p.Cid()) + } + + log.Println(color.GreenString("found message; precursors (count: %d): %v", len(precursors), precursorsCids)) + + var ( + // create a read-through store that uses ChainGetObject to fetch unknown CIDs. + pst = NewProxyingStores(ctx, FullAPI) + g = NewSurgeon(ctx, FullAPI, pst) + ) + + driver := conformance.NewDriver(ctx, schema.Selector{}, conformance.DriverOpts{ + DisableVMFlush: true, + }) + + // this is the root of the state tree we start with. + root := incTs.ParentState() + log.Printf("base state tree root CID: %s", root) + + basefee := incTs.Blocks()[0].ParentBaseFee + log.Printf("basefee: %s", basefee) + + // on top of that state tree, we apply all precursors. + log.Printf("number of precursors to apply: %d", len(precursors)) + for i, m := range precursors { + log.Printf("applying precursor %d, cid: %s", i, m.Cid()) + _, root, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{ + Preroot: root, + Epoch: execTs.Height(), + Message: m, + CircSupply: circSupplyDetail.FilCirculating, + BaseFee: basefee, + // recorded randomness will be discarded. 
+ Rand: conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI), + }) + if err != nil { + return fmt.Errorf("failed to execute precursor message: %w", err) + } + } + + var ( + preroot cid.Cid + postroot cid.Cid + applyret *vm.ApplyRet + carWriter func(w io.Writer) error + retention = opts.retain + + // recordingRand will record randomness so we can embed it in the test vector. + recordingRand = conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI) + ) + + log.Printf("using state retention strategy: %s", retention) + switch retention { + case "accessed-cids": + tbs, ok := pst.Blockstore.(TracingBlockstore) + if !ok { + return fmt.Errorf("requested 'accessed-cids' state retention, but no tracing blockstore was present") + } + + tbs.StartTracing() + + preroot = root + applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{ + Preroot: preroot, + Epoch: execTs.Height(), + Message: msg, + CircSupply: circSupplyDetail.FilCirculating, + BaseFee: basefee, + Rand: recordingRand, + }) + if err != nil { + return fmt.Errorf("failed to execute message: %w", err) + } + accessed := tbs.FinishTracing() + carWriter = func(w io.Writer) error { + return g.WriteCARIncluding(w, accessed, preroot, postroot) + } + + case "accessed-actors": + log.Printf("calculating accessed actors") + // get actors accessed by message. + retain, err := g.GetAccessedActors(ctx, FullAPI, mcid) + if err != nil { + return fmt.Errorf("failed to calculate accessed actors: %w", err) + } + // also append the reward actor and the burnt funds actor. 
+ retain = append(retain, reward.Address, builtin.BurntFundsActorAddr, init_.Address) + log.Printf("calculated accessed actors: %v", retain) + + // get the masked state tree from the root, + preroot, err = g.GetMaskedStateTree(root, retain) + if err != nil { + return err + } + applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{ + Preroot: preroot, + Epoch: execTs.Height(), + Message: msg, + CircSupply: circSupplyDetail.FilCirculating, + BaseFee: basefee, + Rand: recordingRand, + }) + if err != nil { + return fmt.Errorf("failed to execute message: %w", err) + } + carWriter = func(w io.Writer) error { + return g.WriteCAR(w, preroot, postroot) + } + + default: + return fmt.Errorf("unknown state retention option: %s", retention) + } + + log.Printf("message applied; preroot: %s, postroot: %s", preroot, postroot) + log.Println("performing sanity check on receipt") + + // TODO sometimes this returns a nil receipt and no error ¯\_(ツ)_/¯ + // ex: https://filfox.info/en/message/bafy2bzacebpxw3yiaxzy2bako62akig46x3imji7fewszen6fryiz6nymu2b2 + // This code is lenient and skips receipt comparison in case of a nil receipt. 
+ rec, err := FullAPI.StateGetReceipt(ctx, mcid, execTs.Key()) + if err != nil { + return fmt.Errorf("failed to find receipt on chain: %w", err) + } + log.Printf("found receipt: %+v", rec) + + // generate the schema receipt; if we got a receipt from the chain, use it + // and sanity-check it against the locally-executed result. + var receipt *schema.Receipt + if rec != nil { + receipt = &schema.Receipt{ + ExitCode: int64(rec.ExitCode), + ReturnValue: rec.Return, + GasUsed: rec.GasUsed, + } + + reporter := new(conformance.LogReporter) + conformance.AssertMsgResult(reporter, receipt, applyret, "as locally executed") + if reporter.Failed() { + if opts.ignoreSanityChecks { + log.Println(color.YellowString("receipt sanity check failed; proceeding anyway")) + } else { + log.Println(color.RedString("receipt sanity check failed; aborting")) + return fmt.Errorf("vector generation aborted") + } + } else { + log.Println(color.GreenString("receipt sanity check succeeded")) + } + + } else { + receipt = &schema.Receipt{ + ExitCode: int64(applyret.ExitCode), + ReturnValue: applyret.Return, + GasUsed: applyret.GasUsed, + } + log.Println(color.YellowString("skipping receipts comparison; we got back a nil receipt from lotus")) + } + + log.Println("generating vector") + msgBytes, err := msg.Serialize() + if err != nil { + return err + } + + var ( + out = new(bytes.Buffer) + gw = gzip.NewWriter(out) + ) + if err := carWriter(gw); err != nil { + return err + } + if err = gw.Flush(); err != nil { + return err + } + if err = gw.Close(); err != nil { + return err + } + + version, err := FullAPI.Version(ctx) + if err != nil { + return err + } + + ntwkName, err := FullAPI.StateNetworkName(ctx) + if err != nil { + return err + } + + nv, err := FullAPI.StateNetworkVersion(ctx, execTs.Key()) + if err != nil { + return err + } + + codename := GetProtocolCodename(execTs.Height()) + + // Write out the test vector. 
+ vector := schema.TestVector{ + Class: schema.ClassMessage, + Meta: &schema.Metadata{ + ID: opts.id, + // TODO need to replace schema.GenerationData with a more flexible + // data structure that makes no assumption about the traceability + // data that's being recorded; a flexible map[string]string + // would do. + Gen: []schema.GenerationData{ + {Source: fmt.Sprintf("network:%s", ntwkName)}, + {Source: fmt.Sprintf("message:%s", msg.Cid().String())}, + {Source: fmt.Sprintf("inclusion_tipset:%s", incTs.Key().String())}, + {Source: fmt.Sprintf("execution_tipset:%s", execTs.Key().String())}, + {Source: "github.com/filecoin-project/lotus", Version: version.String()}}, + }, + Selector: schema.Selector{ + schema.SelectorMinProtocolVersion: codename, + }, + Randomness: recordingRand.Recorded(), + CAR: out.Bytes(), + Pre: &schema.Preconditions{ + Variants: []schema.Variant{ + {ID: codename, Epoch: int64(execTs.Height()), NetworkVersion: uint(nv)}, + }, + CircSupply: circSupply.Int, + BaseFee: basefee.Int, + StateTree: &schema.StateTree{ + RootCID: preroot, + }, + }, + ApplyMessages: []schema.Message{{Bytes: msgBytes}}, + Post: &schema.Postconditions{ + StateTree: &schema.StateTree{ + RootCID: postroot, + }, + Receipts: []*schema.Receipt{ + { + ExitCode: int64(applyret.ExitCode), + ReturnValue: applyret.Return, + GasUsed: applyret.GasUsed, + }, + }, + }, + } + return writeVector(&vector, opts.file) +} + +// resolveFromChain queries the chain for the provided message, using the block CID to +// speed up the query, if provided +func resolveFromChain(ctx context.Context, api v0api.FullNode, mcid cid.Cid, block string) (msg *types.Message, execTs *types.TipSet, incTs *types.TipSet, err error) { + // Extract the full message. + msg, err = api.ChainGetMessage(ctx, mcid) + if err != nil { + return nil, nil, nil, err + } + + log.Printf("found message with CID %s: %+v", mcid, msg) + + if block == "" { + log.Printf("locating message in blockchain") + + // Locate the message. 
+ msgInfo, err := api.StateSearchMsg(ctx, mcid) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to locate message: %w", err) + } + if msgInfo == nil { + return nil, nil, nil, fmt.Errorf("failed to locate message: not found") + } + + log.Printf("located message at tipset %s (height: %d) with exit code: %s", msgInfo.TipSet, msgInfo.Height, msgInfo.Receipt.ExitCode) + + execTs, incTs, err = fetchThisAndPrevTipset(ctx, api, msgInfo.TipSet) + return msg, execTs, incTs, err + } + + bcid, err := cid.Decode(block) + if err != nil { + return nil, nil, nil, err + } + + log.Printf("message inclusion block CID was provided; scanning around it: %s", bcid) + + blk, err := api.ChainGetBlock(ctx, bcid) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get block: %w", err) + } + + // types.EmptyTSK hints to use the HEAD. + execTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height+1, types.EmptyTSK) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get message execution tipset: %w", err) + } + + // walk back from the execTs instead of HEAD, to save time. + incTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height, execTs.Key()) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get message inclusion tipset: %w", err) + } + + return msg, execTs, incTs, nil +} + +// fetchThisAndPrevTipset returns the full tipset identified by the key, as well +// as the previous tipset. In the context of vector generation, the target +// tipset is the one where a message was executed, and the previous tipset is +// the one where the message was included. +func fetchThisAndPrevTipset(ctx context.Context, api v0api.FullNode, target types.TipSetKey) (targetTs *types.TipSet, prevTs *types.TipSet, err error) { + // get the tipset on which this message was "executed" on. 
+ // https://github.com/filecoin-project/lotus/issues/2847 + targetTs, err = api.ChainGetTipSet(ctx, target) + if err != nil { + return nil, nil, err + } + // get the previous tipset, on which this message was mined, + // i.e. included on-chain. + prevTs, err = api.ChainGetTipSet(ctx, targetTs.Parents()) + if err != nil { + return nil, nil, err + } + return targetTs, prevTs, nil +} + +// findMsgAndPrecursors ranges through the canonical messages slice, locating +// the target message and returning precursors in accordance to the supplied +// mode. +func findMsgAndPrecursors(mode string, msgCid cid.Cid, sender address.Address, msgs []api.Message) (related []*types.Message, found bool, err error) { + // Range through canonicalised messages, selecting only the precursors based + // on selection mode. + for _, other := range msgs { + switch { + case mode == PrecursorSelectAll: + fallthrough + case mode == PrecursorSelectSender && other.Message.From == sender: + related = append(related, other.Message) + } + + // this message is the target; we're done. + if other.Cid == msgCid { + return related, true, nil + } + } + + // this could happen because a block contained related messages, but not + // the target (that is, messages with a lower nonce, but ultimately not the + // target). 
+ return related, false, nil +} diff --git a/cmd/tvx/extract_tipset.go b/cmd/tvx/extract_tipset.go new file mode 100644 index 00000000000..05e856aa1a0 --- /dev/null +++ b/cmd/tvx/extract_tipset.go @@ -0,0 +1,277 @@ +package main + +import ( + "bytes" + "compress/gzip" + "context" + "fmt" + "log" + "strings" + + "github.com/filecoin-project/test-vectors/schema" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/conformance" +) + +func doExtractTipset(opts extractOpts) error { + ctx := context.Background() + + if opts.retain != "accessed-cids" { + return fmt.Errorf("tipset extraction only supports 'accessed-cids' state retention") + } + + if opts.tsk == "" { + return fmt.Errorf("tipset key cannot be empty") + } + + ss := strings.Split(opts.tsk, "..") + switch len(ss) { + case 1: // extracting a single tipset. + ts, err := lcli.ParseTipSetRef(ctx, FullAPI, opts.tsk) + if err != nil { + return fmt.Errorf("failed to fetch tipset: %w", err) + } + v, err := extractTipsets(ctx, ts) + if err != nil { + return err + } + return writeVector(v, opts.file) + + case 2: // extracting a range of tipsets. + left, err := lcli.ParseTipSetRef(ctx, FullAPI, ss[0]) + if err != nil { + return fmt.Errorf("failed to fetch tipset %s: %w", ss[0], err) + } + right, err := lcli.ParseTipSetRef(ctx, FullAPI, ss[1]) + if err != nil { + return fmt.Errorf("failed to fetch tipset %s: %w", ss[1], err) + } + + // resolve the tipset range. + tss, err := resolveTipsetRange(ctx, left, right) + if err != nil { + return err + } + + // are we squashing all tipsets into a single multi-tipset vector? + if opts.squash { + vector, err := extractTipsets(ctx, tss...) + if err != nil { + return err + } + return writeVector(vector, opts.file) + } + + // we are generating a single-tipset vector per tipset. + vectors, err := extractIndividualTipsets(ctx, tss...) 
+ if err != nil { + return err + } + return writeVectors(opts.file, vectors...) + + default: + return fmt.Errorf("unrecognized tipset format") + } +} + +func resolveTipsetRange(ctx context.Context, left *types.TipSet, right *types.TipSet) (tss []*types.TipSet, err error) { + // start from the right tipset and walk back the chain until the left tipset, inclusive. + for curr := right; curr.Key() != left.Parents(); { + tss = append(tss, curr) + curr, err = FullAPI.ChainGetTipSet(ctx, curr.Parents()) + if err != nil { + return nil, fmt.Errorf("failed to get tipset %s (height: %d): %w", curr.Parents(), curr.Height()-1, err) + } + } + // reverse the slice. + for i, j := 0, len(tss)-1; i < j; i, j = i+1, j-1 { + tss[i], tss[j] = tss[j], tss[i] + } + return tss, nil +} + +func extractIndividualTipsets(ctx context.Context, tss ...*types.TipSet) (vectors []*schema.TestVector, err error) { + for _, ts := range tss { + v, err := extractTipsets(ctx, ts) + if err != nil { + return nil, err + } + vectors = append(vectors, v) + } + return vectors, nil +} + +func extractTipsets(ctx context.Context, tss ...*types.TipSet) (*schema.TestVector, error) { + var ( + // create a read-through store that uses ChainGetObject to fetch unknown CIDs. + pst = NewProxyingStores(ctx, FullAPI) + g = NewSurgeon(ctx, FullAPI, pst) + + // recordingRand will record randomness so we can embed it in the test vector. + recordingRand = conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI) + ) + + tbs, ok := pst.Blockstore.(TracingBlockstore) + if !ok { + return nil, fmt.Errorf("requested 'accessed-cids' state retention, but no tracing blockstore was present") + } + + driver := conformance.NewDriver(ctx, schema.Selector{}, conformance.DriverOpts{ + DisableVMFlush: true, + }) + + base := tss[0] + last := tss[len(tss)-1] + + // this is the root of the state tree we start with. 
+ root := base.ParentState() + log.Printf("base state tree root CID: %s", root) + + codename := GetProtocolCodename(base.Height()) + nv, err := FullAPI.StateNetworkVersion(ctx, base.Key()) + if err != nil { + return nil, err + } + + version, err := FullAPI.Version(ctx) + if err != nil { + return nil, err + } + + ntwkName, err := FullAPI.StateNetworkName(ctx) + if err != nil { + return nil, err + } + + vector := schema.TestVector{ + Class: schema.ClassTipset, + Meta: &schema.Metadata{ + ID: fmt.Sprintf("@%d..@%d", base.Height(), last.Height()), + Gen: []schema.GenerationData{ + {Source: fmt.Sprintf("network:%s", ntwkName)}, + {Source: "github.com/filecoin-project/lotus", Version: version.String()}}, + // will be completed by extra tipset stamps. + }, + Selector: schema.Selector{ + schema.SelectorMinProtocolVersion: codename, + }, + Pre: &schema.Preconditions{ + Variants: []schema.Variant{ + {ID: codename, Epoch: int64(base.Height()), NetworkVersion: uint(nv)}, + }, + StateTree: &schema.StateTree{ + RootCID: base.ParentState(), + }, + }, + Post: &schema.Postconditions{ + StateTree: new(schema.StateTree), + }, + } + + tbs.StartTracing() + + roots := []cid.Cid{base.ParentState()} + for i, ts := range tss { + log.Printf("tipset %s block count: %d", ts.Key(), len(ts.Blocks())) + + var blocks []schema.Block + for _, b := range ts.Blocks() { + msgs, err := FullAPI.ChainGetBlockMessages(ctx, b.Cid()) + if err != nil { + return nil, fmt.Errorf("failed to get block messages (cid: %s): %w", b.Cid(), err) + } + + log.Printf("block %s has %d messages", b.Cid(), len(msgs.Cids)) + + packed := make([]schema.Base64EncodedBytes, 0, len(msgs.Cids)) + for _, m := range msgs.BlsMessages { + b, err := m.Serialize() + if err != nil { + return nil, fmt.Errorf("failed to serialize message: %w", err) + } + packed = append(packed, b) + } + for _, m := range msgs.SecpkMessages { + b, err := m.Message.Serialize() + if err != nil { + return nil, fmt.Errorf("failed to serialize message: %w", err) 
+ } + packed = append(packed, b) + } + blocks = append(blocks, schema.Block{ + MinerAddr: b.Miner, + WinCount: b.ElectionProof.WinCount, + Messages: packed, + }) + } + + basefee := base.Blocks()[0].ParentBaseFee + log.Printf("tipset basefee: %s", basefee) + + tipset := schema.Tipset{ + BaseFee: *basefee.Int, + Blocks: blocks, + EpochOffset: int64(i), + } + + params := conformance.ExecuteTipsetParams{ + Preroot: roots[len(roots)-1], + ParentEpoch: ts.Height() - 1, + Tipset: &tipset, + ExecEpoch: ts.Height(), + Rand: recordingRand, + } + + result, err := driver.ExecuteTipset(pst.Blockstore, pst.Datastore, params) + if err != nil { + return nil, fmt.Errorf("failed to execute tipset: %w", err) + } + + roots = append(roots, result.PostStateRoot) + + // update the vector. + vector.ApplyTipsets = append(vector.ApplyTipsets, tipset) + vector.Post.ReceiptsRoots = append(vector.Post.ReceiptsRoots, result.ReceiptsRoot) + + for _, res := range result.AppliedResults { + vector.Post.Receipts = append(vector.Post.Receipts, &schema.Receipt{ + ExitCode: int64(res.ExitCode), + ReturnValue: res.Return, + GasUsed: res.GasUsed, + }) + } + + vector.Meta.Gen = append(vector.Meta.Gen, schema.GenerationData{ + Source: "tipset:" + ts.Key().String(), + }) + } + + accessed := tbs.FinishTracing() + + // + // ComputeBaseFee(ctx, baseTs) + + // write a CAR with the accessed state into a buffer. 
+ var ( + out = new(bytes.Buffer) + gw = gzip.NewWriter(out) + ) + if err := g.WriteCARIncluding(gw, accessed, roots...); err != nil { + return nil, err + } + if err = gw.Flush(); err != nil { + return nil, err + } + if err = gw.Close(); err != nil { + return nil, err + } + + vector.Randomness = recordingRand.Recorded() + vector.Post.StateTree.RootCID = roots[len(roots)-1] + vector.CAR = out.Bytes() + + return &vector, nil +} diff --git a/cmd/tvx/main.go b/cmd/tvx/main.go index 8de851ed59d..0fed8fad4b1 100644 --- a/cmd/tvx/main.go +++ b/cmd/tvx/main.go @@ -9,13 +9,13 @@ import ( "github.com/filecoin-project/go-jsonrpc" "github.com/urfave/cli/v2" - "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" lcli "github.com/filecoin-project/lotus/cli" ) // FullAPI is a JSON-RPC client targeting a full node. It's initialized in a // cli.BeforeFunc. -var FullAPI api.FullNode +var FullAPI v0api.FullNode // Closer is the closer for the JSON-RPC client, which must be called on // cli.AfterFunc. @@ -102,7 +102,7 @@ func initialize(c *cli.Context) error { // Make the API client. 
var err error if FullAPI, Closer, err = lcli.GetFullNodeAPI(c); err != nil { - err = fmt.Errorf("failed to locate Lotus node; ") + err = fmt.Errorf("failed to locate Lotus node; err: %w", err) } return err } @@ -113,3 +113,19 @@ func destroy(_ *cli.Context) error { } return nil } + +func ensureDir(path string) error { + switch fi, err := os.Stat(path); { + case os.IsNotExist(err): + if err := os.MkdirAll(path, 0755); err != nil { + return fmt.Errorf("failed to create directory %s: %w", path, err) + } + case err == nil: + if !fi.IsDir() { + return fmt.Errorf("path %s is not a directory: %w", path, err) + } + default: + return fmt.Errorf("failed to stat directory %s: %w", path, err) + } + return nil +} diff --git a/cmd/tvx/simulate.go b/cmd/tvx/simulate.go index 82b2bc118c7..da9a034e923 100644 --- a/cmd/tvx/simulate.go +++ b/cmd/tvx/simulate.go @@ -154,7 +154,7 @@ func runSimulateCmd(_ *cli.Context) error { version, err := FullAPI.Version(ctx) if err != nil { log.Printf("failed to get node version: %s; falling back to unknown", err) - version = api.Version{} + version = api.APIVersion{} } nv, err := FullAPI.StateNetworkVersion(ctx, ts.Key()) @@ -202,7 +202,7 @@ func runSimulateCmd(_ *cli.Context) error { }, } - if err := writeVector(vector, simulateFlags.out); err != nil { + if err := writeVector(&vector, simulateFlags.out); err != nil { return fmt.Errorf("failed to write vector: %w", err) } diff --git a/cmd/tvx/state.go b/cmd/tvx/state.go index bff5cbd6ecb..f2d25300adb 100644 --- a/cmd/tvx/state.go +++ b/cmd/tvx/state.go @@ -6,6 +6,8 @@ import ( "io" "log" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" @@ -13,7 +15,6 @@ import ( "github.com/ipld/go-car" cbg "github.com/whyrusleeping/cbor-gen" - "github.com/filecoin-project/lotus/api" init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" "github.com/filecoin-project/lotus/chain/state" 
"github.com/filecoin-project/lotus/chain/types" @@ -23,13 +24,13 @@ import ( // StateSurgeon is an object used to fetch and manipulate state. type StateSurgeon struct { ctx context.Context - api api.FullNode + api v0api.FullNode stores *Stores } // NewSurgeon returns a state surgeon, an object used to fetch and manipulate // state. -func NewSurgeon(ctx context.Context, api api.FullNode, stores *Stores) *StateSurgeon { +func NewSurgeon(ctx context.Context, api v0api.FullNode, stores *Stores) *StateSurgeon { return &StateSurgeon{ ctx: ctx, api: api, @@ -85,7 +86,7 @@ func (sg *StateSurgeon) GetMaskedStateTree(previousRoot cid.Cid, retain []addres // GetAccessedActors identifies the actors that were accessed during the // execution of a message. -func (sg *StateSurgeon) GetAccessedActors(ctx context.Context, a api.FullNode, mid cid.Cid) ([]address.Address, error) { +func (sg *StateSurgeon) GetAccessedActors(ctx context.Context, a v0api.FullNode, mid cid.Cid) ([]address.Address, error) { log.Printf("calculating accessed actors during execution of message: %s", mid) msgInfo, err := a.StateSearchMsg(ctx, mid) if err != nil { diff --git a/cmd/tvx/stores.go b/cmd/tvx/stores.go index 4f574c1752d..04000564178 100644 --- a/cmd/tvx/stores.go +++ b/cmd/tvx/stores.go @@ -5,11 +5,12 @@ import ( "log" "sync" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/fatih/color" dssync "github.com/ipfs/go-datastore/sync" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/adt" @@ -40,12 +41,12 @@ type Stores struct { // NewProxyingStores is a set of Stores backed by a proxying Blockstore that // proxies Get requests for unknown CIDs to a Filecoin node, via the // ChainReadObj RPC. 
-func NewProxyingStores(ctx context.Context, api api.FullNode) *Stores { +func NewProxyingStores(ctx context.Context, api v0api.FullNode) *Stores { ds := dssync.MutexWrap(ds.NewMapDatastore()) bs := &proxyingBlockstore{ ctx: ctx, api: api, - Blockstore: blockstore.NewBlockstore(ds), + Blockstore: blockstore.FromDatastore(ds), } return NewStores(ctx, ds, bs) } @@ -85,7 +86,7 @@ type TracingBlockstore interface { // a Filecoin node via JSON-RPC. type proxyingBlockstore struct { ctx context.Context - api api.FullNode + api v0api.FullNode lk sync.Mutex tracing bool @@ -149,3 +150,14 @@ func (pb *proxyingBlockstore) Put(block blocks.Block) error { pb.lk.Unlock() return pb.Blockstore.Put(block) } + +func (pb *proxyingBlockstore) PutMany(blocks []blocks.Block) error { + pb.lk.Lock() + if pb.tracing { + for _, b := range blocks { + pb.traced[b.Cid()] = struct{}{} + } + } + pb.lk.Unlock() + return pb.Blockstore.PutMany(blocks) +} diff --git a/conformance/chaos/actor.go b/conformance/chaos/actor.go index cdda1db83ef..f5a94989de9 100644 --- a/conformance/chaos/actor.go +++ b/conformance/chaos/actor.go @@ -73,6 +73,8 @@ const ( // MethodInspectRuntime is the identifier for the method that returns the // current runtime values. MethodInspectRuntime + // MethodCreateState is the identifier for the method that creates the chaos actor's state. + MethodCreateState ) // Exports defines the methods this actor exposes publicly. @@ -87,6 +89,7 @@ func (a Actor) Exports() []interface{} { MethodMutateState: a.MutateState, MethodAbortWith: a.AbortWith, MethodInspectRuntime: a.InspectRuntime, + MethodCreateState: a.CreateState, } } @@ -227,6 +230,14 @@ type MutateStateArgs struct { Branch MutateStateBranch } +// CreateState creates the chaos actor's state +func (a Actor) CreateState(rt runtime2.Runtime, _ *abi.EmptyValue) *abi.EmptyValue { + rt.ValidateImmediateCallerAcceptAny() + rt.StateCreate(&State{}) + + return nil +} + // MutateState attempts to mutate a state value in the actor. 
func (a Actor) MutateState(rt runtime2.Runtime, args *MutateStateArgs) *abi.EmptyValue { rt.ValidateImmediateCallerAcceptAny() diff --git a/conformance/chaos/actor_test.go b/conformance/chaos/actor_test.go index dbce4f4c552..e68b9a4df76 100644 --- a/conformance/chaos/actor_test.go +++ b/conformance/chaos/actor_test.go @@ -129,8 +129,9 @@ func TestMutateStateInTransaction(t *testing.T) { var a Actor rt.ExpectValidateCallerAny() - rt.StateCreate(&State{}) + rt.Call(a.CreateState, nil) + rt.ExpectValidateCallerAny() val := "__mutstat test" rt.Call(a.MutateState, &MutateStateArgs{ Value: val, @@ -155,23 +156,30 @@ func TestMutateStateAfterTransaction(t *testing.T) { var a Actor rt.ExpectValidateCallerAny() - rt.StateCreate(&State{}) + rt.Call(a.CreateState, nil) + rt.ExpectValidateCallerAny() val := "__mutstat test" + defer func() { + if r := recover(); r == nil { + t.Fatal("The code did not panic") + } else { + var st State + rt.GetState(&st) + + // state should be updated successfully _in_ the transaction but not outside + if st.Value != val+"-in" { + t.Fatal("state was not updated") + } + + rt.Verify() + } + }() rt.Call(a.MutateState, &MutateStateArgs{ Value: val, Branch: MutateAfterTransaction, }) - var st State - rt.GetState(&st) - - // state should be updated successfully _in_ the transaction but not outside - if st.Value != val+"-in" { - t.Fatal("state was not updated") - } - - rt.Verify() } func TestMutateStateReadonly(t *testing.T) { @@ -182,22 +190,30 @@ func TestMutateStateReadonly(t *testing.T) { var a Actor rt.ExpectValidateCallerAny() - rt.StateCreate(&State{}) + rt.Call(a.CreateState, nil) + rt.ExpectValidateCallerAny() val := "__mutstat test" + defer func() { + if r := recover(); r == nil { + t.Fatal("The code did not panic") + } else { + var st State + rt.GetState(&st) + + if st.Value != "" { + t.Fatal("state was not expected to be updated") + } + + rt.Verify() + } + }() + rt.Call(a.MutateState, &MutateStateArgs{ Value: val, Branch: MutateReadonly, }) 
- var st State - rt.GetState(&st) - - if st.Value != "" { - t.Fatal("state was not expected to be updated") - } - - rt.Verify() } func TestMutateStateInvalidBranch(t *testing.T) { @@ -254,11 +270,13 @@ func TestInspectRuntime(t *testing.T) { receiver := atesting2.NewIDAddr(t, 101) builder := mock2.NewBuilder(context.Background(), receiver) - rt := builder.Build(t) - rt.SetCaller(caller, builtin2.AccountActorCodeID) - rt.StateCreate(&State{}) var a Actor + rt := builder.Build(t) + rt.ExpectValidateCallerAny() + rt.Call(a.CreateState, nil) + + rt.SetCaller(caller, builtin2.AccountActorCodeID) rt.ExpectValidateCallerAny() ret := rt.Call(a.InspectRuntime, abi.Empty) rtr, ok := ret.(*InspectRuntimeReturn) diff --git a/conformance/chaos/cbor_gen.go b/conformance/chaos/cbor_gen.go index 876d6a893de..5bf85606f77 100644 --- a/conformance/chaos/cbor_gen.go +++ b/conformance/chaos/cbor_gen.go @@ -5,6 +5,7 @@ package chaos import ( "fmt" "io" + "sort" address "github.com/filecoin-project/go-address" abi "github.com/filecoin-project/go-state-types/abi" @@ -15,6 +16,8 @@ import ( ) var _ = xerrors.Errorf +var _ = cid.Undef +var _ = sort.Sort var lengthBufState = []byte{130} diff --git a/conformance/corpus_test.go b/conformance/corpus_test.go index a09f9a8d336..b9ba062ccba 100644 --- a/conformance/corpus_test.go +++ b/conformance/corpus_test.go @@ -11,7 +11,7 @@ import ( "github.com/filecoin-project/test-vectors/schema" ) -var invokees = map[schema.Class]func(Reporter, *schema.TestVector, *schema.Variant){ +var invokees = map[schema.Class]func(Reporter, *schema.TestVector, *schema.Variant) ([]string, error){ schema.ClassMessage: ExecuteMessageVector, schema.ClassTipset: ExecuteTipsetVector, } @@ -133,7 +133,7 @@ func TestConformance(t *testing.T) { for _, variant := range vector.Pre.Variants { variant := variant t.Run(variant.ID, func(t *testing.T) { - invokee(t, &vector, &variant) + _, _ = invokee(t, &vector, &variant) //nolint:errcheck }) } }) diff --git a/conformance/driver.go 
b/conformance/driver.go index 95b6f2659ea..c7fc0d6c43a 100644 --- a/conformance/driver.go +++ b/conformance/driver.go @@ -5,6 +5,7 @@ import ( gobig "math/big" "os" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -12,7 +13,6 @@ import ( "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/conformance/chaos" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/lib/blockstore" _ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures _ "github.com/filecoin-project/lotus/lib/sigs/secp" // enable secp signatures @@ -71,26 +71,50 @@ type ExecuteTipsetResult struct { AppliedMessages []*types.Message // AppliedResults stores the results of AppliedMessages, in the same order. AppliedResults []*vm.ApplyRet + + // PostBaseFee returns the basefee after applying this tipset. + PostBaseFee abi.TokenAmount +} + +type ExecuteTipsetParams struct { + Preroot cid.Cid + // ParentEpoch is the last epoch in which an actual tipset was processed. This + // is used by Lotus for null block counting and cron firing. + ParentEpoch abi.ChainEpoch + Tipset *schema.Tipset + ExecEpoch abi.ChainEpoch + // Rand is an optional vm.Rand implementation to use. If nil, the driver + // will use a vm.Rand that returns a fixed value for all calls. + Rand vm.Rand + // BaseFee if not nil or zero, will override the basefee of the tipset. + BaseFee abi.TokenAmount } // ExecuteTipset executes the supplied tipset on top of the state represented // by the preroot CID. // -// parentEpoch is the last epoch in which an actual tipset was processed. This -// is used by Lotus for null block counting and cron firing. -// // This method returns the the receipts root, the poststate root, and the VM // message results. 
The latter _include_ implicit messages, such as cron ticks // and reward withdrawal per miner. -func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot cid.Cid, parentEpoch abi.ChainEpoch, tipset *schema.Tipset, execEpoch abi.ChainEpoch) (*ExecuteTipsetResult, error) { +func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params ExecuteTipsetParams) (*ExecuteTipsetResult, error) { var ( + tipset = params.Tipset syscalls = vm.Syscalls(ffiwrapper.ProofVerifier) - vmRand = NewFixedRand() - cs = store.NewChainStore(bs, ds, syscalls, nil) + cs = store.NewChainStore(bs, bs, ds, syscalls, nil) sm = stmgr.NewStateManager(cs) ) + if params.Rand == nil { + params.Rand = NewFixedRand() + } + + if params.BaseFee.NilOrZero() { + params.BaseFee = abi.NewTokenAmount(tipset.BaseFee.Int64()) + } + + defer cs.Close() //nolint:errcheck + blocks := make([]store.BlockMessages, 0, len(tipset.Blocks)) for _, b := range tipset.Blocks { sb := store.BlockMessages{ @@ -117,19 +141,22 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot blocks = append(blocks, sb) } - var ( - messages []*types.Message - results []*vm.ApplyRet + recordOutputs := &outputRecorder{ + messages: []*types.Message{}, + results: []*vm.ApplyRet{}, + } - basefee = abi.NewTokenAmount(tipset.BaseFee.Int64()) + postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(), + params.ParentEpoch, + params.Preroot, + blocks, + params.ExecEpoch, + params.Rand, + recordOutputs, + params.BaseFee, + nil, ) - postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(), parentEpoch, preroot, blocks, execEpoch, vmRand, func(_ cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { - messages = append(messages, msg) - results = append(results, ret) - return nil - }, basefee, nil) - if err != nil { return nil, err } @@ -137,8 +164,8 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot ret := &ExecuteTipsetResult{ 
ReceiptsRoot: receiptsroot, PostStateRoot: postcid, - AppliedMessages: messages, - AppliedResults: results, + AppliedMessages: recordOutputs.messages, + AppliedResults: recordOutputs.results, } return ret, nil } @@ -252,3 +279,14 @@ func CircSupplyOrDefault(circSupply *gobig.Int) abi.TokenAmount { } return big.NewFromGo(circSupply) } + +type outputRecorder struct { + messages []*types.Message + results []*vm.ApplyRet +} + +func (o *outputRecorder) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error { + o.messages = append(o.messages, msg) + o.results = append(o.results, ret) + return nil +} diff --git a/conformance/rand_fixed.go b/conformance/rand_fixed.go index d356b53d049..f15910e1d6d 100644 --- a/conformance/rand_fixed.go +++ b/conformance/rand_fixed.go @@ -19,10 +19,18 @@ func NewFixedRand() vm.Rand { return &fixedRand{} } -func (r *fixedRand) GetChainRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { +func (r *fixedRand) GetChainRandomnessLookingForward(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. } -func (r *fixedRand) GetBeaconRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { +func (r *fixedRand) GetChainRandomnessLookingBack(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { + return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. +} + +func (r *fixedRand) GetBeaconRandomnessLookingForward(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { + return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. 
+} + +func (r *fixedRand) GetBeaconRandomnessLookingBack(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. } diff --git a/conformance/rand_record.go b/conformance/rand_record.go index 6f6d064dc74..906d6b73dd1 100644 --- a/conformance/rand_record.go +++ b/conformance/rand_record.go @@ -10,14 +10,14 @@ import ( "github.com/filecoin-project/test-vectors/schema" - "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" ) type RecordingRand struct { reporter Reporter - api api.FullNode + api v0api.FullNode // once guards the loading of the head tipset. // can be removed when https://github.com/filecoin-project/lotus/issues/4223 @@ -33,7 +33,7 @@ var _ vm.Rand = (*RecordingRand)(nil) // NewRecordingRand returns a vm.Rand implementation that proxies calls to a // full Lotus node via JSON-RPC, and records matching rules and responses so // they can later be embedded in test vectors. 
-func NewRecordingRand(reporter Reporter, api api.FullNode) *RecordingRand { +func NewRecordingRand(reporter Reporter, api v0api.FullNode) *RecordingRand { return &RecordingRand{reporter: reporter, api: api} } @@ -45,8 +45,17 @@ func (r *RecordingRand) loadHead() { r.head = head.Key() } -func (r *RecordingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (r *RecordingRand) GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getChainRandomness(ctx, pers, round, entropy) +} + +func (r *RecordingRand) GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getChainRandomness(ctx, pers, round, entropy) +} + +func (r *RecordingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { r.once.Do(r.loadHead) + // FullNode's ChainGetRandomnessFromTickets handles whether we should be looking forward or back ret, err := r.api.ChainGetRandomnessFromTickets(ctx, r.head, pers, round, entropy) if err != nil { return ret, err @@ -70,7 +79,15 @@ func (r *RecordingRand) GetChainRandomness(ctx context.Context, pers crypto.Doma return ret, err } -func (r *RecordingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (r *RecordingRand) GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getBeaconRandomness(ctx, pers, round, entropy) +} + +func (r *RecordingRand) GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getBeaconRandomness(ctx, 
pers, round, entropy) +} + +func (r *RecordingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { r.once.Do(r.loadHead) ret, err := r.api.ChainGetRandomnessFromBeacon(ctx, r.head, pers, round, entropy) if err != nil { diff --git a/conformance/rand_replay.go b/conformance/rand_replay.go index 1b73e5a08af..faae1d090a7 100644 --- a/conformance/rand_replay.go +++ b/conformance/rand_replay.go @@ -43,7 +43,15 @@ func (r *ReplayingRand) match(requested schema.RandomnessRule) ([]byte, bool) { return nil, false } -func (r *ReplayingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (r *ReplayingRand) GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getChainRandomness(ctx, pers, round, entropy, false) +} + +func (r *ReplayingRand) GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getChainRandomness(ctx, pers, round, entropy, true) +} + +func (r *ReplayingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { rule := schema.RandomnessRule{ Kind: schema.RandomnessChain, DomainSeparationTag: int64(pers), @@ -57,10 +65,23 @@ func (r *ReplayingRand) GetChainRandomness(ctx context.Context, pers crypto.Doma } r.reporter.Logf("returning fallback chain randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy) - return r.fallback.GetChainRandomness(ctx, pers, round, entropy) + + if lookback { + return r.fallback.GetChainRandomnessLookingBack(ctx, pers, round, entropy) + } + + return r.fallback.GetChainRandomnessLookingForward(ctx, pers, round, entropy) } -func (r *ReplayingRand) GetBeaconRandomness(ctx 
context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (r *ReplayingRand) GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getBeaconRandomness(ctx, pers, round, entropy, false) +} + +func (r *ReplayingRand) GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return r.getBeaconRandomness(ctx, pers, round, entropy, true) +} + +func (r *ReplayingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { rule := schema.RandomnessRule{ Kind: schema.RandomnessBeacon, DomainSeparationTag: int64(pers), @@ -74,6 +95,10 @@ func (r *ReplayingRand) GetBeaconRandomness(ctx context.Context, pers crypto.Dom } r.reporter.Logf("returning fallback beacon randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy) - return r.fallback.GetBeaconRandomness(ctx, pers, round, entropy) + if lookback { + return r.fallback.GetBeaconRandomnessLookingBack(ctx, pers, round, entropy) + } + + return r.fallback.GetBeaconRandomnessLookingForward(ctx, pers, round, entropy) } diff --git a/conformance/runner.go b/conformance/runner.go index 6f9d73305a3..1044bb329e8 100644 --- a/conformance/runner.go +++ b/conformance/runner.go @@ -14,6 +14,8 @@ import ( "github.com/fatih/color" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/exitcode" + "github.com/hashicorp/go-multierror" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" @@ -24,13 +26,32 @@ import ( "github.com/filecoin-project/test-vectors/schema" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" 
"github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/lib/blockstore" ) +// FallbackBlockstoreGetter is a fallback blockstore to use for resolving CIDs +// unknown to the test vector. This is rarely used, usually only needed +// when transplanting vectors across versions. This is an interface tighter +// than ChainModuleAPI. It can be backed by a FullAPI client. +var FallbackBlockstoreGetter interface { + ChainReadObj(context.Context, cid.Cid) ([]byte, error) +} + +var TipsetVectorOpts struct { + // PipelineBaseFee pipelines the basefee in multi-tipset vectors from one + // tipset to another. Basefees in the vector are ignored, except for that of + // the first tipset. UNUSED. + PipelineBaseFee bool + + // OnTipsetApplied contains callback functions called after a tipset has been + // applied. + OnTipsetApplied []func(bs blockstore.Blockstore, params *ExecuteTipsetParams, res *ExecuteTipsetResult) +} + // ExecuteMessageVector executes a message-class test vector. -func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) { +func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) (diffs []string, err error) { var ( ctx = context.Background() baseEpoch = variant.Epoch @@ -38,7 +59,7 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema ) // Load the CAR into a new temporary Blockstore. - bs, err := LoadVectorCAR(vector.CAR) + bs, err := LoadBlockstore(vector.CAR) if err != nil { r.Fatalf("failed to load the vector CAR: %w", err) } @@ -79,14 +100,16 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema // Once all messages are applied, assert that the final state root matches // the expected postcondition root. 
if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual { - r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual) - dumpThreeWayStateDiff(r, vector, bs, root) - r.FailNow() + ierr := fmt.Errorf("wrong post root cid; expected %v, but got %v", expected, actual) + r.Errorf(ierr.Error()) + err = multierror.Append(err, ierr) + diffs = dumpThreeWayStateDiff(r, vector, bs, root) } + return diffs, err } // ExecuteTipsetVector executes a tipset-class test vector. -func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) { +func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) (diffs []string, err error) { var ( ctx = context.Background() baseEpoch = abi.ChainEpoch(variant.Epoch) @@ -95,9 +118,10 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema. ) // Load the vector CAR into a new temporary Blockstore. - bs, err := LoadVectorCAR(vector.CAR) + bs, err := LoadBlockstore(vector.CAR) if err != nil { r.Fatalf("failed to load the vector CAR: %w", err) + return nil, err } // Create a new Driver. @@ -109,9 +133,22 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema. for i, ts := range vector.ApplyTipsets { ts := ts // capture execEpoch := baseEpoch + abi.ChainEpoch(ts.EpochOffset) - ret, err := driver.ExecuteTipset(bs, tmpds, root, prevEpoch, &ts, execEpoch) + params := ExecuteTipsetParams{ + Preroot: root, + ParentEpoch: prevEpoch, + Tipset: &ts, + ExecEpoch: execEpoch, + Rand: NewReplayingRand(r, vector.Randomness), + } + ret, err := driver.ExecuteTipset(bs, tmpds, params) if err != nil { - r.Fatalf("failed to apply tipset %d message: %s", i, err) + r.Fatalf("failed to apply tipset %d: %s", i, err) + return nil, err + } + + // invoke callbacks. 
+ for _, cb := range TipsetVectorOpts.OnTipsetApplied { + cb(bs, ¶ms, ret) } for j, v := range ret.AppliedResults { @@ -121,7 +158,9 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema. // Compare the receipts root. if expected, actual := vector.Post.ReceiptsRoots[i], ret.ReceiptsRoot; expected != actual { - r.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual) + ierr := fmt.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual) + r.Errorf(ierr.Error()) + err = multierror.Append(err, ierr) } prevEpoch = execEpoch @@ -131,10 +170,12 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema. // Once all messages are applied, assert that the final state root matches // the expected postcondition root. if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual { - r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual) - dumpThreeWayStateDiff(r, vector, bs, root) - r.FailNow() + ierr := fmt.Errorf("wrong post root cid; expected %v, but got %v", expected, actual) + r.Errorf(ierr.Error()) + err = multierror.Append(err, ierr) + diffs = dumpThreeWayStateDiff(r, vector, bs, root) } + return diffs, err } // AssertMsgResult compares a message result. It takes the expected receipt @@ -154,7 +195,7 @@ func AssertMsgResult(r Reporter, expected *schema.Receipt, actual *vm.ApplyRet, } } -func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.Blockstore, actual cid.Cid) { +func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.Blockstore, actual cid.Cid) []string { // check if statediff exists; if not, skip. if err := exec.Command("statediff", "--help").Run(); err != nil { r.Log("could not dump 3-way state tree diff upon test failure: statediff command not found") @@ -163,7 +204,7 @@ func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore. 
r.Log("$ cd statediff") r.Log("$ go generate ./...") r.Log("$ go install ./cmd/statediff") - return + return nil } tmpCar, err := writeStateToTempCAR(bs, @@ -173,6 +214,7 @@ func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore. ) if err != nil { r.Fatalf("failed to write temporary state CAR: %s", err) + return nil } defer os.RemoveAll(tmpCar) //nolint:errcheck @@ -187,28 +229,43 @@ func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore. d3 = color.New(color.FgGreen, color.Bold).Sprint("[Δ3]") ) - printDiff := func(left, right cid.Cid) { + diff := func(left, right cid.Cid) string { cmd := exec.Command("statediff", "car", "--file", tmpCar, left.String(), right.String()) b, err := cmd.CombinedOutput() if err != nil { r.Fatalf("statediff failed: %s", err) } - r.Log(string(b)) + return string(b) } bold := color.New(color.Bold).SprintfFunc() + r.Log(bold("-----BEGIN STATEDIFF-----")) + // run state diffs. r.Log(bold("=== dumping 3-way diffs between %s, %s, %s ===", a, b, c)) r.Log(bold("--- %s left: %s; right: %s ---", d1, a, b)) - printDiff(vector.Post.StateTree.RootCID, actual) + diffA := diff(vector.Post.StateTree.RootCID, actual) + r.Log(bold("----------BEGIN STATEDIFF A----------")) + r.Log(diffA) + r.Log(bold("----------END STATEDIFF A----------")) r.Log(bold("--- %s left: %s; right: %s ---", d2, c, b)) - printDiff(vector.Pre.StateTree.RootCID, actual) + diffB := diff(vector.Pre.StateTree.RootCID, actual) + r.Log(bold("----------BEGIN STATEDIFF B----------")) + r.Log(diffB) + r.Log(bold("----------END STATEDIFF B----------")) r.Log(bold("--- %s left: %s; right: %s ---", d3, c, a)) - printDiff(vector.Pre.StateTree.RootCID, vector.Post.StateTree.RootCID) + diffC := diff(vector.Pre.StateTree.RootCID, vector.Post.StateTree.RootCID) + r.Log(bold("----------BEGIN STATEDIFF C----------")) + r.Log(diffC) + r.Log(bold("----------END STATEDIFF C----------")) + + r.Log(bold("-----END STATEDIFF-----")) + + return 
[]string{diffA, diffB, diffC} } // writeStateToTempCAR writes the provided roots to a temporary CAR that'll be @@ -248,8 +305,8 @@ func writeStateToTempCAR(bs blockstore.Blockstore, roots ...cid.Cid) (string, er return tmp.Name(), nil } -func LoadVectorCAR(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore, error) { - bs := blockstore.NewTemporary() +func LoadBlockstore(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore, error) { + bs := blockstore.Blockstore(blockstore.NewMemory()) // Read the base64-encoded CAR from the vector, and inflate the gzip. buf := bytes.NewReader(vectorCAR) @@ -264,5 +321,18 @@ func LoadVectorCAR(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore, if err != nil { return nil, fmt.Errorf("failed to load state tree car from test vector: %s", err) } + + if FallbackBlockstoreGetter != nil { + fbs := &blockstore.FallbackStore{Blockstore: bs} + fbs.SetFallback(func(ctx context.Context, c cid.Cid) (blocks.Block, error) { + b, err := FallbackBlockstoreGetter.ChainReadObj(ctx, c) + if err != nil { + return nil, err + } + return blocks.NewBlockWithCid(b, c) + }) + bs = fbs + } + return bs, nil } diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md new file mode 100644 index 00000000000..b488e8996fe --- /dev/null +++ b/documentation/en/api-v0-methods-miner.md @@ -0,0 +1,2302 @@ +# Groups +* [](#) + * [Closing](#Closing) + * [Discover](#Discover) + * [Session](#Session) + * [Shutdown](#Shutdown) + * [Version](#Version) +* [Actor](#Actor) + * [ActorAddress](#ActorAddress) + * [ActorAddressConfig](#ActorAddressConfig) + * [ActorSectorSize](#ActorSectorSize) +* [Auth](#Auth) + * [AuthNew](#AuthNew) + * [AuthVerify](#AuthVerify) +* [Check](#Check) + * [CheckProvable](#CheckProvable) +* [Compute](#Compute) + * [ComputeProof](#ComputeProof) +* [Create](#Create) + * [CreateBackup](#CreateBackup) +* [Deals](#Deals) + * 
[DealsConsiderOfflineRetrievalDeals](#DealsConsiderOfflineRetrievalDeals) + * [DealsConsiderOfflineStorageDeals](#DealsConsiderOfflineStorageDeals) + * [DealsConsiderOnlineRetrievalDeals](#DealsConsiderOnlineRetrievalDeals) + * [DealsConsiderOnlineStorageDeals](#DealsConsiderOnlineStorageDeals) + * [DealsConsiderUnverifiedStorageDeals](#DealsConsiderUnverifiedStorageDeals) + * [DealsConsiderVerifiedStorageDeals](#DealsConsiderVerifiedStorageDeals) + * [DealsImportData](#DealsImportData) + * [DealsList](#DealsList) + * [DealsPieceCidBlocklist](#DealsPieceCidBlocklist) + * [DealsSetConsiderOfflineRetrievalDeals](#DealsSetConsiderOfflineRetrievalDeals) + * [DealsSetConsiderOfflineStorageDeals](#DealsSetConsiderOfflineStorageDeals) + * [DealsSetConsiderOnlineRetrievalDeals](#DealsSetConsiderOnlineRetrievalDeals) + * [DealsSetConsiderOnlineStorageDeals](#DealsSetConsiderOnlineStorageDeals) + * [DealsSetConsiderUnverifiedStorageDeals](#DealsSetConsiderUnverifiedStorageDeals) + * [DealsSetConsiderVerifiedStorageDeals](#DealsSetConsiderVerifiedStorageDeals) + * [DealsSetPieceCidBlocklist](#DealsSetPieceCidBlocklist) +* [I](#I) + * [ID](#ID) +* [Log](#Log) + * [LogList](#LogList) + * [LogSetLevel](#LogSetLevel) +* [Market](#Market) + * [MarketCancelDataTransfer](#MarketCancelDataTransfer) + * [MarketDataTransferUpdates](#MarketDataTransferUpdates) + * [MarketGetAsk](#MarketGetAsk) + * [MarketGetDealUpdates](#MarketGetDealUpdates) + * [MarketGetRetrievalAsk](#MarketGetRetrievalAsk) + * [MarketImportDealData](#MarketImportDealData) + * [MarketListDataTransfers](#MarketListDataTransfers) + * [MarketListDeals](#MarketListDeals) + * [MarketListIncompleteDeals](#MarketListIncompleteDeals) + * [MarketListRetrievalDeals](#MarketListRetrievalDeals) + * [MarketPendingDeals](#MarketPendingDeals) + * [MarketPublishPendingDeals](#MarketPublishPendingDeals) + * [MarketRestartDataTransfer](#MarketRestartDataTransfer) + * [MarketSetAsk](#MarketSetAsk) + * 
[MarketSetRetrievalAsk](#MarketSetRetrievalAsk) +* [Mining](#Mining) + * [MiningBase](#MiningBase) +* [Net](#Net) + * [NetAddrsListen](#NetAddrsListen) + * [NetAgentVersion](#NetAgentVersion) + * [NetAutoNatStatus](#NetAutoNatStatus) + * [NetBandwidthStats](#NetBandwidthStats) + * [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer) + * [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol) + * [NetBlockAdd](#NetBlockAdd) + * [NetBlockList](#NetBlockList) + * [NetBlockRemove](#NetBlockRemove) + * [NetConnect](#NetConnect) + * [NetConnectedness](#NetConnectedness) + * [NetDisconnect](#NetDisconnect) + * [NetFindPeer](#NetFindPeer) + * [NetPeerInfo](#NetPeerInfo) + * [NetPeers](#NetPeers) + * [NetPubsubScores](#NetPubsubScores) +* [Pieces](#Pieces) + * [PiecesGetCIDInfo](#PiecesGetCIDInfo) + * [PiecesGetPieceInfo](#PiecesGetPieceInfo) + * [PiecesListCidInfos](#PiecesListCidInfos) + * [PiecesListPieces](#PiecesListPieces) +* [Pledge](#Pledge) + * [PledgeSector](#PledgeSector) +* [Return](#Return) + * [ReturnAddPiece](#ReturnAddPiece) + * [ReturnFetch](#ReturnFetch) + * [ReturnFinalizeSector](#ReturnFinalizeSector) + * [ReturnMoveStorage](#ReturnMoveStorage) + * [ReturnReadPiece](#ReturnReadPiece) + * [ReturnReleaseUnsealed](#ReturnReleaseUnsealed) + * [ReturnSealCommit1](#ReturnSealCommit1) + * [ReturnSealCommit2](#ReturnSealCommit2) + * [ReturnSealPreCommit1](#ReturnSealPreCommit1) + * [ReturnSealPreCommit2](#ReturnSealPreCommit2) + * [ReturnUnsealPiece](#ReturnUnsealPiece) +* [Sealing](#Sealing) + * [SealingAbort](#SealingAbort) + * [SealingSchedDiag](#SealingSchedDiag) +* [Sector](#Sector) + * [SectorAddPieceToAny](#SectorAddPieceToAny) + * [SectorCommitFlush](#SectorCommitFlush) + * [SectorCommitPending](#SectorCommitPending) + * [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration) + * [SectorGetSealDelay](#SectorGetSealDelay) + * [SectorMarkForUpgrade](#SectorMarkForUpgrade) + * [SectorPreCommitFlush](#SectorPreCommitFlush) + * 
[SectorPreCommitPending](#SectorPreCommitPending) + * [SectorRemove](#SectorRemove) + * [SectorSetExpectedSealDuration](#SectorSetExpectedSealDuration) + * [SectorSetSealDelay](#SectorSetSealDelay) + * [SectorStartSealing](#SectorStartSealing) + * [SectorTerminate](#SectorTerminate) + * [SectorTerminateFlush](#SectorTerminateFlush) + * [SectorTerminatePending](#SectorTerminatePending) +* [Sectors](#Sectors) + * [SectorsList](#SectorsList) + * [SectorsListInStates](#SectorsListInStates) + * [SectorsRefs](#SectorsRefs) + * [SectorsStatus](#SectorsStatus) + * [SectorsSummary](#SectorsSummary) + * [SectorsUnsealPiece](#SectorsUnsealPiece) + * [SectorsUpdate](#SectorsUpdate) +* [Storage](#Storage) + * [StorageAddLocal](#StorageAddLocal) + * [StorageAttach](#StorageAttach) + * [StorageBestAlloc](#StorageBestAlloc) + * [StorageDeclareSector](#StorageDeclareSector) + * [StorageDropSector](#StorageDropSector) + * [StorageFindSector](#StorageFindSector) + * [StorageInfo](#StorageInfo) + * [StorageList](#StorageList) + * [StorageLocal](#StorageLocal) + * [StorageLock](#StorageLock) + * [StorageReportHealth](#StorageReportHealth) + * [StorageStat](#StorageStat) + * [StorageTryLock](#StorageTryLock) +* [Worker](#Worker) + * [WorkerConnect](#WorkerConnect) + * [WorkerJobs](#WorkerJobs) + * [WorkerStats](#WorkerStats) +## + + +### Closing + + +Perms: read + +Inputs: `null` + +Response: `{}` + +### Discover + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "info": { + "title": "Lotus RPC API", + "version": "1.2.1/generated=2020-11-22T08:22:42-06:00" + }, + "methods": [], + "openrpc": "1.2.6" +} +``` + +### Session + + +Perms: read + +Inputs: `null` + +Response: `"07070707-0707-0707-0707-070707070707"` + +### Shutdown + + +Perms: admin + +Inputs: `null` + +Response: `{}` + +### Version + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Version": "string value", + "APIVersion": 131328, + "BlockDelay": 42 +} +``` + +## Actor + + +### ActorAddress + + +Perms: 
read + +Inputs: `null` + +Response: `"f01234"` + +### ActorAddressConfig + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "PreCommitControl": null, + "CommitControl": null, + "TerminateControl": null, + "DealPublishControl": null, + "DisableOwnerFallback": true, + "DisableWorkerFallback": true +} +``` + +### ActorSectorSize + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `34359738368` + +## Auth + + +### AuthNew + + +Perms: admin + +Inputs: +```json +[ + null +] +``` + +Response: `"Ynl0ZSBhcnJheQ=="` + +### AuthVerify + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: `null` + +## Check + + +### CheckProvable + + +Perms: admin + +Inputs: +```json +[ + 8, + null, + true +] +``` + +Response: +```json +{ + "123": "can't acquire read lock" +} +``` + +## Compute + + +### ComputeProof + + +Perms: read + +Inputs: +```json +[ + null, + null +] +``` + +Response: `null` + +## Create + + +### CreateBackup +CreateBackup creates node backup under the specified file name.
The +method requires that the lotus-miner is running with the +LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that +the path specified when calling CreateBackup is within the base path + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +## Deals + + +### DealsConsiderOfflineRetrievalDeals + + +Perms: admin + +Inputs: `null` + +Response: `true` + +### DealsConsiderOfflineStorageDeals + + +Perms: admin + +Inputs: `null` + +Response: `true` + +### DealsConsiderOnlineRetrievalDeals + + +Perms: admin + +Inputs: `null` + +Response: `true` + +### DealsConsiderOnlineStorageDeals + + +Perms: admin + +Inputs: `null` + +Response: `true` + +### DealsConsiderUnverifiedStorageDeals + + +Perms: admin + +Inputs: `null` + +Response: `true` + +### DealsConsiderVerifiedStorageDeals + + +Perms: admin + +Inputs: `null` + +Response: `true` + +### DealsImportData + + +Perms: admin + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "string value" +] +``` + +Response: `{}` + +### DealsList + + +Perms: admin + +Inputs: `null` + +Response: `null` + +### DealsPieceCidBlocklist + + +Perms: admin + +Inputs: `null` + +Response: `null` + +### DealsSetConsiderOfflineRetrievalDeals + + +Perms: admin + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +### DealsSetConsiderOfflineStorageDeals + + +Perms: admin + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +### DealsSetConsiderOnlineRetrievalDeals + + +Perms: admin + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +### DealsSetConsiderOnlineStorageDeals + + +Perms: admin + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +### DealsSetConsiderUnverifiedStorageDeals + + +Perms: admin + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +### DealsSetConsiderVerifiedStorageDeals + + +Perms: admin + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +### DealsSetPieceCidBlocklist + + +Perms: admin + 
+Inputs: +```json +[ + null +] +``` + +Response: `{}` + +## I + + +### ID + + +Perms: read + +Inputs: `null` + +Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"` + +## Log + + +### LogList + + +Perms: write + +Inputs: `null` + +Response: `null` + +### LogSetLevel + + +Perms: write + +Inputs: +```json +[ + "string value", + "string value" +] +``` + +Response: `{}` + +## Market + + +### MarketCancelDataTransfer +MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer + + +Perms: write + +Inputs: +```json +[ + 3, + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + true +] +``` + +Response: `{}` + +### MarketDataTransferUpdates + + +Perms: write + +Inputs: `null` + +Response: +```json +{ + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": null + } +} +``` + +### MarketGetAsk + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Ask": { + "Price": "0", + "VerifiedPrice": "0", + "MinPieceSize": 1032, + "MaxPieceSize": 1032, + "Miner": "f01234", + "Timestamp": 10101, + "Expiry": 10101, + "SeqNo": 42 + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } +} +``` + +### MarketGetDealUpdates + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "ClientSignature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ProposalCid": { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "AddFundsCid": null, + "PublishCid": null, + "Miner": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Client": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "State": 42, + "PiecePath": ".lotusminer/fstmp123", + "MetadataPath": ".lotusminer/fstmp123", + "SlashEpoch": 10101, + "FastRetrieval": true, + "Message": "string value", + "StoreID": 12, + "FundsReserved": "0", + "Ref": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "AvailableForRetrieval": true, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "TransferChannelId": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "SectorNumber": 9 +} +``` + +### MarketGetRetrievalAsk + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "PricePerByte": "0", + "UnsealPrice": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42 +} +``` + +### MarketImportDealData + + +Perms: write + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "string value" +] +``` + +Response: `{}` + +### MarketListDataTransfers + + +Perms: write + +Inputs: `null` + +Response: `null` + +### MarketListDeals + + +Perms: read + +Inputs: `null` + +Response: `null` + +### MarketListIncompleteDeals + + +Perms: read + +Inputs: `null` + +Response: `null` + +### MarketListRetrievalDeals + + +Perms: read + +Inputs: `null` + +Response: `null` + +### MarketPendingDeals + + +Perms: write + +Inputs: `null` + +Response: +```json +{ + "Deals": null, + "PublishPeriodStart": "0001-01-01T00:00:00Z", + "PublishPeriod": 60000000000 +} +``` + +### MarketPublishPendingDeals + + +Perms: admin + +Inputs: `null` + +Response: `{}` + +### 
MarketRestartDataTransfer +MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer + + +Perms: write + +Inputs: +```json +[ + 3, + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + true +] +``` + +Response: `{}` + +### MarketSetAsk + + +Perms: admin + +Inputs: +```json +[ + "0", + "0", + 10101, + 1032, + 1032 +] +``` + +Response: `{}` + +### MarketSetRetrievalAsk + + +Perms: admin + +Inputs: +```json +[ + { + "PricePerByte": "0", + "UnsealPrice": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42 + } +] +``` + +Response: `{}` + +## Mining + + +### MiningBase + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +## Net + + +### NetAddrsListen + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] +} +``` + +### NetAgentVersion + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `"string value"` + +### NetAutoNatStatus + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Reachability": 1, + "PublicAddr": "string value" +} +``` + +### NetBandwidthStats + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "TotalIn": 9, + "TotalOut": 9, + "RateIn": 12.3, + "RateOut": 12.3 +} +``` + +### NetBandwidthStatsByPeer + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "12D3KooWSXmXLJmBR1M7i9RW9GQPNUhZSzXKzxDHWtAgNuJAbyEJ": { + "TotalIn": 174000, + "TotalOut": 12500, + "RateIn": 100, + "RateOut": 50 + } +} +``` + +### NetBandwidthStatsByProtocol + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "/fil/hello/1.0.0": { + "TotalIn": 174000, + "TotalOut": 12500, + "RateIn": 100, + "RateOut": 50 + } +} +``` + +### NetBlockAdd + + +Perms: admin + +Inputs: +```json +[ + { + "Peers": null, + "IPAddrs": null, + "IPSubnets": null + } +] +``` + +Response: `{}` + +### 
NetBlockList + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Peers": null, + "IPAddrs": null, + "IPSubnets": null +} +``` + +### NetBlockRemove + + +Perms: admin + +Inputs: +```json +[ + { + "Peers": null, + "IPAddrs": null, + "IPSubnets": null + } +] +``` + +Response: `{}` + +### NetConnect + + +Perms: write + +Inputs: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] + } +] +``` + +Response: `{}` + +### NetConnectedness + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `1` + +### NetDisconnect + + +Perms: write + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `{}` + +### NetFindPeer + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] +} +``` + +### NetPeerInfo + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Agent": "string value", + "Addrs": null, + "Protocols": null, + "ConnMgrMeta": { + "FirstSeen": "0001-01-01T00:00:00Z", + "Value": 123, + "Tags": { + "name": 42 + }, + "Conns": { + "name": "2021-03-08T22:52:18Z" + } + } +} +``` + +### NetPeers + + +Perms: read + +Inputs: `null` + +Response: `null` + +### NetPubsubScores + + +Perms: read + +Inputs: `null` + +Response: `null` + +## Pieces + + +### PiecesGetCIDInfo + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceBlockLocations": null +} +``` + +### PiecesGetPieceInfo + + +Perms: read + +Inputs: +```json +[ + { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Deals": null +} +``` + +### PiecesListCidInfos + + +Perms: read + +Inputs: `null` + +Response: `null` + +### PiecesListPieces + + +Perms: read + +Inputs: `null` + +Response: `null` + +## Pledge + + +### PledgeSector +Temp api for testing + + +Perms: write + +Inputs: `null` + +Response: +```json +{ + "Miner": 1000, + "Number": 9 +} +``` + +## Return + + +### ReturnAddPiece +storiface.WorkerReturn + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Size": 1032, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnFetch + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnFinalizeSector + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnMoveStorage + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnReadPiece + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + true, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnReleaseUnsealed + + +Perms: admin + +Inputs: +```json +[ + 
{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnSealCommit1 + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + null, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnSealCommit2 + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + null, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnSealPreCommit1 + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + null, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnSealPreCommit2 + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Unsealed": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Sealed": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnUnsealPiece + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +## Sealing + + +### SealingAbort + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + } +] +``` + +Response: `{}` + +### SealingSchedDiag +SealingSchedDiag dumps internal sealing scheduler state + + +Perms: admin + +Inputs: +```json +[ + true +] +``` + +Response: `{}` 
+ +## Sector + + +### SectorAddPieceToAny +Add piece to an open sector. If no sectors with enough space are open, +either a new sector will be created, or this call will block until more +sectors can be created. + + +Perms: admin + +Inputs: +```json +[ + 1024, + {}, + { + "PublishCid": null, + "DealID": 5432, + "DealProposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "DealSchedule": { + "StartEpoch": 10101, + "EndEpoch": 10101 + }, + "KeepUnsealed": true + } +] +``` + +Response: +```json +{ + "Sector": 9, + "Offset": 1032 +} +``` + +### SectorCommitFlush +SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit. +Returns null if message wasn't sent + + +Perms: admin + +Inputs: `null` + +Response: `null` + +### SectorCommitPending +SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message + + +Perms: admin + +Inputs: `null` + +Response: `null` + +### SectorGetExpectedSealDuration +SectorGetExpectedSealDuration gets the expected time for a sector to seal + + +Perms: read + +Inputs: `null` + +Response: `60000000000` + +### SectorGetSealDelay +SectorGetSealDelay gets the time that a newly-created sector +waits for more deals before it starts sealing + + +Perms: read + +Inputs: `null` + +Response: `60000000000` + +### SectorMarkForUpgrade + + +Perms: admin + +Inputs: +```json +[ + 9 +] +``` + +Response: `{}` + +### SectorPreCommitFlush +SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit. 
+Returns null if message wasn't sent + + +Perms: admin + +Inputs: `null` + +Response: `null` + +### SectorPreCommitPending +SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message + + +Perms: admin + +Inputs: `null` + +Response: `null` + +### SectorRemove +SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can +be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties. + + +Perms: admin + +Inputs: +```json +[ + 9 +] +``` + +Response: `{}` + +### SectorSetExpectedSealDuration +SectorSetExpectedSealDuration sets the expected time for a sector to seal + + +Perms: write + +Inputs: +```json +[ + 60000000000 +] +``` + +Response: `{}` + +### SectorSetSealDelay +SectorSetSealDelay sets the time that a newly-created sector +waits for more deals before it starts sealing + + +Perms: write + +Inputs: +```json +[ + 60000000000 +] +``` + +Response: `{}` + +### SectorStartSealing +SectorStartSealing can be called on sectors in Empty or WaitDeals states +to trigger sealing early + + +Perms: write + +Inputs: +```json +[ + 9 +] +``` + +Response: `{}` + +### SectorTerminate +SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then +automatically removes it from storage + + +Perms: admin + +Inputs: +```json +[ + 9 +] +``` + +Response: `{}` + +### SectorTerminateFlush +SectorTerminateFlush immediately sends a terminate message with sectors batched for termination. 
+Returns null if message wasn't sent + + +Perms: admin + +Inputs: `null` + +Response: `null` + +### SectorTerminatePending +SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message + + +Perms: admin + +Inputs: `null` + +Response: `null` + +## Sectors + + +### SectorsList +List all staged sectors + + +Perms: read + +Inputs: `null` + +Response: +```json +[ + 123, + 124 +] +``` + +### SectorsListInStates +List sectors in particular states + + +Perms: read + +Inputs: +```json +[ + null +] +``` + +Response: +```json +[ + 123, + 124 +] +``` + +### SectorsRefs + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "98000": [ + { + "SectorID": 100, + "Offset": 10485760, + "Size": 1048576 + } + ] +} +``` + +### SectorsStatus +Get the status of a given sector by ID + + +Perms: read + +Inputs: +```json +[ + 9, + true +] +``` + +Response: +```json +{ + "SectorID": 9, + "State": "Proving", + "CommD": null, + "CommR": null, + "Proof": "Ynl0ZSBhcnJheQ==", + "Deals": null, + "Ticket": { + "Value": null, + "Epoch": 10101 + }, + "Seed": { + "Value": null, + "Epoch": 10101 + }, + "PreCommitMsg": null, + "CommitMsg": null, + "Retries": 42, + "ToUpgrade": true, + "LastErr": "string value", + "Log": null, + "SealProof": 8, + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "OnTime": 10101, + "Early": 10101 +} +``` + +### SectorsSummary +Get summary info of sectors + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Proving": 120 +} +``` + +### SectorsUnsealPiece + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 1040384, + 1024, + null, + null +] +``` + +Response: `{}` + +### SectorsUpdate + + +Perms: admin + +Inputs: +```json +[ + 9, + "Proving" +] +``` + +Response: `{}` + +## Storage + + +### StorageAddLocal + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: 
`{}` + +### StorageAttach +stores.SectorIndex + + +Perms: admin + +Inputs: +```json +[ + { + "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + "URLs": null, + "Weight": 42, + "MaxStorage": 42, + "CanSeal": true, + "CanStore": true + }, + { + "Capacity": 9, + "Available": 9, + "FSAvailable": 9, + "Reserved": 9, + "Max": 9, + "Used": 9 + } +] +``` + +Response: `{}` + +### StorageBestAlloc + + +Perms: admin + +Inputs: +```json +[ + 1, + 34359738368, + "sealing" +] +``` + +Response: `null` + +### StorageDeclareSector + + +Perms: admin + +Inputs: +```json +[ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + { + "Miner": 1000, + "Number": 9 + }, + 1, + true +] +``` + +Response: `{}` + +### StorageDropSector + + +Perms: admin + +Inputs: +```json +[ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + { + "Miner": 1000, + "Number": 9 + }, + 1 +] +``` + +Response: `{}` + +### StorageFindSector + + +Perms: admin + +Inputs: +```json +[ + { + "Miner": 1000, + "Number": 9 + }, + 1, + 34359738368, + true +] +``` + +Response: `null` + +### StorageInfo + + +Perms: admin + +Inputs: +```json +[ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8" +] +``` + +Response: +```json +{ + "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + "URLs": null, + "Weight": 42, + "MaxStorage": 42, + "CanSeal": true, + "CanStore": true +} +``` + +### StorageList + + +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": [ + { + "Miner": 1000, + "Number": 100, + "SectorFileType": 2 + } + ] +} +``` + +### StorageLocal + + +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path" +} +``` + +### StorageLock + + +Perms: admin + +Inputs: +```json +[ + { + "Miner": 1000, + "Number": 9 + }, + 1, + 1 +] +``` + +Response: `{}` + +### StorageReportHealth + + +Perms: admin + +Inputs: +```json +[ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + { + "Stat": { + "Capacity": 9, + "Available": 9, + "FSAvailable": 9, + "Reserved": 9, + "Max": 9, + "Used": 9 + 
}, + "Err": "string value" + } +] +``` + +Response: `{}` + +### StorageStat + + +Perms: admin + +Inputs: +```json +[ + "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8" +] +``` + +Response: +```json +{ + "Capacity": 9, + "Available": 9, + "FSAvailable": 9, + "Reserved": 9, + "Max": 9, + "Used": 9 +} +``` + +### StorageTryLock + + +Perms: admin + +Inputs: +```json +[ + { + "Miner": 1000, + "Number": 9 + }, + 1, + 1 +] +``` + +Response: `true` + +## Worker + + +### WorkerConnect +WorkerConnect tells the node to connect to workers RPC + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +### WorkerJobs + + +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "ef8d99a2-6865-4189-8ffa-9fef0f806eee": [ + { + "ID": { + "Sector": { + "Miner": 1000, + "Number": 100 + }, + "ID": "76081ba0-61bd-45a5-bc08-af05f1c26e5d" + }, + "Sector": { + "Miner": 1000, + "Number": 100 + }, + "Task": "seal/v0/precommit/2", + "RunWait": 0, + "Start": "2020-11-12T09:22:07Z", + "Hostname": "host" + } + ] +} +``` + +### WorkerStats + + +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "ef8d99a2-6865-4189-8ffa-9fef0f806eee": { + "Info": { + "Hostname": "host", + "IgnoreResources": false, + "Resources": { + "MemPhysical": 274877906944, + "MemSwap": 128849018880, + "MemReserved": 2147483648, + "CPUs": 64, + "GPUs": [ + "aGPU 1337" + ] + } + }, + "Enabled": true, + "MemUsedMin": 0, + "MemUsedMax": 0, + "GpuUsed": false, + "CpuUse": 0 + } +} +``` + diff --git a/documentation/en/api-v0-methods-worker.md b/documentation/en/api-v0-methods-worker.md new file mode 100644 index 00000000000..c620113f489 --- /dev/null +++ b/documentation/en/api-v0-methods-worker.md @@ -0,0 +1,564 @@ +# Groups +* [](#) + * [Enabled](#Enabled) + * [Fetch](#Fetch) + * [Info](#Info) + * [Paths](#Paths) + * [Remove](#Remove) + * [Session](#Session) + * [Version](#Version) +* [Add](#Add) + * [AddPiece](#AddPiece) +* [Finalize](#Finalize) + * [FinalizeSector](#FinalizeSector) +* [Move](#Move) + * 
[MoveStorage](#MoveStorage) +* [Process](#Process) + * [ProcessSession](#ProcessSession) +* [Release](#Release) + * [ReleaseUnsealed](#ReleaseUnsealed) +* [Seal](#Seal) + * [SealCommit1](#SealCommit1) + * [SealCommit2](#SealCommit2) + * [SealPreCommit1](#SealPreCommit1) + * [SealPreCommit2](#SealPreCommit2) +* [Set](#Set) + * [SetEnabled](#SetEnabled) +* [Storage](#Storage) + * [StorageAddLocal](#StorageAddLocal) +* [Task](#Task) + * [TaskDisable](#TaskDisable) + * [TaskEnable](#TaskEnable) + * [TaskTypes](#TaskTypes) +* [Unseal](#Unseal) + * [UnsealPiece](#UnsealPiece) +* [Wait](#Wait) + * [WaitQuiet](#WaitQuiet) +## + + +### Enabled + + +Perms: admin + +Inputs: `null` + +Response: `true` + +### Fetch + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 1, + "sealing", + "move" +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +### Info + + +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "Hostname": "string value", + "IgnoreResources": true, + "Resources": { + "MemPhysical": 42, + "MemSwap": 42, + "MemReserved": 42, + "CPUs": 42, + "GPUs": null + } +} +``` + +### Paths + + +Perms: admin + +Inputs: `null` + +Response: `null` + +### Remove +Storage / Other + + +Perms: admin + +Inputs: +```json +[ + { + "Miner": 1000, + "Number": 9 + } +] +``` + +Response: `{}` + +### Session +Like ProcessSession, but returns an error when worker is disabled + + +Perms: admin + +Inputs: `null` + +Response: `"07070707-0707-0707-0707-070707070707"` + +### Version + + +Perms: admin + +Inputs: `null` + +Response: `131328` + +## Add + + +### AddPiece +storiface.WorkerCalls + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null, + 1024, + {} +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": 
"07070707-0707-0707-0707-070707070707" +} +``` + +## Finalize + + +### FinalizeSector + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Move + + +### MoveStorage + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 1 +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Process + + +### ProcessSession +returns a random UUID of worker session, generated randomly when worker +process starts + + +Perms: admin + +Inputs: `null` + +Response: `"07070707-0707-0707-0707-070707070707"` + +## Release + + +### ReleaseUnsealed + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Seal + + +### SealCommit1 + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null, + null, + null, + { + "Unsealed": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Sealed": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +### SealCommit2 + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +### SealPreCommit1 + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + 
"Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null, + null +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +### SealPreCommit2 + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Set + + +### SetEnabled +SetEnabled marks the worker as enabled/disabled. Note that this setting +may take a few seconds to propagate to task scheduler + + +Perms: admin + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +## Storage + + +### StorageAddLocal + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +## Task + + +### TaskDisable + + +Perms: admin + +Inputs: +```json +[ + "seal/v0/commit/2" +] +``` + +Response: `{}` + +### TaskEnable + + +Perms: admin + +Inputs: +```json +[ + "seal/v0/commit/2" +] +``` + +Response: `{}` + +### TaskTypes +TaskType -> Weight + + +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "seal/v0/precommit/2": {} +} +``` + +## Unseal + + +### UnsealPiece + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 1040384, + 1024, + null, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Wait + + +### WaitQuiet +WaitQuiet blocks until there are no tasks running + + +Perms: admin + +Inputs: `null` + +Response: `{}` + diff --git a/documentation/en/api-methods.md b/documentation/en/api-v0-methods.md similarity index 84% rename from documentation/en/api-methods.md rename to documentation/en/api-v0-methods.md index 4e18d85c919..4466cde8c88 100644 --- a/documentation/en/api-methods.md 
+++ b/documentation/en/api-v0-methods.md @@ -1,6 +1,7 @@ # Groups * [](#) * [Closing](#Closing) + * [Discover](#Discover) * [Session](#Session) * [Shutdown](#Shutdown) * [Version](#Version) @@ -16,6 +17,7 @@ * [ChainGetBlockMessages](#ChainGetBlockMessages) * [ChainGetGenesis](#ChainGetGenesis) * [ChainGetMessage](#ChainGetMessage) + * [ChainGetMessagesInTipset](#ChainGetMessagesInTipset) * [ChainGetNode](#ChainGetNode) * [ChainGetParentMessages](#ChainGetParentMessages) * [ChainGetParentReceipts](#ChainGetParentReceipts) @@ -34,6 +36,7 @@ * [Client](#Client) * [ClientCalcCommP](#ClientCalcCommP) * [ClientCancelDataTransfer](#ClientCancelDataTransfer) + * [ClientCancelRetrievalDeal](#ClientCancelRetrievalDeal) * [ClientDataTransferUpdates](#ClientDataTransferUpdates) * [ClientDealPieceCID](#ClientDealPieceCID) * [ClientDealSize](#ClientDealSize) @@ -42,11 +45,13 @@ * [ClientGetDealInfo](#ClientGetDealInfo) * [ClientGetDealStatus](#ClientGetDealStatus) * [ClientGetDealUpdates](#ClientGetDealUpdates) + * [ClientGetRetrievalUpdates](#ClientGetRetrievalUpdates) * [ClientHasLocal](#ClientHasLocal) * [ClientImport](#ClientImport) * [ClientListDataTransfers](#ClientListDataTransfers) * [ClientListDeals](#ClientListDeals) * [ClientListImports](#ClientListImports) + * [ClientListRetrievals](#ClientListRetrievals) * [ClientMinerQueryOffer](#ClientMinerQueryOffer) * [ClientQueryAsk](#ClientQueryAsk) * [ClientRemoveImport](#ClientRemoveImport) @@ -55,6 +60,7 @@ * [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds) * [ClientRetrieveWithEvents](#ClientRetrieveWithEvents) * [ClientStartDeal](#ClientStartDeal) + * [ClientStatelessDeal](#ClientStatelessDeal) * [Create](#Create) * [CreateBackup](#CreateBackup) * [Gas](#Gas) @@ -68,7 +74,11 @@ * [LogList](#LogList) * [LogSetLevel](#LogSetLevel) * [Market](#Market) - * [MarketEnsureAvailable](#MarketEnsureAvailable) + * [MarketAddBalance](#MarketAddBalance) + * 
[MarketGetReserved](#MarketGetReserved) + * [MarketReleaseFunds](#MarketReleaseFunds) + * [MarketReserveFunds](#MarketReserveFunds) + * [MarketWithdraw](#MarketWithdraw) * [Miner](#Miner) * [MinerCreateBlock](#MinerCreateBlock) * [MinerGetBaseInfo](#MinerGetBaseInfo) @@ -95,6 +105,7 @@ * [MsigCancel](#MsigCancel) * [MsigCreate](#MsigCreate) * [MsigGetAvailableBalance](#MsigGetAvailableBalance) + * [MsigGetPending](#MsigGetPending) * [MsigGetVested](#MsigGetVested) * [MsigGetVestingSchedule](#MsigGetVestingSchedule) * [MsigPropose](#MsigPropose) @@ -109,10 +120,14 @@ * [NetBandwidthStats](#NetBandwidthStats) * [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer) * [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol) + * [NetBlockAdd](#NetBlockAdd) + * [NetBlockList](#NetBlockList) + * [NetBlockRemove](#NetBlockRemove) * [NetConnect](#NetConnect) * [NetConnectedness](#NetConnectedness) * [NetDisconnect](#NetDisconnect) * [NetFindPeer](#NetFindPeer) + * [NetPeerInfo](#NetPeerInfo) * [NetPeers](#NetPeers) * [NetPubsubScores](#NetPubsubScores) * [Paych](#Paych) @@ -140,6 +155,7 @@ * [StateCirculatingSupply](#StateCirculatingSupply) * [StateCompute](#StateCompute) * [StateDealProviderCollateralBounds](#StateDealProviderCollateralBounds) + * [StateDecodeParams](#StateDecodeParams) * [StateGetActor](#StateGetActor) * [StateGetReceipt](#StateGetReceipt) * [StateListActors](#StateListActors) @@ -169,6 +185,7 @@ * [StateReadState](#StateReadState) * [StateReplay](#StateReplay) * [StateSearchMsg](#StateSearchMsg) + * [StateSearchMsgLimited](#StateSearchMsgLimited) * [StateSectorExpiration](#StateSectorExpiration) * [StateSectorGetInfo](#StateSectorGetInfo) * [StateSectorPartition](#StateSectorPartition) @@ -215,6 +232,25 @@ Inputs: `null` Response: `{}` +### Discover + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "info": { + "title": "Lotus RPC API", + "version": "1.2.1/generated=2020-11-22T08:22:42-06:00" + }, + "methods": [], + "openrpc": "1.2.6" +} +``` 
+ ### Session @@ -244,7 +280,7 @@ Response: ```json { "Version": "string value", - "APIVersion": 4352, + "APIVersion": 131328, "BlockDelay": 42 } ``` @@ -414,6 +450,17 @@ Response: ### ChainGetBlockMessages ChainGetBlockMessages returns messages stored in the specified block. +Note: If there are multiple blocks in a tipset, it's likely that some +messages will be duplicated. It's also possible for blocks in a tipset to have +different messages from the same sender at the same nonce. When that happens, +only the first message (in a block with lowest ticket) will be considered +for execution + +NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK + +DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET +Use ChainGetParentMessages, which will perform correct message deduplication + Perms: read @@ -487,8 +534,30 @@ Response: } ``` +### ChainGetMessagesInTipset +ChainGetMessagesInTipset returns the messages stored in the current tipset + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + ### ChainGetNode -There are not yet any comments for this method. + Perms: read @@ -529,7 +598,8 @@ Response: `null` ### ChainGetParentReceipts ChainGetParentReceipts returns receipts for messages in parent tipset of -the specified block. +the specified block. The receipts in the returned list are one-to-one with the +messages returned by a call to ChainGetParentMessages with the same blockCid. 
Perms: read @@ -842,7 +912,7 @@ retrieval markets as a client ClientCalcCommP calculates the CommP for a specified file -Perms: read +Perms: write Inputs: ```json @@ -878,8 +948,23 @@ Inputs: Response: `{}` +### ClientCancelRetrievalDeal +ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID + + +Perms: write + +Inputs: +```json +[ + 5 +] +``` + +Response: `{}` + ### ClientDataTransferUpdates -There are not yet any comments for this method. + Perms: write @@ -898,7 +983,10 @@ Response: "Voucher": "string value", "Message": "string value", "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42 + "Transferred": 42, + "Stages": { + "Stages": null + } } ``` @@ -1011,6 +1099,9 @@ Response: }, "State": 42, "Message": "string value", + "DealStages": { + "Stages": null + }, "Provider": "f01234", "DataRef": { "TransferType": "string value", @@ -1018,7 +1109,8 @@ Response: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "PieceCid": null, - "PieceSize": 1024 + "PieceSize": 1024, + "RawBlockSize": 42 }, "PieceCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -1028,7 +1120,28 @@ Response: "Duration": 42, "DealID": 5432, "CreationTime": "0001-01-01T00:00:00Z", - "Verified": true + "Verified": true, + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": null + } + } } ``` @@ -1051,7 +1164,7 @@ Response: `"string value"` ClientGetDealUpdates returns the status of updated deals -Perms: 
read +Perms: write Inputs: `null` @@ -1063,6 +1176,9 @@ Response: }, "State": 42, "Message": "string value", + "DealStages": { + "Stages": null + }, "Provider": "f01234", "DataRef": { "TransferType": "string value", @@ -1070,7 +1186,8 @@ Response: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "PieceCid": null, - "PieceSize": 1024 + "PieceSize": 1024, + "RawBlockSize": 42 }, "PieceCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -1080,7 +1197,76 @@ Response: "Duration": 42, "DealID": 5432, "CreationTime": "0001-01-01T00:00:00Z", - "Verified": true + "Verified": true, + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": null + } + } +} +``` + +### ClientGetRetrievalUpdates +ClientGetRetrievalUpdates returns status of updated retrieval deals + + +Perms: write + +Inputs: `null` + +Response: +```json +{ + "PayloadCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ID": 5, + "PieceCID": null, + "PricePerByte": "0", + "UnsealPrice": "0", + "Status": 0, + "Message": "string value", + "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "BytesReceived": 42, + "BytesPaidFor": 42, + "TotalPaid": "0", + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": null + } + } } ``` @@ -1151,6 +1337,17 @@ Response: `null` ClientListImports lists imported files and their root CIDs +Perms: write + +Inputs: `null` + +Response: `null` + +### ClientListRetrievals +ClientListRetrievals returns information about retrievals made by the local client + + Perms: write Inputs: `null` @@ -1197,7 +1394,6 @@ Response: ``` ### ClientQueryAsk ClientQueryAsk returns a signed StorageAsk from the specified miner. Perms: read @@ -1271,6 +1467,7 @@ Inputs: }, "Piece": null, "Size": 42, + "LocalStore": 12, "Total": "0", "UnsealPrice": "0", "PaymentInterval": 42, @@ -1324,6 +1521,7 @@ Inputs: }, "Piece": null, "Size": 42, + "LocalStore": 12, "Total": "0", "UnsealPrice": "0", "PaymentInterval": 42, @@ -1370,7 +1568,41 @@ Inputs: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "PieceCid": null, - "PieceSize": 1024 + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "Wallet": "f01234", + "Miner": "f01234", + "EpochPrice": "0", + "MinBlocksDuration": 42, + "ProviderCollateral": "0", + "DealStartEpoch": 10101, + "FastRetrieval": true, + "VerifiedDeal": true + } +] +``` + +Response: `null` + +### ClientStatelessDeal +ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. 
+ + +Perms: write + +Inputs: +```json +[ + { + "Data": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 }, "Wallet": "f01234", "Miner": "f01234", @@ -1611,8 +1843,83 @@ Response: `{}` ## Market -### MarketEnsureAvailable -MarketFreeBalance +### MarketAddBalance +MarketAddBalance adds funds to the market actor + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MarketGetReserved +MarketGetReserved gets the amount of funds that are currently reserved for the address + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `"0"` + +### MarketReleaseFunds +MarketReleaseFunds releases funds reserved by MarketReserveFunds + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "0" +] +``` + +Response: `{}` + +### MarketReserveFunds +MarketReserveFunds reserves funds for a deal + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MarketWithdraw +MarketWithdraw withdraws unlocked funds from the market actor Perms: sign @@ -1637,7 +1944,7 @@ Response: ### MinerCreateBlock -There are not yet any comments for this method. + Perms: write @@ -2053,7 +2360,7 @@ Response: `null` MpoolSetConfig sets the mpool config to (a copy of) the supplied config -Perms: write +Perms: admin Inputs: ```json @@ -2072,7 +2379,7 @@ Inputs: Response: `{}` ### MpoolSub -There are not yet any comments for this method. + Perms: read @@ -2221,7 +2528,7 @@ using both transaction ID and a hash of the parameters used in the proposal. This method of approval can be used to ensure you only approve exactly the transaction you think you are. 
It takes the following params: , , , , , -, , +, , Perms: sign @@ -2325,6 +2632,31 @@ Inputs: Response: `"0"` +### MsigGetPending +MsigGetPending returns pending transactions for the given multisig +wallet. Once pending transactions are fully approved, they will no longer +appear here. + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + ### MsigGetVested MsigGetVested returns the amount of FIL that vested in a multisig in a certain period. It takes the following params: , , @@ -2532,8 +2864,8 @@ Inputs: `null` Response: ```json { - "Addrs": null, - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] } ``` @@ -2621,6 +2953,58 @@ Response: } ``` +### NetBlockAdd + + +Perms: admin + +Inputs: +```json +[ + { + "Peers": null, + "IPAddrs": null, + "IPSubnets": null + } +] +``` + +Response: `{}` + +### NetBlockList + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Peers": null, + "IPAddrs": null, + "IPSubnets": null +} +``` + +### NetBlockRemove + + +Perms: admin + +Inputs: +```json +[ + { + "Peers": null, + "IPAddrs": null, + "IPSubnets": null + } +] +``` + +Response: `{}` + ### NetConnect @@ -2630,8 +3014,8 @@ Inputs: ```json [ { - "Addrs": null, - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] } ] ``` @@ -2681,8 +3065,40 @@ Inputs: Response: ```json { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] +} +``` + +### NetPeerInfo + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Agent": "string value", "Addrs": null, 
- "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + "Protocols": null, + "ConnMgrMeta": { + "FirstSeen": "0001-01-01T00:00:00Z", + "Value": 123, + "Tags": { + "name": 42 + }, + "Conns": { + "name": "2021-03-08T22:52:18Z" + } + } } ``` @@ -2709,7 +3125,7 @@ The Paych methods are for interacting with and managing payment channels ### PaychAllocateLane -There are not yet any comments for this method. + Perms: sign @@ -2723,7 +3139,7 @@ Inputs: Response: `42` ### PaychAvailableFunds -There are not yet any comments for this method. + Perms: sign @@ -2749,7 +3165,7 @@ Response: ``` ### PaychAvailableFundsByFromTo -There are not yet any comments for this method. + Perms: sign @@ -2776,7 +3192,7 @@ Response: ``` ### PaychCollect -There are not yet any comments for this method. + Perms: sign @@ -2819,7 +3235,7 @@ Response: ``` ### PaychGetWaitReady -There are not yet any comments for this method. + Perms: sign @@ -2835,7 +3251,7 @@ Inputs: Response: `"f01234"` ### PaychList -There are not yet any comments for this method. + Perms: read @@ -2844,7 +3260,7 @@ Inputs: `null` Response: `null` ### PaychNewPayment -There are not yet any comments for this method. + Perms: sign @@ -2869,7 +3285,7 @@ Response: ``` ### PaychSettle -There are not yet any comments for this method. + Perms: sign @@ -2888,7 +3304,7 @@ Response: ``` ### PaychStatus -There are not yet any comments for this method. + Perms: read @@ -2908,7 +3324,7 @@ Response: ``` ### PaychVoucherAdd -There are not yet any comments for this method. + Perms: write @@ -2944,7 +3360,7 @@ Inputs: Response: `"0"` ### PaychVoucherCheckSpendable -There are not yet any comments for this method. + Perms: read @@ -2980,7 +3396,7 @@ Inputs: Response: `true` ### PaychVoucherCheckValid -There are not yet any comments for this method. + Perms: read @@ -3014,7 +3430,7 @@ Inputs: Response: `{}` ### PaychVoucherCreate -There are not yet any comments for this method. 
+ Perms: sign @@ -3055,7 +3471,7 @@ Response: ``` ### PaychVoucherList -There are not yet any comments for this method. + Perms: write @@ -3069,7 +3485,7 @@ Inputs: Response: `null` ### PaychVoucherSubmit -There are not yet any comments for this method. + Perms: sign @@ -3111,7 +3527,7 @@ Response: ## State The State methods are used to query, inspect, and interact with chain state. -Most methods take a TipSetKey as a parameter. The state looked up is the state at that tipset. +Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset. A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used. @@ -3164,6 +3580,10 @@ Response: `null` ### StateCall StateCall runs the given message and returns its result without any persisted changes. +StateCall applies the message to the tipset's parent state. The +message is not applied on-top-of the messages in the passed-in +tipset. + Perms: read @@ -3327,6 +3747,36 @@ Response: `"0"` StateCompute is a flexible command that applies the given messages on the given tipset. The messages are run as though the VM were at the provided height. 
+When called, StateCompute will: +- Load the provided tipset, or use the current chain head if not provided +- Compute the tipset state of the provided tipset on top of the parent state + - (note that this step runs before vmheight is applied to the execution) + - Execute state upgrade if any were scheduled at the epoch, or in null + blocks preceding the tipset + - Call the cron actor on null blocks preceding the tipset + - For each block in the tipset + - Apply messages in blocks in the specified + - Award block reward by calling the reward actor + - Call the cron actor for the current epoch +- If the specified vmheight is higher than the current epoch, apply any + needed state upgrades to the state +- Apply the specified messages to the state + +The vmheight parameter sets VM execution epoch, and can be used to simulate +message execution in different network versions. If the specified vmheight +epoch is higher than the epoch of the specified tipset, any state upgrades +until the vmheight will be executed on the state before applying messages +specified by the user. + +Note that the initial tipset state computation is not affected by the +vmheight parameter - only the messages in the `apply` set are + +If the caller wants to simply compute the state, vmheight should be set to +the epoch of the specified tipset. + +Messages in the `apply` parameter must have the correct nonces, and gas +values set. + Perms: read @@ -3387,6 +3837,31 @@ Response: } ``` +### StateDecodeParams +StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number. + + +Perms: read + +Inputs: +```json +[ + "f01234", + 1, + "Ynl0ZSBhcnJheQ==", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + ### StateGetActor StateGetActor returns the indicated actor's nonce and balance. 
@@ -3423,7 +3898,15 @@ Response: ``` ### StateGetReceipt -StateGetReceipt returns the message receipt for the given message +StateGetReceipt returns the message receipt for the given message or for a +matching gas-repriced replacing message + +NOTE: If the requested message was replaced, this method will return the receipt +for the replacing message - if the caller needs the receipt for exactly the +requested message, use StateSearchMsg().Receipt, and check that MsgLookup.Message +is matching the requested CID + +DEPRECATED: Use StateSearchMsg, this method won't be supported in v1 API Perms: read @@ -3831,7 +4314,7 @@ Response: "WorkerChangeEpoch": 10101, "PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Multiaddrs": null, - "SealProofType": 3, + "WindowPoStProofType": 8, "SectorSize": 34359738368, "WindowPoStPartitionSectors": 42, "ConsensusFaultElapsed": 10101 @@ -3849,7 +4332,7 @@ Inputs: [ "f01234", { - "SealProof": 3, + "SealProof": 8, "SectorNumber": 9, "SealedCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -3946,7 +4429,7 @@ Inputs: [ "f01234", { - "SealProof": 3, + "SealProof": 8, "SectorNumber": 9, "SealedCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -4151,7 +4634,7 @@ Inputs: ] ``` -Response: `6` +Response: `13` ### StateReadState StateReadState returns the indicated actor's state. @@ -4178,13 +4661,31 @@ Response: ```json { "Balance": "0", + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, "State": {} } ``` ### StateReplay StateReplay replays a given message, assuming it was included in a block in the specified tipset. -If no tipset key is provided, the appropriate tipset is looked up. 
+ +If a tipset key is provided, and a replacing message is found on chain, +the method will return an error saying that the message wasn't found + +If no tipset key is provided, the appropriate tipset is looked up, and if +the message was gas-repriced, the on-chain message will be replayed - in +that case the returned InvocResult.MsgCid will not match the Cid param + +If the caller wants to ensure that exactly the requested message was executed, +they MUST check that InvocResult.MsgCid is equal to the provided Cid. +Without this check both the requested and original message may appear as +successfully executed on-chain, which may look like a double-spend. + +A replacing message is a message with a different CID, any of Gas values, and +different signature, but with all other parameters matching (source/destination, +nonce, params, etc.) Perms: read @@ -4278,6 +4779,20 @@ Response: ### StateSearchMsg StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed +NOTE: If a replacing message is found on chain, this method will return +a MsgLookup for the replacing message - the MsgLookup.Message will be a different +CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the +result of the execution of the replacing message. + +If the caller wants to ensure that exactly the requested message was executed, +they MUST check that MsgLookup.Message is equal to the provided 'cid'. +Without this check both the requested and original message may appear as +successfully executed on-chain, which may look like a double-spend. + +A replacing message is a message with a different CID, any of Gas values, and +different signature, but with all other parameters matching (source/destination, +nonce, params, etc.) 
+ Perms: read @@ -4314,6 +4829,60 @@ Response: } ``` +### StateSearchMsgLimited +StateSearchMsgLimited looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed + +NOTE: If a replacing message is found on chain, this method will return +a MsgLookup for the replacing message - the MsgLookup.Message will be a different +CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the +result of the execution of the replacing message. + +If the caller wants to ensure that exactly the requested message was executed, +they MUST check that MsgLookup.Message is equal to the provided 'cid'. +Without this check both the requested and original message may appear as +successfully executed on-chain, which may look like a double-spend. + +A replacing message is a message with a different CID, any of Gas values, and +different signature, but with all other parameters matching (source/destination, +nonce, params, etc.) + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 10101 +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ReturnDec": {}, + "TipSet": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101 +} +``` + ### StateSectorExpiration StateSectorExpiration returns epoch at which given sector will expire @@ -4372,7 +4941,7 @@ Response: ```json { "SectorNumber": 9, - "SealProof": 3, + "SealProof": 8, "SealedCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, @@ -4443,7 +5012,7 @@ Response: ```json { "Info": { - "SealProof": 3, + "SealProof": 8, "SectorNumber": 9, "SealedCID": { "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -4491,7 +5060,8 @@ Response: "FilMined": "0", "FilBurnt": "0", "FilLocked": "0", - "FilCirculating": "0" + "FilCirculating": "0", + "FilReserveDisbursed": "0" } ``` @@ -4571,6 +5141,20 @@ Response: `"0"` StateWaitMsg looks back in the chain for a message. If not found, it blocks until the message arrives on chain, and gets to the indicated confidence depth. +NOTE: If a replacing message is found on chain, this method will return +a MsgLookup for the replacing message - the MsgLookup.Message will be a different +CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the +result of the execution of the replacing message. + +If the caller wants to ensure that exactly the requested message was executed, +they MUST check that MsgLookup.Message is equal to the provided 'cid'. +Without this check both the requested and original message may appear as +successfully executed on-chain, which may look like a double-spend. + +A replacing message is a message with a different CID, any of Gas values, and +different signature, but with all other parameters matching (source/destination, +nonce, params, etc.) + Perms: read @@ -4613,6 +5197,20 @@ StateWaitMsgLimited looks back up to limit epochs in the chain for a message. If not found, it blocks until the message arrives on chain, and gets to the indicated confidence depth. +NOTE: If a replacing message is found on chain, this method will return +a MsgLookup for the replacing message - the MsgLookup.Message will be a different +CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the +result of the execution of the replacing message. + +If the caller wants to ensure that exactly the requested message was executed, +they MUST check that MsgLookup.Message is equal to the provided 'cid'. 
+Without this check both the requested and original message may appear as +successfully executed on-chain, which may look like a double-spend. + +A replacing message is a message with a different CID, any of Gas values, and +different signature, but with all other parameters matching (source/destination, +nonce, params, etc.) + Perms: read @@ -4913,7 +5511,7 @@ Response: `"f01234"` WalletDelete deletes an address from the wallet. -Perms: write +Perms: admin Inputs: ```json @@ -5009,7 +5607,7 @@ Response: `"f01234"` WalletSetDefault marks the given address as as the default one. -Perms: admin +Perms: write Inputs: ```json diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md new file mode 100644 index 00000000000..ef151a94dde --- /dev/null +++ b/documentation/en/api-v1-unstable-methods.md @@ -0,0 +1,5859 @@ +# Groups +* [](#) + * [Closing](#Closing) + * [Discover](#Discover) + * [Session](#Session) + * [Shutdown](#Shutdown) + * [Version](#Version) +* [Auth](#Auth) + * [AuthNew](#AuthNew) + * [AuthVerify](#AuthVerify) +* [Beacon](#Beacon) + * [BeaconGetEntry](#BeaconGetEntry) +* [Chain](#Chain) + * [ChainDeleteObj](#ChainDeleteObj) + * [ChainExport](#ChainExport) + * [ChainGetBlock](#ChainGetBlock) + * [ChainGetBlockMessages](#ChainGetBlockMessages) + * [ChainGetGenesis](#ChainGetGenesis) + * [ChainGetMessage](#ChainGetMessage) + * [ChainGetMessagesInTipset](#ChainGetMessagesInTipset) + * [ChainGetNode](#ChainGetNode) + * [ChainGetParentMessages](#ChainGetParentMessages) + * [ChainGetParentReceipts](#ChainGetParentReceipts) + * [ChainGetPath](#ChainGetPath) + * [ChainGetRandomnessFromBeacon](#ChainGetRandomnessFromBeacon) + * [ChainGetRandomnessFromTickets](#ChainGetRandomnessFromTickets) + * [ChainGetTipSet](#ChainGetTipSet) + * [ChainGetTipSetByHeight](#ChainGetTipSetByHeight) + * [ChainHasObj](#ChainHasObj) + * [ChainHead](#ChainHead) + * [ChainNotify](#ChainNotify) + * [ChainReadObj](#ChainReadObj) + * 
[ChainSetHead](#ChainSetHead) + * [ChainStatObj](#ChainStatObj) + * [ChainTipSetWeight](#ChainTipSetWeight) +* [Client](#Client) + * [ClientCalcCommP](#ClientCalcCommP) + * [ClientCancelDataTransfer](#ClientCancelDataTransfer) + * [ClientCancelRetrievalDeal](#ClientCancelRetrievalDeal) + * [ClientDataTransferUpdates](#ClientDataTransferUpdates) + * [ClientDealPieceCID](#ClientDealPieceCID) + * [ClientDealSize](#ClientDealSize) + * [ClientFindData](#ClientFindData) + * [ClientGenCar](#ClientGenCar) + * [ClientGetDealInfo](#ClientGetDealInfo) + * [ClientGetDealStatus](#ClientGetDealStatus) + * [ClientGetDealUpdates](#ClientGetDealUpdates) + * [ClientGetRetrievalUpdates](#ClientGetRetrievalUpdates) + * [ClientHasLocal](#ClientHasLocal) + * [ClientImport](#ClientImport) + * [ClientListDataTransfers](#ClientListDataTransfers) + * [ClientListDeals](#ClientListDeals) + * [ClientListImports](#ClientListImports) + * [ClientListRetrievals](#ClientListRetrievals) + * [ClientMinerQueryOffer](#ClientMinerQueryOffer) + * [ClientQueryAsk](#ClientQueryAsk) + * [ClientRemoveImport](#ClientRemoveImport) + * [ClientRestartDataTransfer](#ClientRestartDataTransfer) + * [ClientRetrieve](#ClientRetrieve) + * [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds) + * [ClientRetrieveWithEvents](#ClientRetrieveWithEvents) + * [ClientStartDeal](#ClientStartDeal) + * [ClientStatelessDeal](#ClientStatelessDeal) +* [Create](#Create) + * [CreateBackup](#CreateBackup) +* [Gas](#Gas) + * [GasEstimateFeeCap](#GasEstimateFeeCap) + * [GasEstimateGasLimit](#GasEstimateGasLimit) + * [GasEstimateGasPremium](#GasEstimateGasPremium) + * [GasEstimateMessageGas](#GasEstimateMessageGas) +* [I](#I) + * [ID](#ID) +* [Log](#Log) + * [LogList](#LogList) + * [LogSetLevel](#LogSetLevel) +* [Market](#Market) + * [MarketAddBalance](#MarketAddBalance) + * [MarketGetReserved](#MarketGetReserved) + * [MarketReleaseFunds](#MarketReleaseFunds) + * [MarketReserveFunds](#MarketReserveFunds) 
+ * [MarketWithdraw](#MarketWithdraw) +* [Miner](#Miner) + * [MinerCreateBlock](#MinerCreateBlock) + * [MinerGetBaseInfo](#MinerGetBaseInfo) +* [Mpool](#Mpool) + * [MpoolBatchPush](#MpoolBatchPush) + * [MpoolBatchPushMessage](#MpoolBatchPushMessage) + * [MpoolBatchPushUntrusted](#MpoolBatchPushUntrusted) + * [MpoolCheckMessages](#MpoolCheckMessages) + * [MpoolCheckPendingMessages](#MpoolCheckPendingMessages) + * [MpoolCheckReplaceMessages](#MpoolCheckReplaceMessages) + * [MpoolClear](#MpoolClear) + * [MpoolGetConfig](#MpoolGetConfig) + * [MpoolGetNonce](#MpoolGetNonce) + * [MpoolPending](#MpoolPending) + * [MpoolPush](#MpoolPush) + * [MpoolPushMessage](#MpoolPushMessage) + * [MpoolPushUntrusted](#MpoolPushUntrusted) + * [MpoolSelect](#MpoolSelect) + * [MpoolSetConfig](#MpoolSetConfig) + * [MpoolSub](#MpoolSub) +* [Msig](#Msig) + * [MsigAddApprove](#MsigAddApprove) + * [MsigAddCancel](#MsigAddCancel) + * [MsigAddPropose](#MsigAddPropose) + * [MsigApprove](#MsigApprove) + * [MsigApproveTxnHash](#MsigApproveTxnHash) + * [MsigCancel](#MsigCancel) + * [MsigCreate](#MsigCreate) + * [MsigGetAvailableBalance](#MsigGetAvailableBalance) + * [MsigGetPending](#MsigGetPending) + * [MsigGetVested](#MsigGetVested) + * [MsigGetVestingSchedule](#MsigGetVestingSchedule) + * [MsigPropose](#MsigPropose) + * [MsigRemoveSigner](#MsigRemoveSigner) + * [MsigSwapApprove](#MsigSwapApprove) + * [MsigSwapCancel](#MsigSwapCancel) + * [MsigSwapPropose](#MsigSwapPropose) +* [Net](#Net) + * [NetAddrsListen](#NetAddrsListen) + * [NetAgentVersion](#NetAgentVersion) + * [NetAutoNatStatus](#NetAutoNatStatus) + * [NetBandwidthStats](#NetBandwidthStats) + * [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer) + * [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol) + * [NetBlockAdd](#NetBlockAdd) + * [NetBlockList](#NetBlockList) + * [NetBlockRemove](#NetBlockRemove) + * [NetConnect](#NetConnect) + * [NetConnectedness](#NetConnectedness) + * [NetDisconnect](#NetDisconnect) + * 
[NetFindPeer](#NetFindPeer) + * [NetPeerInfo](#NetPeerInfo) + * [NetPeers](#NetPeers) + * [NetPubsubScores](#NetPubsubScores) +* [Node](#Node) + * [NodeStatus](#NodeStatus) +* [Paych](#Paych) + * [PaychAllocateLane](#PaychAllocateLane) + * [PaychAvailableFunds](#PaychAvailableFunds) + * [PaychAvailableFundsByFromTo](#PaychAvailableFundsByFromTo) + * [PaychCollect](#PaychCollect) + * [PaychGet](#PaychGet) + * [PaychGetWaitReady](#PaychGetWaitReady) + * [PaychList](#PaychList) + * [PaychNewPayment](#PaychNewPayment) + * [PaychSettle](#PaychSettle) + * [PaychStatus](#PaychStatus) + * [PaychVoucherAdd](#PaychVoucherAdd) + * [PaychVoucherCheckSpendable](#PaychVoucherCheckSpendable) + * [PaychVoucherCheckValid](#PaychVoucherCheckValid) + * [PaychVoucherCreate](#PaychVoucherCreate) + * [PaychVoucherList](#PaychVoucherList) + * [PaychVoucherSubmit](#PaychVoucherSubmit) +* [State](#State) + * [StateAccountKey](#StateAccountKey) + * [StateAllMinerFaults](#StateAllMinerFaults) + * [StateCall](#StateCall) + * [StateChangedActors](#StateChangedActors) + * [StateCirculatingSupply](#StateCirculatingSupply) + * [StateCompute](#StateCompute) + * [StateDealProviderCollateralBounds](#StateDealProviderCollateralBounds) + * [StateDecodeParams](#StateDecodeParams) + * [StateGetActor](#StateGetActor) + * [StateListActors](#StateListActors) + * [StateListMessages](#StateListMessages) + * [StateListMiners](#StateListMiners) + * [StateLookupID](#StateLookupID) + * [StateMarketBalance](#StateMarketBalance) + * [StateMarketDeals](#StateMarketDeals) + * [StateMarketParticipants](#StateMarketParticipants) + * [StateMarketStorageDeal](#StateMarketStorageDeal) + * [StateMinerActiveSectors](#StateMinerActiveSectors) + * [StateMinerAvailableBalance](#StateMinerAvailableBalance) + * [StateMinerDeadlines](#StateMinerDeadlines) + * [StateMinerFaults](#StateMinerFaults) + * [StateMinerInfo](#StateMinerInfo) + * [StateMinerInitialPledgeCollateral](#StateMinerInitialPledgeCollateral) + * 
[StateMinerPartitions](#StateMinerPartitions) + * [StateMinerPower](#StateMinerPower) + * [StateMinerPreCommitDepositForPower](#StateMinerPreCommitDepositForPower) + * [StateMinerProvingDeadline](#StateMinerProvingDeadline) + * [StateMinerRecoveries](#StateMinerRecoveries) + * [StateMinerSectorAllocated](#StateMinerSectorAllocated) + * [StateMinerSectorCount](#StateMinerSectorCount) + * [StateMinerSectors](#StateMinerSectors) + * [StateNetworkName](#StateNetworkName) + * [StateNetworkVersion](#StateNetworkVersion) + * [StateReadState](#StateReadState) + * [StateReplay](#StateReplay) + * [StateSearchMsg](#StateSearchMsg) + * [StateSectorExpiration](#StateSectorExpiration) + * [StateSectorGetInfo](#StateSectorGetInfo) + * [StateSectorPartition](#StateSectorPartition) + * [StateSectorPreCommitInfo](#StateSectorPreCommitInfo) + * [StateVMCirculatingSupplyInternal](#StateVMCirculatingSupplyInternal) + * [StateVerifiedClientStatus](#StateVerifiedClientStatus) + * [StateVerifiedRegistryRootKey](#StateVerifiedRegistryRootKey) + * [StateVerifierStatus](#StateVerifierStatus) + * [StateWaitMsg](#StateWaitMsg) +* [Sync](#Sync) + * [SyncCheckBad](#SyncCheckBad) + * [SyncCheckpoint](#SyncCheckpoint) + * [SyncIncomingBlocks](#SyncIncomingBlocks) + * [SyncMarkBad](#SyncMarkBad) + * [SyncState](#SyncState) + * [SyncSubmitBlock](#SyncSubmitBlock) + * [SyncUnmarkAllBad](#SyncUnmarkAllBad) + * [SyncUnmarkBad](#SyncUnmarkBad) + * [SyncValidateTipset](#SyncValidateTipset) +* [Wallet](#Wallet) + * [WalletBalance](#WalletBalance) + * [WalletDefaultAddress](#WalletDefaultAddress) + * [WalletDelete](#WalletDelete) + * [WalletExport](#WalletExport) + * [WalletHas](#WalletHas) + * [WalletImport](#WalletImport) + * [WalletList](#WalletList) + * [WalletNew](#WalletNew) + * [WalletSetDefault](#WalletSetDefault) + * [WalletSign](#WalletSign) + * [WalletSignMessage](#WalletSignMessage) + * [WalletValidateAddress](#WalletValidateAddress) + * [WalletVerify](#WalletVerify) +## + + +### Closing + + 
+Perms: read + +Inputs: `null` + +Response: `{}` + +### Discover + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "info": { + "title": "Lotus RPC API", + "version": "1.2.1/generated=2020-11-22T08:22:42-06:00" + }, + "methods": [], + "openrpc": "1.2.6" +} +``` + +### Session + + +Perms: read + +Inputs: `null` + +Response: `"07070707-0707-0707-0707-070707070707"` + +### Shutdown + + +Perms: admin + +Inputs: `null` + +Response: `{}` + +### Version + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Version": "string value", + "APIVersion": 131328, + "BlockDelay": 42 +} +``` + +## Auth + + +### AuthNew + + +Perms: admin + +Inputs: +```json +[ + null +] +``` + +Response: `"Ynl0ZSBhcnJheQ=="` + +### AuthVerify + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: `null` + +## Beacon +The Beacon method group contains methods for interacting with the random beacon (DRAND) + + +### BeaconGetEntry +BeaconGetEntry returns the beacon entry for the given filecoin epoch. If +the entry has not yet been produced, the call will block until the entry +becomes available + + +Perms: read + +Inputs: +```json +[ + 10101 +] +``` + +Response: +```json +{ + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" +} +``` + +## Chain +The Chain method group contains methods for interacting with the +blockchain, but that do not require any form of state computation. + + +### ChainDeleteObj +ChainDeleteObj deletes node referenced by the given CID + + +Perms: admin + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `{}` + +### ChainExport +ChainExport returns a stream of bytes with CAR dump of chain data. +The exported chain data includes the header chain from the given tipset +back to genesis, the entire genesis state, and the most recent 'nroots' +state trees. +If oldmsgskip is set, messages from before the requested roots are also not included. 
+ + +Perms: read + +Inputs: +```json +[ + 10101, + true, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"Ynl0ZSBhcnJheQ=="` + +### ChainGetBlock +ChainGetBlock returns the block specified by the given CID. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Miner": "f01234", + "Ticket": { + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "BeaconEntries": null, + "WinPoStProof": null, + "Parents": null, + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" +} +``` + +### ChainGetBlockMessages +ChainGetBlockMessages returns messages stored in the specified block. + +Note: If there are multiple blocks in a tipset, it's likely that some +messages will be duplicated. It's also possible for blocks in a tipset to have +different messages from the same sender at the same nonce. 
When that happens, +only the first message (in a block with lowest ticket) will be considered +for execution + +NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK + +DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET +Use ChainGetParentMessages, which will perform correct message deduplication + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "BlsMessages": null, + "SecpkMessages": null, + "Cids": null +} +``` + +### ChainGetGenesis +ChainGetGenesis returns the genesis tipset. + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainGetMessage +ChainGetMessage reads a message referenced by the specified CID from the +chain blockstore. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } +} +``` + +### ChainGetMessagesInTipset +ChainGetMessagesInTipset returns messages stored in the current tipset + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### ChainGetNode + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Obj": {} +} +``` + +### ChainGetParentMessages +ChainGetParentMessages returns messages stored in parent tipset of the +specified block.
+ + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `null` + +### ChainGetParentReceipts +ChainGetParentReceipts returns receipts for messages in parent tipset of +the specified block. The receipts in the list returned is one-to-one with the +messages returned by a call to ChainGetParentMessages with the same blockCid. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `null` + +### ChainGetPath +ChainGetPath returns a set of revert/apply operations needed to get from +one tipset to another, for example: +``` + to + ^ +from tAA + ^ ^ +tBA tAB + ^---*--^ + ^ + tRR +``` +Would return `[revert(tBA), apply(tAB), apply(tAA)]` + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### ChainGetRandomnessFromBeacon +ChainGetRandomnessFromBeacon is used to sample the beacon for randomness. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 2, + 10101, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: `null` + +### ChainGetRandomnessFromTickets +ChainGetRandomnessFromTickets is used to sample the chain for randomness. 
+ + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 2, + 10101, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: `null` + +### ChainGetTipSet +ChainGetTipSet returns the tipset specified by the given TipSetKey. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainGetTipSetByHeight +ChainGetTipSetByHeight looks back for a tipset at the specified epoch. +If there are no blocks at the specified epoch, a tipset at an earlier epoch +will be returned. + + +Perms: read + +Inputs: +```json +[ + 10101, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainHasObj +ChainHasObj checks if a given CID exists in the chain blockstore. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `true` + +### ChainHead +ChainHead returns the current head of the chain. + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainNotify +ChainNotify returns channel with chain head updates. +First message is guaranteed to be of len == 1, and type == 'current'. + + +Perms: read + +Inputs: `null` + +Response: `null` + +### ChainReadObj +ChainReadObj reads ipld nodes referenced by the specified CID from chain +blockstore and returns raw bytes. 
+ + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `"Ynl0ZSBhcnJheQ=="` + +### ChainSetHead +ChainSetHead forcefully sets current chain head. Use with caution. + + +Perms: admin + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + +### ChainStatObj +ChainStatObj returns statistics about the graph referenced by 'obj'. +If 'base' is also specified, then the returned stat will be a diff +between the two objects. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Size": 42, + "Links": 42 +} +``` + +### ChainTipSetWeight +ChainTipSetWeight computes weight for the specified tipset. 
+ + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +## Client +The Client methods all have to do with interacting with the storage and +retrieval markets as a client + + +### ClientCalcCommP +ClientCalcCommP calculates the CommP for a specified file + + +Perms: write + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 1024 +} +``` + +### ClientCancelDataTransfer +ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer + + +Perms: write + +Inputs: +```json +[ + 3, + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + true +] +``` + +Response: `{}` + +### ClientCancelRetrievalDeal +ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID + + +Perms: write + +Inputs: +```json +[ + 5 +] +``` + +Response: `{}` + +### ClientDataTransferUpdates + + +Perms: write + +Inputs: `null` + +Response: +```json +{ + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": null + } +} +``` + +### ClientDealPieceCID +ClientDealPieceCID calculates the CommP and data size of the specified CID + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "PayloadSize": 9, + "PieceSize": 1032, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +} +``` + +### ClientDealSize +ClientDealSize calculates real deal data size 
+ + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "PayloadSize": 9, + "PieceSize": 1032 +} +``` + +### ClientFindData +ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + null +] +``` + +Response: `null` + +### ClientGenCar +ClientGenCar generates a CAR file for the specified file. + + +Perms: write + +Inputs: +```json +[ + { + "Path": "string value", + "IsCAR": true + }, + "string value" +] +``` + +Response: `{}` + +### ClientGetDealInfo +ClientGetDealInfo returns the latest information about a given deal. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "State": 42, + "Message": "string value", + "DealStages": { + "Stages": null + }, + "Provider": "f01234", + "DataRef": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 42, + "PricePerEpoch": "0", + "Duration": 42, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "Verified": true, + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string 
value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": null + } + } +} +``` + +### ClientGetDealStatus +ClientGetDealStatus returns status given a code + + +Perms: read + +Inputs: +```json +[ + 42 +] +``` + +Response: `"string value"` + +### ClientGetDealUpdates +ClientGetDealUpdates returns the status of updated deals + + +Perms: write + +Inputs: `null` + +Response: +```json +{ + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "State": 42, + "Message": "string value", + "DealStages": { + "Stages": null + }, + "Provider": "f01234", + "DataRef": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 42, + "PricePerEpoch": "0", + "Duration": 42, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "Verified": true, + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": null + } + } +} +``` + +### ClientGetRetrievalUpdates +ClientGetRetrievalUpdates returns status of updated retrieval deals + + +Perms: write + +Inputs: `null` + +Response: +```json +{ + "PayloadCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ID": 5, + "PieceCID": null, + "PricePerByte": "0", + "UnsealPrice": "0", + 
"Status": 0, + "Message": "string value", + "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "BytesReceived": 42, + "BytesPaidFor": 42, + "TotalPaid": "0", + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": null + } + } +} +``` + +### ClientHasLocal +ClientHasLocal indicates whether a certain CID is locally stored. + + +Perms: write + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `true` + +### ClientImport +ClientImport imports file under the specified path into filestore. + + +Perms: admin + +Inputs: +```json +[ + { + "Path": "string value", + "IsCAR": true + } +] +``` + +Response: +```json +{ + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ImportID": 50 +} +``` + +### ClientListDataTransfers +ClientListTransfers returns the status of all ongoing transfers of data + + +Perms: write + +Inputs: `null` + +Response: `null` + +### ClientListDeals +ClientListDeals returns information about the deals made by the local client. 
+ + +Perms: write + +Inputs: `null` + +Response: `null` + +### ClientListImports +ClientListImports lists imported files and their root CIDs + + +Perms: write + +Inputs: `null` + +Response: `null` + +### ClientListRetrievals +ClientListRetrievals returns information about retrievals made by the local client + + +Perms: write + +Inputs: `null` + +Response: `null` + +### ClientMinerQueryOffer +ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + null +] +``` + +Response: +```json +{ + "Err": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Piece": null, + "Size": 42, + "MinPrice": "0", + "UnsealPrice": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "Miner": "f01234", + "MinerPeer": { + "Address": "f01234", + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "PieceCID": null + } +} +``` + +### ClientQueryAsk +ClientQueryAsk returns a signed StorageAsk from the specified miner. + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "f01234" +] +``` + +Response: +```json +{ + "Price": "0", + "VerifiedPrice": "0", + "MinPieceSize": 1032, + "MaxPieceSize": 1032, + "Miner": "f01234", + "Timestamp": 10101, + "Expiry": 10101, + "SeqNo": 42 +} +``` + +### ClientRemoveImport +ClientRemoveImport removes file import + + +Perms: admin + +Inputs: +```json +[ + 50 +] +``` + +Response: `{}` + +### ClientRestartDataTransfer +ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer + + +Perms: write + +Inputs: +```json +[ + 3, + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + true +] +``` + +Response: `{}` + +### ClientRetrieve +ClientRetrieve initiates the retrieval of a file, as specified in the order. 
+ + +Perms: admin + +Inputs: +```json +[ + { + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Piece": null, + "Size": 42, + "LocalStore": 12, + "Total": "0", + "UnsealPrice": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "Client": "f01234", + "Miner": "f01234", + "MinerPeer": { + "Address": "f01234", + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "PieceCID": null + } + }, + { + "Path": "string value", + "IsCAR": true + } +] +``` + +Response: `{}` + +### ClientRetrieveTryRestartInsufficientFunds +ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel +which are stuck due to insufficient funds + + +Perms: write + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### ClientRetrieveWithEvents +ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel +of status updates. + + +Perms: admin + +Inputs: +```json +[ + { + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Piece": null, + "Size": 42, + "LocalStore": 12, + "Total": "0", + "UnsealPrice": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "Client": "f01234", + "Miner": "f01234", + "MinerPeer": { + "Address": "f01234", + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "PieceCID": null + } + }, + { + "Path": "string value", + "IsCAR": true + } +] +``` + +Response: +```json +{ + "Event": 5, + "Status": 0, + "BytesReceived": 42, + "FundsSpent": "0", + "Err": "string value" +} +``` + +### ClientStartDeal +ClientStartDeal proposes a deal with a miner. 
+ + +Perms: admin + +Inputs: +```json +[ + { + "Data": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "Wallet": "f01234", + "Miner": "f01234", + "EpochPrice": "0", + "MinBlocksDuration": 42, + "ProviderCollateral": "0", + "DealStartEpoch": 10101, + "FastRetrieval": true, + "VerifiedDeal": true + } +] +``` + +Response: `null` + +### ClientStatelessDeal +ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. + + +Perms: write + +Inputs: +```json +[ + { + "Data": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "Wallet": "f01234", + "Miner": "f01234", + "EpochPrice": "0", + "MinBlocksDuration": 42, + "ProviderCollateral": "0", + "DealStartEpoch": 10101, + "FastRetrieval": true, + "VerifiedDeal": true + } +] +``` + +Response: `null` + +## Create + + +### CreateBackup +CreateBackup creates node backup under the specified file name. 
The +method requires that the lotus daemon is running with the +LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that +the path specified when calling CreateBackup is within the base path + + +Perms: admin + +Inputs: +```json +[ + "string value" +] +``` + +Response: `{}` + +## Gas + + +### GasEstimateFeeCap +GasEstimateFeeCap estimates gas fee cap + + +Perms: read + +Inputs: +```json +[ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### GasEstimateGasLimit +GasEstimateGasLimit estimates gas used by the message and returns it. +It fails if message fails to execute. + + +Perms: read + +Inputs: +```json +[ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `9` + +### GasEstimateGasPremium +GasEstimateGasPremium estimates what gas price should be used for a +message to have high likelihood of inclusion in `nblocksincl` epochs. 
+ + +Perms: read + +Inputs: +```json +[ + 42, + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### GasEstimateMessageGas +GasEstimateMessageGas estimates gas values for unset message gas fields + + +Perms: read + +Inputs: +```json +[ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + { + "MaxFee": "0" + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } +} +``` + +## I + + +### ID + + +Perms: read + +Inputs: `null` + +Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"` + +## Log + + +### LogList + + +Perms: write + +Inputs: `null` + +Response: `null` + +### LogSetLevel + + +Perms: write + +Inputs: +```json +[ + "string value", + "string value" +] +``` + +Response: `{}` + +## Market + + +### MarketAddBalance +MarketAddBalance adds funds to the market actor + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MarketGetReserved +MarketGetReserved gets the amount of funds that are currently reserved for the address + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `"0"` + +### 
MarketReleaseFunds +MarketReleaseFunds releases funds reserved by MarketReserveFunds + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "0" +] +``` + +Response: `{}` + +### MarketReserveFunds +MarketReserveFunds reserves funds for a deal + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MarketWithdraw +MarketWithdraw withdraws unlocked funds from the market actor + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +## Miner + + +### MinerCreateBlock + + +Perms: write + +Inputs: +```json +[ + { + "Miner": "f01234", + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Ticket": { + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "Eproof": { + "WinCount": 9, + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "BeaconValues": null, + "Messages": null, + "Epoch": 10101, + "Timestamp": 42, + "WinningPoStProof": null + } +] +``` + +Response: +```json +{ + "Header": { + "Miner": "f01234", + "Ticket": { + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "BeaconEntries": null, + "WinPoStProof": null, + "Parents": null, + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + 
"ParentBaseFee": "0" + }, + "BlsMessages": null, + "SecpkMessages": null +} +``` + +### MinerGetBaseInfo +There are not yet any comments for this method. + +Perms: read + +Inputs: +```json +[ + "f01234", + 10101, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "MinerPower": "0", + "NetworkPower": "0", + "Sectors": null, + "WorkerKey": "f01234", + "SectorSize": 34359738368, + "PrevBeaconEntry": { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "BeaconEntries": null, + "EligibleForMining": true +} +``` + +## Mpool +The Mpool methods are for interacting with the message pool. The message pool +manages all incoming and outgoing 'messages' going over the network. + + +### MpoolBatchPush +MpoolBatchPush batch pushes a signed message to mempool. + + +Perms: write + +Inputs: +```json +[ + null +] +``` + +Response: `null` + +### MpoolBatchPushMessage +MpoolBatchPushMessage batch pushes a unsigned message to mempool. + + +Perms: sign + +Inputs: +```json +[ + null, + { + "MaxFee": "0" + } +] +``` + +Response: `null` + +### MpoolBatchPushUntrusted +MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources. 
+ + +Perms: write + +Inputs: +```json +[ + null +] +``` + +Response: `null` + +### MpoolCheckMessages +MpoolCheckMessages performs logical checks on a batch of messages + + +Perms: read + +Inputs: +```json +[ + null +] +``` + +Response: `null` + +### MpoolCheckPendingMessages +MpoolCheckPendingMessages performs logical checks for all pending messages from a given address + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `null` + +### MpoolCheckReplaceMessages +MpoolCheckReplaceMessages performs logical checks on pending messages with replacement + + +Perms: read + +Inputs: +```json +[ + null +] +``` + +Response: `null` + +### MpoolClear +MpoolClear clears pending messages from the mpool + + +Perms: write + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +### MpoolGetConfig +MpoolGetConfig returns (a copy of) the current mpool config + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "PriorityAddrs": null, + "SizeLimitHigh": 123, + "SizeLimitLow": 123, + "ReplaceByFeeRatio": 12.3, + "PruneCooldown": 60000000000, + "GasLimitOverestimation": 12.3 +} +``` + +### MpoolGetNonce +MpoolGetNonce gets next nonce for the specified sender. +Note that this method may not be atomic. Use MpoolPushMessage instead. + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `42` + +### MpoolPending +MpoolPending returns pending mempool messages. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### MpoolPush +MpoolPush pushes a signed message to mempool. 
+ + +Perms: write + +Inputs: +```json +[ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MpoolPushMessage +MpoolPushMessage atomically assigns a nonce, signs, and pushes a message +to mempool. +maxFee is only used when GasFeeCap/GasPremium fields aren't specified + +When maxFee is set to 0, MpoolPushMessage will guess appropriate fee +based on current chain conditions + + +Perms: sign + +Inputs: +```json +[ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + { + "MaxFee": "0" + } +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } +} +``` + +### MpoolPushUntrusted +MpoolPushUntrusted pushes a signed message to mempool from untrusted sources. 
+ + +Perms: write + +Inputs: +```json +[ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MpoolSelect +MpoolSelect returns a list of pending messages for inclusion in the next block + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 12.3 +] +``` + +Response: `null` + +### MpoolSetConfig +MpoolSetConfig sets the mpool config to (a copy of) the supplied config + + +Perms: admin + +Inputs: +```json +[ + { + "PriorityAddrs": null, + "SizeLimitHigh": 123, + "SizeLimitLow": 123, + "ReplaceByFeeRatio": 12.3, + "PruneCooldown": 60000000000, + "GasLimitOverestimation": 12.3 + } +] +``` + +Response: `{}` + +### MpoolSub + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Type": 0, + "Message": { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +} +``` + +## Msig +The Msig methods are used to interact with multisig wallets on the +filecoin network + + +### MsigAddApprove +MsigAddApprove 
approves a previously proposed AddSigner message +It takes the following params: , , , +, , + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + 42, + "f01234", + "f01234", + true +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +### MsigAddCancel +MsigAddCancel cancels a previously proposed AddSigner message +It takes the following params: , , , +, + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + 42, + "f01234", + true +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +### MsigAddPropose +MsigAddPropose proposes adding a signer in the multisig +It takes the following params: , , +, + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "f01234", + true +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +### MsigApprove +MsigApprove approves a previously-proposed multisig message by transaction ID +It takes the following params: , + + +Perms: sign + +Inputs: +```json +[ + "f01234", + 42, + "f01234" +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + 
"Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +### MsigApproveTxnHash +MsigApproveTxnHash approves a previously-proposed multisig message, specified +using both transaction ID and a hash of the parameters used in the +proposal. This method of approval can be used to ensure you only approve +exactly the transaction you think you are. +It takes the following params: , , , , , +, , + + +Perms: sign + +Inputs: +```json +[ + "f01234", + 42, + "f01234", + "f01234", + "0", + "f01234", + 42, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +### MsigCancel +MsigCancel cancels a previously-proposed multisig message +It takes the following params: , , , , +, , + + +Perms: sign + +Inputs: +```json +[ + "f01234", + 42, + "f01234", + "0", + "f01234", + 42, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +### MsigCreate +MsigCreate creates a multisig wallet +It takes the following params: , , +, , + + +Perms: sign + +Inputs: +```json +[ + 42, + null, + 10101, + "0", + "f01234", + "0" +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + 
"GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +### MsigGetAvailableBalance +MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### MsigGetPending +MsigGetPending returns pending transactions for the given multisig +wallet. Once pending transactions are fully approved, they will no longer +appear here. + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### MsigGetVested +MsigGetVested returns the amount of FIL that vested in a multisig in a certain period. +It takes the following params: , , + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### MsigGetVestingSchedule +MsigGetVestingSchedule returns the vesting details of a given multisig. 
+ + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "InitialBalance": "0", + "StartEpoch": 10101, + "UnlockDuration": 10101 +} +``` + +### MsigPropose +MsigPropose proposes a multisig message +It takes the following params: , , , +, , + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0", + "f01234", + 42, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +### MsigRemoveSigner +MsigRemoveSigner proposes the removal of a signer from the multisig. +It accepts the multisig to make the change on, the proposer address to +send the message from, the address to be removed, and a boolean +indicating whether or not the signing threshold should be lowered by one +along with the address removal. 
+ + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "f01234", + true +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +### MsigSwapApprove +MsigSwapApprove approves a previously proposed SwapSigner +It takes the following params: , , , +, , + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + 42, + "f01234", + "f01234", + "f01234" +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +### MsigSwapCancel +MsigSwapCancel cancels a previously proposed SwapSigner message +It takes the following params: , , , +, + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + 42, + "f01234", + "f01234" +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +### MsigSwapPropose +MsigSwapPropose proposes swapping 2 signers in the multisig +It takes the following params: , , +, + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "f01234", + "f01234" +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + 
"GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +## Net + + +### NetAddrsListen + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] +} +``` + +### NetAgentVersion + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `"string value"` + +### NetAutoNatStatus + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Reachability": 1, + "PublicAddr": "string value" +} +``` + +### NetBandwidthStats + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "TotalIn": 9, + "TotalOut": 9, + "RateIn": 12.3, + "RateOut": 12.3 +} +``` + +### NetBandwidthStatsByPeer + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "12D3KooWSXmXLJmBR1M7i9RW9GQPNUhZSzXKzxDHWtAgNuJAbyEJ": { + "TotalIn": 174000, + "TotalOut": 12500, + "RateIn": 100, + "RateOut": 50 + } +} +``` + +### NetBandwidthStatsByProtocol + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "/fil/hello/1.0.0": { + "TotalIn": 174000, + "TotalOut": 12500, + "RateIn": 100, + "RateOut": 50 + } +} +``` + +### NetBlockAdd + + +Perms: admin + +Inputs: +```json +[ + { + "Peers": null, + "IPAddrs": null, + "IPSubnets": null + } +] +``` + +Response: `{}` + +### NetBlockList + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Peers": null, + "IPAddrs": null, + "IPSubnets": null +} +``` + +### NetBlockRemove + + +Perms: admin + +Inputs: +```json +[ + { + "Peers": null, + "IPAddrs": null, + "IPSubnets": null + } +] +``` + +Response: `{}` + +### NetConnect + + +Perms: write + +Inputs: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] + } +] +``` + +Response: `{}` + +### NetConnectedness + + +Perms: read + +Inputs: +```json +[ + 
"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `1` + +### NetDisconnect + + +Perms: write + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `{}` + +### NetFindPeer + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] +} +``` + +### NetPeerInfo + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Agent": "string value", + "Addrs": null, + "Protocols": null, + "ConnMgrMeta": { + "FirstSeen": "0001-01-01T00:00:00Z", + "Value": 123, + "Tags": { + "name": 42 + }, + "Conns": { + "name": "2021-03-08T22:52:18Z" + } + } +} +``` + +### NetPeers + + +Perms: read + +Inputs: `null` + +Response: `null` + +### NetPubsubScores + + +Perms: read + +Inputs: `null` + +Response: `null` + +## Node +These methods are general node management and status commands + + +### NodeStatus +There are not yet any comments for this method. 
+ +Perms: read + +Inputs: +```json +[ + true +] +``` + +Response: +```json +{ + "SyncStatus": { + "Epoch": 42, + "Behind": 42 + }, + "PeerStatus": { + "PeersToPublishMsgs": 123, + "PeersToPublishBlocks": 123 + }, + "ChainStatus": { + "BlocksPerTipsetLast100": 12.3, + "BlocksPerTipsetLastFinality": 12.3 + } +} +``` + +## Paych +The Paych methods are for interacting with and managing payment channels + + +### PaychAllocateLane + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `42` + +### PaychAvailableFunds + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "Channel": "\u003cempty\u003e", + "From": "f01234", + "To": "f01234", + "ConfirmedAmt": "0", + "PendingAmt": "0", + "PendingWaitSentinel": null, + "QueuedAmt": "0", + "VoucherReedeemedAmt": "0" +} +``` + +### PaychAvailableFundsByFromTo + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234" +] +``` + +Response: +```json +{ + "Channel": "\u003cempty\u003e", + "From": "f01234", + "To": "f01234", + "ConfirmedAmt": "0", + "PendingAmt": "0", + "PendingWaitSentinel": null, + "QueuedAmt": "0", + "VoucherReedeemedAmt": "0" +} +``` + +### PaychCollect + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### PaychGet +There are not yet any comments for this method. 
+ +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + "0" +] +``` + +Response: +```json +{ + "Channel": "f01234", + "WaitSentinel": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +} +``` + +### PaychGetWaitReady + + +Perms: sign + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `"f01234"` + +### PaychList + + +Perms: read + +Inputs: `null` + +Response: `null` + +### PaychNewPayment + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "f01234", + null +] +``` + +Response: +```json +{ + "Channel": "f01234", + "WaitSentinel": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Vouchers": null +} +``` + +### PaychSettle + + +Perms: sign + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### PaychStatus + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "ControlAddr": "f01234", + "Direction": 1 +} +``` + +### PaychVoucherAdd + + +Perms: write + +Inputs: +```json +[ + "f01234", + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": null, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Ynl0ZSBhcnJheQ==", + "0" +] +``` + +Response: `"0"` + +### PaychVoucherCheckSpendable + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": null, + "Signature": { + "Type": 2, + 
"Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Ynl0ZSBhcnJheQ==", + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: `true` + +### PaychVoucherCheckValid + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": null, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } +] +``` + +Response: `{}` + +### PaychVoucherCreate + + +Perms: sign + +Inputs: +```json +[ + "f01234", + "0", + 42 +] +``` + +Response: +```json +{ + "Voucher": { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": null, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Shortfall": "0" +} +``` + +### PaychVoucherList + + +Perms: write + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `null` + +### PaychVoucherSubmit + + +Perms: sign + +Inputs: +```json +[ + "f01234", + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": null, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Ynl0ZSBhcnJheQ==", + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +## State +The State methods are used to query, inspect, and interact with chain state. +Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset. 
+A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used. + + +### StateAccountKey +StateAccountKey returns the public key address of the given ID address + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"f01234"` + +### StateAllMinerFaults +StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset + + +Perms: read + +Inputs: +```json +[ + 10101, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### StateCall +StateCall runs the given message and returns its result without any persisted changes. + +StateCall applies the message to the tipset's parent state. The +message is not applied on-top-of the messages in the passed-in +tipset. 
+ + +Perms: read + +Inputs: +```json +[ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "MsgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "GasCost": { + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "GasUsed": "0", + "BaseFeeBurn": "0", + "OverEstimationBurn": "0", + "MinerPenalty": "0", + "MinerTip": "0", + "Refund": "0", + "TotalCost": "0" + }, + "ExecutionTrace": { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": null, + "Subcalls": null + }, + "Error": "string value", + "Duration": 60000000000 +} +``` + +### StateChangedActors +StateChangedActors returns all the actors whose states change between the two given state CIDs +TODO: Should this take tipset keys instead? 
+ + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "t01236": { + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0" + } +} +``` + +### StateCirculatingSupply +StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset. +This is not used anywhere in the protocol itself, and is only for external consumption. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateCompute +StateCompute is a flexible command that applies the given messages on the given tipset. +The messages are run as though the VM were at the provided height. + +When called, StateCompute will: +- Load the provided tipset, or use the current chain head if not provided +- Compute the tipset state of the provided tipset on top of the parent state + - (note that this step runs before vmheight is applied to the execution) + - Execute state upgrade if any were scheduled at the epoch, or in null + blocks preceding the tipset + - Call the cron actor on null blocks preceding the tipset + - For each block in the tipset + - Apply messages in blocks in the specified + - Award block reward by calling the reward actor + - Call the cron actor for the current epoch +- If the specified vmheight is higher than the current epoch, apply any + needed state upgrades to the state +- Apply the specified messages to the state + +The vmheight parameter sets VM execution epoch, and can be used to simulate +message execution in different network versions. 
If the specified vmheight +epoch is higher than the epoch of the specified tipset, any state upgrades +until the vmheight will be executed on the state before applying messages +specified by the user. + +Note that the initial tipset state computation is not affected by the +vmheight parameter - only the messages in the `apply` set are + +If the caller wants to simply compute the state, vmheight should be set to +the epoch of the specified tipset. + +Messages in the `apply` parameter must have the correct nonces, and gas +values set. + + +Perms: read + +Inputs: +```json +[ + 10101, + null, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Trace": null +} +``` + +### StateDealProviderCollateralBounds +StateDealProviderCollateralBounds returns the min and max collateral a storage provider +can issue. It takes the deal size and verified status as parameters. + + +Perms: read + +Inputs: +```json +[ + 1032, + true, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Min": "0", + "Max": "0" +} +``` + +### StateDecodeParams +StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number. + + +Perms: read + +Inputs: +```json +[ + "f01234", + 1, + "Ynl0ZSBhcnJheQ==", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + +### StateGetActor +StateGetActor returns the indicated actor's nonce and balance. 
+ + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0" +} +``` + +### StateListActors +StateListActors returns the addresses of every actor in the state + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### StateListMessages +StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height. + + +Perms: read + +Inputs: +```json +[ + { + "To": "f01234", + "From": "f01234" + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 10101 +] +``` + +Response: `null` + +### StateListMiners +StateListMiners returns the addresses of every miner that has claimed power in the Power Actor + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### StateLookupID +StateLookupID retrieves the ID address of the given address + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"f01234"` + +### StateMarketBalance +StateMarketBalance looks up the Escrow and 
Locked balances of the given address in the Storage Market + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Escrow": "0", + "Locked": "0" +} +``` + +### StateMarketDeals +StateMarketDeals returns information about every deal in the Storage Market + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "t026363": { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "State": { + "SectorStartEpoch": 10101, + "LastUpdatedEpoch": 10101, + "SlashEpoch": 10101 + } + } +} +``` + +### StateMarketParticipants +StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "t026363": { + "Escrow": "0", + "Locked": "0" + } +} +``` + +### StateMarketStorageDeal +StateMarketStorageDeal returns information about the indicated deal + + +Perms: read + +Inputs: +```json +[ + 5432, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Proposal": { + 
"PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "State": { + "SectorStartEpoch": 10101, + "LastUpdatedEpoch": 10101, + "SlashEpoch": 10101 + } +} +``` + +### StateMinerActiveSectors +StateMinerActiveSectors returns info about sectors that a given miner is actively proving. + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### StateMinerAvailableBalance +StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateMinerDeadlines +StateMinerDeadlines returns all the proving deadlines for the given miner + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### StateMinerFaults +StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + 5, + 1 +] +``` + +### StateMinerInfo +StateMinerInfo returns info about the 
indicated miner + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Owner": "f01234", + "Worker": "f01234", + "NewWorker": "f01234", + "ControlAddresses": null, + "WorkerChangeEpoch": 10101, + "PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Multiaddrs": null, + "WindowPoStProofType": 8, + "SectorSize": 34359738368, + "WindowPoStPartitionSectors": 42, + "ConsensusFaultElapsed": 10101 +} +``` + +### StateMinerInitialPledgeCollateral +StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "SealProof": 8, + "SectorNumber": 9, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SealRandEpoch": 10101, + "DealIDs": null, + "Expiration": 10101, + "ReplaceCapacity": true, + "ReplaceSectorDeadline": 42, + "ReplaceSectorPartition": 42, + "ReplaceSectorNumber": 9 + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateMinerPartitions +StateMinerPartitions returns all partitions in the specified deadline + + +Perms: read + +Inputs: +```json +[ + "f01234", + 42, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### StateMinerPower +StateMinerPower returns the power of the indicated miner + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] 
+] +``` + +Response: +```json +{ + "MinerPower": { + "RawBytePower": "0", + "QualityAdjPower": "0" + }, + "TotalPower": { + "RawBytePower": "0", + "QualityAdjPower": "0" + }, + "HasMinPower": true +} +``` + +### StateMinerPreCommitDepositForPower +StateMinerInitialPledgeCollateral returns the precommit deposit for the specified miner's sector + + +Perms: read + +Inputs: +```json +[ + "f01234", + { + "SealProof": 8, + "SectorNumber": 9, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SealRandEpoch": 10101, + "DealIDs": null, + "Expiration": 10101, + "ReplaceCapacity": true, + "ReplaceSectorDeadline": 42, + "ReplaceSectorPartition": 42, + "ReplaceSectorNumber": 9 + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateMinerProvingDeadline +StateMinerProvingDeadline calculates the deadline at some epoch for a proving period +and returns the deadline-related calculations. 
+ + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "CurrentEpoch": 10101, + "PeriodStart": 10101, + "Index": 42, + "Open": 10101, + "Close": 10101, + "Challenge": 10101, + "FaultCutoff": 10101, + "WPoStPeriodDeadlines": 42, + "WPoStProvingPeriod": 10101, + "WPoStChallengeWindow": 10101, + "WPoStChallengeLookback": 10101, + "FaultDeclarationCutoff": 10101 +} +``` + +### StateMinerRecoveries +StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + 5, + 1 +] +``` + +### StateMinerSectorAllocated +StateMinerSectorAllocated checks if a sector is allocated + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `true` + +### StateMinerSectorCount +StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Live": 42, + "Active": 42, + "Faulty": 42 +} +``` + +### StateMinerSectors +StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included. 
+ + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + 0 + ], + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### StateNetworkName +StateNetworkName returns the name of the network the node is synced to + + +Perms: read + +Inputs: `null` + +Response: `"lotus"` + +### StateNetworkVersion +StateNetworkVersion returns the network version at the given tipset + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `13` + +### StateReadState +StateReadState returns the indicated actor's state. + + +Perms: read + +Inputs: +```json +[ + "f01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Balance": "0", + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "State": {} +} +``` + +### StateReplay +StateReplay replays a given message, assuming it was included in a block in the specified tipset. + +If a tipset key is provided, and a replacing message is found on chain, +the method will return an error saying that the message wasn't found + +If no tipset key is provided, the appropriate tipset is looked up, and if +the message was gas-repriced, the on-chain message will be replayed - in +that case the returned InvocResult.MsgCid will not match the Cid param + +If the caller wants to ensure that exactly the requested message was executed, +they MUST check that InvocResult.MsgCid is equal to the provided Cid. +Without this check both the requested and original message may appear as +successfully executed on-chain, which may look like a double-spend. 
+ +A replacing message is a message with a different CID, any of Gas values, and +different signature, but with all other parameters matching (source/destination, +nonce, params, etc.) + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "MsgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "GasCost": { + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "GasUsed": "0", + "BaseFeeBurn": "0", + "OverEstimationBurn": "0", + "MinerPenalty": "0", + "MinerTip": "0", + "Refund": "0", + "TotalCost": "0" + }, + "ExecutionTrace": { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": null, + "Subcalls": null + }, + "Error": "string value", + "Duration": 60000000000 +} +``` + +### StateSearchMsg +StateSearchMsg looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed + +NOTE: If a replacing message is found on 
chain, this method will return +a MsgLookup for the replacing message - the MsgLookup.Message will be a different +CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the +result of the execution of the replacing message. + +If the caller wants to ensure that exactly the requested message was executed, +they must check that MsgLookup.Message is equal to the provided 'cid', or set the +`allowReplaced` parameter to false. Without this check, and with `allowReplaced` +set to true, both the requested and original message may appear as +successfully executed on-chain, which may look like a double-spend. + +A replacing message is a message with a different CID, any of Gas values, and +different signature, but with all other parameters matching (source/destination, +nonce, params, etc.) + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 10101, + true +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ReturnDec": {}, + "TipSet": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101 +} +``` + +### StateSectorExpiration +StateSectorExpiration returns epoch at which given sector will expire + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "OnTime": 10101, + "Early": 10101 +} +``` + +### StateSectorGetInfo 
+StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found +NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate +expiration epoch + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": null, + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0" +} +``` + +### StateSectorPartition +StateSectorPartition finds deadline/partition with the specified sector + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Deadline": 42, + "Partition": 42 +} +``` + +### StateSectorPreCommitInfo +StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector + + +Perms: read + +Inputs: +```json +[ + "f01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Info": { + "SealProof": 8, + "SectorNumber": 9, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SealRandEpoch": 10101, + "DealIDs": null, + "Expiration": 10101, + "ReplaceCapacity": true, + "ReplaceSectorDeadline": 42, + "ReplaceSectorPartition": 42, + "ReplaceSectorNumber": 9 + }, + "PreCommitDeposit": "0", + 
 "PreCommitEpoch": 10101,
+  "DealWeight": "0",
+  "VerifiedDealWeight": "0"
+}
+```
+
+### StateVMCirculatingSupplyInternal
+StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset.
+This is the value reported by the runtime interface to actors code.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+  [
+    {
+      "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+    },
+    {
+      "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+    }
+  ]
+]
+```
+
+Response:
+```json
+{
+  "FilVested": "0",
+  "FilMined": "0",
+  "FilBurnt": "0",
+  "FilLocked": "0",
+  "FilCirculating": "0",
+  "FilReserveDisbursed": "0"
+}
+```
+
+### StateVerifiedClientStatus
+StateVerifiedClientStatus returns the data cap for the given address.
+Returns nil if there is no entry in the data cap table for the
+address.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+  "f01234",
+  [
+    {
+      "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+    },
+    {
+      "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+    }
+  ]
+]
+```
+
+Response: `"0"`
+
+### StateVerifiedRegistryRootKey
+StateVerifiedRegistryRootKey returns the address of the Verified Registry's root key
+
+
+Perms: read
+
+Inputs:
+```json
+[
+  [
+    {
+      "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+    },
+    {
+      "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+    }
+  ]
+]
+```
+
+Response: `"f01234"`
+
+### StateVerifierStatus
+StateVerifierStatus returns the data cap for the given address.
+Returns nil if there is no entry in the data cap table for the
+address.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+  "f01234",
+  [
+    {
+      "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+    },
+    {
+      "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+    }
+  ]
+]
+```
+
+Response: `"0"`
+
+### StateWaitMsg
+StateWaitMsg looks back up to limit epochs in the chain for a message.
+If not found, it blocks until the message arrives on chain, and gets to the +indicated confidence depth. + +NOTE: If a replacing message is found on chain, this method will return +a MsgLookup for the replacing message - the MsgLookup.Message will be a different +CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the +result of the execution of the replacing message. + +If the caller wants to ensure that exactly the requested message was executed, +they must check that MsgLookup.Message is equal to the provided 'cid', or set the +`allowReplaced` parameter to false. Without this check, and with `allowReplaced` +set to true, both the requested and original message may appear as +successfully executed on-chain, which may look like a double-spend. + +A replacing message is a message with a different CID, any of Gas values, and +different signature, but with all other parameters matching (source/destination, +nonce, params, etc.) + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 42, + 10101, + true +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ReturnDec": {}, + "TipSet": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101 +} +``` + +## Sync +The Sync method group contains methods for interacting with and +observing the lotus sync service. + + +### SyncCheckBad +SyncCheckBad checks if a block was marked as bad, and if it was, returns +the reason. 
+
+
+Perms: read
+
+Inputs:
+```json
+[
+  {
+    "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+  }
+]
+```
+
+Response: `"string value"`
+
+### SyncCheckpoint
+SyncCheckpoint marks a block as checkpointed, meaning that it won't ever fork away from it.
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  [
+    {
+      "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+    },
+    {
+      "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+    }
+  ]
+]
+```
+
+Response: `{}`
+
+### SyncIncomingBlocks
+SyncIncomingBlocks returns a channel streaming incoming, potentially not
+yet synced block headers.
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+  "Miner": "f01234",
+  "Ticket": {
+    "VRFProof": "Ynl0ZSBhcnJheQ=="
+  },
+  "ElectionProof": {
+    "WinCount": 9,
+    "VRFProof": "Ynl0ZSBhcnJheQ=="
+  },
+  "BeaconEntries": null,
+  "WinPoStProof": null,
+  "Parents": null,
+  "ParentWeight": "0",
+  "Height": 10101,
+  "ParentStateRoot": {
+    "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+  },
+  "ParentMessageReceipts": {
+    "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+  },
+  "Messages": {
+    "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+  },
+  "BLSAggregate": {
+    "Type": 2,
+    "Data": "Ynl0ZSBhcnJheQ=="
+  },
+  "Timestamp": 42,
+  "BlockSig": {
+    "Type": 2,
+    "Data": "Ynl0ZSBhcnJheQ=="
+  },
+  "ForkSignaling": 42,
+  "ParentBaseFee": "0"
+}
+```
+
+### SyncMarkBad
+SyncMarkBad marks a block as bad, meaning that it won't ever be synced.
+Use with extreme caution.
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  {
+    "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+  }
+]
+```
+
+Response: `{}`
+
+### SyncState
+SyncState returns the current status of the lotus sync system.
+
+
+Perms: read
+
+Inputs: `null`
+
+Response:
+```json
+{
+  "ActiveSyncs": null,
+  "VMApplied": 42
+}
+```
+
+### SyncSubmitBlock
+SyncSubmitBlock can be used to submit a newly created block to the
+network through this node
+
+
+Perms: write
+
+Inputs:
+```json
+[
+  {
+    "Header": {
+      "Miner": "f01234",
+      "Ticket": {
+        "VRFProof": "Ynl0ZSBhcnJheQ=="
+      },
+      "ElectionProof": {
+        "WinCount": 9,
+        "VRFProof": "Ynl0ZSBhcnJheQ=="
+      },
+      "BeaconEntries": null,
+      "WinPoStProof": null,
+      "Parents": null,
+      "ParentWeight": "0",
+      "Height": 10101,
+      "ParentStateRoot": {
+        "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+      },
+      "ParentMessageReceipts": {
+        "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+      },
+      "Messages": {
+        "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+      },
+      "BLSAggregate": {
+        "Type": 2,
+        "Data": "Ynl0ZSBhcnJheQ=="
+      },
+      "Timestamp": 42,
+      "BlockSig": {
+        "Type": 2,
+        "Data": "Ynl0ZSBhcnJheQ=="
+      },
+      "ForkSignaling": 42,
+      "ParentBaseFee": "0"
+    },
+    "BlsMessages": null,
+    "SecpkMessages": null
+  }
+]
+```
+
+Response: `{}`
+
+### SyncUnmarkAllBad
+SyncUnmarkAllBad purges bad block cache, making it possible to sync to chains previously marked as bad
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `{}`
+
+### SyncUnmarkBad
+SyncUnmarkBad unmarks a block as bad, making it possible to be validated and synced again.
+ + +Perms: admin + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `{}` + +### SyncValidateTipset +SyncValidateTipset indicates whether the provided tipset is valid or not + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `true` + +## Wallet + + +### WalletBalance +WalletBalance returns the balance of the given address at the current head of the chain. + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `"0"` + +### WalletDefaultAddress +WalletDefaultAddress returns the address marked as default in the wallet. + + +Perms: write + +Inputs: `null` + +Response: `"f01234"` + +### WalletDelete +WalletDelete deletes an address from the wallet. + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `{}` + +### WalletExport +WalletExport returns the private key of an address in the wallet. + + +Perms: admin + +Inputs: +```json +[ + "f01234" +] +``` + +Response: +```json +{ + "Type": "bls", + "PrivateKey": "Ynl0ZSBhcnJheQ==" +} +``` + +### WalletHas +WalletHas indicates whether the given address is in the wallet. + + +Perms: write + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `true` + +### WalletImport +WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet. + + +Perms: admin + +Inputs: +```json +[ + { + "Type": "bls", + "PrivateKey": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: `"f01234"` + +### WalletList +WalletList lists all the addresses in the wallet. + + +Perms: write + +Inputs: `null` + +Response: `null` + +### WalletNew +WalletNew creates a new address in the wallet with the given sigType. 
+Available key types: bls, secp256k1, secp256k1-ledger
+Support for numerical types: 1 - secp256k1, 2 - BLS is deprecated
+
+
+Perms: write
+
+Inputs:
+```json
+[
+  "bls"
+]
+```
+
+Response: `"f01234"`
+
+### WalletSetDefault
+WalletSetDefault marks the given address as the default one.
+
+
+Perms: write
+
+Inputs:
+```json
+[
+  "f01234"
+]
+```
+
+Response: `{}`
+
+### WalletSign
+WalletSign signs the given bytes using the given address.
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+  "f01234",
+  "Ynl0ZSBhcnJheQ=="
+]
+```
+
+Response:
+```json
+{
+  "Type": 2,
+  "Data": "Ynl0ZSBhcnJheQ=="
+}
+```
+
+### WalletSignMessage
+WalletSignMessage signs the given message using the given address.
+
+
+Perms: sign
+
+Inputs:
+```json
+[
+  "f01234",
+  {
+    "Version": 42,
+    "To": "f01234",
+    "From": "f01234",
+    "Nonce": 42,
+    "Value": "0",
+    "GasLimit": 9,
+    "GasFeeCap": "0",
+    "GasPremium": "0",
+    "Method": 1,
+    "Params": "Ynl0ZSBhcnJheQ==",
+    "CID": {
+      "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+    }
+  }
+]
+```
+
+Response:
+```json
+{
+  "Message": {
+    "Version": 42,
+    "To": "f01234",
+    "From": "f01234",
+    "Nonce": 42,
+    "Value": "0",
+    "GasLimit": 9,
+    "GasFeeCap": "0",
+    "GasPremium": "0",
+    "Method": 1,
+    "Params": "Ynl0ZSBhcnJheQ==",
+    "CID": {
+      "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+    }
+  },
+  "Signature": {
+    "Type": 2,
+    "Data": "Ynl0ZSBhcnJheQ=="
+  },
+  "CID": {
+    "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s"
+  }
+}
+```
+
+### WalletValidateAddress
+WalletValidateAddress validates whether a given string can be decoded as a well-formed address
+
+
+Perms: read
+
+Inputs:
+```json
+[
+  "string value"
+]
+```
+
+Response: `"f01234"`
+
+### WalletVerify
+WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid.
+The address does not have to be in the wallet.
+ + +Perms: read + +Inputs: +```json +[ + "f01234", + "Ynl0ZSBhcnJheQ==", + { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: `true` + diff --git a/documentation/en/architecture/architecture.md b/documentation/en/architecture/architecture.md index 61cd117bbdb..64914d53996 100644 --- a/documentation/en/architecture/architecture.md +++ b/documentation/en/architecture/architecture.md @@ -311,7 +311,7 @@ FIXME: Maybe mention the `Batching` interface as the developer will stumble upon FIXME: IPFS blocks vs Filecoin blocks ideally happens before this / here -The [`Blockstore` interface](`github.com/ipfs/go-ipfs-blockstore/blockstore.go`) structures the key-value pair +The [`Blockstore` interface](`github.com/filecoin-project/lotus/blockstore/blockstore.go`) structures the key-value pair into the CID format for the key and the [`Block` interface](`github.com/ipfs/go-block-format/blocks.go`) for the value. The `Block` value is just a raw string of bytes addressed by its hash, which is included in the CID key. diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md new file mode 100644 index 00000000000..aa51ab23ef8 --- /dev/null +++ b/documentation/en/cli-lotus-miner.md @@ -0,0 +1,1884 @@ +# lotus-miner +``` +NAME: + lotus-miner - Filecoin decentralized storage network miner + +USAGE: + lotus-miner [global options] command [command options] [arguments...] 
+ +VERSION: + 1.11.1-dev + +COMMANDS: + init Initialize a lotus miner repo + run Start a lotus miner process + stop Stop a running lotus miner + config Output default configuration + backup Create node metadata backup + version Print version + help, h Shows a list of commands or help for one command + CHAIN: + actor manipulate the miner actor + info Print miner info + DEVELOPER: + auth Manage RPC permissions + log Manage logging + wait-api Wait for lotus api to come online + fetch-params Fetch proving parameters + MARKET: + storage-deals Manage storage deals and related configuration + retrieval-deals Manage retrieval deals and related configuration + data-transfers Manage data transfers + NETWORK: + net Manage P2P Network + RETRIEVAL: + pieces interact with the piecestore + STORAGE: + sectors interact with sector store + proving View proving information + storage manage sector storage + sealing interact with sealing pipeline + +GLOBAL OPTIONS: + --actor value, -a value specify other actor to check state for (read only) + --color use color in display output (default: depends on output being a TTY) + --miner-repo value, --storagerepo value Specify miner repo path. flag(storagerepo) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH] + --help, -h show help (default: false) + --version, -v print the version (default: false) +``` + +## lotus-miner init +``` +NAME: + lotus-miner init - Initialize a lotus miner repo + +USAGE: + lotus-miner init command [command options] [arguments...] 
+ +COMMANDS: + restore Initialize a lotus miner repo from a backup + service Initialize a lotus miner sub-service + help, h Shows a list of commands or help for one command + +OPTIONS: + --actor value specify the address of an already created miner actor + --create-worker-key create separate worker key (default: false) + --worker value, -w value worker key to use (overrides --create-worker-key) + --owner value, -o value owner key to use + --sector-size value specify sector size to use (default: "32GiB") + --pre-sealed-sectors value specify set of presealed sectors for starting as a genesis miner + --pre-sealed-metadata value specify the metadata file for the presealed sectors + --nosync don't check full-node sync status (default: false) + --symlink-imported-sectors attempt to symlink to presealed sectors instead of copying them into place (default: false) + --no-local-storage don't use storageminer repo for sector storage (default: false) + --gas-premium value set gas premium for initialization messages in AttoFIL (default: "0") + --from value select which address to send actor creation message from + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner init restore +``` +NAME: + lotus-miner init restore - Initialize a lotus miner repo from a backup + +USAGE: + lotus-miner init restore [command options] [backupFile] + +OPTIONS: + --nosync don't check full-node sync status (default: false) + --config value config file (config.toml) + --storage-config value storage paths config (storage.json) + --help, -h show help (default: false) + +``` + +### lotus-miner init service +``` +NAME: + lotus-miner init service - Initialize a lotus miner sub-service + +USAGE: + lotus-miner init service [command options] [backupFile] + +OPTIONS: + --config value config file (config.toml) + --nosync don't check full-node sync status (default: false) + --type value type of service to be enabled + --api-sealer value sealer API 
info (lotus-miner auth api-info --perm=admin) + --api-sector-index value sector Index API info (lotus-miner auth api-info --perm=admin) + --help, -h show help (default: false) + +``` + +## lotus-miner run +``` +NAME: + lotus-miner run - Start a lotus miner process + +USAGE: + lotus-miner run [command options] [arguments...] + +OPTIONS: + --miner-api value 2345 + --enable-gpu-proving enable use of GPU for mining operations (default: true) + --nosync don't check full-node sync status (default: false) + --manage-fdlimit manage open file limit (default: true) + --help, -h show help (default: false) + +``` + +## lotus-miner stop +``` +NAME: + lotus-miner stop - Stop a running lotus miner + +USAGE: + lotus-miner stop [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner config +``` +NAME: + lotus-miner config - Output default configuration + +USAGE: + lotus-miner config [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner backup +``` +NAME: + lotus-miner backup - Create node metadata backup + +USAGE: + lotus-miner backup [command options] [backup file path] + +DESCRIPTION: + The backup command writes a copy of node metadata under the specified path + +Online backups: +For security reasons, the daemon must be have LOTUS_BACKUP_BASE_PATH env var set +to a path where backup files are supposed to be saved, and the path specified in +this command must be within this base path + +OPTIONS: + --offline create backup without the node running (default: false) + --help, -h show help (default: false) + +``` + +## lotus-miner version +``` +NAME: + lotus-miner version - Print version + +USAGE: + lotus-miner version [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner actor +``` +NAME: + lotus-miner actor - manipulate the miner actor + +USAGE: + lotus-miner actor command [command options] [arguments...] 
+ +COMMANDS: + set-addrs set addresses that your miner can be publicly dialed on + withdraw withdraw available balance + repay-debt pay down a miner's debt + set-peer-id set the peer id of your miner + set-owner Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner) + control Manage control addresses + propose-change-worker Propose a worker address change + confirm-change-worker Confirm a worker address change + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner actor set-addrs +``` +NAME: + lotus-miner actor set-addrs - set addresses that your miner can be publicly dialed on + +USAGE: + lotus-miner actor set-addrs [command options] [arguments...] + +OPTIONS: + --gas-limit value set gas limit (default: 0) + --unset unset address (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner actor withdraw +``` +NAME: + lotus-miner actor withdraw - withdraw available balance + +USAGE: + lotus-miner actor withdraw [command options] [amount (FIL)] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner actor repay-debt +``` +NAME: + lotus-miner actor repay-debt - pay down a miner's debt + +USAGE: + lotus-miner actor repay-debt [command options] [amount (FIL)] + +OPTIONS: + --from value optionally specify the account to send funds from + --help, -h show help (default: false) + +``` + +### lotus-miner actor set-peer-id +``` +NAME: + lotus-miner actor set-peer-id - set the peer id of your miner + +USAGE: + lotus-miner actor set-peer-id [command options] [arguments...] 
+ +OPTIONS: + --gas-limit value set gas limit (default: 0) + --help, -h show help (default: false) + +``` + +### lotus-miner actor set-owner +``` +NAME: + lotus-miner actor set-owner - Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner) + +USAGE: + lotus-miner actor set-owner [command options] [newOwnerAddress senderAddress] + +OPTIONS: + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner actor control +``` +NAME: + lotus-miner actor control - Manage control addresses + +USAGE: + lotus-miner actor control command [command options] [arguments...] + +COMMANDS: + list Get currently set control addresses + set Set control address(-es) + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner actor control list +``` +NAME: + lotus-miner actor control list - Get currently set control addresses + +USAGE: + lotus-miner actor control list [command options] [arguments...] 
+ +OPTIONS: + --verbose (default: false) + --color use color in display output (default: depends on output being a TTY) + --help, -h show help (default: false) + +``` + +#### lotus-miner actor control set +``` +NAME: + lotus-miner actor control set - Set control address(-es) + +USAGE: + lotus-miner actor control set [command options] [...address] + +OPTIONS: + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner actor propose-change-worker +``` +NAME: + lotus-miner actor propose-change-worker - Propose a worker address change + +USAGE: + lotus-miner actor propose-change-worker [command options] [address] + +OPTIONS: + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner actor confirm-change-worker +``` +NAME: + lotus-miner actor confirm-change-worker - Confirm a worker address change + +USAGE: + lotus-miner actor confirm-change-worker [command options] [address] + +OPTIONS: + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help (default: false) + +``` + +## lotus-miner info +``` +NAME: + lotus-miner info - Print miner info + +USAGE: + lotus-miner info command [command options] [arguments...] + +COMMANDS: + all dump all related miner info + help, h Shows a list of commands or help for one command + +OPTIONS: + --hide-sectors-info hide sectors info (default: false) + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner info all +``` +NAME: + lotus-miner info all - dump all related miner info + +USAGE: + lotus-miner info all [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner auth +``` +NAME: + lotus-miner auth - Manage RPC permissions + +USAGE: + lotus-miner auth command [command options] [arguments...] 
+ +COMMANDS: + create-token Create token + api-info Get token with API info required to connect to this node + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner auth create-token +``` +NAME: + lotus-miner auth create-token - Create token + +USAGE: + lotus-miner auth create-token [command options] [arguments...] + +OPTIONS: + --perm value permission to assign to the token, one of: read, write, sign, admin + --help, -h show help (default: false) + +``` + +### lotus-miner auth api-info +``` +NAME: + lotus-miner auth api-info - Get token with API info required to connect to this node + +USAGE: + lotus-miner auth api-info [command options] [arguments...] + +OPTIONS: + --perm value permission to assign to the token, one of: read, write, sign, admin + --help, -h show help (default: false) + +``` + +## lotus-miner log +``` +NAME: + lotus-miner log - Manage logging + +USAGE: + lotus-miner log command [command options] [arguments...] + +COMMANDS: + list List log systems + set-level Set log level + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner log list +``` +NAME: + lotus-miner log list - List log systems + +USAGE: + lotus-miner log list [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner log set-level +``` +NAME: + lotus-miner log set-level - Set log level + +USAGE: + lotus-miner log set-level [command options] [level] + +DESCRIPTION: + Set the log level for logging systems: + + The system flag can be specified multiple times. 
+ + eg) log set-level --system chain --system chainxchg debug + + Available Levels: + debug + info + warn + error + + Environment Variables: + GOLOG_LOG_LEVEL - Default log level for all log systems + GOLOG_LOG_FMT - Change output log format (json, nocolor) + GOLOG_FILE - Write logs to file + GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. file+stderr + + +OPTIONS: + --system value limit to log system + --help, -h show help (default: false) + +``` + +## lotus-miner wait-api +``` +NAME: + lotus-miner wait-api - Wait for lotus api to come online + +USAGE: + lotus-miner wait-api [command options] [arguments...] + +CATEGORY: + DEVELOPER + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner fetch-params +``` +NAME: + lotus-miner fetch-params - Fetch proving parameters + +USAGE: + lotus-miner fetch-params [command options] [sectorSize] + +CATEGORY: + DEVELOPER + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner storage-deals +``` +NAME: + lotus-miner storage-deals - Manage storage deals and related configuration + +USAGE: + lotus-miner storage-deals command [command options] [arguments...] + +COMMANDS: + import-data Manually import data for a deal + list List all deals for this miner + selection Configure acceptance criteria for storage deal proposals + set-ask Configure the miner's ask + get-ask Print the miner's ask + set-blocklist Set the miner's list of blocklisted piece CIDs + get-blocklist List the contents of the miner's piece CID blocklist + reset-blocklist Remove all entries from the miner's piece CID blocklist + set-seal-duration Set the expected time, in minutes, that you expect sealing sectors to take. Deals that start before this duration will be rejected. 
+ pending-publish list deals waiting in publish queue + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner storage-deals import-data +``` +NAME: + lotus-miner storage-deals import-data - Manually import data for a deal + +USAGE: + lotus-miner storage-deals import-data [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals list +``` +NAME: + lotus-miner storage-deals list - List all deals for this miner + +USAGE: + lotus-miner storage-deals list [command options] [arguments...] + +OPTIONS: + --verbose, -v (default: false) + --watch watch deal updates in real-time, rather than a one time list (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals selection +``` +NAME: + lotus-miner storage-deals selection - Configure acceptance criteria for storage deal proposals + +USAGE: + lotus-miner storage-deals selection command [command options] [arguments...] + +COMMANDS: + list List storage deal proposal selection criteria + reset Reset storage deal proposal selection criteria to default values + reject Configure criteria which necessitate automatic rejection + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner storage-deals selection list +``` +NAME: + lotus-miner storage-deals selection list - List storage deal proposal selection criteria + +USAGE: + lotus-miner storage-deals selection list [command options] [arguments...] 
+ +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus-miner storage-deals selection reset +``` +NAME: + lotus-miner storage-deals selection reset - Reset storage deal proposal selection criteria to default values + +USAGE: + lotus-miner storage-deals selection reset [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus-miner storage-deals selection reject +``` +NAME: + lotus-miner storage-deals selection reject - Configure criteria which necessitate automatic rejection + +USAGE: + lotus-miner storage-deals selection reject [command options] [arguments...] + +OPTIONS: + --online (default: false) + --offline (default: false) + --verified (default: false) + --unverified (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals set-ask +``` +NAME: + lotus-miner storage-deals set-ask - Configure the miner's ask + +USAGE: + lotus-miner storage-deals set-ask [command options] [arguments...] + +OPTIONS: + --price PRICE Set the price of the ask for unverified deals (specified as FIL / GiB / Epoch) to PRICE. + --verified-price PRICE Set the price of the ask for verified deals (specified as FIL / GiB / Epoch) to PRICE + --min-piece-size SIZE Set minimum piece size (w/bit-padding, in bytes) in ask to SIZE (default: 256B) + --max-piece-size SIZE Set maximum piece size (w/bit-padding, in bytes) in ask to SIZE (default: miner sector size) + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals get-ask +``` +NAME: + lotus-miner storage-deals get-ask - Print the miner's ask + +USAGE: + lotus-miner storage-deals get-ask [command options] [arguments...] 
+ +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals set-blocklist +``` +NAME: + lotus-miner storage-deals set-blocklist - Set the miner's list of blocklisted piece CIDs + +USAGE: + lotus-miner storage-deals set-blocklist [command options] [ (optional, will read from stdin if omitted)] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals get-blocklist +``` +NAME: + lotus-miner storage-deals get-blocklist - List the contents of the miner's piece CID blocklist + +USAGE: + lotus-miner storage-deals get-blocklist [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals reset-blocklist +``` +NAME: + lotus-miner storage-deals reset-blocklist - Remove all entries from the miner's piece CID blocklist + +USAGE: + lotus-miner storage-deals reset-blocklist [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals set-seal-duration +``` +NAME: + lotus-miner storage-deals set-seal-duration - Set the expected time, in minutes, that you expect sealing sectors to take. Deals that start before this duration will be rejected. + +USAGE: + lotus-miner storage-deals set-seal-duration [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner storage-deals pending-publish +``` +NAME: + lotus-miner storage-deals pending-publish - list deals waiting in publish queue + +USAGE: + lotus-miner storage-deals pending-publish [command options] [arguments...] + +OPTIONS: + --publish-now send a publish message now (default: false) + --help, -h show help (default: false) + +``` + +## lotus-miner retrieval-deals +``` +NAME: + lotus-miner retrieval-deals - Manage retrieval deals and related configuration + +USAGE: + lotus-miner retrieval-deals command [command options] [arguments...] 
+ +COMMANDS: + selection Configure acceptance criteria for retrieval deal proposals + list List all active retrieval deals for this miner + set-ask Configure the provider's retrieval ask + get-ask Get the provider's current retrieval ask configured by the provider in the ask-store using the set-ask CLI command + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner retrieval-deals selection +``` +NAME: + lotus-miner retrieval-deals selection - Configure acceptance criteria for retrieval deal proposals + +USAGE: + lotus-miner retrieval-deals selection command [command options] [arguments...] + +COMMANDS: + list List retrieval deal proposal selection criteria + reset Reset retrieval deal proposal selection criteria to default values + reject Configure criteria which necessitate automatic rejection + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner retrieval-deals selection list +``` +NAME: + lotus-miner retrieval-deals selection list - List retrieval deal proposal selection criteria + +USAGE: + lotus-miner retrieval-deals selection list [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus-miner retrieval-deals selection reset +``` +NAME: + lotus-miner retrieval-deals selection reset - Reset retrieval deal proposal selection criteria to default values + +USAGE: + lotus-miner retrieval-deals selection reset [command options] [arguments...] 
+ +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus-miner retrieval-deals selection reject +``` +NAME: + lotus-miner retrieval-deals selection reject - Configure criteria which necessitate automatic rejection + +USAGE: + lotus-miner retrieval-deals selection reject [command options] [arguments...] + +OPTIONS: + --online (default: false) + --offline (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner retrieval-deals list +``` +NAME: + lotus-miner retrieval-deals list - List all active retrieval deals for this miner + +USAGE: + lotus-miner retrieval-deals list [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner retrieval-deals set-ask +``` +NAME: + lotus-miner retrieval-deals set-ask - Configure the provider's retrieval ask + +USAGE: + lotus-miner retrieval-deals set-ask [command options] [arguments...] + +OPTIONS: + --price value Set the price of the ask for retrievals (FIL/GiB) + --unseal-price value Set the price to unseal + --payment-interval value Set the payment interval (in bytes) for retrieval (default: 1MiB) + --payment-interval-increase value Set the payment interval increase (in bytes) for retrieval (default: 1MiB) + --help, -h show help (default: false) + +``` + +### lotus-miner retrieval-deals get-ask +``` +NAME: + lotus-miner retrieval-deals get-ask - Get the provider's current retrieval ask configured by the provider in the ask-store using the set-ask CLI command + +USAGE: + lotus-miner retrieval-deals get-ask [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner data-transfers +``` +NAME: + lotus-miner data-transfers - Manage data transfers + +USAGE: + lotus-miner data-transfers command [command options] [arguments...] 
+ +COMMANDS: + list List ongoing data transfers for this miner + restart Force restart a stalled data transfer + cancel Force cancel a data transfer + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner data-transfers list +``` +NAME: + lotus-miner data-transfers list - List ongoing data transfers for this miner + +USAGE: + lotus-miner data-transfers list [command options] [arguments...] + +OPTIONS: + --verbose, -v print verbose transfer details (default: false) + --color use color in display output (default: depends on output being a TTY) + --completed show completed data transfers (default: false) + --watch watch deal updates in real-time, rather than a one time list (default: false) + --show-failed show failed/cancelled transfers (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner data-transfers restart +``` +NAME: + lotus-miner data-transfers restart - Force restart a stalled data transfer + +USAGE: + lotus-miner data-transfers restart [command options] [arguments...] + +OPTIONS: + --peerid value narrow to transfer with specific peer + --initiator specify only transfers where peer is/is not initiator (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner data-transfers cancel +``` +NAME: + lotus-miner data-transfers cancel - Force cancel a data transfer + +USAGE: + lotus-miner data-transfers cancel [command options] [arguments...] + +OPTIONS: + --peerid value narrow to transfer with specific peer + --initiator specify only transfers where peer is/is not initiator (default: false) + --cancel-timeout value time to wait for cancel to be sent to client (default: 5s) + --help, -h show help (default: false) + +``` + +## lotus-miner net +``` +NAME: + lotus-miner net - Manage P2P Network + +USAGE: + lotus-miner net command [command options] [arguments...] 
+ +COMMANDS: + peers Print peers + connect Connect to a peer + listen List listen addresses + id Get node identity + findpeer Find the addresses of a given peerID + scores Print peers' pubsub scores + reachability Print information about reachability from the internet + bandwidth Print bandwidth usage information + block Manage network connection gating rules + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner net peers +``` +NAME: + lotus-miner net peers - Print peers + +USAGE: + lotus-miner net peers [command options] [arguments...] + +OPTIONS: + --agent, -a Print agent name (default: false) + --extended, -x Print extended peer information in json (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner net connect +``` +NAME: + lotus-miner net connect - Connect to a peer + +USAGE: + lotus-miner net connect [command options] [peerMultiaddr|minerActorAddress] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner net listen +``` +NAME: + lotus-miner net listen - List listen addresses + +USAGE: + lotus-miner net listen [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner net id +``` +NAME: + lotus-miner net id - Get node identity + +USAGE: + lotus-miner net id [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner net findpeer +``` +NAME: + lotus-miner net findpeer - Find the addresses of a given peerID + +USAGE: + lotus-miner net findpeer [command options] [peerId] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner net scores +``` +NAME: + lotus-miner net scores - Print peers' pubsub scores + +USAGE: + lotus-miner net scores [command options] [arguments...] 
+ +OPTIONS: + --extended, -x print extended peer scores in json (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner net reachability +``` +NAME: + lotus-miner net reachability - Print information about reachability from the internet + +USAGE: + lotus-miner net reachability [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner net bandwidth +``` +NAME: + lotus-miner net bandwidth - Print bandwidth usage information + +USAGE: + lotus-miner net bandwidth [command options] [arguments...] + +OPTIONS: + --by-peer list bandwidth usage by peer (default: false) + --by-protocol list bandwidth usage by protocol (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner net block +``` +NAME: + lotus-miner net block - Manage network connection gating rules + +USAGE: + lotus-miner net block command [command options] [arguments...] + +COMMANDS: + add Add connection gating rules + remove Remove connection gating rules + list list connection gating rules + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner net block add +``` +NAME: + lotus-miner net block add - Add connection gating rules + +USAGE: + lotus-miner net block add command [command options] [arguments...] + +COMMANDS: + peer Block a peer + ip Block an IP address + subnet Block an IP subnet + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +##### lotus-miner net block add peer +``` +NAME: + lotus-miner net block add peer - Block a peer + +USAGE: + lotus-miner net block add peer [command options] ... 
+ +OPTIONS: + --help, -h show help (default: false) + +``` + +##### lotus-miner net block add ip +``` +NAME: + lotus-miner net block add ip - Block an IP address + +USAGE: + lotus-miner net block add ip [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +##### lotus-miner net block add subnet +``` +NAME: + lotus-miner net block add subnet - Block an IP subnet + +USAGE: + lotus-miner net block add subnet [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus-miner net block remove +``` +NAME: + lotus-miner net block remove - Remove connection gating rules + +USAGE: + lotus-miner net block remove command [command options] [arguments...] + +COMMANDS: + peer Unblock a peer + ip Unblock an IP address + subnet Unblock an IP subnet + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +##### lotus-miner net block remove peer +``` +NAME: + lotus-miner net block remove peer - Unblock a peer + +USAGE: + lotus-miner net block remove peer [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +##### lotus-miner net block remove ip +``` +NAME: + lotus-miner net block remove ip - Unblock an IP address + +USAGE: + lotus-miner net block remove ip [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +##### lotus-miner net block remove subnet +``` +NAME: + lotus-miner net block remove subnet - Unblock an IP subnet + +USAGE: + lotus-miner net block remove subnet [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus-miner net block list +``` +NAME: + lotus-miner net block list - list connection gating rules + +USAGE: + lotus-miner net block list [command options] [arguments...] 
+ +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner pieces +``` +NAME: + lotus-miner pieces - interact with the piecestore + +USAGE: + lotus-miner pieces command [command options] [arguments...] + +DESCRIPTION: + The piecestore is a database that tracks and manages data that is made available to the retrieval market + +COMMANDS: + list-pieces list registered pieces + list-cids list registered payload CIDs + piece-info get registered information for a given piece CID + cid-info get registered information for a given payload CID + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner pieces list-pieces +``` +NAME: + lotus-miner pieces list-pieces - list registered pieces + +USAGE: + lotus-miner pieces list-pieces [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner pieces list-cids +``` +NAME: + lotus-miner pieces list-cids - list registered payload CIDs + +USAGE: + lotus-miner pieces list-cids [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner pieces piece-info +``` +NAME: + lotus-miner pieces piece-info - get registered information for a given piece CID + +USAGE: + lotus-miner pieces piece-info [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner pieces cid-info +``` +NAME: + lotus-miner pieces cid-info - get registered information for a given payload CID + +USAGE: + lotus-miner pieces cid-info [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-miner sectors +``` +NAME: + lotus-miner sectors - interact with sector store + +USAGE: + lotus-miner sectors command [command options] [arguments...] 
+ +COMMANDS: + status Get the seal status of a sector by its number + list List sectors + refs List References to sectors + update-state ADVANCED: manually update the state of a sector, this may aid in error recovery + pledge store random data in a sector + extend Extend sector expiration + terminate Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector) + remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty)) + mark-for-upgrade Mark a committed capacity sector for replacement by a sector with deals + seal Manually start sealing a sector (filling any unused space with junk) + set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts + get-cc-collateral Get the collateral required to pledge a committed capacity sector + batching manage batch sector operations + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner sectors status +``` +NAME: + lotus-miner sectors status - Get the seal status of a sector by its number + +USAGE: + lotus-miner sectors status [command options] + +OPTIONS: + --log display event log (default: false) + --on-chain-info show sector on chain info (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner sectors list +``` +NAME: + lotus-miner sectors list - List sectors + +USAGE: + lotus-miner sectors list [command options] [arguments...] 
+ +OPTIONS: + --show-removed show removed sectors (default: false) + --color, -c use color in display output (default: depends on output being a TTY) + --fast don't show on-chain info for better performance (default: false) + --events display number of events the sector has received (default: false) + --seal-time display how long it took for the sector to be sealed (default: false) + --states value filter sectors by a comma-separated list of states + --help, -h show help (default: false) + +``` + +### lotus-miner sectors refs +``` +NAME: + lotus-miner sectors refs - List References to sectors + +USAGE: + lotus-miner sectors refs [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner sectors update-state +``` +NAME: + lotus-miner sectors update-state - ADVANCED: manually update the state of a sector, this may aid in error recovery + +USAGE: + lotus-miner sectors update-state [command options] + +OPTIONS: + --really-do-it pass this flag if you know what you are doing (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner sectors pledge +``` +NAME: + lotus-miner sectors pledge - store random data in a sector + +USAGE: + lotus-miner sectors pledge [command options] [arguments...] 
+ +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner sectors extend +``` +NAME: + lotus-miner sectors extend - Extend sector expiration + +USAGE: + lotus-miner sectors extend [command options] + +OPTIONS: + --new-expiration value new expiration epoch (default: 0) + --v1-sectors renews all v1 sectors up to the maximum possible lifetime (default: false) + --tolerance value when extending v1 sectors, don't try to extend sectors by fewer than this number of epochs (default: 20160) + --expiration-ignore value when extending v1 sectors, skip sectors whose current expiration is less than epochs from now (default: 120) + --expiration-cutoff value when extending v1 sectors, skip sectors whose current expiration is more than epochs from now (infinity if unspecified) (default: 0) + + --help, -h show help (default: false) + +``` + +### lotus-miner sectors terminate +``` +NAME: + lotus-miner sectors terminate - Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector) + +USAGE: + lotus-miner sectors terminate command [command options] + +COMMANDS: + flush Send a terminate message if there are sectors queued for termination + pending List sector numbers of sectors pending termination + help, h Shows a list of commands or help for one command + +OPTIONS: + --really-do-it pass this flag if you know what you are doing (default: false) + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner sectors terminate flush +``` +NAME: + lotus-miner sectors terminate flush - Send a terminate message if there are sectors queued for termination + +USAGE: + lotus-miner sectors terminate flush [command options] [arguments...] 
+ +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus-miner sectors terminate pending +``` +NAME: + lotus-miner sectors terminate pending - List sector numbers of sectors pending termination + +USAGE: + lotus-miner sectors terminate pending [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner sectors remove +``` +NAME: + lotus-miner sectors remove - Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty)) + +USAGE: + lotus-miner sectors remove [command options] + +OPTIONS: + --really-do-it pass this flag if you know what you are doing (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner sectors mark-for-upgrade +``` +NAME: + lotus-miner sectors mark-for-upgrade - Mark a committed capacity sector for replacement by a sector with deals + +USAGE: + lotus-miner sectors mark-for-upgrade [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner sectors seal +``` +NAME: + lotus-miner sectors seal - Manually start sealing a sector (filling any unused space with junk) + +USAGE: + lotus-miner sectors seal [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner sectors set-seal-delay +``` +NAME: + lotus-miner sectors set-seal-delay - Set the time, in minutes, that a new sector waits for deals before sealing starts + +USAGE: + lotus-miner sectors set-seal-delay [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner sectors get-cc-collateral +``` +NAME: + lotus-miner sectors get-cc-collateral - Get the collateral required to pledge a committed capacity sector + +USAGE: + lotus-miner sectors get-cc-collateral [command options] [arguments...] 
+ +OPTIONS: + --expiration value the epoch when the sector will expire (default: 0) + --help, -h show help (default: false) + +``` + +### lotus-miner sectors batching +``` +NAME: + lotus-miner sectors batching - manage batch sector operations + +USAGE: + lotus-miner sectors batching command [command options] [arguments...] + +COMMANDS: + commit list sectors waiting in commit batch queue + precommit list sectors waiting in precommit batch queue + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner sectors batching commit +``` +NAME: + lotus-miner sectors batching commit - list sectors waiting in commit batch queue + +USAGE: + lotus-miner sectors batching commit [command options] [arguments...] + +OPTIONS: + --publish-now send a batch now (default: false) + --help, -h show help (default: false) + +``` + +#### lotus-miner sectors batching precommit +``` +NAME: + lotus-miner sectors batching precommit - list sectors waiting in precommit batch queue + +USAGE: + lotus-miner sectors batching precommit [command options] [arguments...] + +OPTIONS: + --publish-now send a batch now (default: false) + --help, -h show help (default: false) + +``` + +## lotus-miner proving +``` +NAME: + lotus-miner proving - View proving information + +USAGE: + lotus-miner proving command [command options] [arguments...] 
+ +COMMANDS: + info View current state information + deadlines View the current proving period deadlines information + deadline View the current proving period deadline information by its index + faults View the currently known proving faulty sectors information + check Check sectors provable + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner proving info +``` +NAME: + lotus-miner proving info - View current state information + +USAGE: + lotus-miner proving info [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner proving deadlines +``` +NAME: + lotus-miner proving deadlines - View the current proving period deadlines information + +USAGE: + lotus-miner proving deadlines [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner proving deadline +``` +NAME: + lotus-miner proving deadline - View the current proving period deadline information by its index + +USAGE: + lotus-miner proving deadline [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner proving faults +``` +NAME: + lotus-miner proving faults - View the currently known proving faulty sectors information + +USAGE: + lotus-miner proving faults [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner proving check +``` +NAME: + lotus-miner proving check - Check sectors provable + +USAGE: + lotus-miner proving check [command options] + +OPTIONS: + --only-bad print only bad sectors (default: false) + --slow run slower checks (default: false) + --help, -h show help (default: false) + +``` + +## lotus-miner storage +``` +NAME: + lotus-miner storage - manage sector storage + +USAGE: + lotus-miner storage command [command options] [arguments...] 
+
+DESCRIPTION:
+   Sectors can be stored across many filesystem paths. These
+commands provide ways to manage the storage the miner will use to store sectors
+long term for proving (referred to as 'store') as well as how sectors will be
+stored while moving through the sealing pipeline (referred to as 'seal').
+
+COMMANDS:
+   attach   attach local storage path
+   list     list local storage paths
+   find     find sector in the storage system
+   cleanup  trigger cleanup actions
+   help, h  Shows a list of commands or help for one command
+
+OPTIONS:
+   --help, -h     show help (default: false)
+   --version, -v  print the version (default: false)
+
+```
+
+### lotus-miner storage attach
+```
+NAME:
+   lotus-miner storage attach - attach local storage path
+
+USAGE:
+   lotus-miner storage attach [command options] [arguments...]
+
+DESCRIPTION:
+   Storage can be attached to the miner using this command. The storage volume
+list is stored local to the miner in $LOTUS_MINER_PATH/storage.json. We do not
+recommend manually modifying this value without further understanding of the
+storage system.
+
+Each storage volume contains a configuration file which describes the
+capabilities of the volume. When the '--init' flag is provided, this file will
+be created using the additional flags.
+
+Weight
+A high weight value means data will be more likely to be stored in this path
+
+Seal
+Data for the sealing process will be stored here
+
+Store
+Finalized sectors that will be moved here for long term storage and be proven
+over time
+
+
+OPTIONS:
+   --init               initialize the path first (default: false)
+   --weight value       (for init) path weight (default: 10)
+   --seal               (for init) use path for sealing (default: false)
+   --store              (for init) use path for long-term storage (default: false)
+   --max-storage value  (for init) limit storage space for sectors (expensive for very large paths!)
+ --help, -h show help (default: false) + +``` + +### lotus-miner storage list +``` +NAME: + lotus-miner storage list - list local storage paths + +USAGE: + lotus-miner storage list command [command options] [arguments...] + +COMMANDS: + sectors get list of all sector files + help, h Shows a list of commands or help for one command + +OPTIONS: + --color use color in display output (default: depends on output being a TTY) + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus-miner storage list sectors +``` +NAME: + lotus-miner storage list sectors - get list of all sector files + +USAGE: + lotus-miner storage list sectors [command options] [arguments...] + +OPTIONS: + --color use color in display output (default: depends on output being a TTY) + --help, -h show help (default: false) + +``` + +### lotus-miner storage find +``` +NAME: + lotus-miner storage find - find sector in the storage system + +USAGE: + lotus-miner storage find [command options] [sector number] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner storage cleanup +``` +NAME: + lotus-miner storage cleanup - trigger cleanup actions + +USAGE: + lotus-miner storage cleanup [command options] [arguments...] + +OPTIONS: + --removed cleanup remaining files from removed sectors (default: true) + --help, -h show help (default: false) + +``` + +## lotus-miner sealing +``` +NAME: + lotus-miner sealing - interact with sealing pipeline + +USAGE: + lotus-miner sealing command [command options] [arguments...] 
+ +COMMANDS: + jobs list running jobs + workers list workers + sched-diag Dump internal scheduler state + abort Abort a running job + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner sealing jobs +``` +NAME: + lotus-miner sealing jobs - list running jobs + +USAGE: + lotus-miner sealing jobs [command options] [arguments...] + +OPTIONS: + --color use color in display output (default: depends on output being a TTY) + --show-ret-done show returned but not consumed calls (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner sealing workers +``` +NAME: + lotus-miner sealing workers - list workers + +USAGE: + lotus-miner sealing workers [command options] [arguments...] + +OPTIONS: + --color use color in display output (default: depends on output being a TTY) + --help, -h show help (default: false) + +``` + +### lotus-miner sealing sched-diag +``` +NAME: + lotus-miner sealing sched-diag - Dump internal scheduler state + +USAGE: + lotus-miner sealing sched-diag [command options] [arguments...] + +OPTIONS: + --force-sched (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner sealing abort +``` +NAME: + lotus-miner sealing abort - Abort a running job + +USAGE: + lotus-miner sealing abort [command options] [callid] + +OPTIONS: + --help, -h show help (default: false) + +``` diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md new file mode 100644 index 00000000000..dbfc8da295a --- /dev/null +++ b/documentation/en/cli-lotus-worker.md @@ -0,0 +1,171 @@ +# lotus-worker +``` +NAME: + lotus-worker - Remote miner worker + +USAGE: + lotus-worker [global options] command [command options] [arguments...] 
+ +VERSION: + 1.11.1-dev + +COMMANDS: + run Start lotus worker + info Print worker info + storage manage sector storage + set Manage worker settings + wait-quiet Block until all running tasks exit + tasks Manage task processing + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS: + --worker-repo value, --workerrepo value Specify worker repo path. flag workerrepo and env WORKER_PATH are DEPRECATION, will REMOVE SOON (default: "~/.lotusworker") [$LOTUS_WORKER_PATH, $WORKER_PATH] + --miner-repo value, --storagerepo value Specify miner repo path. flag storagerepo and env LOTUS_STORAGE_PATH are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH] + --enable-gpu-proving enable use of GPU for mining operations (default: true) + --help, -h show help (default: false) + --version, -v print the version (default: false) +``` + +## lotus-worker run +``` +NAME: + lotus-worker run - Start lotus worker + +USAGE: + lotus-worker run [command options] [arguments...] + +OPTIONS: + --listen value host address and port the worker api will listen on (default: "0.0.0.0:3456") + --no-local-storage don't use storageminer repo for sector storage (default: false) + --no-swap don't use swap (default: false) + --addpiece enable addpiece (default: true) + --precommit1 enable precommit1 (32G sectors: 1 core, 128GiB Memory) (default: true) + --unseal enable unsealing (32G sectors: 1 core, 128GiB Memory) (default: true) + --precommit2 enable precommit2 (32G sectors: all cores, 96GiB Memory) (default: true) + --commit enable commit (32G sectors: all cores or GPUs, 128GiB Memory + 64GiB swap) (default: true) + --parallel-fetch-limit value maximum fetch operations to run in parallel (default: 5) + --timeout value used when 'listen' is unspecified. 
must be a valid duration recognized by golang's time.ParseDuration function (default: "30m") + --help, -h show help (default: false) + +``` + +## lotus-worker info +``` +NAME: + lotus-worker info - Print worker info + +USAGE: + lotus-worker info [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-worker storage +``` +NAME: + lotus-worker storage - manage sector storage + +USAGE: + lotus-worker storage command [command options] [arguments...] + +COMMANDS: + attach attach local storage path + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-worker storage attach +``` +NAME: + lotus-worker storage attach - attach local storage path + +USAGE: + lotus-worker storage attach [command options] [arguments...] + +OPTIONS: + --init initialize the path first (default: false) + --weight value (for init) path weight (default: 10) + --seal (for init) use path for sealing (default: false) + --store (for init) use path for long-term storage (default: false) + --max-storage value (for init) limit storage space for sectors (expensive for very large paths!) + --help, -h show help (default: false) + +``` + +## lotus-worker set +``` +NAME: + lotus-worker set - Manage worker settings + +USAGE: + lotus-worker set [command options] [arguments...] + +OPTIONS: + --enabled enable/disable new task processing (default: true) + --help, -h show help (default: false) + +``` + +## lotus-worker wait-quiet +``` +NAME: + lotus-worker wait-quiet - Block until all running tasks exit + +USAGE: + lotus-worker wait-quiet [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-worker tasks +``` +NAME: + lotus-worker tasks - Manage task processing + +USAGE: + lotus-worker tasks command [command options] [arguments...] 
+ +COMMANDS: + enable Enable a task type + disable Disable a task type + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-worker tasks enable +``` +NAME: + lotus-worker tasks enable - Enable a task type + +USAGE: + lotus-worker tasks enable [command options] [UNS|C2|PC2|PC1|AP] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-worker tasks disable +``` +NAME: + lotus-worker tasks disable - Disable a task type + +USAGE: + lotus-worker tasks disable [command options] [UNS|C2|PC2|PC1|AP] + +OPTIONS: + --help, -h show help (default: false) + +``` diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md new file mode 100644 index 00000000000..6971ed6e77a --- /dev/null +++ b/documentation/en/cli-lotus.md @@ -0,0 +1,2811 @@ +# lotus +``` +NAME: + lotus - Filecoin decentralized storage network client + +USAGE: + lotus [global options] command [command options] [arguments...] 
+ +VERSION: + 1.11.1-dev + +COMMANDS: + daemon Start a lotus daemon process + backup Create node metadata backup + version Print version + help, h Shows a list of commands or help for one command + BASIC: + send Send funds between accounts + wallet Manage wallet + client Make deals, store data, retrieve data + msig Interact with a multisig wallet + filplus Interact with the verified registry actor used by Filplus + paych Manage payment channels + DEVELOPER: + auth Manage RPC permissions + mpool Manage message pool + state Interact with and query filecoin chain state + chain Interact with filecoin blockchain + log Manage logging + wait-api Wait for lotus api to come online + fetch-params Fetch proving parameters + NETWORK: + net Manage P2P Network + sync Inspect or interact with the chain syncer + STATUS: + status Check node status + +GLOBAL OPTIONS: + --interactive setting to false will disable interactive functionality of commands (default: false) + --force-send if true, will ignore pre-send checks (default: false) + --help, -h show help (default: false) + --version, -v print the version (default: false) +``` + +## lotus daemon +``` +NAME: + lotus daemon - Start a lotus daemon process + +USAGE: + lotus daemon command [command options] [arguments...] 
+ +COMMANDS: + stop Stop a running lotus daemon + help, h Shows a list of commands or help for one command + +OPTIONS: + --api value (default: "1234") + --genesis value genesis file to use for first node run + --bootstrap (default: true) + --import-chain value on first run, load chain from given file or url and validate + --import-snapshot value import chain state from a given chain export file or url + --halt-after-import halt the process after importing chain from file (default: false) + --pprof value specify name of file for writing cpu profile to + --profile value specify type of node + --manage-fdlimit manage open file limit (default: true) + --config value specify path of config file to use + --api-max-req-size value maximum API request size accepted by the JSON RPC server (default: 0) + --restore value restore from backup file + --restore-config value config file to use when restoring from backup + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus daemon stop +``` +NAME: + lotus daemon stop - Stop a running lotus daemon + +USAGE: + lotus daemon stop [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus backup +``` +NAME: + lotus backup - Create node metadata backup + +USAGE: + lotus backup [command options] [backup file path] + +DESCRIPTION: + The backup command writes a copy of node metadata under the specified path + +Online backups: +For security reasons, the daemon must be have LOTUS_BACKUP_BASE_PATH env var set +to a path where backup files are supposed to be saved, and the path specified in +this command must be within this base path + +OPTIONS: + --offline create backup without the node running (default: false) + --help, -h show help (default: false) + +``` + +## lotus version +``` +NAME: + lotus version - Print version + +USAGE: + lotus version [command options] [arguments...] 
+ +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus send +``` +NAME: + lotus send - Send funds between accounts + +USAGE: + lotus send [command options] [targetAddress] [amount] + +CATEGORY: + BASIC + +OPTIONS: + --from value optionally specify the account to send funds from + --gas-premium value specify gas price to use in AttoFIL (default: "0") + --gas-feecap value specify gas fee cap to use in AttoFIL (default: "0") + --gas-limit value specify gas limit (default: 0) + --nonce value specify the nonce to use (default: 0) + --method value specify method to invoke (default: 0) + --params-json value specify invocation parameters in json + --params-hex value specify invocation parameters in hex + --force Deprecated: use global 'force-send' (default: false) + --help, -h show help (default: false) + +``` + +## lotus wallet +``` +NAME: + lotus wallet - Manage wallet + +USAGE: + lotus wallet command [command options] [arguments...] + +COMMANDS: + new Generate a new key of the given type + list List wallet address + balance Get account balance + export export keys + import import keys + default Get default wallet address + set-default Set default wallet address + sign sign a message + verify verify the signature of a message + delete Delete an account from the wallet + market Interact with market balances + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus wallet new +``` +NAME: + lotus wallet new - Generate a new key of the given type + +USAGE: + lotus wallet new [command options] [bls|secp256k1 (default secp256k1)] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus wallet list +``` +NAME: + lotus wallet list - List wallet address + +USAGE: + lotus wallet list [command options] [arguments...] 
+
+OPTIONS:
+   --addr-only, -a  Only print addresses (default: false)
+   --id, -i         Output ID addresses (default: false)
+   --market, -m     Output market balances (default: false)
+   --help, -h       show help (default: false)
+
+```
+
+### lotus wallet balance
+```
+NAME:
+   lotus wallet balance - Get account balance
+
+USAGE:
+   lotus wallet balance [command options] [address]
+
+OPTIONS:
+   --help, -h  show help (default: false)
+
+```
+
+### lotus wallet export
+```
+NAME:
+   lotus wallet export - export keys
+
+USAGE:
+   lotus wallet export [command options] [address]
+
+OPTIONS:
+   --help, -h  show help (default: false)
+
+```
+
+### lotus wallet import
+```
+NAME:
+   lotus wallet import - import keys
+
+USAGE:
+   lotus wallet import [command options] [<path> (optional, will read from stdin if omitted)]
+
+OPTIONS:
+   --format value  specify input format for key (default: "hex-lotus")
+   --as-default    import the given key as your new default key (default: false)
+   --help, -h      show help (default: false)
+
+```
+
+### lotus wallet default
+```
+NAME:
+   lotus wallet default - Get default wallet address
+
+USAGE:
+   lotus wallet default [command options] [arguments...]
+
+OPTIONS:
+   --help, -h  show help (default: false)
+
+```
+
+### lotus wallet set-default
+```
+NAME:
+   lotus wallet set-default - Set default wallet address
+
+USAGE:
+   lotus wallet set-default [command options] [address]
+
+OPTIONS:
+   --help, -h  show help (default: false)
+
+```
+
+### lotus wallet sign
+```
+NAME:
+   lotus wallet sign - sign a message
+
+USAGE:
+   lotus wallet sign [command options] <signing address> <hexMessage>
+
+OPTIONS:
+   --help, -h  show help (default: false)
+
+```
+
+### lotus wallet verify
+```
+NAME:
+   lotus wallet verify - verify the signature of a message
+
+USAGE:
+   lotus wallet verify [command options] <signing address> <hexMessage> <signature>
+
+OPTIONS:
+   --help, -h  show help (default: false)
+
+```
+
+### lotus wallet delete
+```
+NAME:
+   lotus wallet delete - Delete an account from the wallet
+
+USAGE:
+   lotus wallet delete [command options] <address>
+ +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus wallet market +``` +NAME: + lotus wallet market - Interact with market balances + +USAGE: + lotus wallet market command [command options] [arguments...] + +COMMANDS: + withdraw Withdraw funds from the Storage Market Actor + add Add funds to the Storage Market Actor + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus wallet market withdraw +``` +NAME: + lotus wallet market withdraw - Withdraw funds from the Storage Market Actor + +USAGE: + lotus wallet market withdraw [command options] [amount (FIL) optional, otherwise will withdraw max available] + +OPTIONS: + --wallet value, -w value Specify address to withdraw funds to, otherwise it will use the default wallet address + --address value, -a value Market address to withdraw from (account or miner actor address, defaults to --wallet address) + --help, -h show help (default: false) + +``` + +#### lotus wallet market add +``` +NAME: + lotus wallet market add - Add funds to the Storage Market Actor + +USAGE: + lotus wallet market add [command options] + +OPTIONS: + --from value, -f value Specify address to move funds from, otherwise it will use the default wallet address + --address value, -a value Market address to move funds to (account or miner actor address, defaults to --from address) + --help, -h show help (default: false) + +``` + +## lotus client +``` +NAME: + lotus client - Make deals, store data, retrieve data + +USAGE: + lotus client command [command options] [arguments...] 
+ +COMMANDS: + help, h Shows a list of commands or help for one command + DATA: + import Import data + drop Remove import + local List locally imported data + stat Print information about a locally stored file (piece size, etc) + RETRIEVAL: + find Find data in the network + retrieve Retrieve data from network + cancel-retrieval Cancel a retrieval deal by deal ID; this also cancels the associated transfer + list-retrievals List retrieval market deals + STORAGE: + deal Initialize storage deal with a miner + query-ask Find a miners ask + list-deals List storage market deals + get-deal Print detailed deal information + list-asks List asks for top miners + deal-stats Print statistics about local storage deals + inspect-deal Inspect detailed information about deal's lifecycle and the various stages it goes through + UTIL: + commP Calculate the piece-cid (commP) of a CAR file + generate-car Generate a car file from input + balances Print storage market client balances + list-transfers List ongoing data transfers for deals + restart-transfer Force restart a stalled data transfer + cancel-transfer Force cancel a data transfer + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus client import +``` +NAME: + lotus client import - Import data + +USAGE: + lotus client import [command options] [inputPath] + +CATEGORY: + DATA + +OPTIONS: + --car import from a car file instead of a regular file (default: false) + --quiet, -q Output root CID only (default: false) + --help, -h show help (default: false) + +``` + +### lotus client drop +``` +NAME: + lotus client drop - Remove import + +USAGE: + lotus client drop [command options] [import ID...] + +CATEGORY: + DATA + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus client local +``` +NAME: + lotus client local - List locally imported data + +USAGE: + lotus client local [command options] [arguments...] 
+ +CATEGORY: + DATA + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus client stat +``` +NAME: + lotus client stat - Print information about a locally stored file (piece size, etc) + +USAGE: + lotus client stat [command options] + +CATEGORY: + DATA + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus client find +``` +NAME: + lotus client find - Find data in the network + +USAGE: + lotus client find [command options] [dataCid] + +CATEGORY: + RETRIEVAL + +OPTIONS: + --pieceCid value require data to be retrieved from a specific Piece CID + --help, -h show help (default: false) + +``` + +### lotus client retrieve +``` +NAME: + lotus client retrieve - Retrieve data from network + +USAGE: + lotus client retrieve [command options] [dataCid outputPath] + +CATEGORY: + RETRIEVAL + +OPTIONS: + --from value address to send transactions from + --car export to a car file instead of a regular file (default: false) + --miner value miner address for retrieval, if not present it'll use local discovery + --maxPrice value maximum price the client is willing to consider (default: 0.01 FIL) + --pieceCid value require data to be retrieved from a specific Piece CID + --allow-local (default: false) + --help, -h show help (default: false) + +``` + +### lotus client cancel-retrieval +``` +NAME: + lotus client cancel-retrieval - Cancel a retrieval deal by deal ID; this also cancels the associated transfer + +USAGE: + lotus client cancel-retrieval [command options] [arguments...] + +CATEGORY: + RETRIEVAL + +OPTIONS: + --deal-id value specify retrieval deal by deal ID (default: 0) + --help, -h show help (default: false) + +``` + +### lotus client list-retrievals +``` +NAME: + lotus client list-retrievals - List retrieval market deals + +USAGE: + lotus client list-retrievals [command options] [arguments...] 
+
+CATEGORY:
+   RETRIEVAL
+
+OPTIONS:
+   --verbose, -v  print verbose deal details (default: false)
+   --color        use color in display output (default: depends on output being a TTY)
+   --show-failed  show failed/failing deals (default: true)
+   --completed    show completed retrievals (default: false)
+   --watch        watch deal updates in real-time, rather than a one time list (default: false)
+   --help, -h     show help (default: false)
+
+```
+
+### lotus client deal
+```
+NAME:
+   lotus client deal - Initialize storage deal with a miner
+
+USAGE:
+   lotus client deal [command options] [dataCid miner price duration]
+
+CATEGORY:
+   STORAGE
+
+DESCRIPTION:
+   Make a deal with a miner.
+dataCid comes from running 'lotus client import'.
+miner is the address of the miner you wish to make a deal with.
+price is measured in FIL/Epoch. Miners usually don't accept a bid
+lower than their advertised ask (which is in FIL/GiB/Epoch). You can check a miners listed price
+with 'lotus client query-ask <miner address>'.
+duration is how long the miner should store the data for, in blocks.
+The minimum value is 518400 (6 months).
+ +OPTIONS: + --manual-piece-cid value manually specify piece commitment for data (dataCid must be to a car file) + --manual-piece-size value if manually specifying piece cid, used to specify size (dataCid must be to a car file) (default: 0) + --manual-stateless-deal instructs the node to send an offline deal without registering it with the deallist/fsm (default: false) + --from value specify address to fund the deal with + --start-epoch value specify the epoch that the deal should start at (default: -1) + --fast-retrieval indicates that data should be available for fast retrieval (default: true) + --verified-deal indicate that the deal counts towards verified client total (default: true if client is verified, false otherwise) + --provider-collateral value specify the requested provider collateral the miner should put up + --help, -h show help (default: false) + +``` + +### lotus client query-ask +``` +NAME: + lotus client query-ask - Find a miners ask + +USAGE: + lotus client query-ask [command options] [minerAddress] + +CATEGORY: + STORAGE + +OPTIONS: + --peerid value specify peer ID of node to make query against + --size value data size in bytes (default: 0) + --duration value deal duration (default: 0) + --help, -h show help (default: false) + +``` + +### lotus client list-deals +``` +NAME: + lotus client list-deals - List storage market deals + +USAGE: + lotus client list-deals [command options] [arguments...] + +CATEGORY: + STORAGE + +OPTIONS: + --verbose, -v print verbose deal details (default: false) + --color use color in display output (default: depends on output being a TTY) + --show-failed show failed/failing deals (default: false) + --watch watch deal updates in real-time, rather than a one time list (default: false) + --help, -h show help (default: false) + +``` + +### lotus client get-deal +``` +NAME: + lotus client get-deal - Print detailed deal information + +USAGE: + lotus client get-deal [command options] [arguments...] 
+ +CATEGORY: + STORAGE + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus client list-asks +``` +NAME: + lotus client list-asks - List asks for top miners + +USAGE: + lotus client list-asks [command options] [arguments...] + +CATEGORY: + STORAGE + +OPTIONS: + --by-ping sort by ping (default: false) + --output-format value Either 'text' or 'csv' (default: "text") + --help, -h show help (default: false) + +``` + +### lotus client deal-stats +``` +NAME: + lotus client deal-stats - Print statistics about local storage deals + +USAGE: + lotus client deal-stats [command options] [arguments...] + +CATEGORY: + STORAGE + +OPTIONS: + --newer-than value (default: 0s) + --help, -h show help (default: false) + +``` + +### lotus client inspect-deal +``` +NAME: + lotus client inspect-deal - Inspect detailed information about deal's lifecycle and the various stages it goes through + +USAGE: + lotus client inspect-deal [command options] [arguments...] + +CATEGORY: + STORAGE + +OPTIONS: + --deal-id value (default: 0) + --proposal-cid value + --help, -h show help (default: false) + +``` + +### lotus client commP +``` +NAME: + lotus client commP - Calculate the piece-cid (commP) of a CAR file + +USAGE: + lotus client commP [command options] [inputFile] + +CATEGORY: + UTIL + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus client generate-car +``` +NAME: + lotus client generate-car - Generate a car file from input + +USAGE: + lotus client generate-car [command options] [inputPath outputPath] + +CATEGORY: + UTIL + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus client balances +``` +NAME: + lotus client balances - Print storage market client balances + +USAGE: + lotus client balances [command options] [arguments...] 
+ +CATEGORY: + UTIL + +OPTIONS: + --client value specify storage client address + --help, -h show help (default: false) + +``` + +### lotus client list-transfers +``` +NAME: + lotus client list-transfers - List ongoing data transfers for deals + +USAGE: + lotus client list-transfers [command options] [arguments...] + +CATEGORY: + UTIL + +OPTIONS: + --verbose, -v print verbose transfer details (default: false) + --color use color in display output (default: depends on output being a TTY) + --completed show completed data transfers (default: false) + --watch watch deal updates in real-time, rather than a one time list (default: false) + --show-failed show failed/cancelled transfers (default: false) + --help, -h show help (default: false) + +``` + +### lotus client restart-transfer +``` +NAME: + lotus client restart-transfer - Force restart a stalled data transfer + +USAGE: + lotus client restart-transfer [command options] [arguments...] + +CATEGORY: + UTIL + +OPTIONS: + --peerid value narrow to transfer with specific peer + --initiator specify only transfers where peer is/is not initiator (default: true) + --help, -h show help (default: false) + +``` + +### lotus client cancel-transfer +``` +NAME: + lotus client cancel-transfer - Force cancel a data transfer + +USAGE: + lotus client cancel-transfer [command options] [arguments...] + +CATEGORY: + UTIL + +OPTIONS: + --peerid value narrow to transfer with specific peer + --initiator specify only transfers where peer is/is not initiator (default: true) + --cancel-timeout value time to wait for cancel to be sent to storage provider (default: 5s) + --help, -h show help (default: false) + +``` + +## lotus msig +``` +NAME: + lotus msig - Interact with a multisig wallet + +USAGE: + lotus msig command [command options] [arguments...] 
+ +COMMANDS: + create Create a new multisig wallet + inspect Inspect a multisig wallet + propose Propose a multisig transaction + propose-remove Propose to remove a signer + approve Approve a multisig message + add-propose Propose to add a signer + add-approve Approve a message to add a signer + add-cancel Cancel a message to add a signer + swap-propose Propose to swap signers + swap-approve Approve a message to swap signers + swap-cancel Cancel a message to swap signers + lock-propose Propose to lock up some balance + lock-approve Approve a message to lock up some balance + lock-cancel Cancel a message to lock up some balance + vested Gets the amount vested in an msig between two epochs + propose-threshold Propose setting a different signing threshold on the account + help, h Shows a list of commands or help for one command + +OPTIONS: + --confidence value number of block confirmations to wait for (default: 5) + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus msig create +``` +NAME: + lotus msig create - Create a new multisig wallet + +USAGE: + lotus msig create [command options] [address1 address2 ...] 
+ +OPTIONS: + --required value number of required approvals (uses number of signers provided if omitted) (default: 0) + --value value initial funds to give to multisig (default: "0") + --duration value length of the period over which funds unlock (default: "0") + --from value account to send the create message from + --help, -h show help (default: false) + +``` + +### lotus msig inspect +``` +NAME: + lotus msig inspect - Inspect a multisig wallet + +USAGE: + lotus msig inspect [command options] [address] + +OPTIONS: + --vesting Include vesting details (default: false) + --decode-params Decode parameters of transaction proposals (default: false) + --help, -h show help (default: false) + +``` + +### lotus msig propose +``` +NAME: + lotus msig propose - Propose a multisig transaction + +USAGE: + lotus msig propose [command options] [multisigAddress destinationAddress value (optional)] + +OPTIONS: + --from value account to send the propose message from + --help, -h show help (default: false) + +``` + +### lotus msig propose-remove +``` +NAME: + lotus msig propose-remove - Propose to remove a signer + +USAGE: + lotus msig propose-remove [command options] [multisigAddress signer] + +OPTIONS: + --decrease-threshold whether the number of required signers should be decreased (default: false) + --from value account to send the propose message from + --help, -h show help (default: false) + +``` + +### lotus msig approve +``` +NAME: + lotus msig approve - Approve a multisig message + +USAGE: + lotus msig approve [command options] [proposerAddress destination value [methodId methodParams]] + +OPTIONS: + --from value account to send the approve message from + --help, -h show help (default: false) + +``` + +### lotus msig add-propose +``` +NAME: + lotus msig add-propose - Propose to add a signer + +USAGE: + lotus msig add-propose [command options] [multisigAddress signer] + +OPTIONS: + --increase-threshold whether the number of required signers should be increased (default: 
false) + --from value account to send the propose message from + --help, -h show help (default: false) + +``` + +### lotus msig add-approve +``` +NAME: + lotus msig add-approve - Approve a message to add a signer + +USAGE: + lotus msig add-approve [command options] [multisigAddress proposerAddress txId newAddress increaseThreshold] + +OPTIONS: + --from value account to send the approve message from + --help, -h show help (default: false) + +``` + +### lotus msig add-cancel +``` +NAME: + lotus msig add-cancel - Cancel a message to add a signer + +USAGE: + lotus msig add-cancel [command options] [multisigAddress txId newAddress increaseThreshold] + +OPTIONS: + --from value account to send the approve message from + --help, -h show help (default: false) + +``` + +### lotus msig swap-propose +``` +NAME: + lotus msig swap-propose - Propose to swap signers + +USAGE: + lotus msig swap-propose [command options] [multisigAddress oldAddress newAddress] + +OPTIONS: + --from value account to send the approve message from + --help, -h show help (default: false) + +``` + +### lotus msig swap-approve +``` +NAME: + lotus msig swap-approve - Approve a message to swap signers + +USAGE: + lotus msig swap-approve [command options] [multisigAddress proposerAddress txId oldAddress newAddress] + +OPTIONS: + --from value account to send the approve message from + --help, -h show help (default: false) + +``` + +### lotus msig swap-cancel +``` +NAME: + lotus msig swap-cancel - Cancel a message to swap signers + +USAGE: + lotus msig swap-cancel [command options] [multisigAddress txId oldAddress newAddress] + +OPTIONS: + --from value account to send the approve message from + --help, -h show help (default: false) + +``` + +### lotus msig lock-propose +``` +NAME: + lotus msig lock-propose - Propose to lock up some balance + +USAGE: + lotus msig lock-propose [command options] [multisigAddress startEpoch unlockDuration amount] + +OPTIONS: + --from value account to send the propose message from + 
--help, -h show help (default: false) + +``` + +### lotus msig lock-approve +``` +NAME: + lotus msig lock-approve - Approve a message to lock up some balance + +USAGE: + lotus msig lock-approve [command options] [multisigAddress proposerAddress txId startEpoch unlockDuration amount] + +OPTIONS: + --from value account to send the approve message from + --help, -h show help (default: false) + +``` + +### lotus msig lock-cancel +``` +NAME: + lotus msig lock-cancel - Cancel a message to lock up some balance + +USAGE: + lotus msig lock-cancel [command options] [multisigAddress txId startEpoch unlockDuration amount] + +OPTIONS: + --from value account to send the cancel message from + --help, -h show help (default: false) + +``` + +### lotus msig vested +``` +NAME: + lotus msig vested - Gets the amount vested in an msig between two epochs + +USAGE: + lotus msig vested [command options] [multisigAddress] + +OPTIONS: + --start-epoch value start epoch to measure vesting from (default: 0) + --end-epoch value end epoch to stop measure vesting at (default: -1) + --help, -h show help (default: false) + +``` + +### lotus msig propose-threshold +``` +NAME: + lotus msig propose-threshold - Propose setting a different signing threshold on the account + +USAGE: + lotus msig propose-threshold [command options] + +OPTIONS: + --from value account to send the proposal from + --help, -h show help (default: false) + +``` + +## lotus filplus +``` +NAME: + lotus filplus - Interact with the verified registry actor used by Filplus + +USAGE: + lotus filplus command [command options] [arguments...] 
+ +COMMANDS: + grant-datacap give allowance to the specified verified client address + list-notaries list all notaries + list-clients list all verified clients + check-client-datacap check verified client remaining bytes + check-notaries-datacap check notaries remaining bytes + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus filplus grant-datacap +``` +NAME: + lotus filplus grant-datacap - give allowance to the specified verified client address + +USAGE: + lotus filplus grant-datacap [command options] [arguments...] + +OPTIONS: + --from value specify your notary address to send the message from + --help, -h show help (default: false) + +``` + +### lotus filplus list-notaries +``` +NAME: + lotus filplus list-notaries - list all notaries + +USAGE: + lotus filplus list-notaries [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus filplus list-clients +``` +NAME: + lotus filplus list-clients - list all verified clients + +USAGE: + lotus filplus list-clients [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus filplus check-client-datacap +``` +NAME: + lotus filplus check-client-datacap - check verified client remaining bytes + +USAGE: + lotus filplus check-client-datacap [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus filplus check-notaries-datacap +``` +NAME: + lotus filplus check-notaries-datacap - check notaries remaining bytes + +USAGE: + lotus filplus check-notaries-datacap [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus paych +``` +NAME: + lotus paych - Manage payment channels + +USAGE: + lotus paych command [command options] [arguments...] 
+ +COMMANDS: + add-funds Add funds to the payment channel between fromAddress and toAddress. Creates the payment channel if it doesn't already exist. + list List all locally registered payment channels + voucher Interact with payment channel vouchers + settle Settle a payment channel + status Show the status of an outbound payment channel + status-by-from-to Show the status of an active outbound payment channel by from/to addresses + collect Collect funds for a payment channel + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus paych add-funds +``` +NAME: + lotus paych add-funds - Add funds to the payment channel between fromAddress and toAddress. Creates the payment channel if it doesn't already exist. + +USAGE: + lotus paych add-funds [command options] [fromAddress toAddress amount] + +OPTIONS: + --restart-retrievals restart stalled retrieval deals on this payment channel (default: true) + --help, -h show help (default: false) + +``` + +### lotus paych list +``` +NAME: + lotus paych list - List all locally registered payment channels + +USAGE: + lotus paych list [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus paych voucher +``` +NAME: + lotus paych voucher - Interact with payment channel vouchers + +USAGE: + lotus paych voucher command [command options] [arguments...] 
+ +COMMANDS: + create Create a signed payment channel voucher + check Check validity of payment channel voucher + add Add payment channel voucher to local datastore + list List stored vouchers for a given payment channel + best-spendable Print vouchers with highest value that is currently spendable for each lane + submit Submit voucher to chain to update payment channel state + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus paych voucher create +``` +NAME: + lotus paych voucher create - Create a signed payment channel voucher + +USAGE: + lotus paych voucher create [command options] [channelAddress amount] + +OPTIONS: + --lane value specify payment channel lane to use (default: 0) + --help, -h show help (default: false) + +``` + +#### lotus paych voucher check +``` +NAME: + lotus paych voucher check - Check validity of payment channel voucher + +USAGE: + lotus paych voucher check [command options] [channelAddress voucher] + +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus paych voucher add +``` +NAME: + lotus paych voucher add - Add payment channel voucher to local datastore + +USAGE: + lotus paych voucher add [command options] [channelAddress voucher] + +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus paych voucher list +``` +NAME: + lotus paych voucher list - List stored vouchers for a given payment channel + +USAGE: + lotus paych voucher list [command options] [channelAddress] + +OPTIONS: + --export Print voucher as serialized string (default: false) + --help, -h show help (default: false) + +``` + +#### lotus paych voucher best-spendable +``` +NAME: + lotus paych voucher best-spendable - Print vouchers with highest value that is currently spendable for each lane + +USAGE: + lotus paych voucher best-spendable [command options] [channelAddress] + +OPTIONS: + --export Print voucher as 
serialized string (default: false) + --help, -h show help (default: false) + +``` + +#### lotus paych voucher submit +``` +NAME: + lotus paych voucher submit - Submit voucher to chain to update payment channel state + +USAGE: + lotus paych voucher submit [command options] [channelAddress voucher] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus paych settle +``` +NAME: + lotus paych settle - Settle a payment channel + +USAGE: + lotus paych settle [command options] [channelAddress] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus paych status +``` +NAME: + lotus paych status - Show the status of an outbound payment channel + +USAGE: + lotus paych status [command options] [channelAddress] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus paych status-by-from-to +``` +NAME: + lotus paych status-by-from-to - Show the status of an active outbound payment channel by from/to addresses + +USAGE: + lotus paych status-by-from-to [command options] [fromAddress toAddress] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus paych collect +``` +NAME: + lotus paych collect - Collect funds for a payment channel + +USAGE: + lotus paych collect [command options] [channelAddress] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus auth +``` +NAME: + lotus auth - Manage RPC permissions + +USAGE: + lotus auth command [command options] [arguments...] + +COMMANDS: + create-token Create token + api-info Get token with API info required to connect to this node + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus auth create-token +``` +NAME: + lotus auth create-token - Create token + +USAGE: + lotus auth create-token [command options] [arguments...] 
+ +OPTIONS: + --perm value permission to assign to the token, one of: read, write, sign, admin + --help, -h show help (default: false) + +``` + +### lotus auth api-info +``` +NAME: + lotus auth api-info - Get token with API info required to connect to this node + +USAGE: + lotus auth api-info [command options] [arguments...] + +OPTIONS: + --perm value permission to assign to the token, one of: read, write, sign, admin + --help, -h show help (default: false) + +``` + +## lotus mpool +``` +NAME: + lotus mpool - Manage message pool + +USAGE: + lotus mpool command [command options] [arguments...] + +COMMANDS: + pending Get pending messages + sub Subscribe to mpool changes + stat print mempool stats + replace replace a message in the mempool + find find a message in the mempool + config get or set current mpool configuration + gas-perf Check gas performance of messages in mempool + manage + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus mpool pending +``` +NAME: + lotus mpool pending - Get pending messages + +USAGE: + lotus mpool pending [command options] [arguments...] + +OPTIONS: + --local print pending messages for addresses in local wallet only (default: false) + --cids only print cids of messages in output (default: false) + --to value return messages to a given address + --from value return messages from a given address + --help, -h show help (default: false) + +``` + +### lotus mpool sub +``` +NAME: + lotus mpool sub - Subscribe to mpool changes + +USAGE: + lotus mpool sub [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus mpool stat +``` +NAME: + lotus mpool stat - print mempool stats + +USAGE: + lotus mpool stat [command options] [arguments...] 
+
+OPTIONS:
+   --local                   print stats for addresses in local wallet only (default: false)
+   --basefee-lookback value  number of blocks to look back for minimum basefee (default: 60)
+   --help, -h                show help (default: false)
+   
+```
+
+### lotus mpool replace
+```
+NAME:
+   lotus mpool replace - replace a message in the mempool
+
+USAGE:
+   lotus mpool replace [command options] <from nonce> | <message-cid>
+
+OPTIONS:
+   --gas-feecap value   gas feecap for new message (burn and pay to miner, attoFIL/GasUnit)
+   --gas-premium value  gas price for new message (pay to miner, attoFIL/GasUnit)
+   --gas-limit value    gas limit for new message (GasUnit) (default: 0)
+   --auto               automatically reprice the specified message (default: false)
+   --max-fee value      Spend up to X attoFIL for this message (applicable for auto mode)
+   --help, -h           show help (default: false)
+   
+```
+
+### lotus mpool find
+```
+NAME:
+   lotus mpool find - find a message in the mempool
+
+USAGE:
+   lotus mpool find [command options] [arguments...]
+
+OPTIONS:
+   --from value    search for messages with given 'from' address
+   --to value      search for messages with given 'to' address
+   --method value  search for messages with given method (default: 0)
+   --help, -h      show help (default: false)
+   
+```
+
+### lotus mpool config
+```
+NAME:
+   lotus mpool config - get or set current mpool configuration
+
+USAGE:
+   lotus mpool config [command options] [new-config]
+
+OPTIONS:
+   --help, -h  show help (default: false)
+   
+```
+
+### lotus mpool gas-perf
+```
+NAME:
+   lotus mpool gas-perf - Check gas performance of messages in mempool
+
+USAGE:
+   lotus mpool gas-perf [command options] [arguments...]
+
+OPTIONS:
+   --all       print gas performance for all mempool messages (default only prints for local) (default: false)
+   --help, -h  show help (default: false)
+   
+```
+
+### lotus mpool manage
+```
+```
+
+## lotus state
+```
+NAME:
+   lotus state - Interact with and query filecoin chain state
+
+USAGE:
+   lotus state command [command options] [arguments...]
+ +COMMANDS: + power Query network or miner power + sectors Query the sector set of a miner + active-sectors Query the active sector set of a miner + list-actors list all actors in the network + list-miners list all miners in the network + circulating-supply Get the exact current circulating supply of Filecoin + sector Get miner sector info + get-actor Print actor information + lookup Find corresponding ID address + replay Replay a particular message + sector-size Look up miners sector size + read-state View a json representation of an actors state + list-messages list messages on chain matching given criteria + compute-state Perform state computations + call Invoke a method on an actor locally + get-deal View on-chain deal info + wait-msg Wait for a message to appear on chain + search-msg Search to see whether a message has appeared on chain + miner-info Retrieve miner information + market Inspect the storage market actor + exec-trace Get the execution trace of a given message + network-version Returns the network version + miner-proving-deadline Retrieve information about a given miner's proving deadline + help, h Shows a list of commands or help for one command + +OPTIONS: + --tipset value specify tipset to call method on (pass comma separated array of cids) + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus state power +``` +NAME: + lotus state power - Query network or miner power + +USAGE: + lotus state power [command options] [ (optional)] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus state sectors +``` +NAME: + lotus state sectors - Query the sector set of a miner + +USAGE: + lotus state sectors [command options] [minerAddress] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus state active-sectors +``` +NAME: + lotus state active-sectors - Query the active sector set of a miner + +USAGE: + lotus state active-sectors [command options] [minerAddress] + 
+OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus state list-actors +``` +NAME: + lotus state list-actors - list all actors in the network + +USAGE: + lotus state list-actors [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus state list-miners +``` +NAME: + lotus state list-miners - list all miners in the network + +USAGE: + lotus state list-miners [command options] [arguments...] + +OPTIONS: + --sort-by value criteria to sort miners by (none, num-deals) + --help, -h show help (default: false) + +``` + +### lotus state circulating-supply +``` +NAME: + lotus state circulating-supply - Get the exact current circulating supply of Filecoin + +USAGE: + lotus state circulating-supply [command options] [arguments...] + +OPTIONS: + --vm-supply calculates the approximation of the circulating supply used internally by the VM (instead of the exact amount) (default: false) + --help, -h show help (default: false) + +``` + +### lotus state sector +``` +NAME: + lotus state sector - Get miner sector info + +USAGE: + lotus state sector [command options] [minerAddress] [sectorNumber] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus state get-actor +``` +NAME: + lotus state get-actor - Print actor information + +USAGE: + lotus state get-actor [command options] [actorAddress] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus state lookup +``` +NAME: + lotus state lookup - Find corresponding ID address + +USAGE: + lotus state lookup [command options] [address] + +OPTIONS: + --reverse, -r Perform reverse lookup (default: false) + --help, -h show help (default: false) + +``` + +### lotus state replay +``` +NAME: + lotus state replay - Replay a particular message + +USAGE: + lotus state replay [command options] + +OPTIONS: + --show-trace print out full execution trace for given message (default: false) + --detailed-gas print out detailed gas costs for given message 
(default: false) + --help, -h show help (default: false) + +``` + +### lotus state sector-size +``` +NAME: + lotus state sector-size - Look up miners sector size + +USAGE: + lotus state sector-size [command options] [minerAddress] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus state read-state +``` +NAME: + lotus state read-state - View a json representation of an actors state + +USAGE: + lotus state read-state [command options] [actorAddress] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus state list-messages +``` +NAME: + lotus state list-messages - list messages on chain matching given criteria + +USAGE: + lotus state list-messages [command options] [arguments...] + +OPTIONS: + --to value return messages to a given address + --from value return messages from a given address + --toheight value don't look before given block height (default: 0) + --cids print message CIDs instead of messages (default: false) + --help, -h show help (default: false) + +``` + +### lotus state compute-state +``` +NAME: + lotus state compute-state - Perform state computations + +USAGE: + lotus state compute-state [command options] [arguments...] 
+ +OPTIONS: + --vm-height value set the height that the vm will see (default: 0) + --apply-mpool-messages apply messages from the mempool to the computed state (default: false) + --show-trace print out full execution trace for given tipset (default: false) + --html generate html report (default: false) + --json generate json output (default: false) + --compute-state-output value a json file containing pre-existing compute-state output, to generate html reports without rerunning state changes + --no-timing don't show timing information in html traces (default: false) + --help, -h show help (default: false) + +``` + +### lotus state call +``` +NAME: + lotus state call - Invoke a method on an actor locally + +USAGE: + lotus state call [command options] [toAddress methodId params (optional)] + +OPTIONS: + --from value (default: "f00") + --value value specify value field for invocation (default: "0") + --ret value specify how to parse output (raw, decoded, base64, hex) (default: "decoded") + --encoding value specify params encoding to parse (base64, hex) (default: "base64") + --help, -h show help (default: false) + +``` + +### lotus state get-deal +``` +NAME: + lotus state get-deal - View on-chain deal info + +USAGE: + lotus state get-deal [command options] [dealId] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus state wait-msg +``` +NAME: + lotus state wait-msg - Wait for a message to appear on chain + +USAGE: + lotus state wait-msg [command options] [messageCid] + +OPTIONS: + --timeout value (default: "10m") + --help, -h show help (default: false) + +``` + +### lotus state search-msg +``` +NAME: + lotus state search-msg - Search to see whether a message has appeared on chain + +USAGE: + lotus state search-msg [command options] [messageCid] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus state miner-info +``` +NAME: + lotus state miner-info - Retrieve miner information + +USAGE: + lotus state miner-info [command options] 
[minerAddress] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus state market +``` +NAME: + lotus state market - Inspect the storage market actor + +USAGE: + lotus state market command [command options] [arguments...] + +COMMANDS: + balance Get the market balance (locked and escrowed) for a given account + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus state market balance +``` +NAME: + lotus state market balance - Get the market balance (locked and escrowed) for a given account + +USAGE: + lotus state market balance [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus state exec-trace +``` +NAME: + lotus state exec-trace - Get the execution trace of a given message + +USAGE: + lotus state exec-trace [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus state network-version +``` +NAME: + lotus state network-version - Returns the network version + +USAGE: + lotus state network-version [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus state miner-proving-deadline +``` +NAME: + lotus state miner-proving-deadline - Retrieve information about a given miner's proving deadline + +USAGE: + lotus state miner-proving-deadline [command options] [minerAddress] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus chain +``` +NAME: + lotus chain - Interact with filecoin blockchain + +USAGE: + lotus chain command [command options] [arguments...] 
+ +COMMANDS: + head Print chain head + getblock Get a block and print its details + read-obj Read the raw bytes of an object + delete-obj Delete an object from the chain blockstore + stat-obj Collect size and ipld link counts for objs + getmessage Get and print a message by its cid + sethead manually set the local nodes head tipset (Caution: normally only used for recovery) + list, love View a segment of the chain + get Get chain DAG node by path + bisect bisect chain for an event + export export chain to a car file + slash-consensus Report consensus fault + gas-price Estimate gas prices + inspect-usage Inspect block space usage of a given tipset + decode decode various types + encode encode various types + disputer interact with the window post disputer + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus chain head +``` +NAME: + lotus chain head - Print chain head + +USAGE: + lotus chain head [command options] [arguments...] 
+
+OPTIONS:
+   --help, -h  show help (default: false)
+   
+```
+
+### lotus chain getblock
+```
+NAME:
+   lotus chain getblock - Get a block and print its details
+
+USAGE:
+   lotus chain getblock [command options] [blockCid]
+
+OPTIONS:
+   --raw       print just the raw block header (default: false)
+   --help, -h  show help (default: false)
+   
+```
+
+### lotus chain read-obj
+```
+NAME:
+   lotus chain read-obj - Read the raw bytes of an object
+
+USAGE:
+   lotus chain read-obj [command options] [objectCid]
+
+OPTIONS:
+   --help, -h  show help (default: false)
+   
+```
+
+### lotus chain delete-obj
+```
+NAME:
+   lotus chain delete-obj - Delete an object from the chain blockstore
+
+USAGE:
+   lotus chain delete-obj [command options] [objectCid]
+
+DESCRIPTION:
+   WARNING: Removing wrong objects from the chain blockstore may lead to sync issues
+
+OPTIONS:
+   --really-do-it  (default: false)
+   --help, -h      show help (default: false)
+   
+```
+
+### lotus chain stat-obj
+```
+NAME:
+   lotus chain stat-obj - Collect size and ipld link counts for objs
+
+USAGE:
+   lotus chain stat-obj [command options] [cid]
+
+DESCRIPTION:
+   Collect object size and ipld link count for an object.
+
+   When a base is provided it will be walked first, and all links visited
+   will be ignored when the passed in object is walked.
+ + +OPTIONS: + --base value ignore links found in this obj + --help, -h show help (default: false) + +``` + +### lotus chain getmessage +``` +NAME: + lotus chain getmessage - Get and print a message by its cid + +USAGE: + lotus chain getmessage [command options] [messageCid] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus chain sethead +``` +NAME: + lotus chain sethead - manually set the local nodes head tipset (Caution: normally only used for recovery) + +USAGE: + lotus chain sethead [command options] [tipsetkey] + +OPTIONS: + --genesis reset head to genesis (default: false) + --epoch value reset head to given epoch (default: 0) + --help, -h show help (default: false) + +``` + +#### lotus chain list, love +``` +``` + +### lotus chain get +``` +NAME: + lotus chain get - Get chain DAG node by path + +USAGE: + lotus chain get [command options] [path] + +DESCRIPTION: + Get ipld node under a specified path: + + lotus chain get /ipfs/[cid]/some/path + + Path prefixes: + - /ipfs/[cid], /ipld/[cid] - traverse IPLD path + - /pstate - traverse from head.ParentStateRoot + + Note: + You can use special path elements to traverse through some data structures: + - /ipfs/[cid]/@H:elem - get 'elem' from hamt + - /ipfs/[cid]/@Hi:123 - get varint elem 123 from hamt + - /ipfs/[cid]/@Hu:123 - get uvarint elem 123 from hamt + - /ipfs/[cid]/@Ha:t01 - get element under Addr(t01).Bytes + - /ipfs/[cid]/@A:10 - get 10th amt element + - .../@Ha:t01/@state - get pretty map-based actor state + + List of --as-type types: + - raw + - block + - message + - smessage, signedmessage + - actor + - amt + - hamt-epoch + - hamt-address + - cronevent + - account-state + + +OPTIONS: + --as-type value specify type to interpret output as + --verbose (default: false) + --tipset value specify tipset for /pstate (pass comma separated array of cids) + --help, -h show help (default: false) + +``` + +### lotus chain bisect +``` +NAME: + lotus chain bisect - bisect chain for an event + 
+USAGE: + lotus chain bisect [command options] [minHeight maxHeight path shellCommand ] + +DESCRIPTION: + Bisect the chain state tree: + + lotus chain bisect [min height] [max height] '1/2/3/state/path' 'shell command' 'args' + + Returns the first tipset in which condition is true + v + [start] FFFFFFFTTT [end] + + Example: find height at which deal ID 100 000 appeared + - lotus chain bisect 1 32000 '@Ha:t03/1' jq -e '.[2] > 100000' + + For special path elements see 'chain get' help + + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus chain export +``` +NAME: + lotus chain export - export chain to a car file + +USAGE: + lotus chain export [command options] [outputPath] + +OPTIONS: + --tipset value specify tipset to start the export from (default: "@head") + --recent-stateroots value specify the number of recent state roots to include in the export (default: 0) + --skip-old-msgs (default: false) + --help, -h show help (default: false) + +``` + +### lotus chain slash-consensus +``` +NAME: + lotus chain slash-consensus - Report consensus fault + +USAGE: + lotus chain slash-consensus [command options] [blockCid1 blockCid2] + +OPTIONS: + --from value optionally specify the account to report consensus from + --extra value Extra block cid + --help, -h show help (default: false) + +``` + +### lotus chain gas-price +``` +NAME: + lotus chain gas-price - Estimate gas prices + +USAGE: + lotus chain gas-price [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus chain inspect-usage +``` +NAME: + lotus chain inspect-usage - Inspect block space usage of a given tipset + +USAGE: + lotus chain inspect-usage [command options] [arguments...] 
+ +OPTIONS: + --tipset value specify tipset to view block space usage of (default: "@head") + --length value length of chain to inspect block space usage for (default: 1) + --num-results value number of results to print per category (default: 10) + --help, -h show help (default: false) + +``` + +### lotus chain decode +``` +NAME: + lotus chain decode - decode various types + +USAGE: + lotus chain decode command [command options] [arguments...] + +COMMANDS: + params Decode message params + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus chain decode params +``` +NAME: + lotus chain decode params - Decode message params + +USAGE: + lotus chain decode params [command options] [toAddr method params] + +OPTIONS: + --tipset value + --encoding value specify input encoding to parse (default: "base64") + --help, -h show help (default: false) + +``` + +### lotus chain encode +``` +NAME: + lotus chain encode - encode various types + +USAGE: + lotus chain encode command [command options] [arguments...] + +COMMANDS: + params Encodes the given JSON params + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus chain encode params +``` +NAME: + lotus chain encode params - Encodes the given JSON params + +USAGE: + lotus chain encode params [command options] [toAddr method params] + +OPTIONS: + --tipset value + --encoding value specify input encoding to parse (default: "base64") + --help, -h show help (default: false) + +``` + +### lotus chain disputer +``` +NAME: + lotus chain disputer - interact with the window post disputer + +USAGE: + lotus chain disputer command [command options] [arguments...] 
+ +COMMANDS: + start Start the window post disputer + dispute Send a specific DisputeWindowedPoSt message + help, h Shows a list of commands or help for one command + +OPTIONS: + --max-fee value Spend up to X FIL per DisputeWindowedPoSt message + --from value optionally specify the account to send messages from + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus chain disputer start +``` +NAME: + lotus chain disputer start - Start the window post disputer + +USAGE: + lotus chain disputer start [command options] [minerAddress] + +OPTIONS: + --start-epoch value only start disputing PoSts after this epoch (default: 0) + --help, -h show help (default: false) + +``` + +#### lotus chain disputer dispute +``` +NAME: + lotus chain disputer dispute - Send a specific DisputeWindowedPoSt message + +USAGE: + lotus chain disputer dispute [command options] [minerAddress index postIndex] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus log +``` +NAME: + lotus log - Manage logging + +USAGE: + lotus log command [command options] [arguments...] + +COMMANDS: + list List log systems + set-level Set log level + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus log list +``` +NAME: + lotus log list - List log systems + +USAGE: + lotus log list [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus log set-level +``` +NAME: + lotus log set-level - Set log level + +USAGE: + lotus log set-level [command options] [level] + +DESCRIPTION: + Set the log level for logging systems: + + The system flag can be specified multiple times. 
+ + eg) log set-level --system chain --system chainxchg debug + + Available Levels: + debug + info + warn + error + + Environment Variables: + GOLOG_LOG_LEVEL - Default log level for all log systems + GOLOG_LOG_FMT - Change output log format (json, nocolor) + GOLOG_FILE - Write logs to file + GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. file+stderr + + +OPTIONS: + --system value limit to log system + --help, -h show help (default: false) + +``` + +## lotus wait-api +``` +NAME: + lotus wait-api - Wait for lotus api to come online + +USAGE: + lotus wait-api [command options] [arguments...] + +CATEGORY: + DEVELOPER + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus fetch-params +``` +NAME: + lotus fetch-params - Fetch proving parameters + +USAGE: + lotus fetch-params [command options] [sectorSize] + +CATEGORY: + DEVELOPER + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus net +``` +NAME: + lotus net - Manage P2P Network + +USAGE: + lotus net command [command options] [arguments...] + +COMMANDS: + peers Print peers + connect Connect to a peer + listen List listen addresses + id Get node identity + findpeer Find the addresses of a given peerID + scores Print peers' pubsub scores + reachability Print information about reachability from the internet + bandwidth Print bandwidth usage information + block Manage network connection gating rules + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus net peers +``` +NAME: + lotus net peers - Print peers + +USAGE: + lotus net peers [command options] [arguments...] 
+ +OPTIONS: + --agent, -a Print agent name (default: false) + --extended, -x Print extended peer information in json (default: false) + --help, -h show help (default: false) + +``` + +### lotus net connect +``` +NAME: + lotus net connect - Connect to a peer + +USAGE: + lotus net connect [command options] [peerMultiaddr|minerActorAddress] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus net listen +``` +NAME: + lotus net listen - List listen addresses + +USAGE: + lotus net listen [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus net id +``` +NAME: + lotus net id - Get node identity + +USAGE: + lotus net id [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus net findpeer +``` +NAME: + lotus net findpeer - Find the addresses of a given peerID + +USAGE: + lotus net findpeer [command options] [peerId] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus net scores +``` +NAME: + lotus net scores - Print peers' pubsub scores + +USAGE: + lotus net scores [command options] [arguments...] + +OPTIONS: + --extended, -x print extended peer scores in json (default: false) + --help, -h show help (default: false) + +``` + +### lotus net reachability +``` +NAME: + lotus net reachability - Print information about reachability from the internet + +USAGE: + lotus net reachability [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus net bandwidth +``` +NAME: + lotus net bandwidth - Print bandwidth usage information + +USAGE: + lotus net bandwidth [command options] [arguments...] 
+ +OPTIONS: + --by-peer list bandwidth usage by peer (default: false) + --by-protocol list bandwidth usage by protocol (default: false) + --help, -h show help (default: false) + +``` + +### lotus net block +``` +NAME: + lotus net block - Manage network connection gating rules + +USAGE: + lotus net block command [command options] [arguments...] + +COMMANDS: + add Add connection gating rules + remove Remove connection gating rules + list list connection gating rules + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +#### lotus net block add +``` +NAME: + lotus net block add - Add connection gating rules + +USAGE: + lotus net block add command [command options] [arguments...] + +COMMANDS: + peer Block a peer + ip Block an IP address + subnet Block an IP subnet + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +##### lotus net block add peer +``` +NAME: + lotus net block add peer - Block a peer + +USAGE: + lotus net block add peer [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +##### lotus net block add ip +``` +NAME: + lotus net block add ip - Block an IP address + +USAGE: + lotus net block add ip [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +##### lotus net block add subnet +``` +NAME: + lotus net block add subnet - Block an IP subnet + +USAGE: + lotus net block add subnet [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus net block remove +``` +NAME: + lotus net block remove - Remove connection gating rules + +USAGE: + lotus net block remove command [command options] [arguments...] 
+ +COMMANDS: + peer Unblock a peer + ip Unblock an IP address + subnet Unblock an IP subnet + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +##### lotus net block remove peer +``` +NAME: + lotus net block remove peer - Unblock a peer + +USAGE: + lotus net block remove peer [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +##### lotus net block remove ip +``` +NAME: + lotus net block remove ip - Unblock an IP address + +USAGE: + lotus net block remove ip [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +##### lotus net block remove subnet +``` +NAME: + lotus net block remove subnet - Unblock an IP subnet + +USAGE: + lotus net block remove subnet [command options] ... + +OPTIONS: + --help, -h show help (default: false) + +``` + +#### lotus net block list +``` +NAME: + lotus net block list - list connection gating rules + +USAGE: + lotus net block list [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus sync +``` +NAME: + lotus sync - Inspect or interact with the chain syncer + +USAGE: + lotus sync command [command options] [arguments...] 
+ +COMMANDS: + status check sync status + wait Wait for sync to be complete + mark-bad Mark the given block as bad, will prevent syncing to a chain that contains it + unmark-bad Unmark the given block as bad, makes it possible to sync to a chain containing it + check-bad check if the given block was marked bad, and for what reason + checkpoint mark a certain tipset as checkpointed; the node will never fork away from this tipset + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus sync status +``` +NAME: + lotus sync status - check sync status + +USAGE: + lotus sync status [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus sync wait +``` +NAME: + lotus sync wait - Wait for sync to be complete + +USAGE: + lotus sync wait [command options] [arguments...] + +OPTIONS: + --watch don't exit after node is synced (default: false) + --help, -h show help (default: false) + +``` + +### lotus sync mark-bad +``` +NAME: + lotus sync mark-bad - Mark the given block as bad, will prevent syncing to a chain that contains it + +USAGE: + lotus sync mark-bad [command options] [blockCid] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus sync unmark-bad +``` +NAME: + lotus sync unmark-bad - Unmark the given block as bad, makes it possible to sync to a chain containing it + +USAGE: + lotus sync unmark-bad [command options] [blockCid] + +OPTIONS: + --all drop the entire bad block cache (default: false) + --help, -h show help (default: false) + +``` + +### lotus sync check-bad +``` +NAME: + lotus sync check-bad - check if the given block was marked bad, and for what reason + +USAGE: + lotus sync check-bad [command options] [blockCid] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus sync checkpoint +``` +NAME: + lotus sync checkpoint - mark a certain tipset as 
checkpointed; the node will never fork away from this tipset + +USAGE: + lotus sync checkpoint [command options] [tipsetKey] + +OPTIONS: + --epoch value checkpoint the tipset at the given epoch (default: 0) + --help, -h show help (default: false) + +``` + +## lotus status +``` +NAME: + lotus status - Check node status + +USAGE: + lotus status [command options] [arguments...] + +CATEGORY: + STATUS + +OPTIONS: + --chain include chain health status (default: false) + --help, -h show help (default: false) + +``` diff --git a/documentation/en/jaeger-tracing.md b/documentation/en/jaeger-tracing.md index bbe4d30523a..ec9351d5322 100644 --- a/documentation/en/jaeger-tracing.md +++ b/documentation/en/jaeger-tracing.md @@ -12,7 +12,20 @@ Currently it is set up to use Jaeger, though other tracing backends should be fa To easily run and view tracing locally, first, install jaeger. The easiest way to do this is to [download the binaries](https://www.jaegertracing.io/download/) and then run the `jaeger-all-in-one` binary. This will start up jaeger, listen for spans on `localhost:6831`, and expose a web UI for viewing traces on `http://localhost:16686/`. -Now, to start sending traces from Lotus to Jaeger, set the environment variable `LOTUS_JAEGER` to `localhost:6831`, and start the `lotus daemon`. +Now, to start sending traces from Lotus to Jaeger, set the environment variable and start the daemon. + +```bash +export LOTUS_JAEGER_AGENT_ENDPOINT=127.0.0.1:6831 +lotus daemon +``` + +Alternatively, the agent endpoint can also be configured by a pair of environment variables to provide the host and port. The following snippet is functionally equivalent to the previous. + +```bash +export LOTUS_JAEGER_AGENT_HOST=127.0.0.1 +export LOTUS_JAEGER_AGENT_PORT=6831 +lotus daemon +``` Now, to view any generated traces, open up `http://localhost:16686/` in your browser.
diff --git a/documentation/misc/RELEASE_ISSUE_TEMPLATE.md b/documentation/misc/RELEASE_ISSUE_TEMPLATE.md new file mode 100644 index 00000000000..8adab967106 --- /dev/null +++ b/documentation/misc/RELEASE_ISSUE_TEMPLATE.md @@ -0,0 +1,115 @@ +> Release Issue Template + +# Lotus X.Y.Z Release + +We're happy to announce Lotus X.Y.Z... + +## 🗺 Must-dos for the release + +## 🌟 Nice-to-haves for the release + + + +## 🚢 Estimated shipping date + + + +## 🔦 Highlights + +< top highlights for this release notes > + +## ✅ Release Checklist + +**Note for whomever is owning the release:** please capture notes as comments in this issue for anything you noticed that could be improved for future releases. There is a *Post Release* step below for incorporating changes back into the [RELEASE_ISSUE_TEMPLATE](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md), and this is easier done by collecting notes from along the way rather than just thinking about it at the end. + +First steps: + + - [ ] Fork a new branch (`release/vX.Y.Z`) from `master` and make any further release related changes to this branch. If any "non-trivial" changes get added to the release, uncheck all the checkboxes and return to this stage. + - [ ] Bump the version in `version.go` in the `master` branch to `vX.(Y+1).0-dev`. + +Prepping an RC: + +- [ ] version string in `build/version.go` has been updated (in the `release/vX.Y.Z` branch). +- [ ] tag commit with `vX.Y.Z-rcN` +- [ ] cut a pre-release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true) + +Testing an RC: + +- [ ] **Stage 0 - Automated Testing** + - Automated Testing + - [ ] CI: Ensure that all tests are passing. 
+ - [ ] Testground tests + +- [ ] **Stage 1 - Internal Testing** + - Binaries + - [ ] Ensure the RC release has downloadable binaries + - Upgrade our testnet infra + - [ ] Wait 24 hours, confirm nodes stay in sync + - Upgrade our mainnet infra + - [ ] Subset of development full archival nodes + - [ ] Subset of bootstrappers (1 per region) + - [ ] Confirm nodes stay in sync + - Metrics report + - Block validation time + - Memory / CPU usage + - Number of goroutines + - IPLD block read latency + - Bandwidth usage + - [ ] If anything has worsened significantly, investigate + fix + - Confirm the following work (some combination of Testground / Calibnet / Mainnet / beta users) + - [ ] Seal a sector + - [ ] make a deal + - [ ] Submit a PoSt + - [ ] (optional) let a sector go faulty, and see it be recovered + +- [ ] **Stage 2 - Community Testing** + - [ ] Inform beta miners (@lotus-early-testers-miner in Filecoin Slack #fil-lotus) + - [ ] Ask close ecosystem partners to test their projects (@lotus-early-testers-eco-dev in Filecoin slack #fil-lotus) + - [ ] Powergate + - [ ] Glif + - [ ] Zondax + - [ ] Stats dashboard + - [ ] Community dashboards + - [ ] Infura + - [ ] Sentinel + - [ ] Protofire + - [ ] Fleek + +- [ ] **Stage 3 - Community Prod Testing** + - [ ] Documentation + - [ ] Ensure that [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) is up to date + - [ ] Check if any [config](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/#configuration) updates are needed + - [ ] Invite the wider community through (link to the release issue): + - [ ] Check `Create a discussion for this release` when tagging for the major rcs (new features, hot-fixes) release + - [ ] Link the discussion in #fil-lotus on Filecoin slack + +- [ ] **Stage 4 - Release** + - [ ] Final preparation + - [ ] Verify that version string in [`version.go`](https://github.com/filecoin-project/lotus/blob/master/build/version.go) has been updated.
+ - [ ] Ensure that [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) is up to date + - [ ] Prep the changelog using `scripts/mkreleaselog`, and add it to `CHANGELOG.md` + - [ ] Merge `release-vX.Y.Z` into the `releases` branch. + - [ ] Tag this merge commit (on the `releases` branch) with `vX.Y.Z` + - [ ] Cut the release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true&target=releases). + - [ ] Check `Create a discussion for this release` when tagging the release + - [ ] Final announcements + - [ ] Update network.filecoin.io for mainnet, calib and nerpa. + - [ ] repost in #fil-lotus in filecoin slack + - [ ] Inform node providers (Protofire, Digital Ocean..) + +- [ ] **Post-Release** + - [ ] Merge the `releases` branch back into `master`, ignoring the changes to `version.go` (keep the `-dev` version from master). Do NOT delete the `releases` branch when doing so! + - [ ] Update [RELEASE_ISSUE_TEMPLATE.md](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md) with any improvements determined from this latest release iteration. + - [ ] Create an issue using [RELEASE_ISSUE_TEMPLATE.md](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md) for the _next_ release. + +## ❤️ Contributors + +< list generated by scripts/mkreleaselog > + +Would you like to contribute to Lotus and don't know how? Well, there are a few places you can get started: + +- TODO + +## ⁉️ Do you have questions? + +Leave a comment [here]() if you have any questions.
diff --git a/documentation/misc/actors_version_checklist.md b/documentation/misc/actors_version_checklist.md new file mode 100644 index 00000000000..1fae4bd8aa5 --- /dev/null +++ b/documentation/misc/actors_version_checklist.md @@ -0,0 +1,19 @@ +### Actor version integration checklist + +- [ ] Import new actors +- [ ] Define upgrade heights in `build/params_` +- [ ] Generate adapters + - [ ] Add the new version in `chain/actors/agen/main.go` + - [ ] Update adapter code in `chain/actors/builtin` if needed +- [ ] Update `chain/actors/policy/policy.go` +- [ ] Update `chain/actors/version.go` +- [ ] Register in `chain/vm/invoker.go` +- [ ] Register in `chain/vm/mkactor.go` +- [ ] Update `chain/types/state.go` +- [ ] Update `chain/state/statetree.go` (New / Load) +- [ ] Update `chain/stmgr/forks.go` + - [ ] Schedule + - [ ] Migration +- [ ] Update upgrade schedule in `api/test/test.go` and `chain/sync_test.go` +- [ ] Update `NewestNetworkVersion` in `build/params_shared_vals.go` +- [ ] Register in init in `chain/stmgr/utils.go` diff --git a/documentation/misc/gas_balancing.md b/documentation/misc/gas_balancing.md new file mode 100644 index 00000000000..64d9fcf0e4b --- /dev/null +++ b/documentation/misc/gas_balancing.md @@ -0,0 +1,54 @@ +## Gas Balancing + +The gas balancing process aims to set gas costs of syscalls to be in line with +10 gas per nanosecond on reference hardware. +The process can be either performed for all syscalls based on existing messages and chain or targeted +at a single syscall. + +#### Reference hardware + +The reference hardware is TR3970x with 128GB of RAM. This is what was available at the time and +may be subject to change. + +### Complete gas balancing + +Complete gas balancing is performed using `lotus-bench`; the process is based on importing a chain export +and collecting gas traces which are later aggregated. + +Before building `lotus-bench`, make sure `EnableGasTracing` in `chain/vm/runtime.go` is set to `true`.
+ +The process can be started using `./lotus-bench import` with `--car` flag set to the location of +CAR chain export. `--start-epoch` and `--end-epoch` can be used to limit the range of epochs to run +the benchmark. Note that state tree of `start-epoch` needs to be in the CAR file or has to be previously computed +to work. + +The output will be a `bench.json` file containing information about every syscall invoked +and the time taken by these invocations. This file can grow to be quite big in size so make sure you have +spare space. + +After the bench run is complete the `bench.json` file can be analyzed with `./lotus-bench import analyze bench.json`. + +It will compute means, standard deviations and co-variances (when applicable) of syscall runtimes. +The output is in nanoseconds, so the gas values for syscalls should be 10x that. In cases where co-variance of +execution time to some parameter is evaluated, the strength of the correlation should be taken into account. + +#### Special cases + +OnIpldPut compute gas is based on the flush time to disk of objects created, +during block execution (when gas traces are formed) objects are only written to memory. Use `vm/flush_copy_ms` and `vm/flush_copy_count` to estimate OnIpldPut compute cost. + + +### Targeted gas balancing + +In some cases complete gas balancing is infeasible, either a new syscall gets introduced or +complete balancing is too time consuming. + +In these cases the recommended way to estimate gas for a given syscall is to perform an `in-vivo` benchmark. +In the past `in-vitro` as in standalone benchmarks were found to be highly inaccurate when compared to results +of real execution. + +An in-vivo benchmark can be performed by running an example of such syscall during block execution. +The best place to hook-in such benchmark is message execution loop in +`chain/stmgr/stmgr.go` in `ApplyBlocks()`.
Depending of time required to complete the syscall it might be +advisable to run the execution only once every few messages. + diff --git a/extern/fil-blst b/extern/fil-blst deleted file mode 160000 index 5f93488fc0d..00000000000 --- a/extern/fil-blst +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 5f93488fc0dbfb450f2355269f18fc67010d59bb diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 0226d0be6f0..d60fc680aa8 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 0226d0be6f0ec441e687512cd833040414437351 +Subproject commit d60fc680aa8abeafba698f738fed5b94c9bda33d diff --git a/extern/oni b/extern/oni deleted file mode 160000 index 10ed9ef5768..00000000000 --- a/extern/oni +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 10ed9ef576836186de3b8513c03cdc3fb18c44ed diff --git a/extern/sector-storage/cbor_gen.go b/extern/sector-storage/cbor_gen.go index 0db97f2c9f4..ed06b920e6e 100644 --- a/extern/sector-storage/cbor_gen.go +++ b/extern/sector-storage/cbor_gen.go @@ -5,13 +5,17 @@ package sectorstorage import ( "fmt" "io" + "sort" sealtasks "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf +var _ = cid.Undef +var _ = sort.Sort func (t *Call) MarshalCBOR(w io.Writer) error { if t == nil { @@ -188,7 +192,8 @@ func (t *Call) UnmarshalCBOR(r io.Reader) error { } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) } } @@ -199,7 +204,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{164}); err != nil { + if _, err := w.Write([]byte{166}); err != nil { return err } @@ -282,6 +287,51 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { if _, err := io.WriteString(w, string(t.WorkError)); err != nil { 
return err } + + // t.WorkerHostname (string) (string) + if len("WorkerHostname") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"WorkerHostname\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("WorkerHostname"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("WorkerHostname")); err != nil { + return err + } + + if len(t.WorkerHostname) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.WorkerHostname was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.WorkerHostname))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.WorkerHostname)); err != nil { + return err + } + + // t.StartTime (int64) (int64) + if len("StartTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StartTime\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartTime"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StartTime")); err != nil { + return err + } + + if t.StartTime >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartTime)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartTime-1)); err != nil { + return err + } + } return nil } @@ -360,9 +410,47 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) error { t.WorkError = string(sval) } + // t.WorkerHostname (string) (string) + case "WorkerHostname": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.WorkerHostname = string(sval) + } + // t.StartTime (int64) (int64) + case "StartTime": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 
positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.StartTime = int64(extraI) + } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) } } @@ -484,7 +572,8 @@ func (t *WorkID) UnmarshalCBOR(r io.Reader) error { } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) } } diff --git a/extern/sector-storage/faults.go b/extern/sector-storage/faults.go index c4e1364adcc..fdd5f6b7d6c 100644 --- a/extern/sector-storage/faults.go +++ b/extern/sector-storage/faults.go @@ -2,25 +2,29 @@ package sectorstorage import ( "context" + "crypto/rand" "fmt" "os" "path/filepath" "golang.org/x/xerrors" + ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/actors/runtime/proof" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) // FaultTracker TODO: Track things more actively type FaultTracker interface { - CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error) + CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) } // CheckProvable returns unprovable sectors -func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error) { - var bad []abi.SectorID +func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) 
{ + var bad = make(map[abi.SectorID]string) ssize, err := pp.SectorSize() if err != nil { @@ -33,27 +37,27 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ctx, cancel := context.WithCancel(ctx) defer cancel() - locked, err := m.index.StorageTryLock(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone) + locked, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTNone) if err != nil { return xerrors.Errorf("acquiring sector lock: %w", err) } if !locked { - log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector, "sealed") - bad = append(bad, sector) + log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector) + bad[sector.ID] = fmt.Sprint("can't acquire read lock") return nil } - lp, _, err := m.localStore.AcquireSector(ctx, sector, ssize, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) if err != nil { log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err) - bad = append(bad, sector) + bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err) return nil } if lp.Sealed == "" || lp.Cache == "" { - log.Warnw("CheckProvable Sector FAULT: cache an/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache) - bad = append(bad, sector) + log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache) + bad[sector.ID] = fmt.Sprintf("cache and/or sealed paths not found, cache %q, sealed %q", lp.Cache, lp.Sealed) return nil } @@ -69,19 +73,62 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, st, err := os.Stat(p) if err != nil { 
log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "err", err) - bad = append(bad, sector) + bad[sector.ID] = fmt.Sprintf("%s", err) return nil } if sz != 0 { if st.Size() != int64(ssize)*sz { log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz) - bad = append(bad, sector) + bad[sector.ID] = fmt.Sprintf("%s is wrong size (got %d, expect %d)", p, st.Size(), int64(ssize)*sz) return nil } } } + if rg != nil { + wpp, err := sector.ProofType.RegisteredWindowPoStProof() + if err != nil { + return err + } + + var pr abi.PoStRandomness = make([]byte, abi.RandomnessLength) + _, _ = rand.Read(pr) + pr[31] &= 0x3f + + ch, err := ffi.GeneratePoStFallbackSectorChallenges(wpp, sector.ID.Miner, pr, []abi.SectorNumber{ + sector.ID.Number, + }) + if err != nil { + log.Warnw("CheckProvable Sector FAULT: generating challenges", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err) + bad[sector.ID] = fmt.Sprintf("generating fallback challenges: %s", err) + return nil + } + + commr, err := rg(ctx, sector.ID) + if err != nil { + log.Warnw("CheckProvable Sector FAULT: getting commR", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err) + bad[sector.ID] = fmt.Sprintf("getting commR: %s", err) + return nil + } + + _, err = ffi.GenerateSingleVanillaProof(ffi.PrivateSectorInfo{ + SectorInfo: proof.SectorInfo{ + SealProof: sector.ProofType, + SectorNumber: sector.ID.Number, + SealedCID: commr, + }, + CacheDirPath: lp.Cache, + PoStProofType: wpp, + SealedSectorPath: lp.Sealed, + }, ch.Challenges[sector.ID.Number]) + if err != nil { + log.Warnw("CheckProvable Sector FAULT: generating vanilla proof", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err) + bad[sector.ID] = fmt.Sprintf("generating vanilla proof: %s", err) + return nil + } 
+ } + return nil }() if err != nil { diff --git a/extern/sector-storage/ffiwrapper/basicfs/fs.go b/extern/sector-storage/ffiwrapper/basicfs/fs.go index 7ae303d9c2f..a833f728cd0 100644 --- a/extern/sector-storage/ffiwrapper/basicfs/fs.go +++ b/extern/sector-storage/ffiwrapper/basicfs/fs.go @@ -7,6 +7,7 @@ import ( "sync" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) @@ -23,7 +24,7 @@ type Provider struct { waitSector map[sectorFile]chan struct{} } -func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) { +func (b *Provider) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) { if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) { // nolint return storiface.SectorPaths{}, nil, err } @@ -37,7 +38,7 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing done := func() {} out := storiface.SectorPaths{ - ID: id, + ID: id.ID, } for _, fileType := range storiface.PathTypes { @@ -49,10 +50,10 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing if b.waitSector == nil { b.waitSector = map[sectorFile]chan struct{}{} } - ch, found := b.waitSector[sectorFile{id, fileType}] + ch, found := b.waitSector[sectorFile{id.ID, fileType}] if !found { ch = make(chan struct{}, 1) - b.waitSector[sectorFile{id, fileType}] = ch + b.waitSector[sectorFile{id.ID, fileType}] = ch } b.lk.Unlock() @@ -63,7 +64,7 @@ func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing return storiface.SectorPaths{}, nil, ctx.Err() } - path := filepath.Join(b.Root, 
fileType.String(), storiface.SectorName(id)) + path := filepath.Join(b.Root, fileType.String(), storiface.SectorName(id.ID)) prevDone := done done = func() { diff --git a/extern/sector-storage/ffiwrapper/config.go b/extern/sector-storage/ffiwrapper/config.go deleted file mode 100644 index ca32b119186..00000000000 --- a/extern/sector-storage/ffiwrapper/config.go +++ /dev/null @@ -1,34 +0,0 @@ -package ffiwrapper - -import ( - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" -) - -type Config struct { - SealProofType abi.RegisteredSealProof - - _ struct{} // guard against nameless init -} - -func sizeFromConfig(cfg Config) (abi.SectorSize, error) { - return cfg.SealProofType.SectorSize() -} - -func SealProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredSealProof, error) { - switch ssize { - case 2 << 10: - return abi.RegisteredSealProof_StackedDrg2KiBV1, nil - case 8 << 20: - return abi.RegisteredSealProof_StackedDrg8MiBV1, nil - case 512 << 20: - return abi.RegisteredSealProof_StackedDrg512MiBV1, nil - case 32 << 30: - return abi.RegisteredSealProof_StackedDrg32GiBV1, nil - case 64 << 30: - return abi.RegisteredSealProof_StackedDrg64GiBV1, nil - default: - return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) - } -} diff --git a/extern/sector-storage/ffiwrapper/files.go b/extern/sector-storage/ffiwrapper/files.go deleted file mode 100644 index a13776d2dab..00000000000 --- a/extern/sector-storage/ffiwrapper/files.go +++ /dev/null @@ -1,53 +0,0 @@ -package ffiwrapper - -import ( - "io" - "os" - "sync" - - "golang.org/x/xerrors" -) - -func ToReadableFile(r io.Reader, n int64) (*os.File, func() error, error) { - f, ok := r.(*os.File) - if ok { - return f, func() error { return nil }, nil - } - - var w *os.File - - f, w, err := os.Pipe() - if err != nil { - return nil, nil, err - } - - var wait sync.Mutex - var werr error - - wait.Lock() - go func() { - defer wait.Unlock() - - var copied int64 - copied, werr = 
io.CopyN(w, r, n) - if werr != nil { - log.Warnf("toReadableFile: copy error: %+v", werr) - } - - err := w.Close() - if werr == nil && err != nil { - werr = err - log.Warnf("toReadableFile: close error: %+v", err) - return - } - if copied != n { - log.Warnf("copied different amount than expected: %d != %d", copied, n) - werr = xerrors.Errorf("copied different amount than expected: %d != %d", copied, n) - } - }() - - return f, func() error { - wait.Lock() - return werr - }, nil -} diff --git a/extern/sector-storage/ffiwrapper/prover_cgo.go b/extern/sector-storage/ffiwrapper/prover_cgo.go new file mode 100644 index 00000000000..3ad73c81c93 --- /dev/null +++ b/extern/sector-storage/ffiwrapper/prover_cgo.go @@ -0,0 +1,18 @@ +//+build cgo + +package ffiwrapper + +import ( + ffi "github.com/filecoin-project/filecoin-ffi" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" +) + +var ProofProver = proofProver{} + +var _ Prover = ProofProver + +type proofProver struct{} + +func (v proofProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { + return ffi.AggregateSealProofs(aggregateInfo, proofs) +} diff --git a/extern/sector-storage/ffiwrapper/sealer.go b/extern/sector-storage/ffiwrapper/sealer.go index c1b558d9a30..39cb8fa1b37 100644 --- a/extern/sector-storage/ffiwrapper/sealer.go +++ b/extern/sector-storage/ffiwrapper/sealer.go @@ -1,16 +1,12 @@ package ffiwrapper import ( - "github.com/filecoin-project/go-state-types/abi" logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("ffiwrapper") type Sealer struct { - sealProofType abi.RegisteredSealProof - ssize abi.SectorSize // a function of sealProofType and postProofType - sectors SectorProvider stopping chan struct{} } @@ -18,11 +14,3 @@ type Sealer struct { func (sb *Sealer) Stop() { close(sb.stopping) } - -func (sb *Sealer) SectorSize() abi.SectorSize { - return sb.ssize -} - -func (sb *Sealer) SealProofType() 
abi.RegisteredSealProof { - return sb.sealProofType -} diff --git a/extern/sector-storage/ffiwrapper/sealer_cgo.go b/extern/sector-storage/ffiwrapper/sealer_cgo.go index b48b0bfd5c4..820c53c4b82 100644 --- a/extern/sector-storage/ffiwrapper/sealer_cgo.go +++ b/extern/sector-storage/ffiwrapper/sealer_cgo.go @@ -20,23 +20,17 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" + commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" + "github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/lotus/extern/sector-storage/fr32" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" - "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" ) var _ Storage = &Sealer{} -func New(sectors SectorProvider, cfg *Config) (*Sealer, error) { - sectorSize, err := sizeFromConfig(*cfg) - if err != nil { - return nil, err - } - +func New(sectors SectorProvider) (*Sealer, error) { sb := &Sealer{ - sealProofType: cfg.SealProofType, - ssize: sectorSize, - sectors: sectors, stopping: make(chan struct{}), @@ -45,27 +39,35 @@ func New(sectors SectorProvider, cfg *Config) (*Sealer, error) { return sb, nil } -func (sb *Sealer) NewSector(ctx context.Context, sector abi.SectorID) error { +func (sb *Sealer) NewSector(ctx context.Context, sector storage.SectorRef) error { // TODO: Allocate the sector here instead of in addpiece return nil } -func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) { +func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) { + // TODO: allow tuning those: + chunk := abi.PaddedPieceSize(4 << 20) + parallel := 
runtime.NumCPU() + var offset abi.UnpaddedPieceSize for _, size := range existingPieceSizes { offset += size } - maxPieceSize := abi.PaddedPieceSize(sb.ssize) + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return abi.PieceInfo{}, err + } + + maxPieceSize := abi.PaddedPieceSize(ssize) if offset.Padded()+pieceSize.Padded() > maxPieceSize { return abi.PieceInfo{}, xerrors.Errorf("can't add %d byte piece to sector %v with %d bytes of existing pieces", pieceSize, sector, offset) } - var err error var done func() - var stagedFile *partialFile + var stagedFile *partialfile.PartialFile defer func() { if done != nil { @@ -86,7 +88,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) } - stagedFile, err = createPartialFile(maxPieceSize, stagedPath.Unsealed) + stagedFile, err = partialfile.CreatePartialFile(maxPieceSize, stagedPath.Unsealed) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("creating unsealed sector file: %w", err) } @@ -96,7 +98,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) } - stagedFile, err = openPartialFile(maxPieceSize, stagedPath.Unsealed) + stagedFile, err = partialfile.OpenPartialFile(maxPieceSize, stagedPath.Unsealed) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("opening unsealed sector file: %w", err) } @@ -111,10 +113,16 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), pw) - chunk := abi.PaddedPieceSize(4 << 20) + throttle := make(chan []byte, parallel) + piecePromises := make([]func() (abi.PieceInfo, error), 0) buf := make([]byte, chunk.Unpadded()) - var pieceCids []abi.PieceInfo + for i := 0; i < parallel; i++ { + if abi.UnpaddedPieceSize(i)*chunk.Unpadded() >= pieceSize { + break // won't use this 
many buffers + } + throttle <- make([]byte, chunk.Unpadded()) + } for { var read int @@ -135,13 +143,39 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie break } - c, err := sb.pieceCid(buf[:read]) - if err != nil { - return abi.PieceInfo{}, xerrors.Errorf("pieceCid error: %w", err) - } - pieceCids = append(pieceCids, abi.PieceInfo{ - Size: abi.UnpaddedPieceSize(len(buf[:read])).Padded(), - PieceCID: c, + done := make(chan struct { + cid.Cid + error + }, 1) + pbuf := <-throttle + copy(pbuf, buf[:read]) + + go func(read int) { + defer func() { + throttle <- pbuf + }() + + c, err := sb.pieceCid(sector.ProofType, pbuf[:read]) + done <- struct { + cid.Cid + error + }{c, err} + }(read) + + piecePromises = append(piecePromises, func() (abi.PieceInfo, error) { + select { + case e := <-done: + if e.error != nil { + return abi.PieceInfo{}, e.error + } + + return abi.PieceInfo{ + Size: abi.UnpaddedPieceSize(len(buf[:read])).Padded(), + PieceCID: e.Cid, + }, nil + case <-ctx.Done(): + return abi.PieceInfo{}, ctx.Err() + } }) } @@ -158,11 +192,23 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie } stagedFile = nil - if len(pieceCids) == 1 { - return pieceCids[0], nil + if len(piecePromises) == 1 { + return piecePromises[0]() + } + + var payloadRoundedBytes abi.PaddedPieceSize + pieceCids := make([]abi.PieceInfo, len(piecePromises)) + for i, promise := range piecePromises { + pinfo, err := promise() + if err != nil { + return abi.PieceInfo{}, err + } + + pieceCids[i] = pinfo + payloadRoundedBytes += pinfo.Size } - pieceCID, err := ffi.GenerateUnsealedCID(sb.sealProofType, pieceCids) + pieceCID, err := ffi.GenerateUnsealedCID(sector.ProofType, pieceCids) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("generate unsealed CID: %w", err) } @@ -172,19 +218,28 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPie return abi.PieceInfo{}, err } + if payloadRoundedBytes < 
pieceSize.Padded() { + paddedCid, err := commpffi.ZeroPadPieceCommitment(pieceCID, payloadRoundedBytes.Unpadded(), pieceSize) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("failed to pad data: %w", err) + } + + pieceCID = paddedCid + } + return abi.PieceInfo{ Size: pieceSize.Padded(), PieceCID: pieceCID, }, nil } -func (sb *Sealer) pieceCid(in []byte) (cid.Cid, error) { - prf, werr, err := ToReadableFile(bytes.NewReader(in), int64(len(in))) +func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, error) { + prf, werr, err := commpffi.ToReadableFile(bytes.NewReader(in), int64(len(in))) if err != nil { return cid.Undef, xerrors.Errorf("getting tee reader pipe: %w", err) } - pieceCID, err := ffi.GeneratePieceCIDFromFile(sb.sealProofType, prf, abi.UnpaddedPieceSize(len(in))) + pieceCID, err := ffi.GeneratePieceCIDFromFile(spt, prf, abi.UnpaddedPieceSize(len(in))) if err != nil { return cid.Undef, xerrors.Errorf("generating piece commitment: %w", err) } @@ -194,12 +249,16 @@ func (sb *Sealer) pieceCid(in []byte) (cid.Cid, error) { return pieceCID, werr() } -func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { - maxPieceSize := abi.PaddedPieceSize(sb.ssize) +func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return err + } + maxPieceSize := abi.PaddedPieceSize(ssize) // try finding existing unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage) - var pf *partialFile + var pf *partialfile.PartialFile switch { case xerrors.Is(err, storiface.ErrSectorNotFound): @@ -209,7 +268,7 @@ func (sb *Sealer) UnsealPiece(ctx 
context.Context, sector abi.SectorID, offset s } defer done() - pf, err = createPartialFile(maxPieceSize, unsealedPath.Unsealed) + pf, err = partialfile.CreatePartialFile(maxPieceSize, unsealedPath.Unsealed) if err != nil { return xerrors.Errorf("create unsealed file: %w", err) } @@ -217,7 +276,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s case err == nil: defer done() - pf, err = openPartialFile(maxPieceSize, unsealedPath.Unsealed) + pf, err = partialfile.OpenPartialFile(maxPieceSize, unsealedPath.Unsealed) if err != nil { return xerrors.Errorf("opening partial file: %w", err) } @@ -317,12 +376,12 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s // // TODO: This may be possible to do in parallel - err = ffi.UnsealRange(sb.sealProofType, + err = ffi.UnsealRange(sector.ProofType, srcPaths.Cache, sealed, opw, - sector.Number, - sector.Miner, + sector.ID.Number, + sector.ID.Miner, randomness, commd, uint64(at.Unpadded()), @@ -356,16 +415,20 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset s return nil } -func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { +func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { path, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage) if err != nil { return false, xerrors.Errorf("acquire unsealed sector path: %w", err) } defer done() - maxPieceSize := abi.PaddedPieceSize(sb.ssize) + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return false, err + } + maxPieceSize := abi.PaddedPieceSize(ssize) - pf, err := openPartialFile(maxPieceSize, path.Unsealed) + pf, err := partialfile.OpenPartialFile(maxPieceSize, path.Unsealed) if 
err != nil { if xerrors.Is(err, os.ErrNotExist) { return false, nil @@ -408,7 +471,7 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se return true, nil } -func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { +func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache, storiface.PathSealing) if err != nil { return nil, xerrors.Errorf("acquiring sector paths: %w", err) @@ -443,29 +506,33 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke for _, piece := range pieces { sum += piece.Size.Unpadded() } - ussize := abi.PaddedPieceSize(sb.ssize).Unpadded() + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return nil, err + } + ussize := abi.PaddedPieceSize(ssize).Unpadded() if sum != ussize { return nil, xerrors.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum)) } // TODO: context cancellation respect p1o, err := ffi.SealPreCommitPhase1( - sb.sealProofType, + sector.ProofType, paths.Cache, paths.Unsealed, paths.Sealed, - sector.Number, - sector.Miner, + sector.ID.Number, + sector.ID.Miner, ticket, pieces, ) if err != nil { - return nil, xerrors.Errorf("presealing sector %d (%s): %w", sector.Number, paths.Unsealed, err) + return nil, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err) } return p1o, nil } -func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { +func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) 
(storage.SectorCids, error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing) if err != nil { return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err) @@ -474,7 +541,7 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase sealedCID, unsealedCID, err := ffi.SealPreCommitPhase2(phase1Out, paths.Cache, paths.Sealed) if err != nil { - return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.Number, paths.Unsealed, err) + return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err) } return storage.SectorCids{ @@ -483,42 +550,47 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase }, nil } -func (sb *Sealer) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { +func (sb *Sealer) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing) if err != nil { return nil, xerrors.Errorf("acquire sector paths: %w", err) } defer done() output, err := ffi.SealCommitPhase1( - sb.sealProofType, + sector.ProofType, cids.Sealed, cids.Unsealed, paths.Cache, paths.Sealed, - sector.Number, - sector.Miner, + sector.ID.Number, + sector.ID.Miner, ticket, seed, pieces, ) if err != nil { log.Warn("StandaloneSealCommit error: ", err) - log.Warnf("num:%d tkt:%v seed:%v, pi:%v sealedCID:%v, unsealedCID:%v", sector.Number, ticket, seed, pieces, cids.Sealed, cids.Unsealed) + log.Warnf("num:%d tkt:%v seed:%v, pi:%v sealedCID:%v, unsealedCID:%v", sector.ID.Number, 
ticket, seed, pieces, cids.Sealed, cids.Unsealed) return nil, xerrors.Errorf("StandaloneSealCommit: %w", err) } return output, nil } -func (sb *Sealer) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (storage.Proof, error) { - return ffi.SealCommitPhase2(phase1Out, sector.Number, sector.Miner) +func (sb *Sealer) SealCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.Commit1Out) (storage.Proof, error) { + return ffi.SealCommitPhase2(phase1Out, sector.ID.Number, sector.ID.Miner) } -func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { +func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return err + } + maxPieceSize := abi.PaddedPieceSize(ssize) + if len(keepUnsealed) > 0 { - maxPieceSize := abi.PaddedPieceSize(sb.ssize) - sr := pieceRun(0, maxPieceSize) + sr := partialfile.PieceRun(0, maxPieceSize) for _, s := range keepUnsealed { si := &rlepluslazy.RunSliceIterator{} @@ -540,7 +612,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU } defer done() - pf, err := openPartialFile(maxPieceSize, paths.Unsealed) + pf, err := partialfile.OpenPartialFile(maxPieceSize, paths.Unsealed) if err == nil { var at uint64 for sr.HasNext() { @@ -580,10 +652,10 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU } defer done() - return ffi.ClearCache(uint64(sb.ssize), paths.Cache) + return ffi.ClearCache(uint64(ssize), paths.Cache) } -func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { +func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error { // This call is meant to mark storage as 'freeable'. 
Given that unsealing is // very expensive, we don't remove data as soon as we can - instead we only // do that when we don't have free space for data that really needs it @@ -593,24 +665,10 @@ func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safe return xerrors.Errorf("not supported at this layer") } -func (sb *Sealer) Remove(ctx context.Context, sector abi.SectorID) error { +func (sb *Sealer) Remove(ctx context.Context, sector storage.SectorRef) error { return xerrors.Errorf("not supported at this layer") // happens in localworker } -func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, piece io.Reader, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) { - f, werr, err := ToReadableFile(piece, int64(pieceSize)) - if err != nil { - return cid.Undef, err - } - - pieceCID, err := ffi.GeneratePieceCIDFromFile(proofType, f, pieceSize) - if err != nil { - return cid.Undef, err - } - - return pieceCID, werr() -} - func GetRequiredPadding(oldLength abi.PaddedPieceSize, newPieceLength abi.PaddedPieceSize) ([]abi.PaddedPieceSize, abi.PaddedPieceSize) { padPieces := make([]abi.PaddedPieceSize, 0) diff --git a/extern/sector-storage/ffiwrapper/sealer_test.go b/extern/sector-storage/ffiwrapper/sealer_test.go index 5ae5cec678b..a6034cc79ad 100644 --- a/extern/sector-storage/ffiwrapper/sealer_test.go +++ b/extern/sector-storage/ffiwrapper/sealer_test.go @@ -15,11 +15,14 @@ import ( "testing" "time" + commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "github.com/stretchr/testify/require" "golang.org/x/xerrors" @@ -28,9 +31,12 @@ import ( "github.com/filecoin-project/specs-storage/storage" ffi "github.com/filecoin-project/filecoin-ffi" + 
"github.com/filecoin-project/filecoin-ffi/generated" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/extern/storage-sealing/lib/nullreader" ) func init() { @@ -43,7 +49,7 @@ var sectorSize, _ = sealProofType.SectorSize() var sealRand = abi.SealRandomness{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2} type seal struct { - id abi.SectorID + ref storage.SectorRef cids storage.SectorCids pi abi.PieceInfo ticket abi.SealRandomness @@ -56,12 +62,12 @@ func data(sn abi.SectorNumber, dlen abi.UnpaddedPieceSize) io.Reader { ) } -func (s *seal) precommit(t *testing.T, sb *Sealer, id abi.SectorID, done func()) { +func (s *seal) precommit(t *testing.T, sb *Sealer, id storage.SectorRef, done func()) { defer done() dlen := abi.PaddedPieceSize(sectorSize).Unpadded() var err error - r := data(id.Number, dlen) + r := data(id.ID.Number, dlen) s.pi, err = sb.AddPiece(context.TODO(), id, []abi.UnpaddedPieceSize{}, dlen, r) if err != nil { t.Fatalf("%+v", err) @@ -80,23 +86,24 @@ func (s *seal) precommit(t *testing.T, sb *Sealer, id abi.SectorID, done func()) s.cids = cids } -func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { +var seed = abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9} + +func (s *seal) commit(t *testing.T, sb *Sealer, done func()) storage.Proof { defer done() - seed := abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9} - pc1, err := sb.SealCommit1(context.TODO(), s.id, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids) + pc1, err := sb.SealCommit1(context.TODO(), s.ref, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids) if err != nil { t.Fatalf("%+v", err) } - proof, err := 
sb.SealCommit2(context.TODO(), s.id, pc1) + proof, err := sb.SealCommit2(context.TODO(), s.ref, pc1) if err != nil { t.Fatalf("%+v", err) } ok, err := ProofVerifier.VerifySeal(proof2.SealVerifyInfo{ - SectorID: s.id, + SectorID: s.ref.ID, SealedCID: s.cids.Sealed, - SealProof: sealProofType, + SealProof: s.ref.ProofType, Proof: proof, Randomness: s.ticket, InteractiveRandomness: seed, @@ -109,9 +116,11 @@ func (s *seal) commit(t *testing.T, sb *Sealer, done func()) { if !ok { t.Fatal("proof failed to validate") } + + return proof } -func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.SectorID, done func()) { +func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si storage.SectorRef, done func()) { defer done() var b bytes.Buffer @@ -120,7 +129,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec t.Fatal(err) } - expect, _ := ioutil.ReadAll(data(si.Number, 1016)) + expect, _ := ioutil.ReadAll(data(si.ID.Number, 1016)) if !bytes.Equal(b.Bytes(), expect) { t.Fatal("read wrong bytes") } @@ -150,7 +159,7 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.Sec t.Fatal(err) } - expect, _ = ioutil.ReadAll(data(si.Number, 1016)) + expect, _ = ioutil.ReadAll(data(si.ID.Number, 1016)) require.Equal(t, expect, b.Bytes()) b.Reset() @@ -174,13 +183,13 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) { sis := make([]proof2.SectorInfo, len(seals)) for i, s := range seals { sis[i] = proof2.SectorInfo{ - SealProof: sealProofType, - SectorNumber: s.id.Number, + SealProof: s.ref.ProofType, + SectorNumber: s.ref.ID.Number, SealedCID: s.cids.Sealed, } } - proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].id.Miner, sis, randomness) + proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].ref.ID.Miner, sis, randomness) if len(skipped) > 0 { require.Error(t, err) require.EqualValues(t, skipped, skp) @@ -195,7 +204,7 @@ 
func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) { Randomness: randomness, Proofs: proofs, ChallengedSectors: sis, - Prover: seals[0].id.Miner, + Prover: seals[0].ref.ID.Miner, }) if err != nil { t.Fatalf("%+v", err) @@ -205,7 +214,7 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) { } } -func corrupt(t *testing.T, sealer *Sealer, id abi.SectorID) { +func corrupt(t *testing.T, sealer *Sealer, id storage.SectorRef) { paths, done, err := sealer.sectors.AcquireSector(context.Background(), id, storiface.FTSealed, 0, storiface.PathStorage) require.NoError(t, err) defer done() @@ -226,7 +235,12 @@ func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) { panic(err) } - err = paramfetch.GetParams(context.TODO(), dat, uint64(s)) + datSrs, err := ioutil.ReadFile("../../../build/proof-params/srs-inner-product.json") + if err != nil { + panic(err) + } + + err = paramfetch.GetParams(context.TODO(), dat, datSrs, uint64(s)) if err != nil { panic(xerrors.Errorf("failed to acquire Groth parameters for 2KiB sectors: %w", err)) } @@ -239,7 +253,7 @@ func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) { // go test -run=^TestDownloadParams // func TestDownloadParams(t *testing.T) { - defer requireFDsClosed(t, openFDs(t)) + // defer requireFDsClosed(t, openFDs(t)) flaky likely cause of how go-embed works with param files getGrothParamFileAndVerifyingKeys(sectorSize) } @@ -264,14 +278,10 @@ func TestSealAndVerify(t *testing.T) { } miner := abi.ActorID(123) - cfg := &Config{ - SealProofType: sealProofType, - } - sp := &basicfs.Provider{ Root: cdir, } - sb, err := New(sp, cfg) + sb, err := New(sp) if err != nil { t.Fatalf("%+v", err) } @@ -286,9 +296,12 @@ func TestSealAndVerify(t *testing.T) { } defer cleanup() - si := abi.SectorID{Miner: miner, Number: 1} + si := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: 1}, + ProofType: sealProofType, + } - s := seal{id: si} + s := seal{ref: si} start := time.Now() 
@@ -338,13 +351,10 @@ func TestSealPoStNoCommit(t *testing.T) { miner := abi.ActorID(123) - cfg := &Config{ - SealProofType: sealProofType, - } sp := &basicfs.Provider{ Root: dir, } - sb, err := New(sp, cfg) + sb, err := New(sp) if err != nil { t.Fatalf("%+v", err) } @@ -360,9 +370,12 @@ func TestSealPoStNoCommit(t *testing.T) { } defer cleanup() - si := abi.SectorID{Miner: miner, Number: 1} + si := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: 1}, + ProofType: sealProofType, + } - s := seal{id: si} + s := seal{ref: si} start := time.Now() @@ -403,13 +416,10 @@ func TestSealAndVerify3(t *testing.T) { miner := abi.ActorID(123) - cfg := &Config{ - SealProofType: sealProofType, - } sp := &basicfs.Provider{ Root: dir, } - sb, err := New(sp, cfg) + sb, err := New(sp) if err != nil { t.Fatalf("%+v", err) } @@ -424,13 +434,22 @@ func TestSealAndVerify3(t *testing.T) { var wg sync.WaitGroup - si1 := abi.SectorID{Miner: miner, Number: 1} - si2 := abi.SectorID{Miner: miner, Number: 2} - si3 := abi.SectorID{Miner: miner, Number: 3} + si1 := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: 1}, + ProofType: sealProofType, + } + si2 := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: 2}, + ProofType: sealProofType, + } + si3 := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: 3}, + ProofType: sealProofType, + } - s1 := seal{id: si1} - s2 := seal{id: si2} - s3 := seal{id: si3} + s1 := seal{ref: si1} + s2 := seal{ref: si2} + s3 := seal{ref: si3} wg.Add(3) go s1.precommit(t, sb, si1, wg.Done) //nolint: staticcheck @@ -451,7 +470,99 @@ func TestSealAndVerify3(t *testing.T) { corrupt(t, sb, si1) corrupt(t, sb, si2) - post(t, sb, []abi.SectorID{si1, si2}, s1, s2, s3) + post(t, sb, []abi.SectorID{si1.ID, si2.ID}, s1, s2, s3) +} + +func TestSealAndVerifyAggregate(t *testing.T) { + numAgg := 5 + + if testing.Short() { + t.Skip("skipping test in short mode") + } + + defer requireFDsClosed(t, openFDs(t)) + + if runtime.NumCPU() < 10 && 
os.Getenv("CI") == "" { // don't bother on slow hardware + t.Skip("this is slow") + } + _ = os.Setenv("RUST_LOG", "info") + + getGrothParamFileAndVerifyingKeys(sectorSize) + + cdir, err := ioutil.TempDir("", "sbtest-c-") + if err != nil { + t.Fatal(err) + } + miner := abi.ActorID(123) + + sp := &basicfs.Provider{ + Root: cdir, + } + sb, err := New(sp) + if err != nil { + t.Fatalf("%+v", err) + } + cleanup := func() { + if t.Failed() { + fmt.Printf("not removing %s\n", cdir) + return + } + if err := os.RemoveAll(cdir); err != nil { + t.Error(err) + } + } + defer cleanup() + + avi := proof5.AggregateSealVerifyProofAndInfos{ + Miner: miner, + SealProof: sealProofType, + AggregateProof: policy.GetDefaultAggregationProof(), + Proof: nil, + Infos: make([]proof5.AggregateSealVerifyInfo, numAgg), + } + + toAggregate := make([][]byte, numAgg) + for i := 0; i < numAgg; i++ { + si := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: abi.SectorNumber(i + 1)}, + ProofType: sealProofType, + } + + s := seal{ref: si} + s.precommit(t, sb, si, func() {}) + toAggregate[i] = s.commit(t, sb, func() {}) + + avi.Infos[i] = proof5.AggregateSealVerifyInfo{ + Number: abi.SectorNumber(i + 1), + Randomness: s.ticket, + InteractiveRandomness: seed, + SealedCID: s.cids.Sealed, + UnsealedCID: s.cids.Unsealed, + } + } + + aggStart := time.Now() + + avi.Proof, err = ProofProver.AggregateSealProofs(avi, toAggregate) + require.NoError(t, err) + require.Len(t, avi.Proof, 11188) + + aggDone := time.Now() + + _, err = ProofProver.AggregateSealProofs(avi, toAggregate) + require.NoError(t, err) + + aggHot := time.Now() + + ok, err := ProofVerifier.VerifyAggregateSeals(avi) + require.NoError(t, err) + require.True(t, ok) + + verifDone := time.Now() + + fmt.Printf("Aggregate: %s\n", aggDone.Sub(aggStart).String()) + fmt.Printf("Hot: %s\n", aggHot.Sub(aggDone).String()) + fmt.Printf("Verify: %s\n", verifDone.Sub(aggHot).String()) } func BenchmarkWriteWithAlignment(b *testing.B) { @@ -460,7 +571,7 
@@ func BenchmarkWriteWithAlignment(b *testing.B) { for i := 0; i < b.N; i++ { b.StopTimer() - rf, w, _ := ToReadableFile(bytes.NewReader(bytes.Repeat([]byte{0xff, 0}, int(bt/2))), int64(bt)) + rf, w, _ := commpffi.ToReadableFile(bytes.NewReader(bytes.Repeat([]byte{0xff, 0}, int(bt/2))), int64(bt)) tf, _ := ioutil.TempFile("/tmp/", "scrb-") b.StartTimer() @@ -519,7 +630,7 @@ func TestGenerateUnsealedCID(t *testing.T) { ups := int(abi.PaddedPieceSize(2048).Unpadded()) commP := func(b []byte) cid.Cid { - pf, werr, err := ToReadableFile(bytes.NewReader(b), int64(len(b))) + pf, werr, err := commpffi.ToReadableFile(bytes.NewReader(b), int64(len(b))) require.NoError(t, err) c, err := ffi.GeneratePieceCIDFromFile(pt, pf, abi.UnpaddedPieceSize(len(b))) @@ -615,3 +726,216 @@ func TestGenerateUnsealedCID(t *testing.T) { [][]byte{barr(1, 16), barr(0, 16), barr(2, 8), barr(3, 16), barr(0, 16), barr(0, 8), barr(4, 4), barr(5, 16), barr(0, 16), barr(0, 8)}, ) } + +func TestAddPiece512M(t *testing.T) { + sz := abi.PaddedPieceSize(512 << 20).Unpadded() + + cdir, err := ioutil.TempDir("", "sbtest-c-") + if err != nil { + t.Fatal(err) + } + miner := abi.ActorID(123) + + sp := &basicfs.Provider{ + Root: cdir, + } + sb, err := New(sp) + if err != nil { + t.Fatalf("%+v", err) + } + cleanup := func() { + if t.Failed() { + fmt.Printf("not removing %s\n", cdir) + return + } + if err := os.RemoveAll(cdir); err != nil { + t.Error(err) + } + } + t.Cleanup(cleanup) + + r := rand.New(rand.NewSource(0x7e5)) + + c, err := sb.AddPiece(context.TODO(), storage.SectorRef{ + ID: abi.SectorID{ + Miner: miner, + Number: 0, + }, + ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1, + }, nil, sz, io.LimitReader(r, int64(sz))) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, "baga6ea4seaqhyticusemlcrjhvulpfng4nint6bu3wpe5s3x4bnuj2rs47hfacy", c.PieceCID.String()) +} + +func BenchmarkAddPiece512M(b *testing.B) { + sz := abi.PaddedPieceSize(512 << 20).Unpadded() + b.SetBytes(int64(sz)) + + 
cdir, err := ioutil.TempDir("", "sbtest-c-") + if err != nil { + b.Fatal(err) + } + miner := abi.ActorID(123) + + sp := &basicfs.Provider{ + Root: cdir, + } + sb, err := New(sp) + if err != nil { + b.Fatalf("%+v", err) + } + cleanup := func() { + if b.Failed() { + fmt.Printf("not removing %s\n", cdir) + return + } + if err := os.RemoveAll(cdir); err != nil { + b.Error(err) + } + } + b.Cleanup(cleanup) + + for i := 0; i < b.N; i++ { + c, err := sb.AddPiece(context.TODO(), storage.SectorRef{ + ID: abi.SectorID{ + Miner: miner, + Number: abi.SectorNumber(i), + }, + ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1, + }, nil, sz, io.LimitReader(&nullreader.Reader{}, int64(sz))) + if err != nil { + b.Fatal(err) + } + fmt.Println(c) + } +} + +func TestAddPiece512MPadded(t *testing.T) { + sz := abi.PaddedPieceSize(512 << 20).Unpadded() + + cdir, err := ioutil.TempDir("", "sbtest-c-") + if err != nil { + t.Fatal(err) + } + miner := abi.ActorID(123) + + sp := &basicfs.Provider{ + Root: cdir, + } + sb, err := New(sp) + if err != nil { + t.Fatalf("%+v", err) + } + cleanup := func() { + if t.Failed() { + fmt.Printf("not removing %s\n", cdir) + return + } + if err := os.RemoveAll(cdir); err != nil { + t.Error(err) + } + } + t.Cleanup(cleanup) + + r := rand.New(rand.NewSource(0x7e5)) + + c, err := sb.AddPiece(context.TODO(), storage.SectorRef{ + ID: abi.SectorID{ + Miner: miner, + Number: 0, + }, + ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1, + }, nil, sz, io.LimitReader(r, int64(sz/4))) + if err != nil { + t.Fatalf("add piece failed: %s", err) + } + + require.Equal(t, "baga6ea4seaqonenxyku4o7hr5xkzbqsceipf6xgli3on54beqbk6k246sbooobq", c.PieceCID.String()) +} + +func setupLogger(t *testing.T) *bytes.Buffer { + _ = os.Setenv("RUST_LOG", "info") + + var bb bytes.Buffer + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + + go func() { + _, _ = io.Copy(&bb, r) + runtime.KeepAlive(w) + }() + + resp := generated.FilInitLogFd(int32(w.Fd())) + resp.Deref() 
+ + defer generated.FilDestroyInitLogFdResponse(resp) + + if resp.StatusCode != generated.FCPResponseStatusFCPNoError { + t.Fatal(generated.RawString(resp.ErrorMsg).Copy()) + } + + return &bb +} + +func TestMulticoreSDR(t *testing.T) { + if os.Getenv("TEST_RUSTPROOFS_LOGS") != "1" { + t.Skip("skipping test without TEST_RUSTPROOFS_LOGS=1") + } + + rustLogger := setupLogger(t) + + getGrothParamFileAndVerifyingKeys(sectorSize) + + dir, err := ioutil.TempDir("", "sbtest") + if err != nil { + t.Fatal(err) + } + + miner := abi.ActorID(123) + + sp := &basicfs.Provider{ + Root: dir, + } + sb, err := New(sp) + if err != nil { + t.Fatalf("%+v", err) + } + + cleanup := func() { + if t.Failed() { + fmt.Printf("not removing %s\n", dir) + return + } + if err := os.RemoveAll(dir); err != nil { + t.Error(err) + } + } + defer cleanup() + + si := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: 1}, + ProofType: sealProofType, + } + + s := seal{ref: si} + + // check multicore + _ = os.Setenv("FIL_PROOFS_USE_MULTICORE_SDR", "1") + rustLogger.Reset() + s.precommit(t, sb, si, func() {}) + + ok := false + for _, s := range strings.Split(rustLogger.String(), "\n") { + if strings.Contains(s, "create_label::multi") { + ok = true + break + } + } + + require.True(t, ok) +} diff --git a/extern/sector-storage/ffiwrapper/types.go b/extern/sector-storage/ffiwrapper/types.go index b67f9c595ec..a5b2fdf1fa0 100644 --- a/extern/sector-storage/ffiwrapper/types.go +++ b/extern/sector-storage/ffiwrapper/types.go @@ -4,7 +4,7 @@ import ( "context" "io" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-cid" @@ -29,22 +29,30 @@ type Storage interface { storage.Prover StorageSealer - UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error - ReadPiece(ctx 
context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) + UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error + ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) } type Verifier interface { - VerifySeal(proof2.SealVerifyInfo) (bool, error) - VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) - VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) + VerifySeal(proof5.SealVerifyInfo) (bool, error) + VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) + VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) + VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) } +// Prover contains cheap proving-related methods +type Prover interface { + // TODO: move GenerateWinningPoStSectorChallenge from the Verifier interface to here + + AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) +} + type SectorProvider interface { // * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist // * returns an error when allocate is set, and existing isn't, and the sector exists - AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) + AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype 
storiface.PathType) (storiface.SectorPaths, func(), error) } var _ SectorProvider = &basicfs.Provider{} diff --git a/extern/sector-storage/ffiwrapper/unseal_ranges.go b/extern/sector-storage/ffiwrapper/unseal_ranges.go index 4519fc21e6a..3a13c73a74a 100644 --- a/extern/sector-storage/ffiwrapper/unseal_ranges.go +++ b/extern/sector-storage/ffiwrapper/unseal_ranges.go @@ -7,6 +7,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) @@ -17,7 +18,7 @@ const mergeGaps = 32 << 20 // TODO const expandRuns = 16 << 20 // unseal more than requested for future requests func computeUnsealRanges(unsealed rlepluslazy.RunIterator, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (rlepluslazy.RunIterator, error) { - todo := pieceRun(offset.Padded(), size.Padded()) + todo := partialfile.PieceRun(offset.Padded(), size.Padded()) todo, err := rlepluslazy.Subtract(todo, unsealed) if err != nil { return nil, xerrors.Errorf("compute todo-unsealed: %w", err) diff --git a/extern/sector-storage/ffiwrapper/verifier_cgo.go b/extern/sector-storage/ffiwrapper/verifier_cgo.go index 9dab7103e93..95724bb7cbd 100644 --- a/extern/sector-storage/ffiwrapper/verifier_cgo.go +++ b/extern/sector-storage/ffiwrapper/verifier_cgo.go @@ -10,12 +10,13 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-state-types/abi" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, error) { +func (sb *Sealer) GenerateWinningPoSt(ctx 
context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) { randomness[31] &= 0x3f privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS? if err != nil { @@ -29,7 +30,7 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, return ffi.GenerateWinningPoSt(minerID, privsectors, randomness) } -func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) { +func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) { randomness[31] &= 0x3f privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof) if err != nil { @@ -54,7 +55,7 @@ func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, s return proof, faultyIDs, err } -func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof2.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { +func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof5.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { fmap := map[abi.SectorNumber]struct{}{} for _, fault := range faults { fmap[fault] = struct{}{} @@ -74,12 +75,15 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn continue } - sid := abi.SectorID{Miner: mid, Number: s.SectorNumber} + sid := storage.SectorRef{ + ID: 
abi.SectorID{Miner: mid, Number: s.SectorNumber}, + ProofType: s.SealProof, + } paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage) if err != nil { - log.Warnw("failed to acquire sector, skipping", "sector", sid, "error", err) - skipped = append(skipped, sid) + log.Warnw("failed to acquire sector, skipping", "sector", sid.ID, "error", err) + skipped = append(skipped, sid.ID) continue } doneFuncs = append(doneFuncs, d) @@ -107,11 +111,15 @@ type proofVerifier struct{} var ProofVerifier = proofVerifier{} -func (proofVerifier) VerifySeal(info proof2.SealVerifyInfo) (bool, error) { +func (proofVerifier) VerifySeal(info proof5.SealVerifyInfo) (bool, error) { return ffi.VerifySeal(info) } -func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { +func (proofVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + return ffi.VerifyAggregateSeals(aggregate) +} + +func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWinningPoSt") defer span.End() @@ -119,7 +127,7 @@ func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningP return ffi.VerifyWinningPoSt(info) } -func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { +func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWindowPoSt") defer span.End() diff --git a/extern/sector-storage/fr32/fr32_ffi_cmp_test.go b/extern/sector-storage/fr32/fr32_ffi_cmp_test.go index 3d567909555..49eb115480d 100644 --- a/extern/sector-storage/fr32/fr32_ffi_cmp_test.go +++ b/extern/sector-storage/fr32/fr32_ffi_cmp_test.go @@ -7,11 +7,12 @@ import ( "os" "testing" - 
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/fr32" ffi "github.com/filecoin-project/filecoin-ffi" + commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" + "github.com/filecoin-project/go-state-types/abi" "github.com/stretchr/testify/require" @@ -29,7 +30,7 @@ func TestWriteTwoPcs(t *testing.T) { buf := bytes.Repeat([]byte{0xab * byte(i)}, int(paddedSize.Unpadded())) rawBytes = append(rawBytes, buf...) - rf, w, _ := ffiwrapper.ToReadableFile(bytes.NewReader(buf), int64(len(buf))) + rf, w, _ := commpffi.ToReadableFile(bytes.NewReader(buf), int64(len(buf))) _, _, _, err := ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg32GiBV1, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil) if err != nil { diff --git a/extern/sector-storage/fr32/fr32_test.go b/extern/sector-storage/fr32/fr32_test.go index 415134272d0..0626f72a976 100644 --- a/extern/sector-storage/fr32/fr32_test.go +++ b/extern/sector-storage/fr32/fr32_test.go @@ -9,15 +9,15 @@ import ( "testing" ffi "github.com/filecoin-project/filecoin-ffi" + commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" "github.com/filecoin-project/go-state-types/abi" "github.com/stretchr/testify/require" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/fr32" ) func padFFI(buf []byte) []byte { - rf, w, _ := ffiwrapper.ToReadableFile(bytes.NewReader(buf), int64(len(buf))) + rf, w, _ := commpffi.ToReadableFile(bytes.NewReader(buf), int64(len(buf))) tf, _ := ioutil.TempFile("/tmp/", "scrb-") _, _, _, err := ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg32GiBV1, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil) diff --git a/extern/sector-storage/fr32/readers.go b/extern/sector-storage/fr32/readers.go index 20f3e9b3185..f14d5bf1cbd 100644 --- a/extern/sector-storage/fr32/readers.go +++ b/extern/sector-storage/fr32/readers.go @@ -51,13 +51,12 @@ func 
(r *unpadReader) Read(out []byte) (int, error) { r.left -= uint64(todo) - n, err := r.src.Read(r.work[:todo]) + n, err := io.ReadAtLeast(r.src, r.work[:todo], int(todo)) if err != nil && err != io.EOF { return n, err } - - if n != int(todo) { - return 0, xerrors.Errorf("didn't read enough: %w", err) + if n < int(todo) { + return 0, xerrors.Errorf("didn't read enough: %d / %d, left %d, out %d", n, todo, r.left, len(out)) } Unpad(r.work[:todo], out[:todo.Unpadded()]) diff --git a/extern/sector-storage/fr32/readers_test.go b/extern/sector-storage/fr32/readers_test.go index 706af5fee79..2411955529a 100644 --- a/extern/sector-storage/fr32/readers_test.go +++ b/extern/sector-storage/fr32/readers_test.go @@ -1,6 +1,7 @@ package fr32_test import ( + "bufio" "bytes" "io/ioutil" "testing" @@ -25,7 +26,8 @@ func TestUnpadReader(t *testing.T) { t.Fatal(err) } - readered, err := ioutil.ReadAll(r) + // using bufio reader to make sure reads are big enough for the padreader - it can't handle small reads right now + readered, err := ioutil.ReadAll(bufio.NewReaderSize(r, 512)) if err != nil { t.Fatal(err) } diff --git a/extern/sector-storage/fsutil/filesize_unix.go b/extern/sector-storage/fsutil/filesize_unix.go index 500e54386c3..7df8dae4c96 100644 --- a/extern/sector-storage/fsutil/filesize_unix.go +++ b/extern/sector-storage/fsutil/filesize_unix.go @@ -2,6 +2,7 @@ package fsutil import ( "os" + "path/filepath" "syscall" "golang.org/x/xerrors" @@ -11,19 +12,32 @@ type SizeInfo struct { OnDisk int64 } -// FileSize returns bytes used by a file on disk +// FileSize returns bytes used by a file or directory on disk +// NOTE: We care about the allocated bytes, not file or directory size func FileSize(path string) (SizeInfo, error) { - var stat syscall.Stat_t - if err := syscall.Stat(path, &stat); err != nil { - if err == syscall.ENOENT { + var size int64 + err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if 
!info.IsDir() { + stat, ok := info.Sys().(*syscall.Stat_t) + if !ok { + return xerrors.New("FileInfo.Sys of wrong type") + } + + // NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize return SizeInfo{size}, nil + // See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html + size += int64(stat.Blocks) * 512 // nolint NOTE: int64 cast is needed on osx + } + return err + }) + if err != nil { + if os.IsNotExist(err) { return SizeInfo{}, os.ErrNotExist } - return SizeInfo{}, xerrors.Errorf("stat: %w", err) + return SizeInfo{}, xerrors.Errorf("filepath.Walk err: %w", err) } - // NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize - // See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html - return SizeInfo{ - int64(stat.Blocks) * 512, // nolint NOTE: int64 cast is needed on osx - }, nil + return SizeInfo{size}, nil } diff --git a/extern/sector-storage/fsutil/statfs.go b/extern/sector-storage/fsutil/statfs.go index 2a00ccb9aba..50ec86d463f 100644 --- a/extern/sector-storage/fsutil/statfs.go +++ b/extern/sector-storage/fsutil/statfs.go @@ -1,7 +1,12 @@ package fsutil type FsStat struct { - Capacity int64 - Available int64 // Available to use for sector storage - Reserved int64 + Capacity int64 + Available int64 // Available to use for sector storage + FSAvailable int64 // Available in the filesystem + Reserved int64 + + // non-zero when storage has configured MaxStorage + Max int64 + Used int64 } diff --git a/extern/sector-storage/fsutil/statfs_unix.go b/extern/sector-storage/fsutil/statfs_unix.go index 831fd8b4f10..da09c5c60fe 100644 --- a/extern/sector-storage/fsutil/statfs_unix.go +++ b/extern/sector-storage/fsutil/statfs_unix.go @@ -15,7 +15,9 @@ func Statfs(path string) (FsStat, error) { // force int64 to handle platform specific differences //nolint:unconvert return FsStat{ - Capacity: int64(stat.Blocks) * int64(stat.Bsize), - Available: int64(stat.Bavail) * int64(stat.Bsize), + Capacity: int64(stat.Blocks) * 
int64(stat.Bsize), + + Available: int64(stat.Bavail) * int64(stat.Bsize), + FSAvailable: int64(stat.Bavail) * int64(stat.Bsize), }, nil } diff --git a/extern/sector-storage/fsutil/statfs_windows.go b/extern/sector-storage/fsutil/statfs_windows.go index d785651826e..87ff75708d0 100644 --- a/extern/sector-storage/fsutil/statfs_windows.go +++ b/extern/sector-storage/fsutil/statfs_windows.go @@ -22,7 +22,8 @@ func Statfs(volumePath string) (FsStat, error) { uintptr(unsafe.Pointer(&availBytes))) return FsStat{ - Capacity: totalBytes, - Available: availBytes, + Capacity: totalBytes, + Available: availBytes, + FSAvailable: availBytes, }, nil } diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go index f32a363e7ba..bf676bffaa1 100644 --- a/extern/sector-storage/manager.go +++ b/extern/sector-storage/manager.go @@ -29,8 +29,6 @@ var log = logging.Logger("advmgr") var ErrNoWorkers = errors.New("no suitable workers found") -type URLs []string - type Worker interface { storiface.WorkerCalls @@ -47,10 +45,6 @@ type Worker interface { } type SectorManager interface { - SectorSize() abi.SectorSize - - ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error - ffiwrapper.StorageSealer storage.Prover storiface.WorkerReturn @@ -60,9 +54,11 @@ type SectorManager interface { type WorkerID uuid.UUID // worker session UUID var ClosedWorkerID = uuid.UUID{} -type Manager struct { - scfg *ffiwrapper.Config +func (w WorkerID) String() string { + return uuid.UUID(w).String() +} +type Manager struct { ls stores.LocalStorage storage *stores.Remote localStore *stores.Local @@ -89,6 +85,20 @@ type result struct { err error } +// ResourceFilteringStrategy is an enum indicating the kinds of resource +// filtering strategies that can be configured for workers. 
+type ResourceFilteringStrategy string + +const ( + // ResourceFilteringHardware specifies that available hardware resources + // should be evaluated when scheduling a task against the worker. + ResourceFilteringHardware = ResourceFilteringStrategy("hardware") + + // ResourceFilteringDisabled disables resource filtering against this + // worker. The scheduler may assign any task to this worker. + ResourceFilteringDisabled = ResourceFilteringStrategy("disabled") +) + type SealerConfig struct { ParallelFetchLimit int @@ -98,6 +108,11 @@ type SealerConfig struct { AllowPreCommit2 bool AllowCommit bool AllowUnseal bool + + // ResourceFiltering instructs the system which resource filtering strategy + // to use when evaluating tasks against this worker. An empty value defaults + // to "hardware". + ResourceFiltering ResourceFilteringStrategy } type StorageAuth http.Header @@ -105,29 +120,20 @@ type StorageAuth http.Header type WorkerStateStore *statestore.StateStore type ManagerStateStore *statestore.StateStore -func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc SealerConfig, urls URLs, sa StorageAuth, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) { - lstor, err := stores.NewLocal(ctx, ls, si, urls) - if err != nil { - return nil, err - } - - prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si, spt: cfg.SealProofType}, cfg) +func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls stores.LocalStorage, si stores.SectorIndex, sc SealerConfig, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) { + prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si}) if err != nil { return nil, xerrors.Errorf("creating prover instance: %w", err) } - stor := stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit) - m := &Manager{ - scfg: cfg, - ls: ls, storage: stor, localStore: lstor, - remoteHnd: &stores.FetchHandler{Local: lstor}, + 
remoteHnd: &stores.FetchHandler{Local: lstor, PfHandler: &stores.DefaultPartialFileHandler{}}, index: si, - sched: newScheduler(cfg.SealProofType), + sched: newScheduler(), Prover: prover, @@ -143,7 +149,7 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg go m.sched.runSched() localTasks := []sealtasks.TaskType{ - sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, sealtasks.TTReadUnsealed, + sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, } if sc.AllowAddPiece { localTasks = append(localTasks, sealtasks.TTAddPiece) @@ -161,10 +167,12 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg localTasks = append(localTasks, sealtasks.TTUnseal) } - err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{ - SealProof: cfg.SealProofType, - TaskTypes: localTasks, - }, stor, lstor, si, m, wss)) + wcfg := WorkerConfig{ + IgnoreResourceFiltering: sc.ResourceFiltering == ResourceFilteringDisabled, + TaskTypes: localTasks, + } + worker := NewLocalWorker(wcfg, stor, lstor, si, m, wss) + err = m.AddWorker(ctx, worker) if err != nil { return nil, xerrors.Errorf("adding local worker: %w", err) } @@ -198,133 +206,85 @@ func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) { m.remoteHnd.ServeHTTP(w, r) } -func (m *Manager) SectorSize() abi.SectorSize { - sz, _ := m.scfg.SealProofType.SectorSize() - return sz -} - func schedNop(context.Context, Worker) error { return nil } -func (m *Manager) schedFetch(sector abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) func(context.Context, Worker) error { +func (m *Manager) schedFetch(sector storage.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) func(context.Context, Worker) error { return func(ctx context.Context, worker Worker) error { _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, ft, ptype, am)) return err } } -func (m *Manager) readPiece(sink 
io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, rok *bool) func(ctx context.Context, w Worker) error { - return func(ctx context.Context, w Worker) error { - r, err := m.waitSimpleCall(ctx)(w.ReadPiece(ctx, sink, sector, offset, size)) - if err != nil { - return err - } - *rok = r.(bool) - return nil - } -} - -func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (foundUnsealed bool, readOk bool, selector WorkerSelector, returnErr error) { - - // acquire a lock purely for reading unsealed sectors +// SectorsUnsealPiece will Unseal the Sealed sector file for the given sector. +// It will schedule the Unsealing task on a worker that either already has the sealed sector files or has space in +// one of it's sealing scratch spaces to store them after fetching them from another worker. +// If the chosen worker already has the Unsealed sector file, we will NOT Unseal the sealed sector file again. 
+func (m *Manager) SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed *cid.Cid) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, storiface.FTUnsealed, storiface.FTNone); err != nil { - returnErr = xerrors.Errorf("acquiring read sector lock: %w", err) - return - } - - // passing 0 spt because we only need it when allowFetch is true - best, err := m.index.StorageFindSector(ctx, sector, storiface.FTUnsealed, 0, false) - if err != nil { - returnErr = xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err) - return - } - - foundUnsealed = len(best) > 0 - if foundUnsealed { // append to existing - // There is unsealed sector, see if we can read from it - - selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false) - - err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), - m.readPiece(sink, sector, offset, size, &readOk)) - if err != nil { - returnErr = xerrors.Errorf("reading piece from sealed sector: %w", err) - } - } else { - selector = newAllocSelector(m.index, storiface.FTUnsealed, storiface.PathSealing) - } - return -} - -func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error { - foundUnsealed, readOk, selector, err := m.tryReadUnsealedPiece(ctx, sink, sector, offset, size) - if err != nil { - return err - } - if readOk { - return nil - } - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - if err := m.index.StorageLock(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTUnsealed); err != nil { + log.Debugf("acquire unseal sector lock for sector %d", sector.ID) + if err := 
m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTUnsealed); err != nil { return xerrors.Errorf("acquiring unseal sector lock: %w", err) } - unsealFetch := func(ctx context.Context, worker Worker) error { + // if the selected worker does NOT have the sealed files for the sector, instruct it to fetch it from a worker that has them and + // put it in the sealing scratch space. + sealFetch := func(ctx context.Context, worker Worker) error { + log.Debugf("copy sealed/cache sector data for sector %d", sector.ID) if _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy)); err != nil { return xerrors.Errorf("copy sealed/cache sector data: %w", err) } - if foundUnsealed { - if _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove)); err != nil { - return xerrors.Errorf("copy unsealed sector data: %w", err) - } - } return nil } - if unsealed == cid.Undef { + if unsealed == nil { return xerrors.Errorf("cannot unseal piece (sector: %d, offset: %d size: %d) - unsealed cid is undefined", sector, offset, size) } - err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, unsealFetch, func(ctx context.Context, w Worker) error { - // TODO: make restartable - _, err := m.waitSimpleCall(ctx)(w.UnsealPiece(ctx, sector, offset, size, ticket, unsealed)) - return err - }) + + ssize, err := sector.ProofType.SectorSize() if err != nil { - return err + return xerrors.Errorf("getting sector size: %w", err) } - selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false) + // selector will schedule the Unseal task on a worker that either already has the sealed sector files or has space in + // one of it's sealing scratch spaces to store them after fetching them from another worker. 
+ selector := newExistingSelector(m.index, sector.ID, storiface.FTSealed|storiface.FTCache, true) - err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), - m.readPiece(sink, sector, offset, size, &readOk)) - if err != nil { - return xerrors.Errorf("reading piece from sealed sector: %w", err) - } + log.Debugf("will schedule unseal for sector %d", sector.ID) + err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, sealFetch, func(ctx context.Context, w Worker) error { + // TODO: make restartable + + // NOTE: we're unsealing the whole sector here as with SDR we can't really + // unseal the sector partially. Requesting the whole sector here can + // save us some work in case another piece is requested from here + log.Debugf("calling unseal sector on worker, sectoID=%d", sector.ID) - if !readOk { - return xerrors.Errorf("failed to read unsealed piece") + // Note: This unseal piece call will essentially become a no-op if the worker already has an Unsealed sector file for the given sector. 
+ _, err := m.waitSimpleCall(ctx)(w.UnsealPiece(ctx, sector, 0, abi.PaddedPieceSize(ssize).Unpadded(), ticket, *unsealed)) + log.Debugf("completed unseal sector %d", sector.ID) + return err + }) + if err != nil { + return xerrors.Errorf("worker UnsealPiece call: %s", err) } return nil } -func (m *Manager) NewSector(ctx context.Context, sector abi.SectorID) error { +func (m *Manager) NewSector(ctx context.Context, sector storage.SectorRef) error { log.Warnf("stub NewSector") return nil } -func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { +func (m *Manager) AddPiece(ctx context.Context, sector storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTUnsealed); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTUnsealed); err != nil { return abi.PieceInfo{}, xerrors.Errorf("acquiring sector lock: %w", err) } @@ -333,7 +293,7 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie if len(existingPieces) == 0 { // new selector = newAllocSelector(m.index, storiface.FTUnsealed, storiface.PathSealing) } else { // use existing - selector = newExistingSelector(m.index, sector, storiface.FTUnsealed, false) + selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false) } var out abi.PieceInfo @@ -342,14 +302,16 @@ func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPie if err != nil { return err } - out = p.(abi.PieceInfo) + if p != nil { + out = p.(abi.PieceInfo) + } return nil }) return out, err } -func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err 
error) { +func (m *Manager) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -366,7 +328,9 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke waitErr = werr return } - out = p.(storage.PreCommit1Out) + if p != nil { + out = p.(storage.PreCommit1Out) + } } if wait { // already in progress @@ -374,7 +338,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke return out, waitErr } - if err := m.index.StorageLock(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache); err != nil { return nil, xerrors.Errorf("acquiring sector lock: %w", err) } @@ -383,7 +347,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke selector := newAllocSelector(m.index, storiface.FTCache|storiface.FTSealed, storiface.PathSealing) err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error { - err := m.startWork(ctx, wk)(w.SealPreCommit1(ctx, sector, ticket, pieces)) + err := m.startWork(ctx, w, wk)(w.SealPreCommit1(ctx, sector, ticket, pieces)) if err != nil { return err } @@ -398,7 +362,7 @@ func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticke return out, waitErr } -func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (out storage.SectorCids, err error) { +func (m *Manager) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (out storage.SectorCids, err error) { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -415,7 +379,9 @@ 
func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase waitErr = werr return } - out = p.(storage.SectorCids) + if p != nil { + out = p.(storage.SectorCids) + } } if wait { // already in progress @@ -423,14 +389,14 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase return out, waitErr } - if err := m.index.StorageLock(ctx, sector, storiface.FTSealed, storiface.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed, storiface.FTCache); err != nil { return storage.SectorCids{}, xerrors.Errorf("acquiring sector lock: %w", err) } - selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, true) + selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, true) err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, m.schedFetch(sector, storiface.FTCache|storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error { - err := m.startWork(ctx, wk)(w.SealPreCommit2(ctx, sector, phase1Out)) + err := m.startWork(ctx, w, wk)(w.SealPreCommit2(ctx, sector, phase1Out)) if err != nil { return err } @@ -445,7 +411,7 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase return out, waitErr } -func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (out storage.Commit1Out, err error) { +func (m *Manager) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (out storage.Commit1Out, err error) { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -462,7 +428,9 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a waitErr = werr return } - out = p.(storage.Commit1Out) 
+ if p != nil { + out = p.(storage.Commit1Out) + } } if wait { // already in progress @@ -470,17 +438,17 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a return out, waitErr } - if err := m.index.StorageLock(ctx, sector, storiface.FTSealed, storiface.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed, storiface.FTCache); err != nil { return storage.Commit1Out{}, xerrors.Errorf("acquiring sector lock: %w", err) } // NOTE: We set allowFetch to false in so that we always execute on a worker // with direct access to the data. We want to do that because this step is // generally very cheap / fast, and transferring data is not worth the effort - selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, false) + selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, false) err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, m.schedFetch(sector, storiface.FTCache|storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error { - err := m.startWork(ctx, wk)(w.SealCommit1(ctx, sector, ticket, seed, pieces, cids)) + err := m.startWork(ctx, w, wk)(w.SealCommit1(ctx, sector, ticket, seed, pieces, cids)) if err != nil { return err } @@ -495,7 +463,7 @@ func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket a return out, waitErr } -func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (out storage.Proof, err error) { +func (m *Manager) SealCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.Commit1Out) (out storage.Proof, err error) { wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTCommit2, sector, phase1Out) if err != nil { return storage.Proof{}, xerrors.Errorf("getWork: %w", err) @@ -509,7 +477,9 @@ func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou waitErr 
= werr return } - out = p.(storage.Proof) + if p != nil { + out = p.(storage.Proof) + } } if wait { // already in progress @@ -520,7 +490,7 @@ func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou selector := newTaskSelector() err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit2, selector, schedNop, func(ctx context.Context, w Worker) error { - err := m.startWork(ctx, wk)(w.SealCommit2(ctx, sector, phase1Out)) + err := m.startWork(ctx, w, wk)(w.SealCommit2(ctx, sector, phase1Out)) if err != nil { return err } @@ -536,17 +506,17 @@ func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Ou return out, waitErr } -func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { +func (m *Manager) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil { return xerrors.Errorf("acquiring sector lock: %w", err) } unsealed := storiface.FTUnsealed { - unsealedStores, err := m.index.StorageFindSector(ctx, sector, storiface.FTUnsealed, 0, false) + unsealedStores, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false) if err != nil { return xerrors.Errorf("finding unsealed sector: %w", err) } @@ -556,10 +526,25 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU } } - selector := newExistingSelector(m.index, sector, storiface.FTCache|storiface.FTSealed, false) + pathType := storiface.PathStorage + { + sealedStores, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTSealed, 0, false) + if err != nil { + return xerrors.Errorf("finding sealed sector: 
%w", err) + } + + for _, store := range sealedStores { + if store.CanSeal { + pathType = storiface.PathSealing + break + } + } + } + + selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, false) err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector, - m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|unsealed, storiface.PathSealing, storiface.AcquireMove), + m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|unsealed, pathType, storiface.AcquireMove), func(ctx context.Context, w Worker) error { _, err := m.waitSimpleCall(ctx)(w.FinalizeSector(ctx, sector, keepUnsealed)) return err @@ -589,76 +574,76 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU return nil } -func (m *Manager) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { +func (m *Manager) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error { log.Warnw("ReleaseUnsealed todo") return nil } -func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error { +func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil { return xerrors.Errorf("acquiring sector lock: %w", err) } var err error - if rerr := m.storage.Remove(ctx, sector, storiface.FTSealed, true); rerr != nil { + if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTSealed, true); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr)) } - if rerr := m.storage.Remove(ctx, sector, storiface.FTCache, true); rerr != nil { + if rerr := m.storage.Remove(ctx, sector.ID, 
storiface.FTCache, true); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr)) } - if rerr := m.storage.Remove(ctx, sector, storiface.FTUnsealed, true); rerr != nil { + if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) } return err } -func (m *Manager) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error { - return m.returnResult(callID, pi, err) +func (m *Manager) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error { + return m.returnResult(ctx, callID, pi, err) } -func (m *Manager) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error { - return m.returnResult(callID, p1o, err) +func (m *Manager) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error { + return m.returnResult(ctx, callID, p1o, err) } -func (m *Manager) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error { - return m.returnResult(callID, sealed, err) +func (m *Manager) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error { + return m.returnResult(ctx, callID, sealed, err) } -func (m *Manager) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error { - return m.returnResult(callID, out, err) +func (m *Manager) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error { + return m.returnResult(ctx, callID, out, err) } -func (m *Manager) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error { - return 
m.returnResult(callID, proof, err) +func (m *Manager) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error { + return m.returnResult(ctx, callID, proof, err) } -func (m *Manager) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error { - return m.returnResult(callID, nil, err) +func (m *Manager) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { + return m.returnResult(ctx, callID, nil, err) } -func (m *Manager) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error { - return m.returnResult(callID, nil, err) +func (m *Manager) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { + return m.returnResult(ctx, callID, nil, err) } -func (m *Manager) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error { - return m.returnResult(callID, nil, err) +func (m *Manager) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { + return m.returnResult(ctx, callID, nil, err) } -func (m *Manager) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error { - return m.returnResult(callID, nil, err) +func (m *Manager) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { + return m.returnResult(ctx, callID, nil, err) } -func (m *Manager) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error { - return m.returnResult(callID, ok, err) +func (m *Manager) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error { + return m.returnResult(ctx, callID, ok, err) } -func (m *Manager) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error { - return m.returnResult(callID, nil, err) +func (m *Manager) ReturnFetch(ctx context.Context, 
callID storiface.CallID, err *storiface.CallError) error { + return m.returnResult(ctx, callID, nil, err) } func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error) { @@ -679,12 +664,62 @@ func (m *Manager) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, erro return m.storage.FsStat(ctx, id) } -func (m *Manager) SchedDiag(ctx context.Context) (interface{}, error) { - return m.sched.Info(ctx) +func (m *Manager) SchedDiag(ctx context.Context, doSched bool) (interface{}, error) { + if doSched { + select { + case m.sched.workerChange <- struct{}{}: + case <-ctx.Done(): + return nil, ctx.Err() + } + } + + si, err := m.sched.Info(ctx) + if err != nil { + return nil, err + } + + type SchedInfo interface{} + i := struct { + SchedInfo + + ReturnedWork []string + Waiting []string + + CallToWork map[string]string + + EarlyRet []string + }{ + SchedInfo: si, + + CallToWork: map[string]string{}, + } + + m.workLk.Lock() + + for w := range m.results { + i.ReturnedWork = append(i.ReturnedWork, w.String()) + } + + for id := range m.callRes { + i.EarlyRet = append(i.EarlyRet, id.String()) + } + + for w := range m.waitRes { + i.Waiting = append(i.Waiting, w.String()) + } + + for c, w := range m.callToWork { + i.CallToWork[c.String()] = w.String() + } + + m.workLk.Unlock() + + return i, nil } func (m *Manager) Close(ctx context.Context) error { return m.sched.Close(ctx) } +var _ Unsealer = &Manager{} var _ SectorManager = &Manager{} diff --git a/extern/sector-storage/manager_calltracker.go b/extern/sector-storage/manager_calltracker.go index f0aa0445eff..332a08817bb 100644 --- a/extern/sector-storage/manager_calltracker.go +++ b/extern/sector-storage/manager_calltracker.go @@ -5,9 +5,9 @@ import ( "crypto/sha256" "encoding/hex" "encoding/json" - "errors" "fmt" "os" + "time" "golang.org/x/xerrors" @@ -41,6 +41,9 @@ type WorkState struct { WorkerCall storiface.CallID // Set when entering wsRunning WorkError string // Status = wsDone, set when failed to 
start work + + WorkerHostname string // hostname of last worker handling this job + StartTime int64 // unix seconds } func newWorkID(method sealtasks.TaskType, params ...interface{}) (WorkID, error) { @@ -85,8 +88,7 @@ func (m *Manager) setupWorkTracker() { log.Errorf("cleannig up work state for %s", wid) } case wsDone: - // realistically this shouldn't ever happen as we return results - // immediately after getting them + // can happen after restart, abandoning work, and another restart log.Warnf("dropping done work, no result, wid %s", wid) if err := m.work.Get(wid).End(); err != nil { @@ -144,7 +146,7 @@ func (m *Manager) getWork(ctx context.Context, method sealtasks.TaskType, params switch ws.Status { case wsStarted: - log.Warn("canceling started (not running) work %s", wid) + log.Warnf("canceling started (not running) work %s", wid) if err := m.work.Get(wid).End(); err != nil { log.Errorf("cancel: failed to cancel started work %s: %+v", wid, err) @@ -152,9 +154,9 @@ func (m *Manager) getWork(ctx context.Context, method sealtasks.TaskType, params } case wsDone: // TODO: still remove? 
- log.Warn("cancel called on work %s in 'done' state", wid) + log.Warnf("cancel called on work %s in 'done' state", wid) case wsRunning: - log.Warn("cancel called on work %s in 'running' state (manager shutting down?)", wid) + log.Warnf("cancel called on work %s in 'running' state (manager shutting down?)", wid) } }, nil @@ -167,8 +169,16 @@ func (m *Manager) getWork(ctx context.Context, method sealtasks.TaskType, params }, nil } -func (m *Manager) startWork(ctx context.Context, wk WorkID) func(callID storiface.CallID, err error) error { +func (m *Manager) startWork(ctx context.Context, w Worker, wk WorkID) func(callID storiface.CallID, err error) error { return func(callID storiface.CallID, err error) error { + var hostname string + info, ierr := w.Info(ctx) + if ierr != nil { + hostname = "[err]" + } else { + hostname = info.Hostname + } + m.workLk.Lock() defer m.workLk.Unlock() @@ -194,6 +204,8 @@ func (m *Manager) startWork(ctx context.Context, wk WorkID) func(callID storifac ws.Status = wsRunning } ws.WorkerCall = callID + ws.WorkerHostname = hostname + ws.StartTime = time.Now().Unix() return nil }) if err != nil { @@ -251,11 +263,36 @@ func (m *Manager) waitWork(ctx context.Context, wid WorkID) (interface{}, error) return nil, xerrors.Errorf("something else in waiting on callRes") } + done := func() { + delete(m.results, wid) + + _, ok := m.callToWork[ws.WorkerCall] + if ok { + delete(m.callToWork, ws.WorkerCall) + } + + err := m.work.Get(wk).End() + if err != nil { + // Not great, but not worth discarding potentially multi-hour computation over this + log.Errorf("marking work as done: %+v", err) + } + } + + // the result can already be there if the work was running, manager restarted, + // and the worker has delivered the result before we entered waitWork + res, ok := m.results[wid] + if ok { + done() + m.workLk.Unlock() + return res.r, res.err + } + ch, ok := m.waitRes[wid] if !ok { ch = make(chan struct{}) m.waitRes[wid] = ch } + m.workLk.Unlock() select { 
@@ -264,18 +301,7 @@ func (m *Manager) waitWork(ctx context.Context, wid WorkID) (interface{}, error) defer m.workLk.Unlock() res := m.results[wid] - delete(m.results, wid) - - _, ok := m.callToWork[ws.WorkerCall] - if ok { - delete(m.callToWork, ws.WorkerCall) - } - - err := m.work.Get(wk).End() - if err != nil { - // Not great, but not worth discarding potentially multi-hour computation over this - log.Errorf("marking work as done: %+v", err) - } + done() return res.r, res.err case <-ctx.Done(): @@ -323,18 +349,15 @@ func (m *Manager) waitCall(ctx context.Context, callID storiface.CallID) (interf } } -func (m *Manager) returnResult(callID storiface.CallID, r interface{}, serr string) error { - var err error - if serr != "" { - err = errors.New(serr) - } - +func (m *Manager) returnResult(ctx context.Context, callID storiface.CallID, r interface{}, cerr *storiface.CallError) error { res := result{ - r: r, - err: err, + r: r, + } + if cerr != nil { + res.err = cerr } - m.sched.workTracker.onDone(callID) + m.sched.workTracker.onDone(ctx, callID) m.workLk.Lock() defer m.workLk.Unlock() @@ -365,6 +388,20 @@ func (m *Manager) returnResult(callID storiface.CallID, r interface{}, serr stri m.results[wid] = res + err := m.work.Get(wid).Mutate(func(ws *WorkState) error { + ws.Status = wsDone + return nil + }) + if err != nil { + // in the unlikely case: + // * manager has restarted, and we're still tracking this work, and + // * the work is abandoned (storage-fsm doesn't do a matching call on the sector), and + // * the call is returned from the worker, and + // * this errors + // the user will get jobs stuck in ret-wait state + log.Errorf("marking work as done: %+v", err) + } + _, found := m.waitRes[wid] if found { close(m.waitRes[wid]) @@ -373,3 +410,8 @@ func (m *Manager) returnResult(callID storiface.CallID, r interface{}, serr stri return nil } + +func (m *Manager) Abort(ctx context.Context, call storiface.CallID) error { + // TODO: Allow temp error + return 
m.returnResult(ctx, call, nil, storiface.Err(storiface.ErrUnknown, xerrors.New("task aborted"))) +} diff --git a/extern/sector-storage/manager_test.go b/extern/sector-storage/manager_test.go index 93863296761..d4044bbaebb 100644 --- a/extern/sector-storage/manager_test.go +++ b/extern/sector-storage/manager_test.go @@ -10,16 +10,18 @@ import ( "path/filepath" "strings" "sync" + "sync/atomic" "testing" "time" "github.com/google/uuid" "github.com/ipfs/go-datastore" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-statestore" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" @@ -89,28 +91,23 @@ func newTestMgr(ctx context.Context, t *testing.T, ds datastore.Datastore) (*Man st := newTestStorage(t) si := stores.NewIndex() - cfg := &ffiwrapper.Config{ - SealProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, - } lstor, err := stores.NewLocal(ctx, st, si, nil) require.NoError(t, err) - prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, spt: cfg.SealProofType}, cfg) + prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si}) require.NoError(t, err) - stor := stores.NewRemote(lstor, si, nil, 6000) + stor := stores.NewRemote(lstor, si, nil, 6000, &stores.DefaultPartialFileHandler{}) m := &Manager{ - scfg: cfg, - ls: st, storage: stor, localStore: lstor, remoteHnd: &stores.FetchHandler{Local: lstor}, index: si, - sched: newScheduler(cfg.SealProofType), + sched: newScheduler(), Prover: prover, @@ -140,12 +137,14 @@ func TestSimple(t *testing.T) { } err := m.AddWorker(ctx, newTestWorker(WorkerConfig{ - SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1, TaskTypes: localTasks, }, lstor, m)) require.NoError(t, err) - sid := abi.SectorID{Miner: 1000, Number: 1} + sid := 
storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, + } pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) require.NoError(t, err) @@ -175,14 +174,16 @@ func TestRedoPC1(t *testing.T) { } tw := newTestWorker(WorkerConfig{ - SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1, TaskTypes: localTasks, }, lstor, m) err := m.AddWorker(ctx, tw) require.NoError(t, err) - sid := abi.SectorID{Miner: 1000, Number: 1} + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, + } pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) require.NoError(t, err) @@ -210,76 +211,102 @@ func TestRedoPC1(t *testing.T) { // Manager restarts in the middle of a task, restarts it, it completes func TestRestartManager(t *testing.T) { - logging.SetAllLoggers(logging.LevelDebug) + test := func(returnBeforeCall bool) func(*testing.T) { + return func(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) - ctx, done := context.WithCancel(context.Background()) - defer done() + ctx, done := context.WithCancel(context.Background()) + defer done() - ds := datastore.NewMapDatastore() + ds := datastore.NewMapDatastore() - m, lstor, _, _, cleanup := newTestMgr(ctx, t, ds) - defer cleanup() + m, lstor, _, _, cleanup := newTestMgr(ctx, t, ds) + defer cleanup() - localTasks := []sealtasks.TaskType{ - sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, - } + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, + } - tw := newTestWorker(WorkerConfig{ - SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1, - TaskTypes: localTasks, - }, lstor, m) + tw := newTestWorker(WorkerConfig{ + TaskTypes: localTasks, + }, lstor, m) - err := 
m.AddWorker(ctx, tw) - require.NoError(t, err) + err := m.AddWorker(ctx, tw) + require.NoError(t, err) - sid := abi.SectorID{Miner: 1000, Number: 1} + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, + } - pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) - require.NoError(t, err) - require.Equal(t, abi.PaddedPieceSize(1024), pi.Size) + pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) + require.NoError(t, err) + require.Equal(t, abi.PaddedPieceSize(1024), pi.Size) - piz, err := m.AddPiece(ctx, sid, nil, 1016, bytes.NewReader(make([]byte, 1016)[:])) - require.NoError(t, err) - require.Equal(t, abi.PaddedPieceSize(1024), piz.Size) + piz, err := m.AddPiece(ctx, sid, nil, 1016, bytes.NewReader(make([]byte, 1016)[:])) + require.NoError(t, err) + require.Equal(t, abi.PaddedPieceSize(1024), piz.Size) - pieces := []abi.PieceInfo{pi, piz} + pieces := []abi.PieceInfo{pi, piz} - ticket := abi.SealRandomness{0, 9, 9, 9, 9, 9, 9, 9} + ticket := abi.SealRandomness{0, 9, 9, 9, 9, 9, 9, 9} - tw.pc1lk.Lock() - tw.pc1wait = &sync.WaitGroup{} - tw.pc1wait.Add(1) + tw.pc1lk.Lock() + tw.pc1wait = &sync.WaitGroup{} + tw.pc1wait.Add(1) - var cwg sync.WaitGroup - cwg.Add(1) + var cwg sync.WaitGroup + cwg.Add(1) - var perr error - go func() { - defer cwg.Done() - _, perr = m.SealPreCommit1(ctx, sid, ticket, pieces) - }() + var perr error + go func() { + defer cwg.Done() + _, perr = m.SealPreCommit1(ctx, sid, ticket, pieces) + }() - tw.pc1wait.Wait() + tw.pc1wait.Wait() - require.NoError(t, m.Close(ctx)) - tw.ret = nil + require.NoError(t, m.Close(ctx)) + tw.ret = nil - cwg.Wait() - require.Error(t, perr) + cwg.Wait() + require.Error(t, perr) - m, _, _, _, cleanup2 := newTestMgr(ctx, t, ds) - defer cleanup2() + m, _, _, _, cleanup2 := newTestMgr(ctx, t, ds) + defer cleanup2() - tw.ret = m // simulate jsonrpc auto-reconnect - 
err = m.AddWorker(ctx, tw) - require.NoError(t, err) + tw.ret = m // simulate jsonrpc auto-reconnect + err = m.AddWorker(ctx, tw) + require.NoError(t, err) - tw.pc1lk.Unlock() + if returnBeforeCall { + tw.pc1lk.Unlock() + time.Sleep(100 * time.Millisecond) - _, err = m.SealPreCommit1(ctx, sid, ticket, pieces) - require.NoError(t, err) + _, err = m.SealPreCommit1(ctx, sid, ticket, pieces) + } else { + done := make(chan struct{}) + go func() { + defer close(done) + _, err = m.SealPreCommit1(ctx, sid, ticket, pieces) + }() + + time.Sleep(100 * time.Millisecond) + tw.pc1lk.Unlock() + <-done + } + + require.NoError(t, err) + + require.Equal(t, 1, tw.pc1s) + + ws := m.WorkerJobs() + require.Empty(t, ws) + } + } - require.Equal(t, 1, tw.pc1s) + t.Run("callThenReturn", test(false)) + t.Run("returnThenCall", test(true)) } // Worker restarts in the middle of a task, task fails after restart @@ -304,14 +331,16 @@ func TestRestartWorker(t *testing.T) { w := newLocalWorker(func() (ffiwrapper.Storage, error) { return &testExec{apch: arch}, nil }, WorkerConfig{ - SealProof: 0, TaskTypes: localTasks, }, stor, lstor, idx, m, statestore.New(wds)) err := m.AddWorker(ctx, w) require.NoError(t, err) - sid := abi.SectorID{Miner: 1000, Number: 1} + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, + } apDone := make(chan struct{}) @@ -338,7 +367,6 @@ func TestRestartWorker(t *testing.T) { w = newLocalWorker(func() (ffiwrapper.Storage, error) { return &testExec{apch: arch}, nil }, WorkerConfig{ - SealProof: 0, TaskTypes: localTasks, }, stor, lstor, idx, m, statestore.New(wds)) @@ -352,3 +380,76 @@ func TestRestartWorker(t *testing.T) { require.NoError(t, err) require.Empty(t, uf) } + +func TestReenableWorker(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + stores.HeartbeatInterval = 5 * time.Millisecond + + ctx, done := context.WithCancel(context.Background()) + defer done() + + ds := 
datastore.NewMapDatastore() + + m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds) + defer cleanup() + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, + } + + wds := datastore.NewMapDatastore() + + arch := make(chan chan apres) + w := newLocalWorker(func() (ffiwrapper.Storage, error) { + return &testExec{apch: arch}, nil + }, WorkerConfig{ + TaskTypes: localTasks, + }, stor, lstor, idx, m, statestore.New(wds)) + + err := m.AddWorker(ctx, w) + require.NoError(t, err) + + time.Sleep(time.Millisecond * 100) + + i, _ := m.sched.Info(ctx) + require.Len(t, i.(SchedDiagInfo).OpenWindows, 2) + + // disable + atomic.StoreInt64(&w.testDisable, 1) + + for i := 0; i < 100; i++ { + if !m.WorkerStats()[w.session].Enabled { + break + } + + time.Sleep(time.Millisecond * 3) + } + require.False(t, m.WorkerStats()[w.session].Enabled) + + i, _ = m.sched.Info(ctx) + require.Len(t, i.(SchedDiagInfo).OpenWindows, 0) + + // reenable + atomic.StoreInt64(&w.testDisable, 0) + + for i := 0; i < 100; i++ { + if m.WorkerStats()[w.session].Enabled { + break + } + + time.Sleep(time.Millisecond * 3) + } + require.True(t, m.WorkerStats()[w.session].Enabled) + + for i := 0; i < 100; i++ { + info, _ := m.sched.Info(ctx) + if len(info.(SchedDiagInfo).OpenWindows) != 0 { + break + } + + time.Sleep(time.Millisecond * 3) + } + + i, _ = m.sched.Info(ctx) + require.Len(t, i.(SchedDiagInfo).OpenWindows, 2) +} diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index b3de99ce51b..273f0928e41 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -6,16 +6,18 @@ import ( "crypto/sha256" "fmt" "io" + "io/ioutil" "math/rand" "sync" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + ffiwrapper2 
"github.com/filecoin-project/go-commp-utils/ffiwrapper" commcid "github.com/filecoin-project/go-fil-commcid" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" @@ -26,22 +28,18 @@ var log = logging.Logger("sbmock") type SectorMgr struct { sectors map[abi.SectorID]*sectorState + failPoSt bool pieces map[cid.Cid][]byte - sectorSize abi.SectorSize nextSectorID abi.SectorNumber - proofType abi.RegisteredSealProof lk sync.Mutex } -type mockVerif struct{} - -func NewMockSectorMgr(ssize abi.SectorSize, genesisSectors []abi.SectorID) *SectorMgr { - rt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize) - if err != nil { - panic(err) - } +type mockVerifProver struct { + aggregates map[string]proof5.AggregateSealVerifyProofAndInfos // used for logging bad verifies +} +func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr { sectors := make(map[abi.SectorID]*sectorState) for _, sid := range genesisSectors { sectors[sid] = §orState{ @@ -53,9 +51,7 @@ func NewMockSectorMgr(ssize abi.SectorSize, genesisSectors []abi.SectorID) *Sect return &SectorMgr{ sectors: sectors, pieces: map[cid.Cid][]byte{}, - sectorSize: ssize, nextSectorID: 5, - proofType: rt, } } @@ -75,17 +71,21 @@ type sectorState struct { lk sync.Mutex } -func (mgr *SectorMgr) NewSector(ctx context.Context, sector abi.SectorID) error { +func (mgr *SectorMgr) NewSector(ctx context.Context, sector storage.SectorRef) error { return nil } -func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID abi.SectorID, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { - log.Warn("Add piece: ", sectorID, size, mgr.proofType) +func (mgr *SectorMgr) SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset 
storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error { + panic("SectorMgr: unsealing piece: implement me") +} + +func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { + log.Warn("Add piece: ", sectorID, size, sectorID.ProofType) var b bytes.Buffer tr := io.TeeReader(r, &b) - c, err := ffiwrapper.GeneratePieceCIDFromFile(mgr.proofType, tr, size) + c, err := ffiwrapper2.GeneratePieceCIDFromFile(sectorID.ProofType, tr, size) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("failed to generate piece cid: %w", err) } @@ -95,12 +95,12 @@ func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID abi.SectorID, exist mgr.lk.Lock() mgr.pieces[c] = b.Bytes() - ss, ok := mgr.sectors[sectorID] + ss, ok := mgr.sectors[sectorID.ID] if !ok { ss = §orState{ state: statePacking, } - mgr.sectors[sectorID] = ss + mgr.sectors[sectorID.ID] = ss } mgr.lk.Unlock() @@ -115,10 +115,6 @@ func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID abi.SectorID, exist }, nil } -func (mgr *SectorMgr) SectorSize() abi.SectorSize { - return mgr.sectorSize -} - func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) { mgr.lk.Lock() defer mgr.lk.Unlock() @@ -127,9 +123,13 @@ func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) { return id, nil } -func (mgr *SectorMgr) ForceState(sid abi.SectorID, st int) error { +func (mgr *SectorMgr) IsUnsealed(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + return false, nil +} + +func (mgr *SectorMgr) ForceState(sid storage.SectorRef, st int) error { mgr.lk.Lock() - ss, ok := mgr.sectors[sid] + ss, ok := mgr.sectors[sid.ID] mgr.lk.Unlock() if !ok { return xerrors.Errorf("no sector with id %d in storage", sid) @@ -140,18 +140,23 @@ func (mgr *SectorMgr) 
ForceState(sid abi.SectorID, st int) error { return nil } -func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { +func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) { mgr.lk.Lock() - ss, ok := mgr.sectors[sid] + ss, ok := mgr.sectors[sid.ID] mgr.lk.Unlock() if !ok { return nil, xerrors.Errorf("no sector with id %d in storage", sid) } + ssize, err := sid.ProofType.SectorSize() + if err != nil { + return nil, xerrors.Errorf("failed to get proof sector size: %w", err) + } + ss.lk.Lock() defer ss.lk.Unlock() - ussize := abi.PaddedPieceSize(mgr.sectorSize).Unpadded() + ussize := abi.PaddedPieceSize(ssize).Unpadded() // TODO: verify pieces in sinfo.pieces match passed in pieces @@ -180,7 +185,7 @@ func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, tick } } - commd, err := MockVerifier.GenerateDataCommitment(mgr.proofType, pis) + commd, err := MockVerifier.GenerateDataCommitment(sid.ProofType, pis) if err != nil { return nil, err } @@ -195,7 +200,7 @@ func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, tick return cc, nil } -func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) { +func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid storage.SectorRef, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) { db := []byte(string(phase1Out)) db[0] ^= 'd' @@ -214,9 +219,9 @@ func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phas }, nil } -func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) 
{ +func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) { mgr.lk.Lock() - ss, ok := mgr.sectors[sid] + ss, ok := mgr.sectors[sid.ID] mgr.lk.Unlock() if !ok { return nil, xerrors.Errorf("no such sector %d", sid) @@ -236,16 +241,21 @@ func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket var out [32]byte for i := range out { - out[i] = cids.Unsealed.Bytes()[i] + cids.Sealed.Bytes()[31-i] - ticket[i]*seed[i] ^ byte(sid.Number&0xff) + out[i] = cids.Unsealed.Bytes()[i] + cids.Sealed.Bytes()[31-i] - ticket[i]*seed[i] ^ byte(sid.ID.Number&0xff) } return out[:], nil } -func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) { - var out [1920]byte +func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid storage.SectorRef, phase1Out storage.Commit1Out) (proof storage.Proof, err error) { + plen, err := sid.ProofType.ProofSize() + if err != nil { + return nil, err + } + + out := make([]byte, plen) for i := range out[:len(phase1Out)] { - out[i] = phase1Out[i] ^ byte(sid.Number&0xff) + out[i] = phase1Out[i] ^ byte(sid.ID.Number&0xff) } return out[:], nil @@ -253,10 +263,10 @@ func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1O // Test Instrumentation Methods -func (mgr *SectorMgr) MarkFailed(sid abi.SectorID, failed bool) error { +func (mgr *SectorMgr) MarkFailed(sid storage.SectorRef, failed bool) error { mgr.lk.Lock() defer mgr.lk.Unlock() - ss, ok := mgr.sectors[sid] + ss, ok := mgr.sectors[sid.ID] if !ok { return fmt.Errorf("no such sector in storage") } @@ -265,10 +275,18 @@ func (mgr *SectorMgr) MarkFailed(sid abi.SectorID, failed bool) error { return nil } -func (mgr *SectorMgr) MarkCorrupted(sid abi.SectorID, corrupted bool) error { +func (mgr *SectorMgr) Fail() 
{ + mgr.lk.Lock() + defer mgr.lk.Unlock() + mgr.failPoSt = true + + return +} + +func (mgr *SectorMgr) MarkCorrupted(sid storage.SectorRef, corrupted bool) error { mgr.lk.Lock() defer mgr.lk.Unlock() - ss, ok := mgr.sectors[sid] + ss, ok := mgr.sectors[sid.ID] if !ok { return fmt.Errorf("no such sector in storage") } @@ -293,12 +311,23 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) { } } -func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, error) { +func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) { + mgr.lk.Lock() + defer mgr.lk.Unlock() + return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil } -func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) { - si := make([]proof2.SectorInfo, 0, len(sectorInfo)) +func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) { + mgr.lk.Lock() + defer mgr.lk.Unlock() + + if mgr.failPoSt { + return nil, nil, xerrors.Errorf("failed to post (mock)") + } + + si := make([]proof5.SectorInfo, 0, len(sectorInfo)) + var skipped []abi.SectorID var err error @@ -326,7 +355,9 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil } -func generateFakePoStProof(sectorInfo []proof2.SectorInfo, randomness abi.PoStRandomness) []byte { +func generateFakePoStProof(sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) []byte { + 
randomness[31] &= 0x3f + hasher := sha256.New() _, _ = hasher.Write(randomness) for _, info := range sectorInfo { @@ -339,13 +370,13 @@ func generateFakePoStProof(sectorInfo []proof2.SectorInfo, randomness abi.PoStRa } -func generateFakePoSt(sectorInfo []proof2.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof2.PoStProof { +func generateFakePoSt(sectorInfo []proof5.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof5.PoStProof { wp, err := rpt(sectorInfo[0].SealProof) if err != nil { panic(err) } - return []proof2.PoStProof{ + return []proof5.PoStProof{ { PoStProof: wp, ProofBytes: generateFakePoStProof(sectorInfo, randomness), @@ -353,123 +384,135 @@ func generateFakePoSt(sectorInfo []proof2.SectorInfo, rpt func(abi.RegisteredSea } } -func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error { - if len(mgr.sectors[sectorID].pieces) > 1 || offset != 0 { +func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) { + if offset != 0 { panic("implme") } - _, err := io.CopyN(w, bytes.NewReader(mgr.pieces[mgr.sectors[sectorID].pieces[0]]), int64(size)) - return err + return ioutil.NopCloser(bytes.NewReader(mgr.pieces[mgr.sectors[sector.ID].pieces[0]][:size])), false, nil } -func (mgr *SectorMgr) StageFakeData(mid abi.ActorID) (abi.SectorID, []abi.PieceInfo, error) { - usize := abi.PaddedPieceSize(mgr.sectorSize).Unpadded() +func (mgr *SectorMgr) StageFakeData(mid abi.ActorID, spt abi.RegisteredSealProof) (storage.SectorRef, []abi.PieceInfo, error) { + psize, err := spt.SectorSize() + if err != nil { + return storage.SectorRef{}, nil, err + } 
+ usize := abi.PaddedPieceSize(psize).Unpadded() sid, err := mgr.AcquireSectorNumber() if err != nil { - return abi.SectorID{}, nil, err + return storage.SectorRef{}, nil, err } buf := make([]byte, usize) _, _ = rand.Read(buf) // nolint:gosec - id := abi.SectorID{ - Miner: mid, - Number: sid, + id := storage.SectorRef{ + ID: abi.SectorID{ + Miner: mid, + Number: sid, + }, + ProofType: spt, } pi, err := mgr.AddPiece(context.TODO(), id, nil, usize, bytes.NewReader(buf)) if err != nil { - return abi.SectorID{}, nil, err + return storage.SectorRef{}, nil, err } return id, []abi.PieceInfo{pi}, nil } -func (mgr *SectorMgr) FinalizeSector(context.Context, abi.SectorID, []storage.Range) error { +func (mgr *SectorMgr) FinalizeSector(context.Context, storage.SectorRef, []storage.Range) error { return nil } -func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { +func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error { return nil } -func (mgr *SectorMgr) Remove(ctx context.Context, sector abi.SectorID) error { +func (mgr *SectorMgr) Remove(ctx context.Context, sector storage.SectorRef) error { mgr.lk.Lock() defer mgr.lk.Unlock() - if _, has := mgr.sectors[sector]; !has { + if _, has := mgr.sectors[sector.ID]; !has { return xerrors.Errorf("sector not found") } - delete(mgr.sectors, sector) + delete(mgr.sectors, sector.ID) return nil } -func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []abi.SectorID) ([]abi.SectorID, error) { - var bad []abi.SectorID +func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) { + bad := map[abi.SectorID]string{} for _, sid := range ids { - _, found := mgr.sectors[sid] + _, found := mgr.sectors[sid.ID] - if !found || mgr.sectors[sid].failed { - bad = append(bad, sid) + if !found 
|| mgr.sectors[sid.ID].failed { + bad[sid.ID] = "mock fail" } } return bad, nil } -func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err string) error { +func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err string) error { +func (mgr *SectorMgr) ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err string) error { +func (mgr *SectorMgr) ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err string) error { +func (mgr *SectorMgr) ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err string) error { +func (mgr *SectorMgr) ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err string) error { +func (mgr *SectorMgr) ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err string) error { +func (mgr *SectorMgr) 
ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err string) error { +func (mgr *SectorMgr) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err string) error { +func (mgr *SectorMgr) ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err string) error { +func (mgr *SectorMgr) ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error { panic("not supported") } -func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID, err string) error { +func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { panic("not supported") } -func (m mockVerif) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { - if len(svi.Proof) != 1920 { +func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { + plen, err := svi.SealProof.ProofSize() + if err != nil { + return false, err + } + + if len(svi.Proof) != int(plen) { return false, nil } // only the first 32 bytes, the rest are 0. 
for i, b := range svi.Proof[:32] { + // unsealed+sealed-seed*ticket if b != svi.UnsealedCID.Bytes()[i]+svi.SealedCID.Bytes()[31-i]-svi.InteractiveRandomness[i]*svi.Randomness[i] { return false, nil } @@ -478,11 +521,80 @@ func (m mockVerif) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { return true, nil } -func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { +func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + out := make([]byte, m.aggLen(len(aggregate.Infos))) + for pi, svi := range aggregate.Infos { + for i := 0; i < 32; i++ { + b := svi.UnsealedCID.Bytes()[i] + svi.SealedCID.Bytes()[31-i] - svi.InteractiveRandomness[i]*svi.Randomness[i] // raw proof byte + + b *= uint8(pi) // with aggregate index + out[i] += b + } + } + + ok := bytes.Equal(aggregate.Proof, out) + if !ok { + genInfo, found := m.aggregates[string(aggregate.Proof)] + if !found { + log.Errorf("BAD AGGREGATE: saved generate inputs not found; agg.Proof: %x; expected: %x", aggregate.Proof, out) + } else { + log.Errorf("BAD AGGREGATE (1): agg.Proof: %x; expected: %x", aggregate.Proof, out) + log.Errorf("BAD AGGREGATE (2): Verify Infos: %+v", aggregate.Infos) + log.Errorf("BAD AGGREGATE (3): Generate Infos: %+v", genInfo.Infos) + } + } + + return ok, nil +} + +func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { + out := make([]byte, m.aggLen(len(aggregateInfo.Infos))) // todo: figure out more real length + for pi, proof := range proofs { + for i := range proof[:32] { + out[i] += proof[i] * uint8(pi) + } + } + + m.aggregates[string(out)] = aggregateInfo + + return out, nil +} + +func (m mockVerifProver) aggLen(nproofs int) int { + switch { + case nproofs <= 8: + return 11220 + case nproofs <= 16: + return 14196 + case nproofs <= 32: + return 17172 + case nproofs <= 64: + return 20148 + case nproofs 
<= 128: + return 23124 + case nproofs <= 256: + return 26100 + case nproofs <= 512: + return 29076 + case nproofs <= 1024: + return 32052 + case nproofs <= 2048: + return 35028 + case nproofs <= 4096: + return 38004 + case nproofs <= 8192: + return 40980 + default: + panic("too many proofs") + } +} + +func (m mockVerifProver) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { + info.Randomness[31] &= 0x3f return true, nil } -func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { +func (m mockVerifProver) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { if len(info.Proofs) != 1 { return false, xerrors.Errorf("expected 1 proof entry") } @@ -496,15 +608,20 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStV return true, nil } -func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { +func (m mockVerifProver) GenerateDataCommitment(pt abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { return ffiwrapper.GenerateUnsealedCID(pt, pieces) } -func (m mockVerif) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { +func (m mockVerifProver) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) { return []uint64{0}, nil } -var MockVerifier = mockVerif{} +var MockVerifier = mockVerifProver{ + aggregates: map[string]proof5.AggregateSealVerifyProofAndInfos{}, +} + +var MockProver = MockVerifier var _ storage.Sealer = &SectorMgr{} var _ ffiwrapper.Verifier = MockVerifier +var _ ffiwrapper.Prover = MockProver diff --git a/extern/sector-storage/mock/mock_test.go 
b/extern/sector-storage/mock/mock_test.go index 47c060f667d..abc120058d0 100644 --- a/extern/sector-storage/mock/mock_test.go +++ b/extern/sector-storage/mock/mock_test.go @@ -9,9 +9,9 @@ import ( ) func TestOpFinish(t *testing.T) { - sb := NewMockSectorMgr(2048, nil) + sb := NewMockSectorMgr(nil) - sid, pieces, err := sb.StageFakeData(123) + sid, pieces, err := sb.StageFakeData(123, abi.RegisteredSealProof_StackedDrg2KiBV1_1) if err != nil { t.Fatal(err) } diff --git a/extern/sector-storage/ffiwrapper/partialfile.go b/extern/sector-storage/partialfile/partialfile.go similarity index 85% rename from extern/sector-storage/ffiwrapper/partialfile.go rename to extern/sector-storage/partialfile/partialfile.go index e19930ac1ca..529e889eaf2 100644 --- a/extern/sector-storage/ffiwrapper/partialfile.go +++ b/extern/sector-storage/partialfile/partialfile.go @@ -1,4 +1,4 @@ -package ffiwrapper +package partialfile import ( "encoding/binary" @@ -14,8 +14,12 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + + logging "github.com/ipfs/go-log/v2" ) +var log = logging.Logger("partialfile") + const veryLargeRle = 1 << 20 // Sectors can be partially unsealed. 
We support this by appending a small @@ -25,7 +29,7 @@ const veryLargeRle = 1 << 20 // unsealed sector files internally have this structure // [unpadded (raw) data][rle+][4B LE length fo the rle+ field] -type partialFile struct { +type PartialFile struct { maxPiece abi.PaddedPieceSize path string @@ -57,7 +61,7 @@ func writeTrailer(maxPieceSize int64, w *os.File, r rlepluslazy.RunIterator) err return w.Truncate(maxPieceSize + int64(rb) + 4) } -func createPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFile, error) { +func CreatePartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*PartialFile, error) { f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) // nolint if err != nil { return nil, xerrors.Errorf("openning partial file '%s': %w", path, err) @@ -89,10 +93,10 @@ func createPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialF return nil, xerrors.Errorf("close empty partial file: %w", err) } - return openPartialFile(maxPieceSize, path) + return OpenPartialFile(maxPieceSize, path) } -func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFile, error) { +func OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*PartialFile, error) { f, err := os.OpenFile(path, os.O_RDWR, 0644) // nolint if err != nil { return nil, xerrors.Errorf("openning partial file '%s': %w", path, err) @@ -165,7 +169,7 @@ func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFil return nil, err } - return &partialFile{ + return &PartialFile{ maxPiece: maxPieceSize, path: path, allocated: rle, @@ -173,11 +177,11 @@ func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFil }, nil } -func (pf *partialFile) Close() error { +func (pf *PartialFile) Close() error { return pf.file.Close() } -func (pf *partialFile) Writer(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (io.Writer, error) { +func (pf *PartialFile) Writer(offset storiface.PaddedByteIndex, size 
abi.PaddedPieceSize) (io.Writer, error) { if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { return nil, xerrors.Errorf("seek piece start: %w", err) } @@ -188,7 +192,7 @@ func (pf *partialFile) Writer(offset storiface.PaddedByteIndex, size abi.PaddedP return nil, err } - and, err := rlepluslazy.And(have, pieceRun(offset, size)) + and, err := rlepluslazy.And(have, PieceRun(offset, size)) if err != nil { return nil, err } @@ -206,13 +210,13 @@ func (pf *partialFile) Writer(offset storiface.PaddedByteIndex, size abi.PaddedP return pf.file, nil } -func (pf *partialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { +func (pf *PartialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { have, err := pf.allocated.RunIterator() if err != nil { return err } - ored, err := rlepluslazy.Or(have, pieceRun(offset, size)) + ored, err := rlepluslazy.Or(have, PieceRun(offset, size)) if err != nil { return err } @@ -224,7 +228,7 @@ func (pf *partialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi. 
return nil } -func (pf *partialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { +func (pf *PartialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { have, err := pf.allocated.RunIterator() if err != nil { return err @@ -234,7 +238,7 @@ func (pf *partialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPie return xerrors.Errorf("deallocating: %w", err) } - s, err := rlepluslazy.Subtract(have, pieceRun(offset, size)) + s, err := rlepluslazy.Subtract(have, PieceRun(offset, size)) if err != nil { return err } @@ -246,7 +250,7 @@ func (pf *partialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPie return nil } -func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { +func (pf *PartialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { return nil, xerrors.Errorf("seek piece start: %w", err) } @@ -257,7 +261,7 @@ func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedP return nil, err } - and, err := rlepluslazy.And(have, pieceRun(offset, size)) + and, err := rlepluslazy.And(have, PieceRun(offset, size)) if err != nil { return nil, err } @@ -275,17 +279,17 @@ func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedP return pf.file, nil } -func (pf *partialFile) Allocated() (rlepluslazy.RunIterator, error) { +func (pf *PartialFile) Allocated() (rlepluslazy.RunIterator, error) { return pf.allocated.RunIterator() } -func (pf *partialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { +func (pf *PartialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { have, err := pf.Allocated() if err != nil { return false, err } - u, err := rlepluslazy.And(have, pieceRun(offset.Padded(), size.Padded())) + u, 
err := rlepluslazy.And(have, PieceRun(offset.Padded(), size.Padded())) if err != nil { return false, err } @@ -298,7 +302,7 @@ func (pf *partialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi return abi.PaddedPieceSize(uc) == size.Padded(), nil } -func pieceRun(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) rlepluslazy.RunIterator { +func PieceRun(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) rlepluslazy.RunIterator { var runs []rlepluslazy.Run if offset > 0 { runs = append(runs, rlepluslazy.Run{ diff --git a/extern/sector-storage/piece_provider.go b/extern/sector-storage/piece_provider.go new file mode 100644 index 00000000000..ad3a2543ee3 --- /dev/null +++ b/extern/sector-storage/piece_provider.go @@ -0,0 +1,176 @@ +package sectorstorage + +import ( + "bufio" + "context" + "io" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/extern/sector-storage/fr32" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" +) + +type Unsealer interface { + // SectorsUnsealPiece will Unseal a Sealed sector file for the given sector. 
+ SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error +} + +type PieceProvider interface { + // ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector + ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) + IsUnsealed(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) +} + +var _ PieceProvider = &pieceProvider{} + +type pieceProvider struct { + storage *stores.Remote + index stores.SectorIndex + uns Unsealer +} + +func NewPieceProvider(storage *stores.Remote, index stores.SectorIndex, uns Unsealer) PieceProvider { + return &pieceProvider{ + storage: storage, + index: index, + uns: uns, + } +} + +// IsUnsealed checks if we have the unsealed piece at the given offset in an already +// existing unsealed file either locally or on any of the workers. 
+func (p *pieceProvider) IsUnsealed(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + if err := offset.Valid(); err != nil { + return false, xerrors.Errorf("offset is not valid: %w", err) + } + if err := size.Validate(); err != nil { + return false, xerrors.Errorf("size is not a valid piece size: %w", err) + } + + ctxLock, cancel := context.WithCancel(ctx) + defer cancel() + + if err := p.index.StorageLock(ctxLock, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil { + return false, xerrors.Errorf("acquiring read sector lock: %w", err) + } + + return p.storage.CheckIsUnsealed(ctxLock, sector, abi.PaddedPieceSize(offset.Padded()), size.Padded()) +} + +// tryReadUnsealedPiece will try to read the unsealed piece from an existing unsealed sector file for the given sector from any worker that has it. +// It will NOT try to schedule an Unseal of a sealed sector file for the read. +// +// Returns a nil reader if the piece does NOT exist in any unsealed file or there is no unsealed file for the given sector on any of the workers. +func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (io.ReadCloser, context.CancelFunc, error) { + // acquire a lock purely for reading unsealed sectors + ctx, cancel := context.WithCancel(ctx) + if err := p.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil { + cancel() + return nil, nil, xerrors.Errorf("acquiring read sector lock: %w", err) + } + + // Reader returns a reader for an unsealed piece at the given offset in the given sector. + // The returned reader will be nil if none of the workers has an unsealed sector file containing + // the unsealed piece. 
+ r, err := p.storage.Reader(ctx, sector, abi.PaddedPieceSize(offset.Padded()), size.Padded()) + if err != nil { + log.Debugf("did not get storage reader;sector=%+v, err:%s", sector.ID, err) + cancel() + return nil, nil, err + } + if r == nil { + cancel() + } + + return r, cancel, nil +} + +// ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector +// If an Unsealed sector file exists with the Piece Unsealed in it, we'll use that for the read. +// Otherwise, we will Unseal a Sealed sector file for the given sector and read the Unsealed piece from it. +// If we do NOT have an existing unsealed file containing the given piece thus causing us to schedule an Unseal, +// the returned boolean parameter will be set to true. +// If we have an existing unsealed file containing the given piece, the returned boolean will be set to false. +func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) { + if err := offset.Valid(); err != nil { + return nil, false, xerrors.Errorf("offset is not valid: %w", err) + } + if err := size.Validate(); err != nil { + return nil, false, xerrors.Errorf("size is not a valid piece size: %w", err) + } + + r, unlock, err := p.tryReadUnsealedPiece(ctx, sector, offset, size) + + log.Debugf("result of first tryReadUnsealedPiece: r=%+v, err=%s", r, err) + + if xerrors.Is(err, storiface.ErrSectorNotFound) { + log.Debugf("no unsealed sector file with unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + err = nil + } + if err != nil { + log.Errorf("returning error from ReadPiece:%s", err) + return nil, false, err + } + + var uns bool + + if r == nil { + // a nil reader means that none of the workers has an unsealed sector file + // containing the unsealed piece. 
+ // we now need to unseal a sealed sector file for the given sector to read the unsealed piece from it. + uns = true + commd := &unsealed + if unsealed == cid.Undef { + commd = nil + } + if err := p.uns.SectorsUnsealPiece(ctx, sector, offset, size, ticket, commd); err != nil { + log.Errorf("failed to SectorsUnsealPiece: %s", err) + return nil, false, xerrors.Errorf("unsealing piece: %w", err) + } + + log.Debugf("unsealed a sector file to read the piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + + r, unlock, err = p.tryReadUnsealedPiece(ctx, sector, offset, size) + if err != nil { + log.Errorf("failed to tryReadUnsealedPiece after SectorsUnsealPiece: %s", err) + return nil, true, xerrors.Errorf("read after unsealing: %w", err) + } + if r == nil { + log.Errorf("got no reader after unsealing piece") + return nil, true, xerrors.Errorf("got no reader after unsealing piece") + } + log.Debugf("got a reader to read unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + } else { + log.Debugf("unsealed piece already exists, no need to unseal, sector=%+v, offset=%d, size=%d", sector, offset, size) + } + + upr, err := fr32.NewUnpadReader(r, size.Padded()) + if err != nil { + unlock() + return nil, uns, xerrors.Errorf("creating unpadded reader: %w", err) + } + + log.Debugf("returning reader to read unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + + return &funcCloser{ + Reader: bufio.NewReaderSize(upr, 127), + close: func() error { + err = r.Close() + unlock() + return err + }, + }, uns, nil +} + +type funcCloser struct { + io.Reader + close func() error +} + +func (fc *funcCloser) Close() error { return fc.close() } diff --git a/extern/sector-storage/piece_provider_test.go b/extern/sector-storage/piece_provider_test.go new file mode 100644 index 00000000000..d6fa14574f7 --- /dev/null +++ b/extern/sector-storage/piece_provider_test.go @@ -0,0 +1,361 @@ +package sectorstorage + +import ( + "bytes" + "context" + 
"io/ioutil" + "math/rand" + "net" + "net/http" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-statestore" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + specstorage "github.com/filecoin-project/specs-storage/storage" + "github.com/gorilla/mux" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + ds_sync "github.com/ipfs/go-datastore/sync" + logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" +) + +// TestPieceProviderReadPiece verifies that the ReadPiece method works correctly +// only uses miner and does NOT use any remote worker. +func TestPieceProviderSimpleNoRemoteWorker(t *testing.T) { + // Set up sector storage manager + sealerCfg := SealerConfig{ + ParallelFetchLimit: 10, + AllowAddPiece: true, + AllowPreCommit1: true, + AllowPreCommit2: true, + AllowCommit: true, + AllowUnseal: true, + } + + ppt := newPieceProviderTestHarness(t, sealerCfg, abi.RegisteredSealProof_StackedDrg8MiBV1) + defer ppt.shutdown(t) + + // Create some padded data that aligns with the piece boundaries. 
+ pieceData := generatePieceData(8 * 127 * 1024 * 8) + size := abi.UnpaddedPieceSize(len(pieceData)) + ppt.addPiece(t, pieceData) + + // read piece + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + false, pieceData) + + // pre-commit 1 + preCommit1 := ppt.preCommit1(t) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), size)) + // read piece + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + false, pieceData) + + // pre-commit 2 + ppt.preCommit2(t, preCommit1) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), size)) + // read piece + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + false, pieceData) + + // finalize -> nil here will remove unsealed file + ppt.finalizeSector(t, nil) + + // check if IsUnsealed -> false + require.False(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), size)) + // Read the piece -> will have to unseal + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + true, pieceData) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), size)) + // read the piece -> will not have to unseal + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + false, pieceData) + +} +func TestReadPieceRemoteWorkers(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + // miner's worker can only add pieces to an unsealed sector. + sealerCfg := SealerConfig{ + ParallelFetchLimit: 10, + AllowAddPiece: true, + AllowPreCommit1: false, + AllowPreCommit2: false, + AllowCommit: false, + AllowUnseal: false, + } + + // test harness for an 8M sector. + ppt := newPieceProviderTestHarness(t, sealerCfg, abi.RegisteredSealProof_StackedDrg8MiBV1) + defer ppt.shutdown(t) + + // worker 2 will ONLY help with the sealing by first fetching + // the unsealed file from the miner. 
+ ppt.addRemoteWorker(t, []sealtasks.TaskType{ + sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit1, + sealtasks.TTFetch, sealtasks.TTFinalize, + }) + + // create a worker that can ONLY unseal and fetch + ppt.addRemoteWorker(t, []sealtasks.TaskType{ + sealtasks.TTUnseal, sealtasks.TTFetch, + }) + + // run the test + + // add one piece that aligns with the padding/piece boundaries. + pd1 := generatePieceData(8 * 127 * 4 * 1024) + pi1 := ppt.addPiece(t, pd1) + pd1size := pi1.Size.Unpadded() + + pd2 := generatePieceData(8 * 127 * 4 * 1024) + pi2 := ppt.addPiece(t, pd2) + pd2size := pi2.Size.Unpadded() + + // pre-commit 1 + pC1 := ppt.preCommit1(t) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), pd1size)) + // Read the piece -> no need to unseal + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, + false, pd1) + + // pre-commit 2 + ppt.preCommit2(t, pC1) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), pd1size)) + // Read the piece -> no need to unseal + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, + false, pd1) + + // finalize the sector so we declare to the index we have the sealed file + // so the unsealing worker can later look it up and fetch it if needed + // sending nil here will remove all unsealed files after sector is finalized. + ppt.finalizeSector(t, nil) + + // check if IsUnsealed -> false + require.False(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), pd1size)) + // Read the piece -> have to unseal since we removed the file. + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, + true, pd1) + + // Read the same piece again -> will NOT have to unseal. + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, false, pd1) + + // remove the unsealed file and read again -> will have to unseal. 
+ ppt.removeAllUnsealedSectorFiles(t) + // check if IsUnsealed -> false + require.False(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), pd1size)) + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, + true, pd1) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(pd1size), pd2size)) + // Read Piece 2 -> no unsealing as it got unsealed above. + ppt.readPiece(t, storiface.UnpaddedByteIndex(pd1size), pd2size, false, pd2) + + // remove all unseal files -> Read Piece 2 -> will have to Unseal. + ppt.removeAllUnsealedSectorFiles(t) + + // check if IsUnsealed -> false + require.False(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(pd1size), pd2size)) + ppt.readPiece(t, storiface.UnpaddedByteIndex(pd1size), pd2size, true, pd2) +} + +type pieceProviderTestHarness struct { + ctx context.Context + index *stores.Index + pp PieceProvider + sector specstorage.SectorRef + mgr *Manager + ticket abi.SealRandomness + commD cid.Cid + localStores []*stores.Local + + servers []*http.Server + + addedPieces []abi.PieceInfo +} + +func generatePieceData(size uint64) []byte { + bz := make([]byte, size) + rand.Read(bz) + return bz +} + +func newPieceProviderTestHarness(t *testing.T, mgrConfig SealerConfig, sectorProofType abi.RegisteredSealProof) *pieceProviderTestHarness { + ctx := context.Background() + // listen on tcp socket to create an http server later + address := "0.0.0.0:0" + nl, err := net.Listen("tcp", address) + require.NoError(t, err) + + // create index, storage, local store & remote store. + index := stores.NewIndex() + storage := newTestStorage(t) + localStore, err := stores.NewLocal(ctx, storage, index, []string{"http://" + nl.Addr().String() + "/remote"}) + require.NoError(t, err) + remoteStore := stores.NewRemote(localStore, index, nil, 6000, &stores.DefaultPartialFileHandler{}) + + // data stores for state tracking. 
+ dstore := ds_sync.MutexWrap(datastore.NewMapDatastore()) + wsts := statestore.New(namespace.Wrap(dstore, datastore.NewKey("/worker/calls"))) + smsts := statestore.New(namespace.Wrap(dstore, datastore.NewKey("/stmgr/calls"))) + + mgr, err := New(ctx, localStore, remoteStore, storage, index, mgrConfig, wsts, smsts) + require.NoError(t, err) + + // start a http server on the manager to serve sector file requests. + svc := &http.Server{ + Addr: nl.Addr().String(), + Handler: mgr, + } + go func() { + _ = svc.Serve(nl) + }() + + pp := NewPieceProvider(remoteStore, index, mgr) + + sector := specstorage.SectorRef{ + ID: abi.SectorID{ + Miner: 100, + Number: 10, + }, + ProofType: sectorProofType, + } + + ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9} + + ppt := &pieceProviderTestHarness{ + ctx: ctx, + index: index, + pp: pp, + sector: sector, + mgr: mgr, + ticket: ticket, + } + ppt.servers = append(ppt.servers, svc) + ppt.localStores = append(ppt.localStores, localStore) + return ppt +} + +func (p *pieceProviderTestHarness) addRemoteWorker(t *testing.T, tasks []sealtasks.TaskType) { + // start an http Server + address := "0.0.0.0:0" + nl, err := net.Listen("tcp", address) + require.NoError(t, err) + + localStore, err := stores.NewLocal(p.ctx, newTestStorage(t), p.index, []string{"http://" + nl.Addr().String() + "/remote"}) + require.NoError(t, err) + + fh := &stores.FetchHandler{ + Local: localStore, + PfHandler: &stores.DefaultPartialFileHandler{}, + } + + mux := mux.NewRouter() + mux.PathPrefix("/remote").HandlerFunc(fh.ServeHTTP) + svc := &http.Server{ + Addr: nl.Addr().String(), + Handler: mux, + } + + go func() { + _ = svc.Serve(nl) + }() + + remote := stores.NewRemote(localStore, p.index, nil, 1000, + &stores.DefaultPartialFileHandler{}) + + dstore := ds_sync.MutexWrap(datastore.NewMapDatastore()) + csts := statestore.New(namespace.Wrap(dstore, datastore.NewKey("/stmgr/calls"))) + + worker := newLocalWorker(nil, WorkerConfig{ + TaskTypes: tasks, + }, remote, 
localStore, p.index, p.mgr, csts) + + p.servers = append(p.servers, svc) + p.localStores = append(p.localStores, localStore) + + // register self with manager + require.NoError(t, p.mgr.AddWorker(p.ctx, worker)) +} + +func (p *pieceProviderTestHarness) removeAllUnsealedSectorFiles(t *testing.T) { + for i := range p.localStores { + ls := p.localStores[i] + require.NoError(t, ls.Remove(p.ctx, p.sector.ID, storiface.FTUnsealed, false)) + } +} + +func (p *pieceProviderTestHarness) addPiece(t *testing.T, pieceData []byte) abi.PieceInfo { + var existing []abi.UnpaddedPieceSize + for _, pi := range p.addedPieces { + existing = append(existing, pi.Size.Unpadded()) + } + + size := abi.UnpaddedPieceSize(len(pieceData)) + pieceInfo, err := p.mgr.AddPiece(p.ctx, p.sector, existing, size, bytes.NewReader(pieceData)) + require.NoError(t, err) + + p.addedPieces = append(p.addedPieces, pieceInfo) + return pieceInfo +} + +func (p *pieceProviderTestHarness) preCommit1(t *testing.T) specstorage.PreCommit1Out { + preCommit1, err := p.mgr.SealPreCommit1(p.ctx, p.sector, p.ticket, p.addedPieces) + require.NoError(t, err) + return preCommit1 +} + +func (p *pieceProviderTestHarness) preCommit2(t *testing.T, pc1 specstorage.PreCommit1Out) { + sectorCids, err := p.mgr.SealPreCommit2(p.ctx, p.sector, pc1) + require.NoError(t, err) + commD := sectorCids.Unsealed + p.commD = commD +} + +func (p *pieceProviderTestHarness) isUnsealed(t *testing.T, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) bool { + b, err := p.pp.IsUnsealed(p.ctx, p.sector, offset, size) + require.NoError(t, err) + return b +} + +func (p *pieceProviderTestHarness) readPiece(t *testing.T, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, + expectedHadToUnseal bool, expectedBytes []byte) { + rd, isUnsealed, err := p.pp.ReadPiece(p.ctx, p.sector, offset, size, p.ticket, p.commD) + require.NoError(t, err) + require.NotNil(t, rd) + require.Equal(t, expectedHadToUnseal, isUnsealed) + defer func() { _ 
= rd.Close() }() + + // Make sure the input matches the output + readData, err := ioutil.ReadAll(rd) + require.NoError(t, err) + require.Equal(t, expectedBytes, readData) +} + +func (p *pieceProviderTestHarness) finalizeSector(t *testing.T, keepUnseal []specstorage.Range) { + require.NoError(t, p.mgr.FinalizeSector(p.ctx, p.sector, keepUnseal)) +} + +func (p *pieceProviderTestHarness) shutdown(t *testing.T) { + for _, svc := range p.servers { + s := svc + require.NoError(t, s.Shutdown(p.ctx)) + } +} diff --git a/extern/sector-storage/request_queue.go b/extern/sector-storage/request_queue.go index 9247ce24a9a..925c44fa837 100644 --- a/extern/sector-storage/request_queue.go +++ b/extern/sector-storage/request_queue.go @@ -20,7 +20,7 @@ func (q requestQueue) Less(i, j int) bool { return q[i].taskType.Less(q[j].taskType) } - return q[i].sector.Number < q[j].sector.Number // optimize minerActor.NewSectors bitfield + return q[i].sector.ID.Number < q[j].sector.ID.Number // optimize minerActor.NewSectors bitfield } func (q requestQueue) Swap(i, j int) { diff --git a/extern/sector-storage/resources.go b/extern/sector-storage/resources.go index 6b531e82b1a..2e989fdf45d 100644 --- a/extern/sector-storage/resources.go +++ b/extern/sector-storage/resources.go @@ -313,5 +313,13 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources func init() { ResourceTable[sealtasks.TTUnseal] = ResourceTable[sealtasks.TTPreCommit1] // TODO: measure accurately - ResourceTable[sealtasks.TTReadUnsealed] = ResourceTable[sealtasks.TTFetch] + + // V1_1 is the same as V1 + for _, m := range ResourceTable { + m[abi.RegisteredSealProof_StackedDrg2KiBV1_1] = m[abi.RegisteredSealProof_StackedDrg2KiBV1] + m[abi.RegisteredSealProof_StackedDrg8MiBV1_1] = m[abi.RegisteredSealProof_StackedDrg8MiBV1] + m[abi.RegisteredSealProof_StackedDrg512MiBV1_1] = m[abi.RegisteredSealProof_StackedDrg512MiBV1] + m[abi.RegisteredSealProof_StackedDrg32GiBV1_1] = 
m[abi.RegisteredSealProof_StackedDrg32GiBV1] + m[abi.RegisteredSealProof_StackedDrg64GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg64GiBV1] + } } diff --git a/extern/sector-storage/roprov.go b/extern/sector-storage/roprov.go index 7f051b54999..ebc7610d78d 100644 --- a/extern/sector-storage/roprov.go +++ b/extern/sector-storage/roprov.go @@ -5,7 +5,7 @@ import ( "golang.org/x/xerrors" - "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" @@ -14,23 +14,17 @@ import ( type readonlyProvider struct { index stores.SectorIndex stor *stores.Local - spt abi.RegisteredSealProof } -func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) { +func (l *readonlyProvider) AcquireSector(ctx context.Context, id storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) { if allocate != storiface.FTNone { return storiface.SectorPaths{}, nil, xerrors.New("read-only storage") } - ssize, err := l.spt.SectorSize() - if err != nil { - return storiface.SectorPaths{}, nil, xerrors.Errorf("failed to determine sector size: %w", err) - } - ctx, cancel := context.WithCancel(ctx) // use TryLock to avoid blocking - locked, err := l.index.StorageTryLock(ctx, id, existing, storiface.FTNone) + locked, err := l.index.StorageTryLock(ctx, id.ID, existing, storiface.FTNone) if err != nil { cancel() return storiface.SectorPaths{}, nil, xerrors.Errorf("acquiring sector lock: %w", err) @@ -40,7 +34,7 @@ func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, e return storiface.SectorPaths{}, nil, xerrors.Errorf("failed to acquire sector lock") } - p, 
_, err := l.stor.AcquireSector(ctx, id, ssize, existing, allocate, sealing, storiface.AcquireMove) + p, _, err := l.stor.AcquireSector(ctx, id, existing, allocate, sealing, storiface.AcquireMove) return p, cancel, err } diff --git a/extern/sector-storage/sched.go b/extern/sector-storage/sched.go index 426658c4100..aabf6f0cef2 100644 --- a/extern/sector-storage/sched.go +++ b/extern/sector-storage/sched.go @@ -7,9 +7,11 @@ import ( "sync" "time" + "github.com/google/uuid" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" @@ -50,8 +52,6 @@ type WorkerSelector interface { } type scheduler struct { - spt abi.RegisteredSealProof - workersLk sync.RWMutex workers map[WorkerID]*workerHandle @@ -121,7 +121,7 @@ type activeResources struct { } type workerRequest struct { - sector abi.SectorID + sector storage.SectorRef taskType sealtasks.TaskType priority int // larger values more important sel WorkerSelector @@ -142,10 +142,8 @@ type workerResponse struct { err error } -func newScheduler(spt abi.RegisteredSealProof) *scheduler { +func newScheduler() *scheduler { return &scheduler{ - spt: spt, - workers: map[WorkerID]*workerHandle{}, schedule: make(chan *workerRequest), @@ -167,7 +165,7 @@ func newScheduler(spt abi.RegisteredSealProof) *scheduler { } } -func (sh *scheduler) Schedule(ctx context.Context, sector abi.SectorID, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error { +func (sh *scheduler) Schedule(ctx context.Context, sector storage.SectorRef, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error { ret := make(chan workerResponse) select { @@ -217,7 +215,7 @@ type SchedDiagRequestInfo struct { type SchedDiagInfo struct { Requests []SchedDiagRequestInfo - OpenWindows 
[]WorkerID + OpenWindows []string } func (sh *scheduler) runSched() { @@ -314,7 +312,7 @@ func (sh *scheduler) diag() SchedDiagInfo { task := (*sh.schedQueue)[sqi] out.Requests = append(out.Requests, SchedDiagRequestInfo{ - Sector: task.sector, + Sector: task.sector.ID, TaskType: task.taskType, Priority: task.priority, }) @@ -324,7 +322,7 @@ func (sh *scheduler) diag() SchedDiagInfo { defer sh.workersLk.RUnlock() for _, window := range sh.openWindows { - out.OpenWindows = append(out.OpenWindows, window.worker) + out.OpenWindows = append(out.OpenWindows, uuid.UUID(window.worker).String()) } return out @@ -350,24 +348,25 @@ func (sh *scheduler) trySched() { sh.workersLk.RLock() defer sh.workersLk.RUnlock() - windows := make([]schedWindow, len(sh.openWindows)) - acceptableWindows := make([][]int, sh.schedQueue.Len()) + windowsLen := len(sh.openWindows) + queueLen := sh.schedQueue.Len() - log.Debugf("SCHED %d queued; %d open windows", sh.schedQueue.Len(), len(windows)) + log.Debugf("SCHED %d queued; %d open windows", queueLen, windowsLen) - if len(sh.openWindows) == 0 { + if windowsLen == 0 || queueLen == 0 { // nothing to schedule on return } + windows := make([]schedWindow, windowsLen) + acceptableWindows := make([][]int, queueLen) + // Step 1 - concurrency := len(sh.openWindows) - throttle := make(chan struct{}, concurrency) + throttle := make(chan struct{}, windowsLen) var wg sync.WaitGroup - wg.Add(sh.schedQueue.Len()) - - for i := 0; i < sh.schedQueue.Len(); i++ { + wg.Add(queueLen) + for i := 0; i < queueLen; i++ { throttle <- struct{}{} go func(sqi int) { @@ -377,7 +376,7 @@ func (sh *scheduler) trySched() { }() task := (*sh.schedQueue)[sqi] - needRes := ResourceTable[task.taskType][sh.spt] + needRes := ResourceTable[task.taskType][task.sector.ProofType] task.indexHeap = sqi for wnd, windowRequest := range sh.openWindows { @@ -394,12 +393,12 @@ func (sh *scheduler) trySched() { } // TODO: allow bigger windows - if 
!windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, "schedAcceptable", worker.info.Resources) { + if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, "schedAcceptable", worker.info) { continue } rpcCtx, cancel := context.WithTimeout(task.ctx, SelectorTimeout) - ok, err := task.sel.Ok(rpcCtx, task.taskType, sh.spt, worker) + ok, err := task.sel.Ok(rpcCtx, task.taskType, task.sector.ProofType, worker) cancel() if err != nil { log.Errorf("trySched(1) req.sel.Ok error: %+v", err) @@ -438,7 +437,7 @@ func (sh *scheduler) trySched() { r, err := task.sel.Cmp(rpcCtx, task.taskType, wi, wj) if err != nil { - log.Error("selecting best worker: %s", err) + log.Errorf("selecting best worker: %s", err) } return r }) @@ -452,26 +451,27 @@ func (sh *scheduler) trySched() { // Step 2 scheduled := 0 + rmQueue := make([]int, 0, queueLen) - for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ { + for sqi := 0; sqi < queueLen; sqi++ { task := (*sh.schedQueue)[sqi] - needRes := ResourceTable[task.taskType][sh.spt] + needRes := ResourceTable[task.taskType][task.sector.ProofType] selectedWindow := -1 for _, wnd := range acceptableWindows[task.indexHeap] { wid := sh.openWindows[wnd].worker - wr := sh.workers[wid].info.Resources + info := sh.workers[wid].info - log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd) + log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.ID.Number, wnd) // TODO: allow bigger windows - if !windows[wnd].allocated.canHandleRequest(needRes, wid, "schedAssign", wr) { + if !windows[wnd].allocated.canHandleRequest(needRes, wid, "schedAssign", info) { continue } - log.Debugf("SCHED ASSIGNED sqi:%d sector %d task %s to window %d", sqi, task.sector.Number, task.taskType, wnd) + log.Debugf("SCHED ASSIGNED sqi:%d sector %d task %s to window %d", sqi, task.sector.ID.Number, task.taskType, wnd) - windows[wnd].allocated.add(wr, needRes) + 
windows[wnd].allocated.add(info.Resources, needRes) // TODO: We probably want to re-sort acceptableWindows here based on new // workerHandle.utilization + windows[wnd].allocated.utilization (workerHandle.utilization is used in all // task selectors, but not in the same way, so need to figure out how to do that in a non-O(n^2 way), and @@ -488,11 +488,16 @@ func (sh *scheduler) trySched() { windows[selectedWindow].todo = append(windows[selectedWindow].todo, task) - sh.schedQueue.Remove(sqi) - sqi-- + rmQueue = append(rmQueue, sqi) scheduled++ } + if len(rmQueue) > 0 { + for i := len(rmQueue) - 1; i >= 0; i-- { + sh.schedQueue.Remove(rmQueue[i]) + } + } + // Step 3 if scheduled == 0 { @@ -517,7 +522,7 @@ func (sh *scheduler) trySched() { } // Rewrite sh.openWindows array, removing scheduled windows - newOpenWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)-len(scheduledWindows)) + newOpenWindows := make([]*schedWindowRequest, 0, windowsLen-len(scheduledWindows)) for wnd, window := range sh.openWindows { if _, scheduled := scheduledWindows[wnd]; scheduled { // keep unscheduled windows open diff --git a/extern/sector-storage/sched_resources.go b/extern/sector-storage/sched_resources.go index d6dae577bb7..96a1fa8638d 100644 --- a/extern/sector-storage/sched_resources.go +++ b/extern/sector-storage/sched_resources.go @@ -6,7 +6,7 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error { +func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerInfo, r Resources, locker sync.Locker, cb func() error) error { for !a.canHandleRequest(r, id, "withResources", wr) { if a.cond == nil { a.cond = sync.NewCond(locker) @@ -14,11 +14,11 @@ func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResource a.cond.Wait() } - a.add(wr, r) + a.add(wr.Resources, r) err := cb() - 
a.free(wr, r) + a.free(wr.Resources, r) if a.cond != nil { a.cond.Broadcast() } @@ -27,7 +27,9 @@ func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResource } func (a *activeResources) add(wr storiface.WorkerResources, r Resources) { - a.gpuUsed = r.CanGPU + if r.CanGPU { + a.gpuUsed = true + } a.cpuUse += r.Threads(wr.CPUs) a.memUsedMin += r.MinMemory a.memUsedMax += r.MaxMemory @@ -42,30 +44,37 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { a.memUsedMax -= r.MaxMemory } -func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, caller string, res storiface.WorkerResources) bool { +// canHandleRequest evaluates if the worker has enough available resources to +// handle the request. +func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, caller string, info storiface.WorkerInfo) bool { + if info.IgnoreResources { + // shortcircuit; if this worker is ignoring resources, it can always handle the request. 
+ return true + } + res := info.Resources // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory if minNeedMem > res.MemPhysical { - log.Debugf("sched: not scheduling on worker %d for %s; not enough physical memory - need: %dM, have %dM", wid, caller, minNeedMem/mib, res.MemPhysical/mib) + log.Debugf("sched: not scheduling on worker %s for %s; not enough physical memory - need: %dM, have %dM", wid, caller, minNeedMem/mib, res.MemPhysical/mib) return false } maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory if maxNeedMem > res.MemSwap+res.MemPhysical { - log.Debugf("sched: not scheduling on worker %d for %s; not enough virtual memory - need: %dM, have %dM", wid, caller, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) + log.Debugf("sched: not scheduling on worker %s for %s; not enough virtual memory - need: %dM, have %dM", wid, caller, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) return false } if a.cpuUse+needRes.Threads(res.CPUs) > res.CPUs { - log.Debugf("sched: not scheduling on worker %d for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs), a.cpuUse, res.CPUs) + log.Debugf("sched: not scheduling on worker %s for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs), a.cpuUse, res.CPUs) return false } if len(res.GPUs) > 0 && needRes.CanGPU { if a.gpuUsed { - log.Debugf("sched: not scheduling on worker %d for %s; GPU in use", wid, caller) + log.Debugf("sched: not scheduling on worker %s for %s; GPU in use", wid, caller) return false } } diff --git a/extern/sector-storage/sched_test.go b/extern/sector-storage/sched_test.go index 849896ff6f1..fbc4d83ee07 100644 --- a/extern/sector-storage/sched_test.go +++ b/extern/sector-storage/sched_test.go @@ -38,6 +38,20 @@ func TestWithPriority(t *testing.T) { 
require.Equal(t, 2222, getPriority(ctx)) } +var decentWorkerResources = storiface.WorkerResources{ + MemPhysical: 128 << 30, + MemSwap: 200 << 30, + MemReserved: 2 << 30, + CPUs: 32, + GPUs: []string{"a GPU"}, +} + +var constrainedWorkerResources = storiface.WorkerResources{ + MemPhysical: 1 << 30, + MemReserved: 2 << 30, + CPUs: 1, +} + type schedTestWorker struct { name string taskTypes map[sealtasks.TaskType]struct{} @@ -45,57 +59,60 @@ type schedTestWorker struct { closed bool session uuid.UUID + + resources storiface.WorkerResources + ignoreResources bool } -func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { +func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) { +func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { +func (s *schedTestWorker) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) { +func (s *schedTestWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) { panic("implement me") } 
-func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) { +func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) { +func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) Remove(ctx context.Context, sector abi.SectorID) (storiface.CallID, error) { +func (s *schedTestWorker) Remove(ctx context.Context, sector storage.SectorRef) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) NewSector(ctx context.Context, sector abi.SectorID) (storiface.CallID, error) { +func (s *schedTestWorker) NewSector(ctx context.Context, sector storage.SectorRef) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { +func (s *schedTestWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) { +func (s *schedTestWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) Fetch(ctx context.Context, id abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) 
(storiface.CallID, error) { +func (s *schedTestWorker) Fetch(ctx context.Context, id storage.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { +func (s *schedTestWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { panic("implement me") } -func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { +func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { panic("implement me") } @@ -107,18 +124,11 @@ func (s *schedTestWorker) Paths(ctx context.Context) ([]stores.StoragePath, erro return s.paths, nil } -var decentWorkerResources = storiface.WorkerResources{ - MemPhysical: 128 << 30, - MemSwap: 200 << 30, - MemReserved: 2 << 30, - CPUs: 32, - GPUs: []string{"a GPU"}, -} - func (s *schedTestWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) { return storiface.WorkerInfo{ - Hostname: s.name, - Resources: decentWorkerResources, + Hostname: s.name, + IgnoreResources: s.ignoreResources, + Resources: s.resources, }, nil } @@ -137,13 +147,16 @@ func (s *schedTestWorker) Close() error { var _ Worker = &schedTestWorker{} -func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name string, taskTypes map[sealtasks.TaskType]struct{}) { +func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name string, taskTypes 
map[sealtasks.TaskType]struct{}, resources storiface.WorkerResources, ignoreResources bool) { w := &schedTestWorker{ name: name, taskTypes: taskTypes, paths: []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "food", CanSeal: true, CanStore: true}}, session: uuid.New(), + + resources: resources, + ignoreResources: ignoreResources, } for _, path := range w.paths { @@ -154,9 +167,10 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str CanSeal: path.CanSeal, CanStore: path.CanStore, }, fsutil.FsStat{ - Capacity: 1 << 40, - Available: 1 << 40, - Reserved: 3, + Capacity: 1 << 40, + Available: 1 << 40, + FSAvailable: 1 << 40, + Reserved: 3, }) require.NoError(t, err) } @@ -165,11 +179,10 @@ func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name str } func TestSchedStartStop(t *testing.T) { - spt := abi.RegisteredSealProof_StackedDrg32GiBV1 - sched := newScheduler(spt) + sched := newScheduler() go sched.runSched() - addTestWorker(t, sched, stores.NewIndex(), "fred", nil) + addTestWorker(t, sched, stores.NewIndex(), "fred", nil, decentWorkerResources, false) require.NoError(t, sched.Close(context.TODO())) } @@ -183,6 +196,9 @@ func TestSched(t *testing.T) { type workerSpec struct { name string taskTypes map[sealtasks.TaskType]struct{} + + resources storiface.WorkerResources + ignoreResources bool } noopAction := func(ctx context.Context, w Worker) error { @@ -211,12 +227,15 @@ func TestSched(t *testing.T) { go func() { defer rm.wg.Done() - sectorNum := abi.SectorID{ - Miner: 8, - Number: sid, + sectorRef := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 8, + Number: sid, + }, + ProofType: spt, } - err := sched.Schedule(ctx, sectorNum, taskType, sel, func(ctx context.Context, w Worker) error { + err := sched.Schedule(ctx, sectorRef, taskType, sel, func(ctx context.Context, w Worker) error { wi, err := w.Info(ctx) require.NoError(t, err) @@ -286,13 +305,13 @@ func TestSched(t *testing.T) { return func(t *testing.T) { 
index := stores.NewIndex() - sched := newScheduler(spt) + sched := newScheduler() sched.testSync = make(chan struct{}) go sched.runSched() for _, worker := range workers { - addTestWorker(t, sched, index, worker.name, worker.taskTypes) + addTestWorker(t, sched, index, worker.name, worker.taskTypes, worker.resources, worker.ignoreResources) } rm := runMeta{ @@ -319,31 +338,42 @@ func TestSched(t *testing.T) { } } + // checks behaviour with workers with constrained resources + // the first one is not ignoring resource constraints, so we assign to the second worker, who is + t.Run("constrained-resources", testFunc([]workerSpec{ + {name: "fred1", resources: constrainedWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, + {name: "fred2", resources: constrainedWorkerResources, ignoreResources: true, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, + }, []task{ + sched("pc1-1", "fred2", 8, sealtasks.TTPreCommit1), + taskStarted("pc1-1"), + taskDone("pc1-1"), + })) + t.Run("one-pc1", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ sched("pc1-1", "fred", 8, sealtasks.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-2workers-1", testFunc([]workerSpec{ - {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, - {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, + {name: "fred2", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, + {name: "fred1", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ sched("pc1-1", "fred1", 8, sealtasks.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-2workers-2", testFunc([]workerSpec{ - {name: 
"fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, - {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, + {name: "fred1", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, + {name: "fred2", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, }, []task{ sched("pc1-1", "fred1", 8, sealtasks.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-block-pc2", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ sched("pc1", "fred", 8, sealtasks.TTPreCommit1), taskStarted("pc1"), @@ -356,7 +386,7 @@ func TestSched(t *testing.T) { })) t.Run("pc2-block-pc1", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ sched("pc2", "fred", 8, sealtasks.TTPreCommit2), taskStarted("pc2"), @@ -369,7 +399,7 @@ func TestSched(t *testing.T) { })) t.Run("pc1-batching", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ sched("t1", "fred", 8, sealtasks.TTPreCommit1), taskStarted("t1"), @@ -456,7 +486,7 @@ func TestSched(t *testing.T) { // run this one a bunch of times, it had a very annoying tendency to fail randomly for i := 0; i < 40; i++ { t.Run("pc1-pc2-prio", testFunc([]workerSpec{ - {name: "fred", taskTypes: 
map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ // fill queues twoPC1("w0", 0, taskStarted), @@ -518,7 +548,6 @@ func (s slowishSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b var _ WorkerSelector = slowishSelector(true) func BenchmarkTrySched(b *testing.B) { - spt := abi.RegisteredSealProof_StackedDrg32GiBV1 logging.SetAllLoggers(logging.LevelInfo) defer logging.SetAllLoggers(logging.LevelDebug) ctx := context.Background() @@ -528,7 +557,7 @@ func BenchmarkTrySched(b *testing.B) { for i := 0; i < b.N; i++ { b.StopTimer() - sched := newScheduler(spt) + sched := newScheduler() sched.workers[WorkerID{}] = &workerHandle{ workerRpc: nil, info: storiface.WorkerInfo{ @@ -568,9 +597,8 @@ func BenchmarkTrySched(b *testing.B) { } func TestWindowCompact(t *testing.T) { - sh := scheduler{ - spt: abi.RegisteredSealProof_StackedDrg32GiBV1, - } + sh := scheduler{} + spt := abi.RegisteredSealProof_StackedDrg32GiBV1 test := func(start [][]sealtasks.TaskType, expect [][]sealtasks.TaskType) func(t *testing.T) { return func(t *testing.T) { @@ -584,8 +612,11 @@ func TestWindowCompact(t *testing.T) { window := &schedWindow{} for _, task := range windowTasks { - window.todo = append(window.todo, &workerRequest{taskType: task}) - window.allocated.add(wh.info.Resources, ResourceTable[task][sh.spt]) + window.todo = append(window.todo, &workerRequest{ + taskType: task, + sector: storage.SectorRef{ProofType: spt}, + }) + window.allocated.add(wh.info.Resources, ResourceTable[task][spt]) } wh.activeWindows = append(wh.activeWindows, window) @@ -604,7 +635,7 @@ func TestWindowCompact(t *testing.T) { for ti, task := range tasks { require.Equal(t, task, wh.activeWindows[wi].todo[ti].taskType, "%d, %d", wi, ti) - expectRes.add(wh.info.Resources, ResourceTable[task][sh.spt]) + 
expectRes.add(wh.info.Resources, ResourceTable[task][spt]) } require.Equal(t, expectRes.cpuUse, wh.activeWindows[wi].allocated.cpuUse, "%d", wi) diff --git a/extern/sector-storage/sched_worker.go b/extern/sector-storage/sched_worker.go index ff43009d382..7bc1affc3b6 100644 --- a/extern/sector-storage/sched_worker.go +++ b/extern/sector-storage/sched_worker.go @@ -57,6 +57,7 @@ func (sh *scheduler) runWorker(ctx context.Context, w Worker) error { log.Warnw("duplicated worker added", "id", wid) // this is ok, we're already handling this worker in a different goroutine + sh.workersLk.Unlock() return nil } @@ -104,14 +105,16 @@ func (sw *schedWorker) handleWorker() { defer sw.heartbeatTimer.Stop() for { - sched.workersLk.Lock() - enabled := worker.enabled - sched.workersLk.Unlock() - - // ask for more windows if we need them (non-blocking) - if enabled { - if !sw.requestWindows() { - return // graceful shutdown + { + sched.workersLk.Lock() + enabled := worker.enabled + sched.workersLk.Unlock() + + // ask for more windows if we need them (non-blocking) + if enabled { + if !sw.requestWindows() { + return // graceful shutdown + } } } @@ -123,20 +126,31 @@ func (sw *schedWorker) handleWorker() { } // session looks good - if !enabled { + { sched.workersLk.Lock() + enabled := worker.enabled worker.enabled = true sched.workersLk.Unlock() - // we'll send window requests on the next loop + if !enabled { + // go send window requests + break + } } // wait for more tasks to be assigned by the main scheduler or for the worker // to finish precessing a task - update, ok := sw.waitForUpdates() + update, pokeSched, ok := sw.waitForUpdates() if !ok { return } + if pokeSched { + // a task has finished preparing, which can mean that we've freed some space on some worker + select { + case sched.workerChange <- struct{}{}: + default: // workerChange is buffered, and scheduling is global, so it's ok if we don't send here + } + } if update { break } @@ -251,23 +265,23 @@ func (sw 
*schedWorker) requestWindows() bool { return true } -func (sw *schedWorker) waitForUpdates() (update bool, ok bool) { +func (sw *schedWorker) waitForUpdates() (update bool, sched bool, ok bool) { select { case <-sw.heartbeatTimer.C: - return false, true + return false, false, true case w := <-sw.scheduledWindows: sw.worker.wndLk.Lock() sw.worker.activeWindows = append(sw.worker.activeWindows, w) sw.worker.wndLk.Unlock() - return true, true + return true, false, true case <-sw.taskDone: log.Debugw("task done", "workerid", sw.wid) - return true, true + return true, true, true case <-sw.sched.closing: case <-sw.worker.closingMgr: } - return false, false + return false, false, false } func (sw *schedWorker) workerCompactWindows() { @@ -281,8 +295,8 @@ func (sw *schedWorker) workerCompactWindows() { var moved []int for ti, todo := range window.todo { - needRes := ResourceTable[todo.taskType][sw.sched.spt] - if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info.Resources) { + needRes := ResourceTable[todo.taskType][todo.sector.ProofType] + if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info) { continue } @@ -337,8 +351,8 @@ assignLoop: worker.lk.Lock() for t, todo := range firstWindow.todo { - needRes := ResourceTable[todo.taskType][sw.sched.spt] - if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info.Resources) { + needRes := ResourceTable[todo.taskType][todo.sector.ProofType] + if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info) { tidx = t break } @@ -351,11 +365,11 @@ assignLoop: todo := firstWindow.todo[tidx] - log.Debugf("assign worker sector %d", todo.sector.Number) + log.Debugf("assign worker sector %d", todo.sector.ID.Number) err := sw.startProcessingTask(sw.taskDone, todo) if err != nil { - log.Error("startProcessingTask error: %+v", err) + log.Errorf("startProcessingTask error: %+v", err) go 
todo.respond(xerrors.Errorf("startProcessingTask error: %w", err)) } @@ -376,7 +390,7 @@ assignLoop: func (sw *schedWorker) startProcessingTask(taskDone chan struct{}, req *workerRequest) error { w, sh := sw.worker, sw.sched - needRes := ResourceTable[req.taskType][sh.spt] + needRes := ResourceTable[req.taskType][req.sector.ProofType] w.lk.Lock() w.preparing.add(w.info.Resources, needRes) @@ -384,7 +398,7 @@ func (sw *schedWorker) startProcessingTask(taskDone chan struct{}, req *workerRe go func() { // first run the prepare step (e.g. fetching sector data from other worker) - err := req.prepare(req.ctx, sh.workTracker.worker(sw.wid, w.workerRpc)) + err := req.prepare(req.ctx, sh.workTracker.worker(sw.wid, w.info, w.workerRpc)) sh.workersLk.Lock() if err != nil { @@ -410,7 +424,7 @@ func (sw *schedWorker) startProcessingTask(taskDone chan struct{}, req *workerRe } // wait (if needed) for resources in the 'active' window - err = w.active.withResources(sw.wid, w.info.Resources, needRes, &sh.workersLk, func() error { + err = w.active.withResources(sw.wid, w.info, needRes, &sh.workersLk, func() error { w.lk.Lock() w.preparing.free(w.info.Resources, needRes) w.lk.Unlock() @@ -423,7 +437,7 @@ func (sw *schedWorker) startProcessingTask(taskDone chan struct{}, req *workerRe } // Do the work! 
- err = req.work(req.ctx, sh.workTracker.worker(sw.wid, w.workerRpc)) + err = req.work(req.ctx, sh.workTracker.worker(sw.wid, w.info, w.workerRpc)) select { case req.ret <- workerResponse{err: err}: @@ -473,6 +487,6 @@ func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) { } sh.openWindows = newWindows - log.Debugf("worker %d dropped", wid) + log.Debugf("worker %s dropped", wid) } } diff --git a/extern/sector-storage/sealtasks/task.go b/extern/sector-storage/sealtasks/task.go index 4174373a6cf..6d341a4b315 100644 --- a/extern/sector-storage/sealtasks/task.go +++ b/extern/sector-storage/sealtasks/task.go @@ -11,36 +11,33 @@ const ( TTFinalize TaskType = "seal/v0/finalize" - TTFetch TaskType = "seal/v0/fetch" - TTUnseal TaskType = "seal/v0/unseal" - TTReadUnsealed TaskType = "seal/v0/unsealread" + TTFetch TaskType = "seal/v0/fetch" + TTUnseal TaskType = "seal/v0/unseal" ) var order = map[TaskType]int{ - TTAddPiece: 6, // least priority - TTPreCommit1: 5, - TTPreCommit2: 4, - TTCommit2: 3, - TTCommit1: 2, - TTUnseal: 1, - TTFetch: -1, - TTReadUnsealed: -1, - TTFinalize: -2, // most priority + TTAddPiece: 6, // least priority + TTPreCommit1: 5, + TTPreCommit2: 4, + TTCommit2: 3, + TTCommit1: 2, + TTUnseal: 1, + TTFetch: -1, + TTFinalize: -2, // most priority } var shortNames = map[TaskType]string{ - TTAddPiece: "AP ", + TTAddPiece: "AP", TTPreCommit1: "PC1", TTPreCommit2: "PC2", - TTCommit1: "C1 ", - TTCommit2: "C2 ", + TTCommit1: "C1", + TTCommit2: "C2", TTFinalize: "FIN", - TTFetch: "GET", - TTUnseal: "UNS", - TTReadUnsealed: "RD ", + TTFetch: "GET", + TTUnseal: "UNS", } func (a TaskType) MuchLess(b TaskType) (bool, bool) { diff --git a/extern/sector-storage/selector_task.go b/extern/sector-storage/selector_task.go index ffed40d683f..e4d92757edf 100644 --- a/extern/sector-storage/selector_task.go +++ b/extern/sector-storage/selector_task.go @@ -45,4 +45,4 @@ func (s *taskSelector) Cmp(ctx context.Context, _ sealtasks.TaskType, a, b *work return 
a.utilization() < b.utilization(), nil } -var _ WorkerSelector = &allocSelector{} +var _ WorkerSelector = &taskSelector{} diff --git a/extern/sector-storage/stats.go b/extern/sector-storage/stats.go index bae60b4268b..df3b4eed006 100644 --- a/extern/sector-storage/stats.go +++ b/extern/sector-storage/stats.go @@ -46,7 +46,7 @@ func (m *Manager) WorkerJobs() map[uuid.UUID][]storiface.WorkerJob { for _, request := range window.todo { out[uuid.UUID(id)] = append(out[uuid.UUID(id)], storiface.WorkerJob{ ID: storiface.UndefCall, - Sector: request.sector, + Sector: request.sector.ID, Task: request.taskType, RunWait: wi + 1, Start: request.start, @@ -67,12 +67,26 @@ func (m *Manager) WorkerJobs() map[uuid.UUID][]storiface.WorkerJob { continue } + var ws WorkState + if err := m.work.Get(work).Get(&ws); err != nil { + log.Errorf("WorkerJobs: get work %s: %+v", work, err) + } + + wait := storiface.RWRetWait + if _, ok := m.results[work]; ok { + wait = storiface.RWReturned + } + if ws.Status == wsDone { + wait = storiface.RWRetDone + } + out[uuid.UUID{}] = append(out[uuid.UUID{}], storiface.WorkerJob{ - ID: id, - Sector: id.Sector, - Task: work.Method, - RunWait: -1, - Start: time.Time{}, + ID: id, + Sector: id.Sector, + Task: work.Method, + RunWait: wait, + Start: time.Unix(ws.StartTime, 0), + Hostname: ws.WorkerHostname, }) } diff --git a/extern/sector-storage/stores/http_handler.go b/extern/sector-storage/stores/http_handler.go index 2237bd4071b..5b8477fc8da 100644 --- a/extern/sector-storage/stores/http_handler.go +++ b/extern/sector-storage/stores/http_handler.go @@ -5,25 +5,55 @@ import ( "io" "net/http" "os" + "strconv" "github.com/gorilla/mux" logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/tarutil" + + 
"github.com/filecoin-project/specs-storage/storage" ) var log = logging.Logger("stores") +var _ partialFileHandler = &DefaultPartialFileHandler{} + +// DefaultPartialFileHandler is the default implementation of the partialFileHandler interface. +// This is probably the only implementation we'll ever use because the purpose of the +// interface to is to mock out partial file related functionality during testing. +type DefaultPartialFileHandler struct{} + +func (d *DefaultPartialFileHandler) OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialfile.PartialFile, error) { + return partialfile.OpenPartialFile(maxPieceSize, path) +} +func (d *DefaultPartialFileHandler) HasAllocated(pf *partialfile.PartialFile, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + return pf.HasAllocated(offset, size) +} + +func (d *DefaultPartialFileHandler) Reader(pf *partialfile.PartialFile, offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { + return pf.Reader(offset, size) +} + +// Close closes the partial file +func (d *DefaultPartialFileHandler) Close(pf *partialfile.PartialFile) error { + return pf.Close() +} + type FetchHandler struct { - *Local + Local Store + PfHandler partialFileHandler } func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // /remote/ mux := mux.NewRouter() mux.HandleFunc("/remote/stat/{id}", handler.remoteStatFs).Methods("GET") + mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", handler.remoteGetAllocated).Methods("GET") mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET") mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE") @@ -52,6 +82,8 @@ func (handler *FetchHandler) remoteStatFs(w http.ResponseWriter, r *http.Request } } +// remoteGetSector returns the sector file/tared directory byte stream for the sectorID and sector file type sent in the request. 
+// returns an error if it does NOT have the required sector file/dir. func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Request) { log.Infof("SERVE GET %s", r.URL) vars := mux.Vars(r) @@ -71,11 +103,15 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ } // The caller has a lock on this sector already, no need to get one here - // passing 0 spt because we don't allocate anything - paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + si := storage.SectorRef{ + ID: id, + ProofType: 0, + } + + paths, _, err := handler.Local.AcquireSector(r.Context(), si, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) if err != nil { - log.Errorf("%+v", err) + log.Errorf("AcquireSector: %+v", err) w.WriteHeader(500) return } @@ -91,55 +127,170 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ stat, err := os.Stat(path) if err != nil { - log.Errorf("%+v", err) + log.Errorf("os.Stat: %+v", err) w.WriteHeader(500) return } - var rd io.Reader if stat.IsDir() { - rd, err = tarutil.TarDirectory(path) + if _, has := r.Header["Range"]; has { + log.Error("Range not supported on directories") + w.WriteHeader(500) + return + } + + rd, err := tarutil.TarDirectory(path) + if err != nil { + log.Errorf("%+v", err) + w.WriteHeader(500) + return + } + w.Header().Set("Content-Type", "application/x-tar") + w.WriteHeader(200) + if _, err := io.CopyBuffer(w, rd, make([]byte, CopyBuf)); err != nil { + log.Errorf("%+v", err) + return + } } else { - rd, err = os.OpenFile(path, os.O_RDONLY, 0644) // nolint w.Header().Set("Content-Type", "application/octet-stream") + // will do a ranged read over the file at the given path if the caller has asked for a ranged read in the request headers. 
+ http.ServeFile(w, r, path) } + + log.Debugf("served sector file/dir, sectorID=%+v, fileType=%s, path=%s", id, ft, path) +} + +func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.Request) { + log.Infof("SERVE DELETE %s", r.URL) + vars := mux.Vars(r) + + id, err := storiface.ParseSectorID(vars["id"]) if err != nil { log.Errorf("%+v", err) w.WriteHeader(500) return } - w.WriteHeader(200) - if _, err := io.Copy(w, rd); err != nil { // TODO: default 32k buf may be too small + ft, err := ftFromString(vars["type"]) + if err != nil { log.Errorf("%+v", err) + w.WriteHeader(500) + return + } + + if err := handler.Local.Remove(r.Context(), id, ft, false); err != nil { + log.Errorf("%+v", err) + w.WriteHeader(500) return } } -func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.Request) { - log.Infof("SERVE DELETE %s", r.URL) +// remoteGetAllocated returns `http.StatusOK` if the worker already has an Unsealed sector file +// containing the Unsealed piece sent in the request. +// returns `http.StatusRequestedRangeNotSatisfiable` otherwise. 
+func (handler *FetchHandler) remoteGetAllocated(w http.ResponseWriter, r *http.Request) { + log.Infof("SERVE Alloc check %s", r.URL) vars := mux.Vars(r) id, err := storiface.ParseSectorID(vars["id"]) if err != nil { - log.Errorf("%+v", err) + log.Errorf("parsing sectorID: %+v", err) w.WriteHeader(500) return } ft, err := ftFromString(vars["type"]) if err != nil { - log.Errorf("%+v", err) + log.Errorf("ftFromString: %+v", err) + w.WriteHeader(500) + return + } + if ft != storiface.FTUnsealed { + log.Errorf("/allocated only supports unsealed sector files") w.WriteHeader(500) return } - if err := handler.Remove(r.Context(), id, ft, false); err != nil { - log.Errorf("%+v", err) + spti, err := strconv.ParseInt(vars["spt"], 10, 64) + if err != nil { + log.Errorf("parsing spt: %+v", err) + w.WriteHeader(500) + return + } + spt := abi.RegisteredSealProof(spti) + ssize, err := spt.SectorSize() + if err != nil { + log.Errorf("spt.SectorSize(): %+v", err) + w.WriteHeader(500) + return + } + + offi, err := strconv.ParseInt(vars["offset"], 10, 64) + if err != nil { + log.Errorf("parsing offset: %+v", err) + w.WriteHeader(500) + return + } + szi, err := strconv.ParseInt(vars["size"], 10, 64) + if err != nil { + log.Errorf("parsing size: %+v", err) + w.WriteHeader(500) + return + } + + // The caller has a lock on this sector already, no need to get one here + + // passing 0 spt because we don't allocate anything + si := storage.SectorRef{ + ID: id, + ProofType: 0, + } + + // get the path of the local Unsealed file for the given sector. + // return error if we do NOT have it. 
+ paths, _, err := handler.Local.AcquireSector(r.Context(), si, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + if err != nil { + log.Errorf("AcquireSector: %+v", err) w.WriteHeader(500) return } + + path := storiface.PathByType(paths, ft) + if path == "" { + log.Error("acquired path was empty") + w.WriteHeader(500) + return + } + + // open the Unsealed file and check if it has the Unsealed sector for the piece at the given offset and size. + pf, err := handler.PfHandler.OpenPartialFile(abi.PaddedPieceSize(ssize), path) + if err != nil { + log.Error("opening partial file: ", err) + w.WriteHeader(500) + return + } + defer func() { + if err := pf.Close(); err != nil { + log.Error("closing partial file: ", err) + } + }() + + has, err := handler.PfHandler.HasAllocated(pf, storiface.UnpaddedByteIndex(offi), abi.UnpaddedPieceSize(szi)) + if err != nil { + log.Error("has allocated: ", err) + w.WriteHeader(500) + return + } + + if has { + log.Debugf("returning ok: worker has unsealed file with unsealed piece, sector:%+v, offset:%d, size:%d", id, offi, szi) + w.WriteHeader(http.StatusOK) + return + } + + log.Debugf("returning StatusRequestedRangeNotSatisfiable: worker does NOT have unsealed file with unsealed piece, sector:%+v, offset:%d, size:%d", id, offi, szi) + w.WriteHeader(http.StatusRequestedRangeNotSatisfiable) } func ftFromString(t string) (storiface.SectorFileType, error) { diff --git a/extern/sector-storage/stores/http_handler_test.go b/extern/sector-storage/stores/http_handler_test.go new file mode 100644 index 00000000000..1258d8530a9 --- /dev/null +++ b/extern/sector-storage/stores/http_handler_test.go @@ -0,0 +1,457 @@ +package stores_test + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + 
"github.com/filecoin-project/lotus/extern/sector-storage/stores/mocks" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/specs-storage/storage" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" +) + +func TestRemoteGetAllocated(t *testing.T) { + + emptyPartialFile := &partialfile.PartialFile{} + pfPath := "path" + expectedSectorRef := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 123, + Number: 123, + }, + ProofType: 0, + } + + validSectorName := fmt.Sprintf("s-t0%d-%d", 123, 123) + validSectorFileType := storiface.FTUnsealed.String() + validSectorType := "1" + sectorSize := abi.SealProofInfos[1].SectorSize + + validOffset := "100" + validOffsetInt := 100 + + validSize := "1000" + validSizeInt := 1000 + + type pieceInfo struct { + sectorName string + fileType string + sectorType string + + // piece info + offset string + size string + } + validPieceInfo := pieceInfo{ + sectorName: validSectorName, + fileType: validSectorFileType, + sectorType: validSectorType, + offset: validOffset, + size: validSize, + } + + tcs := map[string]struct { + piFnc func(pi *pieceInfo) + storeFnc func(s *mocks.MockStore) + pfFunc func(s *mocks.MockpartialFileHandler) + + // expectation + expectedStatusCode int + }{ + "fails when sector name is invalid": { + piFnc: func(pi *pieceInfo) { + pi.sectorName = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when file type is invalid": { + piFnc: func(pi *pieceInfo) { + pi.fileType = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when sector proof type is invalid": { + piFnc: func(pi *pieceInfo) { + pi.sectorType = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when offset is invalid": { + piFnc: func(pi *pieceInfo) { + pi.offset = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when size is invalid": { + 
piFnc: func(pi *pieceInfo) { + pi.size = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when errors out during acquiring unsealed sector file": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: "path", + }, + storiface.SectorPaths{}, xerrors.New("some error")).Times(1) + }, + }, + "fails when unsealed sector file is not found locally": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{}, + storiface.SectorPaths{}, nil).Times(1) + }, + }, + "fails when error while opening partial file": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, nil).Times(1) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(&partialfile.PartialFile{}, + xerrors.New("some error")).Times(1) + }, + }, + + "fails when determining partial file allocation returns an error": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, nil).Times(1) + }, + + pfFunc: func(pf 
*mocks.MockpartialFileHandler) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(emptyPartialFile, + nil).Times(1) + + pf.EXPECT().HasAllocated(emptyPartialFile, storiface.UnpaddedByteIndex(validOffsetInt), + abi.UnpaddedPieceSize(validSizeInt)).Return(true, xerrors.New("some error")).Times(1) + }, + }, + "StatusRequestedRangeNotSatisfiable when piece is NOT allocated in partial file": { + expectedStatusCode: http.StatusRequestedRangeNotSatisfiable, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, nil).Times(1) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(emptyPartialFile, + nil).Times(1) + + pf.EXPECT().HasAllocated(emptyPartialFile, storiface.UnpaddedByteIndex(validOffsetInt), + abi.UnpaddedPieceSize(validSizeInt)).Return(false, nil).Times(1) + }, + }, + "OK when piece is allocated in partial file": { + expectedStatusCode: http.StatusOK, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, nil).Times(1) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(emptyPartialFile, + nil).Times(1) + + pf.EXPECT().HasAllocated(emptyPartialFile, storiface.UnpaddedByteIndex(validOffsetInt), + abi.UnpaddedPieceSize(validSizeInt)).Return(true, nil).Times(1) + }, + }, + } + + for name, tc := range tcs { + tc := tc + t.Run(name, func(t *testing.T) { + // create go mock controller here + mockCtrl := gomock.NewController(t) + // when test is done, 
assert expectations on all mock objects. + defer mockCtrl.Finish() + + lstore := mocks.NewMockStore(mockCtrl) + pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + + handler := &stores.FetchHandler{ + lstore, + pfhandler, + } + + // run http server + ts := httptest.NewServer(handler) + defer ts.Close() + + pi := validPieceInfo + + if tc.piFnc != nil { + tc.piFnc(&pi) + } + + if tc.storeFnc != nil { + tc.storeFnc(lstore) + } + if tc.pfFunc != nil { + tc.pfFunc(pfhandler) + } + + // call remoteGetAllocated + url := fmt.Sprintf("%s/remote/%s/%s/%s/allocated/%s/%s", + ts.URL, + pi.fileType, + pi.sectorName, + pi.sectorType, + pi.offset, + pi.size) + resp, err := http.Get(url) + require.NoError(t, err) + defer func() { + _ = resp.Body.Close() + }() + + // assert expected status code + require.Equal(t, tc.expectedStatusCode, resp.StatusCode) + }) + } +} + +func TestRemoteGetSector(t *testing.T) { + str := "hello-world" + fileBytes := []byte(str) + + validSectorName := fmt.Sprintf("s-t0%d-%d", 123, 123) + validSectorFileType := storiface.FTUnsealed.String() + expectedSectorRef := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 123, + Number: 123, + }, + ProofType: 0, + } + + type sectorInfo struct { + sectorName string + fileType string + } + validSectorInfo := sectorInfo{ + sectorName: validSectorName, + fileType: validSectorFileType, + } + + tcs := map[string]struct { + siFnc func(pi *sectorInfo) + storeFnc func(s *mocks.MockStore, path string) + + // reading a file or a dir + isDir bool + + // expectation + noResponseBytes bool + expectedContentType string + expectedStatusCode int + expectedResponseBytes []byte + }{ + "fails when sector name is invalid": { + siFnc: func(si *sectorInfo) { + si.sectorName = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + noResponseBytes: true, + }, + "fails when file type is invalid": { + siFnc: func(si *sectorInfo) { + si.fileType = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + 
noResponseBytes: true, + }, + "fails when error while acquiring sector file": { + storeFnc: func(l *mocks.MockStore, _ string) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: "path", + }, + storiface.SectorPaths{}, xerrors.New("some error")).Times(1) + }, + expectedStatusCode: http.StatusInternalServerError, + noResponseBytes: true, + }, + "fails when acquired sector file path is empty": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore, _ string) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{}, + storiface.SectorPaths{}, nil).Times(1) + }, + noResponseBytes: true, + }, + "fails when acquired file does not exist": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore, _ string) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: "path", + }, + storiface.SectorPaths{}, nil) + }, + noResponseBytes: true, + }, + "successfully read a sector file": { + storeFnc: func(l *mocks.MockStore, path string) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: path, + }, + storiface.SectorPaths{}, nil) + }, + + noResponseBytes: false, + expectedContentType: "application/octet-stream", + expectedStatusCode: 200, + expectedResponseBytes: fileBytes, + }, + "successfully read a sector dir": { + storeFnc: func(l *mocks.MockStore, path string) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, 
storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: path, + }, + storiface.SectorPaths{}, nil) + }, + + isDir: true, + noResponseBytes: false, + expectedContentType: "application/x-tar", + expectedStatusCode: 200, + expectedResponseBytes: fileBytes, + }, + } + + for name, tc := range tcs { + tc := tc + t.Run(name, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. + defer mockCtrl.Finish() + lstore := mocks.NewMockStore(mockCtrl) + pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + + var path string + + if !tc.isDir { + // create file + tempFile, err := ioutil.TempFile("", "TestRemoteGetSector-") + require.NoError(t, err) + + defer func() { + _ = os.Remove(tempFile.Name()) + }() + + _, err = tempFile.Write(fileBytes) + require.NoError(t, err) + path = tempFile.Name() + } else { + // create dir with a file + tempFile2, err := ioutil.TempFile("", "TestRemoteGetSector-") + require.NoError(t, err) + defer func() { + _ = os.Remove(tempFile2.Name()) + }() + + stat, err := os.Stat(tempFile2.Name()) + require.NoError(t, err) + tempDir, err := ioutil.TempDir("", "TestRemoteGetSector-") + require.NoError(t, err) + + defer func() { + _ = os.RemoveAll(tempDir) + }() + + require.NoError(t, os.Rename(tempFile2.Name(), filepath.Join(tempDir, stat.Name()))) + + path = tempDir + } + + handler := &stores.FetchHandler{ + lstore, + pfhandler, + } + + // run http server + ts := httptest.NewServer(handler) + defer ts.Close() + + si := validSectorInfo + if tc.siFnc != nil { + tc.siFnc(&si) + } + + if tc.storeFnc != nil { + tc.storeFnc(lstore, path) + } + + // call remoteGetAllocated + url := fmt.Sprintf("%s/remote/%s/%s", + ts.URL, + si.fileType, + si.sectorName, + ) + resp, err := http.Get(url) + require.NoError(t, err) + defer func() { + _ = resp.Body.Close() + }() + + bz, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + // assert expected status code + 
require.Equal(t, tc.expectedStatusCode, resp.StatusCode) + + if !tc.noResponseBytes { + if !tc.isDir { + require.EqualValues(t, tc.expectedResponseBytes, bz) + } + } + + require.Equal(t, tc.expectedContentType, resp.Header.Get("Content-Type")) + }) + } +} diff --git a/extern/sector-storage/stores/index.go b/extern/sector-storage/stores/index.go index acd799ab723..9fd7f6d7d84 100644 --- a/extern/sector-storage/stores/index.go +++ b/extern/sector-storage/stores/index.go @@ -2,6 +2,8 @@ package stores import ( "context" + "errors" + "fmt" "net/url" gopath "path" "sort" @@ -25,9 +27,10 @@ var SkippedHeartbeatThresh = HeartbeatInterval * 5 type ID string type StorageInfo struct { - ID ID - URLs []string // TODO: Support non-http transports - Weight uint64 + ID ID + URLs []string // TODO: Support non-http transports + Weight uint64 + MaxStorage uint64 CanSeal bool CanStore bool @@ -35,7 +38,7 @@ type StorageInfo struct { type HealthReport struct { Stat fsutil.FsStat - Err error + Err string } type SectorStorageInfo struct { @@ -63,6 +66,8 @@ type SectorIndex interface { // part of storage-miner api // atomically acquire locks on all sector file types. 
close ctx to unlock StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) + + StorageList(ctx context.Context) (map[ID][]Decl, error) } type Decl struct { @@ -154,6 +159,11 @@ func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st fsutil.FsS i.stores[si.ID].info.URLs = append(i.stores[si.ID].info.URLs, u) } + i.stores[si.ID].info.Weight = si.Weight + i.stores[si.ID].info.MaxStorage = si.MaxStorage + i.stores[si.ID].info.CanSeal = si.CanSeal + i.stores[si.ID].info.CanStore = si.CanStore + return nil } i.stores[si.ID] = &storageEntry{ @@ -175,7 +185,11 @@ func (i *Index) StorageReportHealth(ctx context.Context, id ID, report HealthRep } ent.fsi = report.Stat - ent.heartbeatErr = report.Err + if report.Err != "" { + ent.heartbeatErr = errors.New(report.Err) + } else { + ent.heartbeatErr = nil + } ent.lastHeartbeat = time.Now() return nil @@ -225,7 +239,7 @@ func (i *Index) StorageDropSector(ctx context.Context, storageID ID, s abi.Secto d := Decl{s, fileType} if len(i.sectors[d]) == 0 { - return nil + continue } rewritten := make([]*declMeta, 0, len(i.sectors[d])-1) @@ -238,7 +252,7 @@ func (i *Index) StorageDropSector(ctx context.Context, storageID ID, s abi.Secto } if len(rewritten) == 0 { delete(i.sectors, d) - return nil + continue } i.sectors[d] = rewritten @@ -372,7 +386,16 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate storiface.SectorF var candidates []storageEntry - spaceReq, err := allocate.SealSpaceUse(ssize) + var err error + var spaceReq uint64 + switch pathType { + case storiface.PathSealing: + spaceReq, err = allocate.SealSpaceUse(ssize) + case storiface.PathStorage: + spaceReq, err = allocate.StoreSpaceUse(ssize) + default: + panic(fmt.Sprintf("unexpected pathType: %s", pathType)) + } if err != nil { return nil, 
xerrors.Errorf("estimating required space: %w", err) } diff --git a/extern/sector-storage/stores/interface.go b/extern/sector-storage/stores/interface.go index 574ec599ee6..4986e6c808c 100644 --- a/extern/sector-storage/stores/interface.go +++ b/extern/sector-storage/stores/interface.go @@ -2,15 +2,36 @@ package stores import ( "context" + "os" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" + + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) +// PartialFileHandler helps mock out the partial file functionality during testing. +type partialFileHandler interface { + // OpenPartialFile opens and returns a partial file at the given path and also verifies it has the given + // size + OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialfile.PartialFile, error) + + // HasAllocated returns true if the given partial file has an unsealed piece starting at the given offset with the given size. + // returns false otherwise. + HasAllocated(pf *partialfile.PartialFile, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) + + // Reader returns a file from which we can read the unsealed piece in the partial file. 
+ Reader(pf *partialfile.PartialFile, offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) + + // Close closes the partial file + Close(pf *partialfile.PartialFile) error +} + type Store interface { - AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error) + AcquireSector(ctx context.Context, s storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error) Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool) error // like remove, but doesn't remove the primary sector copy, nor the last @@ -18,7 +39,9 @@ type Store interface { RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error // move sectors into storage - MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error + MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) + + Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) } diff --git a/extern/sector-storage/stores/local.go b/extern/sector-storage/stores/local.go index 89c22bd9926..cac16013934 100644 --- a/extern/sector-storage/stores/local.go +++ b/extern/sector-storage/stores/local.go @@ -14,6 +14,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" 
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" @@ -41,6 +42,10 @@ type LocalStorageMeta struct { // Finalized sectors that will be proved over time will be stored here CanStore bool + + // MaxStorage specifies the maximum number of bytes to use for sector storage + // (0 = unlimited) + MaxStorage uint64 } // StorageConfig .lotusstorage/storage.json @@ -76,7 +81,8 @@ type Local struct { } type path struct { - local string // absolute local path + local string // absolute local path + maxStorage uint64 reserved int64 reservations map[abi.SectorID]storiface.SectorFileType @@ -108,7 +114,7 @@ func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) { used, err = ls.DiskUsage(p) } if err != nil { - log.Errorf("getting disk usage of '%s': %+v", p.sectorPath(id, fileType), err) + log.Debugf("getting disk usage of '%s': %+v", p.sectorPath(id, fileType), err) continue } @@ -126,6 +132,25 @@ func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) { stat.Available = 0 } + if p.maxStorage > 0 { + used, err := ls.DiskUsage(p.local) + if err != nil { + return fsutil.FsStat{}, err + } + + stat.Max = int64(p.maxStorage) + stat.Used = used + + avail := int64(p.maxStorage) - used + if uint64(used) > p.maxStorage { + avail = 0 + } + + if avail < stat.Available { + stat.Available = avail + } + } + return stat, err } @@ -133,6 +158,8 @@ func (p *path) sectorPath(sid abi.SectorID, fileType storiface.SectorFileType) s return filepath.Join(p.local, fileType.String(), storiface.SectorName(sid)) } +type URLs []string + func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) { l := &Local{ localStorage: ls, @@ -163,6 +190,7 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { out := &path{ local: p, + maxStorage: meta.MaxStorage, reserved: 0, reservations: map[abi.SectorID]storiface.SectorFileType{}, } @@ -173,11 +201,12 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { } err = 
st.index.StorageAttach(ctx, StorageInfo{ - ID: meta.ID, - URLs: st.urls, - Weight: meta.Weight, - CanSeal: meta.CanSeal, - CanStore: meta.CanStore, + ID: meta.ID, + URLs: st.urls, + Weight: meta.Weight, + MaxStorage: meta.MaxStorage, + CanSeal: meta.CanSeal, + CanStore: meta.CanStore, }, fst) if err != nil { return xerrors.Errorf("declaring storage in index: %w", err) @@ -236,11 +265,12 @@ func (st *Local) Redeclare(ctx context.Context) error { } err = st.index.StorageAttach(ctx, StorageInfo{ - ID: id, - URLs: st.urls, - Weight: meta.Weight, - CanSeal: meta.CanSeal, - CanStore: meta.CanStore, + ID: id, + URLs: st.urls, + Weight: meta.Weight, + MaxStorage: meta.MaxStorage, + CanSeal: meta.CanSeal, + CanStore: meta.CanStore, }, fst) if err != nil { return xerrors.Errorf("redeclaring storage in index: %w", err) @@ -298,29 +328,39 @@ func (st *Local) reportHealth(ctx context.Context) { return } - st.localLk.RLock() + st.reportStorage(ctx) + } +} - toReport := map[ID]HealthReport{} - for id, p := range st.paths { - stat, err := p.stat(st.localStorage) +func (st *Local) reportStorage(ctx context.Context) { + st.localLk.RLock() - toReport[id] = HealthReport{ - Stat: stat, - Err: err, - } + toReport := map[ID]HealthReport{} + for id, p := range st.paths { + stat, err := p.stat(st.localStorage) + r := HealthReport{Stat: stat} + if err != nil { + r.Err = err.Error() } - st.localLk.RUnlock() + toReport[id] = r + } + + st.localLk.RUnlock() - for id, report := range toReport { - if err := st.index.StorageReportHealth(ctx, id, report); err != nil { - log.Warnf("error reporting storage health for %s (%+v): %+v", id, report, err) - } + for id, report := range toReport { + if err := st.index.StorageReportHealth(ctx, id, report); err != nil { + log.Warnf("error reporting storage health for %s (%+v): %+v", id, report, err) } } } -func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, 
overheadTab map[storiface.SectorFileType]int) (func(), error) { +func (st *Local) Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) { + ssize, err := sid.ProofType.SectorSize() + if err != nil { + return nil, err + } + st.localLk.Lock() done := func() {} @@ -350,12 +390,14 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.Sector overhead := int64(overheadTab[fileType]) * int64(ssize) / storiface.FSOverheadDen if stat.Available < overhead { - return nil, xerrors.Errorf("can't reserve %d bytes in '%s' (id:%s), only %d available", overhead, p.local, id, stat.Available) + return nil, storiface.Err(storiface.ErrTempAllocateSpace, xerrors.Errorf("can't reserve %d bytes in '%s' (id:%s), only %d available", overhead, p.local, id, stat.Available)) } p.reserved += overhead + p.reservations[sid.ID] |= fileType prevDone := done + saveFileType := fileType done = func() { prevDone() @@ -363,6 +405,10 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.Sector defer st.localLk.Unlock() p.reserved -= overhead + p.reservations[sid.ID] ^= saveFileType + if p.reservations[sid.ID] == storiface.FTNone { + delete(p.reservations, sid.ID) + } } } @@ -370,11 +416,16 @@ func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, ssize abi.Sector return done, nil } -func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { +func (st *Local) AcquireSector(ctx context.Context, sid storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { if existing|allocate != 
existing^allocate { return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector") } + ssize, err := sid.ProofType.SectorSize() + if err != nil { + return storiface.SectorPaths{}, storiface.SectorPaths{}, err + } + st.localLk.RLock() defer st.localLk.RUnlock() @@ -386,7 +437,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi. continue } - si, err := st.index.StorageFindSector(ctx, sid, fileType, ssize, false) + si, err := st.index.StorageFindSector(ctx, sid.ID, fileType, ssize, false) if err != nil { log.Warnf("finding existing sector %d(t:%d) failed: %+v", sid, fileType, err) continue @@ -402,7 +453,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi. continue } - spath := p.sectorPath(sid, fileType) + spath := p.sectorPath(sid.ID, fileType) storiface.SetPathByType(&out, fileType, spath) storiface.SetPathByType(&storageIDs, fileType, string(info.ID)) @@ -444,7 +495,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, ssize abi. 
// TODO: Check free space - best = p.sectorPath(sid, fileType) + best = p.sectorPath(sid.ID, fileType) bestID = si.ID break } @@ -568,16 +619,18 @@ func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ storifa log.Errorf("removing sector (%v) from %s: %+v", sid, spath, err) } + st.reportStorage(ctx) // report freed space + return nil } -func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error { - dest, destIds, err := st.AcquireSector(ctx, s, ssize, storiface.FTNone, types, storiface.PathStorage, storiface.AcquireMove) +func (st *Local) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error { + dest, destIds, err := st.AcquireSector(ctx, s, storiface.FTNone, types, storiface.PathStorage, storiface.AcquireMove) if err != nil { return xerrors.Errorf("acquire dest storage: %w", err) } - src, srcIds, err := st.AcquireSector(ctx, s, ssize, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + src, srcIds, err := st.AcquireSector(ctx, s, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) if err != nil { return xerrors.Errorf("acquire src storage: %w", err) } @@ -609,7 +662,7 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.Sect log.Debugf("moving %v(%d) to storage: %s(se:%t; st:%t) -> %s(se:%t; st:%t)", s, fileType, sst.ID, sst.CanSeal, sst.CanStore, dst.ID, dst.CanSeal, dst.CanStore) - if err := st.index.StorageDropSector(ctx, ID(storiface.PathByType(srcIds, fileType)), s, fileType); err != nil { + if err := st.index.StorageDropSector(ctx, ID(storiface.PathByType(srcIds, fileType)), s.ID, fileType); err != nil { return xerrors.Errorf("dropping source sector from index: %w", err) } @@ -618,11 +671,13 @@ func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.Sect return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err) } - if err := 
st.index.StorageDeclareSector(ctx, ID(storiface.PathByType(destIds, fileType)), s, fileType, true); err != nil { + if err := st.index.StorageDeclareSector(ctx, ID(storiface.PathByType(destIds, fileType)), s.ID, fileType, true); err != nil { return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(storiface.PathByType(destIds, fileType)), err) } } + st.reportStorage(ctx) // report space use changes + return nil } diff --git a/extern/sector-storage/stores/local_test.go b/extern/sector-storage/stores/local_test.go index 1c31e8c0952..ac5f6f3413f 100644 --- a/extern/sector-storage/stores/local_test.go +++ b/extern/sector-storage/stores/local_test.go @@ -36,8 +36,9 @@ func (t *TestingLocalStorage) SetStorage(f func(*StorageConfig)) error { func (t *TestingLocalStorage) Stat(path string) (fsutil.FsStat, error) { return fsutil.FsStat{ - Capacity: pathSize, - Available: pathSize, + Capacity: pathSize, + Available: pathSize, + FSAvailable: pathSize, }, nil } diff --git a/extern/sector-storage/stores/mocks/index.go b/extern/sector-storage/stores/mocks/index.go new file mode 100644 index 00000000000..59a6017b569 --- /dev/null +++ b/extern/sector-storage/stores/mocks/index.go @@ -0,0 +1,184 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: index.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + abi "github.com/filecoin-project/go-state-types/abi" + fsutil "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" + stores "github.com/filecoin-project/lotus/extern/sector-storage/stores" + storiface "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + gomock "github.com/golang/mock/gomock" +) + +// MockSectorIndex is a mock of SectorIndex interface. +type MockSectorIndex struct { + ctrl *gomock.Controller + recorder *MockSectorIndexMockRecorder +} + +// MockSectorIndexMockRecorder is the mock recorder for MockSectorIndex. 
+type MockSectorIndexMockRecorder struct { + mock *MockSectorIndex +} + +// NewMockSectorIndex creates a new mock instance. +func NewMockSectorIndex(ctrl *gomock.Controller) *MockSectorIndex { + mock := &MockSectorIndex{ctrl: ctrl} + mock.recorder = &MockSectorIndexMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSectorIndex) EXPECT() *MockSectorIndexMockRecorder { + return m.recorder +} + +// StorageAttach mocks base method. +func (m *MockSectorIndex) StorageAttach(arg0 context.Context, arg1 stores.StorageInfo, arg2 fsutil.FsStat) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageAttach", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageAttach indicates an expected call of StorageAttach. +func (mr *MockSectorIndexMockRecorder) StorageAttach(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageAttach", reflect.TypeOf((*MockSectorIndex)(nil).StorageAttach), arg0, arg1, arg2) +} + +// StorageBestAlloc mocks base method. +func (m *MockSectorIndex) StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageBestAlloc", ctx, allocate, ssize, pathType) + ret0, _ := ret[0].([]stores.StorageInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageBestAlloc indicates an expected call of StorageBestAlloc. +func (mr *MockSectorIndexMockRecorder) StorageBestAlloc(ctx, allocate, ssize, pathType interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageBestAlloc", reflect.TypeOf((*MockSectorIndex)(nil).StorageBestAlloc), ctx, allocate, ssize, pathType) +} + +// StorageDeclareSector mocks base method. 
+func (m *MockSectorIndex) StorageDeclareSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageDeclareSector", ctx, storageID, s, ft, primary) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageDeclareSector indicates an expected call of StorageDeclareSector. +func (mr *MockSectorIndexMockRecorder) StorageDeclareSector(ctx, storageID, s, ft, primary interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageDeclareSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageDeclareSector), ctx, storageID, s, ft, primary) +} + +// StorageDropSector mocks base method. +func (m *MockSectorIndex) StorageDropSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageDropSector", ctx, storageID, s, ft) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageDropSector indicates an expected call of StorageDropSector. +func (mr *MockSectorIndexMockRecorder) StorageDropSector(ctx, storageID, s, ft interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageDropSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageDropSector), ctx, storageID, s, ft) +} + +// StorageFindSector mocks base method. +func (m *MockSectorIndex) StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageFindSector", ctx, sector, ft, ssize, allowFetch) + ret0, _ := ret[0].([]stores.SectorStorageInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageFindSector indicates an expected call of StorageFindSector. 
+func (mr *MockSectorIndexMockRecorder) StorageFindSector(ctx, sector, ft, ssize, allowFetch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageFindSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageFindSector), ctx, sector, ft, ssize, allowFetch) +} + +// StorageInfo mocks base method. +func (m *MockSectorIndex) StorageInfo(arg0 context.Context, arg1 stores.ID) (stores.StorageInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageInfo", arg0, arg1) + ret0, _ := ret[0].(stores.StorageInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageInfo indicates an expected call of StorageInfo. +func (mr *MockSectorIndexMockRecorder) StorageInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageInfo", reflect.TypeOf((*MockSectorIndex)(nil).StorageInfo), arg0, arg1) +} + +// StorageList mocks base method. +func (m *MockSectorIndex) StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageList", ctx) + ret0, _ := ret[0].(map[stores.ID][]stores.Decl) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageList indicates an expected call of StorageList. +func (mr *MockSectorIndexMockRecorder) StorageList(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageList", reflect.TypeOf((*MockSectorIndex)(nil).StorageList), ctx) +} + +// StorageLock mocks base method. +func (m *MockSectorIndex) StorageLock(ctx context.Context, sector abi.SectorID, read, write storiface.SectorFileType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageLock", ctx, sector, read, write) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageLock indicates an expected call of StorageLock. 
+func (mr *MockSectorIndexMockRecorder) StorageLock(ctx, sector, read, write interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageLock", reflect.TypeOf((*MockSectorIndex)(nil).StorageLock), ctx, sector, read, write) +} + +// StorageReportHealth mocks base method. +func (m *MockSectorIndex) StorageReportHealth(arg0 context.Context, arg1 stores.ID, arg2 stores.HealthReport) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageReportHealth", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageReportHealth indicates an expected call of StorageReportHealth. +func (mr *MockSectorIndexMockRecorder) StorageReportHealth(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageReportHealth", reflect.TypeOf((*MockSectorIndex)(nil).StorageReportHealth), arg0, arg1, arg2) +} + +// StorageTryLock mocks base method. +func (m *MockSectorIndex) StorageTryLock(ctx context.Context, sector abi.SectorID, read, write storiface.SectorFileType) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageTryLock", ctx, sector, read, write) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageTryLock indicates an expected call of StorageTryLock. +func (mr *MockSectorIndexMockRecorder) StorageTryLock(ctx, sector, read, write interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageTryLock", reflect.TypeOf((*MockSectorIndex)(nil).StorageTryLock), ctx, sector, read, write) +} diff --git a/extern/sector-storage/stores/mocks/stores.go b/extern/sector-storage/stores/mocks/stores.go new file mode 100644 index 00000000000..fdfd73a0774 --- /dev/null +++ b/extern/sector-storage/stores/mocks/stores.go @@ -0,0 +1,212 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: interface.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + os "os" + reflect "reflect" + + abi "github.com/filecoin-project/go-state-types/abi" + fsutil "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" + partialfile "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" + stores "github.com/filecoin-project/lotus/extern/sector-storage/stores" + storiface "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + storage "github.com/filecoin-project/specs-storage/storage" + gomock "github.com/golang/mock/gomock" +) + +// MockpartialFileHandler is a mock of partialFileHandler interface. +type MockpartialFileHandler struct { + ctrl *gomock.Controller + recorder *MockpartialFileHandlerMockRecorder +} + +// MockpartialFileHandlerMockRecorder is the mock recorder for MockpartialFileHandler. +type MockpartialFileHandlerMockRecorder struct { + mock *MockpartialFileHandler +} + +// NewMockpartialFileHandler creates a new mock instance. +func NewMockpartialFileHandler(ctrl *gomock.Controller) *MockpartialFileHandler { + mock := &MockpartialFileHandler{ctrl: ctrl} + mock.recorder = &MockpartialFileHandlerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockpartialFileHandler) EXPECT() *MockpartialFileHandlerMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockpartialFileHandler) Close(pf *partialfile.PartialFile) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close", pf) + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockpartialFileHandlerMockRecorder) Close(pf interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockpartialFileHandler)(nil).Close), pf) +} + +// HasAllocated mocks base method. 
+func (m *MockpartialFileHandler) HasAllocated(pf *partialfile.PartialFile, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasAllocated", pf, offset, size) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasAllocated indicates an expected call of HasAllocated. +func (mr *MockpartialFileHandlerMockRecorder) HasAllocated(pf, offset, size interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasAllocated", reflect.TypeOf((*MockpartialFileHandler)(nil).HasAllocated), pf, offset, size) +} + +// OpenPartialFile mocks base method. +func (m *MockpartialFileHandler) OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialfile.PartialFile, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OpenPartialFile", maxPieceSize, path) + ret0, _ := ret[0].(*partialfile.PartialFile) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// OpenPartialFile indicates an expected call of OpenPartialFile. +func (mr *MockpartialFileHandlerMockRecorder) OpenPartialFile(maxPieceSize, path interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenPartialFile", reflect.TypeOf((*MockpartialFileHandler)(nil).OpenPartialFile), maxPieceSize, path) +} + +// Reader mocks base method. +func (m *MockpartialFileHandler) Reader(pf *partialfile.PartialFile, offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reader", pf, offset, size) + ret0, _ := ret[0].(*os.File) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Reader indicates an expected call of Reader. 
+func (mr *MockpartialFileHandlerMockRecorder) Reader(pf, offset, size interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockpartialFileHandler)(nil).Reader), pf, offset, size) +} + +// MockStore is a mock of Store interface. +type MockStore struct { + ctrl *gomock.Controller + recorder *MockStoreMockRecorder +} + +// MockStoreMockRecorder is the mock recorder for MockStore. +type MockStoreMockRecorder struct { + mock *MockStore +} + +// NewMockStore creates a new mock instance. +func NewMockStore(ctrl *gomock.Controller) *MockStore { + mock := &MockStore{ctrl: ctrl} + mock.recorder = &MockStoreMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStore) EXPECT() *MockStoreMockRecorder { + return m.recorder +} + +// AcquireSector mocks base method. +func (m *MockStore) AcquireSector(ctx context.Context, s storage.SectorRef, existing, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AcquireSector", ctx, s, existing, allocate, sealing, op) + ret0, _ := ret[0].(storiface.SectorPaths) + ret1, _ := ret[1].(storiface.SectorPaths) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// AcquireSector indicates an expected call of AcquireSector. +func (mr *MockStoreMockRecorder) AcquireSector(ctx, s, existing, allocate, sealing, op interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcquireSector", reflect.TypeOf((*MockStore)(nil).AcquireSector), ctx, s, existing, allocate, sealing, op) +} + +// FsStat mocks base method. 
+func (m *MockStore) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FsStat", ctx, id) + ret0, _ := ret[0].(fsutil.FsStat) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FsStat indicates an expected call of FsStat. +func (mr *MockStoreMockRecorder) FsStat(ctx, id interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FsStat", reflect.TypeOf((*MockStore)(nil).FsStat), ctx, id) +} + +// MoveStorage mocks base method. +func (m *MockStore) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MoveStorage", ctx, s, types) + ret0, _ := ret[0].(error) + return ret0 +} + +// MoveStorage indicates an expected call of MoveStorage. +func (mr *MockStoreMockRecorder) MoveStorage(ctx, s, types interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveStorage", reflect.TypeOf((*MockStore)(nil).MoveStorage), ctx, s, types) +} + +// Remove mocks base method. +func (m *MockStore) Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Remove", ctx, s, types, force) + ret0, _ := ret[0].(error) + return ret0 +} + +// Remove indicates an expected call of Remove. +func (mr *MockStoreMockRecorder) Remove(ctx, s, types, force interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockStore)(nil).Remove), ctx, s, types, force) +} + +// RemoveCopies mocks base method. +func (m *MockStore) RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveCopies", ctx, s, types) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveCopies indicates an expected call of RemoveCopies. 
+func (mr *MockStoreMockRecorder) RemoveCopies(ctx, s, types interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveCopies", reflect.TypeOf((*MockStore)(nil).RemoveCopies), ctx, s, types) +} + +// Reserve mocks base method. +func (m *MockStore) Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reserve", ctx, sid, ft, storageIDs, overheadTab) + ret0, _ := ret[0].(func()) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Reserve indicates an expected call of Reserve. +func (mr *MockStoreMockRecorder) Reserve(ctx, sid, ft, storageIDs, overheadTab interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reserve", reflect.TypeOf((*MockStore)(nil).Reserve), ctx, sid, ft, storageIDs, overheadTab) +} diff --git a/extern/sector-storage/stores/remote.go b/extern/sector-storage/stores/remote.go index 37dde910dc1..6f8efc03ed6 100644 --- a/extern/sector-storage/stores/remote.go +++ b/extern/sector-storage/stores/remote.go @@ -3,6 +3,8 @@ package stores import ( "context" "encoding/json" + "fmt" + "io" "io/ioutil" "math/bits" "mime" @@ -19,16 +21,18 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/tarutil" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" "github.com/hashicorp/go-multierror" - files "github.com/ipfs/go-ipfs-files" "golang.org/x/xerrors" ) var FetchTempSubdir = "fetching" +var CopyBuf = 1 << 20 + type Remote struct { - local *Local + local Store index SectorIndex auth http.Header @@ -36,6 +40,8 @@ type Remote struct { fetchLk sync.Mutex fetching map[abi.SectorID]chan struct{} + + pfHandler partialFileHandler } func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types 
storiface.SectorFileType) error { @@ -46,7 +52,7 @@ func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types storifa return r.local.RemoveCopies(ctx, s, types) } -func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int) *Remote { +func NewRemote(local Store, index SectorIndex, auth http.Header, fetchLimit int, pfHandler partialFileHandler) *Remote { return &Remote{ local: local, index: index, @@ -54,11 +60,12 @@ func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int limit: make(chan struct{}, fetchLimit), - fetching: map[abi.SectorID]chan struct{}{}, + fetching: map[abi.SectorID]chan struct{}{}, + pfHandler: pfHandler, } } -func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { +func (r *Remote) AcquireSector(ctx context.Context, s storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { if existing|allocate != existing^allocate { return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector") } @@ -66,9 +73,9 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se for { r.fetchLk.Lock() - c, locked := r.fetching[s] + c, locked := r.fetching[s.ID] if !locked { - r.fetching[s] = make(chan struct{}) + r.fetching[s.ID] = make(chan struct{}) r.fetchLk.Unlock() break } @@ -85,12 +92,12 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se defer func() { r.fetchLk.Lock() - close(r.fetching[s]) - delete(r.fetching, s) + close(r.fetching[s.ID]) + delete(r.fetching, s.ID) r.fetchLk.Unlock() }() - paths, stores, err := r.local.AcquireSector(ctx, s, ssize, 
existing, allocate, pathType, op) + paths, stores, err := r.local.AcquireSector(ctx, s, existing, allocate, pathType, op) if err != nil { return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("local acquire error: %w", err) } @@ -106,7 +113,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se } } - apaths, ids, err := r.local.AcquireSector(ctx, s, ssize, storiface.FTNone, toFetch, pathType, op) + apaths, ids, err := r.local.AcquireSector(ctx, s, storiface.FTNone, toFetch, pathType, op) if err != nil { return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("allocate local sector for fetching: %w", err) } @@ -116,7 +123,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se odt = storiface.FsOverheadFinalized } - releaseStorage, err := r.local.Reserve(ctx, s, ssize, toFetch, ids, odt) + releaseStorage, err := r.local.Reserve(ctx, s, toFetch, ids, odt) if err != nil { return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err) } @@ -134,7 +141,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se dest := storiface.PathByType(apaths, fileType) storageID := storiface.PathByType(ids, fileType) - url, err := r.acquireFromRemote(ctx, s, fileType, dest) + url, err := r.acquireFromRemote(ctx, s.ID, fileType, dest) if err != nil { return storiface.SectorPaths{}, storiface.SectorPaths{}, err } @@ -142,7 +149,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, ssize abi.Se storiface.SetPathByType(&paths, fileType, dest) storiface.SetPathByType(&stores, fileType, storageID) - if err := r.index.StorageDeclareSector(ctx, ID(storageID), s, fileType, op == storiface.AcquireMove); err != nil { + if err := r.index.StorageDeclareSector(ctx, ID(storageID), s.ID, fileType, op == storiface.AcquireMove); err != nil { log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err) 
continue } @@ -275,20 +282,55 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { case "application/x-tar": return tarutil.ExtractTar(resp.Body, outname) case "application/octet-stream": - return files.WriteTo(files.NewReaderFile(resp.Body), outname) + f, err := os.Create(outname) + if err != nil { + return err + } + _, err = io.CopyBuffer(f, resp.Body, make([]byte, CopyBuf)) + if err != nil { + f.Close() // nolint + return err + } + return f.Close() default: return xerrors.Errorf("unknown content type: '%s'", mediatype) } } -func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, ssize abi.SectorSize, types storiface.SectorFileType) error { +func (r *Remote) checkAllocated(ctx context.Context, url string, spt abi.RegisteredSealProof, offset, size abi.PaddedPieceSize) (bool, error) { + url = fmt.Sprintf("%s/%d/allocated/%d/%d", url, spt, offset.Unpadded(), size.Unpadded()) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return false, xerrors.Errorf("request: %w", err) + } + req.Header = r.auth.Clone() + fmt.Printf("req using header: %#v \n", r.auth) + req = req.WithContext(ctx) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return false, xerrors.Errorf("do request: %w", err) + } + defer resp.Body.Close() // nolint + + switch resp.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusRequestedRangeNotSatisfiable: + return false, nil + default: + return false, xerrors.Errorf("unexpected http response: %d", resp.StatusCode) + } +} + +func (r *Remote) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error { // Make sure we have the data local - _, _, err := r.AcquireSector(ctx, s, ssize, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + _, _, err := r.AcquireSector(ctx, s, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) if err != nil { return xerrors.Errorf("acquire src storage (remote): %w", err) } - 
return r.local.MoveStorage(ctx, s, ssize, types) + return r.local.MoveStorage(ctx, s, types) } func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool) error { @@ -403,4 +445,240 @@ func (r *Remote) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) { return out, nil } +func (r *Remote) readRemote(ctx context.Context, url string, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) { + if len(r.limit) >= cap(r.limit) { + log.Infof("Throttling remote read, %d already running", len(r.limit)) + } + + // TODO: Smarter throttling + // * Priority (just going sequentially is still pretty good) + // * Per interface + // * Aware of remote load + select { + case r.limit <- struct{}{}: + defer func() { <-r.limit }() + case <-ctx.Done(): + return nil, xerrors.Errorf("context error while waiting for fetch limiter: %w", ctx.Err()) + } + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, xerrors.Errorf("request: %w", err) + } + + if r.auth != nil { + req.Header = r.auth.Clone() + } + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+size-1)) + req = req.WithContext(ctx) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, xerrors.Errorf("do request: %w", err) + } + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + resp.Body.Close() // nolint + return nil, xerrors.Errorf("non-200 code: %d", resp.StatusCode) + } + + return resp.Body, nil +} + +// CheckIsUnsealed checks if we have an unsealed piece at the given offset in an already unsealed sector file for the given piece +// either locally or on any of the workers. +// Returns true if we have the unsealed piece, false otherwise. 
+func (r *Remote) CheckIsUnsealed(ctx context.Context, s storage.SectorRef, offset, size abi.PaddedPieceSize) (bool, error) { + ft := storiface.FTUnsealed + + paths, _, err := r.local.AcquireSector(ctx, s, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + if err != nil { + return false, xerrors.Errorf("acquire local: %w", err) + } + + path := storiface.PathByType(paths, ft) + if path != "" { + // if we have the unsealed file locally, check if it has the unsealed piece. + log.Infof("Read local %s (+%d,%d)", path, offset, size) + ssize, err := s.ProofType.SectorSize() + if err != nil { + return false, err + } + + // open the unsealed sector file for the given sector size located at the given path. + pf, err := r.pfHandler.OpenPartialFile(abi.PaddedPieceSize(ssize), path) + if err != nil { + return false, xerrors.Errorf("opening partial file: %w", err) + } + log.Debugf("local partial file opened %s (+%d,%d)", path, offset, size) + + // even though we have an unsealed file for the given sector, we still need to determine if we have the unsealed piece + // in the unsealed sector file. That is what `HasAllocated` checks for. + has, err := r.pfHandler.HasAllocated(pf, storiface.UnpaddedByteIndex(offset.Unpadded()), size.Unpadded()) + if err != nil { + return false, xerrors.Errorf("has allocated: %w", err) + } + + // close the local unsealed file. + if err := r.pfHandler.Close(pf); err != nil { + return false, xerrors.Errorf("failed to close partial file: %s", err) + } + log.Debugf("checked if local partial file has the piece %s (+%d,%d), returning answer=%t", path, offset, size, has) + + // Sector files can technically not have a piece unsealed locally, but have it unsealed in remote storage, so we probably + // want to return only if has is true + if has { + return has, nil + } + } + + // --- We don't have the unsealed piece in an unsealed sector file locally + // Check if we have it in a remote cluster. 
+ + si, err := r.index.StorageFindSector(ctx, s.ID, ft, 0, false) + if err != nil { + return false, xerrors.Errorf("StorageFindSector: %s", err) + } + + if len(si) == 0 { + return false, nil + } + + sort.Slice(si, func(i, j int) bool { + return si[i].Weight < si[j].Weight + }) + + for _, info := range si { + for _, url := range info.URLs { + ok, err := r.checkAllocated(ctx, url, s.ProofType, offset, size) + if err != nil { + log.Warnw("check if remote has piece", "url", url, "error", err) + continue + } + if !ok { + continue + } + + return true, nil + } + } + + return false, nil +} + +// Reader returns a reader for an unsealed piece at the given offset in the given sector. +// If the Miner has the unsealed piece locally, it will return a reader that reads from the local copy. +// If the Miner does NOT have the unsealed piece locally, it will query all workers that have the unsealed sector file +// to know if they have the unsealed piece and will then read the unsealed piece data from a worker that has it. +// +// Returns a nil reader if : +// 1. no worker(local worker included) has an unsealed file for the given sector OR +// 2. no worker(local worker included) has the unsealed piece in their unsealed sector file. +// Will return a nil reader and a nil error in such a case. +func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) { + ft := storiface.FTUnsealed + + // check if we have the unsealed sector file locally + paths, _, err := r.local.AcquireSector(ctx, s, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + if err != nil { + return nil, xerrors.Errorf("acquire local: %w", err) + } + + path := storiface.PathByType(paths, ft) + + if path != "" { + // if we have the unsealed file locally, return a reader that can be used to read the contents of the + // unsealed piece. 
+ log.Debugf("Check local %s (+%d,%d)", path, offset, size) + ssize, err := s.ProofType.SectorSize() + if err != nil { + return nil, err + } + log.Debugf("fetched sector size %s (+%d,%d)", path, offset, size) + + // open the unsealed sector file for the given sector size located at the given path. + pf, err := r.pfHandler.OpenPartialFile(abi.PaddedPieceSize(ssize), path) + if err != nil { + return nil, xerrors.Errorf("opening partial file: %w", err) + } + log.Debugf("local partial file opened %s (+%d,%d)", path, offset, size) + + // even though we have an unsealed file for the given sector, we still need to determine if we have the unsealed piece + // in the unsealed sector file. That is what `HasAllocated` checks for. + has, err := r.pfHandler.HasAllocated(pf, storiface.UnpaddedByteIndex(offset.Unpadded()), size.Unpadded()) + if err != nil { + return nil, xerrors.Errorf("has allocated: %w", err) + } + log.Debugf("check if partial file is allocated %s (+%d,%d)", path, offset, size) + + if has { + log.Infof("returning piece reader for local unsealed piece sector=%+v, (offset=%d, size=%d)", s.ID, offset, size) + return r.pfHandler.Reader(pf, storiface.PaddedByteIndex(offset), size) + } + + log.Debugf("miner has unsealed file but not unseal piece, %s (+%d,%d)", path, offset, size) + if err := r.pfHandler.Close(pf); err != nil { + return nil, xerrors.Errorf("close partial file: %w", err) + } + } + + // --- We don't have the unsealed piece in an unsealed sector file locally + + // if we don't have the unsealed sector file locally, we'll first lookup the Miner Sector Store Index + // to determine which workers have the unsealed file and then query those workers to know + // if they have the unsealed piece in the unsealed sector file. 
+ si, err := r.index.StorageFindSector(ctx, s.ID, ft, 0, false) + if err != nil { + log.Debugf("Reader, did not find unsealed file on any of the workers %s (+%d,%d)", path, offset, size) + return nil, err + } + + if len(si) == 0 { + return nil, xerrors.Errorf("failed to read sector %v from remote(%d): %w", s, ft, storiface.ErrSectorNotFound) + } + + sort.Slice(si, func(i, j int) bool { + return si[i].Weight > si[j].Weight + }) + + var lastErr error + for _, info := range si { + for _, url := range info.URLs { + // checkAllocated makes a JSON RPC query to a remote worker to determine if it has + // unsealed piece in their unsealed sector file. + ok, err := r.checkAllocated(ctx, url, s.ProofType, offset, size) + if err != nil { + log.Warnw("check if remote has piece", "url", url, "error", err) + lastErr = err + continue + } + if !ok { + continue + } + + // readRemote fetches a reader that we can use to read the unsealed piece from the remote worker. + // It uses a ranged HTTP query to ensure we ONLY read the unsealed piece and not the entire unsealed file. + rd, err := r.readRemote(ctx, url, offset, size) + if err != nil { + log.Warnw("reading from remote", "url", url, "error", err) + lastErr = err + continue + } + log.Infof("Read remote %s (+%d,%d)", url, offset, size) + return rd, nil + } + } + + // we couldn't find a unsealed file with the unsealed piece, will return a nil reader. 
+ log.Debugf("returning nil reader, did not find unsealed piece for %+v (+%d,%d), last error=%s", s, offset, size, lastErr) + return nil, nil +} + +func (r *Remote) Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) { + log.Warnf("reserve called on remote store, sectorID: %v", sid.ID) + return func() { + + }, nil +} + var _ Store = &Remote{} diff --git a/extern/sector-storage/stores/remote_test.go b/extern/sector-storage/stores/remote_test.go new file mode 100644 index 00000000000..b708bb68f6d --- /dev/null +++ b/extern/sector-storage/stores/remote_test.go @@ -0,0 +1,741 @@ +package stores_test + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/stores/mocks" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/specs-storage/storage" + "github.com/golang/mock/gomock" + "github.com/gorilla/mux" + logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" +) + +func TestReader(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + bz := []byte("Hello World") + + pfPath := "path" + emptyPartialFile := &partialfile.PartialFile{} + sectorSize := abi.SealProofInfos[1].SectorSize + + ft := storiface.FTUnsealed + + sectorRef := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 123, + Number: 123, + }, + ProofType: 1, + } + + offset := abi.PaddedPieceSize(100) + size := abi.PaddedPieceSize(1000) + ctx := context.Background() + + tcs := map[string]struct { + storeFnc func(s *mocks.MockStore) + pfFunc func(s *mocks.MockpartialFileHandler) + indexFnc func(s 
*mocks.MockSectorIndex, serverURL string) + + needHttpServer bool + + getAllocatedReturnCode int + getSectorReturnCode int + + serverUrl string + + // expectation + errStr string + expectedNonNilReader bool + expectedSectorBytes []byte + }{ + + // -------- have the unsealed file locally + "fails when error while acquiring unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, xerrors.New("acquire error")) + }, + + errStr: "acquire error", + }, + + "fails when error while opening local partial (unsealed) file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, xerrors.New("pf open error")) + }, + errStr: "pf open error", + }, + + "fails when error while checking if local unsealed file has piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + true, xerrors.New("piece check error")) + }, + + errStr: "piece check error", + }, + + "fails when error while closing local unsealed file that does not have the piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + pf.EXPECT().Close(emptyPartialFile).Return(xerrors.New("close error")).Times(1) + }, + errStr: "close error", + }, + + "fails when error while fetching reader for the local unsealed file that has the unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, 
nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + true, nil) + mockPfReader(pf, emptyPartialFile, offset, size, nil, xerrors.New("reader error")) + + }, + errStr: "reader error", + }, + + // ------------------- don't have the unsealed file locally + + "fails when error while finding sector": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, _ string) { + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return(nil, xerrors.New("find sector error")) + }, + errStr: "find sector error", + }, + + "fails when no worker has unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, _ string) { + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return(nil, nil) + }, + errStr: storiface.ErrSectorNotFound.Error(), + }, + + // --- nil reader when local unsealed file does NOT have unsealed piece + "nil reader when local unsealed file does not have the unsealed piece and remote sector also dosen't have the unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 500, + }, + + // ---- nil reader when none of the remote unsealed file has unsealed piece + "nil 
reader when none of the worker has the unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 500, + }, + + "nil reader when none of the worker is able to serve the unsealed piece even though they have it": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getSectorReturnCode: 500, + getAllocatedReturnCode: 200, + }, + + // ---- Success for local unsealed file + "successfully fetches reader for piece from local unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + true, nil) + + f, err := ioutil.TempFile("", "TestReader-") + require.NoError(t, err) + _, err = f.Write(bz) + require.NoError(t, err) + require.NoError(t, f.Close()) + f, err = os.Open(f.Name()) + require.NoError(t, err) + + mockPfReader(pf, emptyPartialFile, offset, size, f, nil) + + }, + + expectedNonNilReader: true, + expectedSectorBytes: bz, + }, + + // --- Success for remote unsealed file + // --- Success for remote unsealed file + "successfully fetches reader from remote unsealed piece when local unsealed file does NOT have the unsealed Piece": { + 
storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getSectorReturnCode: 200, + getAllocatedReturnCode: 200, + expectedSectorBytes: bz, + expectedNonNilReader: true, + }, + + "successfully fetches reader for piece from remote unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getSectorReturnCode: 200, + getAllocatedReturnCode: 200, + expectedSectorBytes: bz, + expectedNonNilReader: true, + }, + } + + for name, tc := range tcs { + tc := tc + t.Run(name, func(t *testing.T) { + // create go mock controller here + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. 
+ defer mockCtrl.Finish() + + // create them mocks + lstore := mocks.NewMockStore(mockCtrl) + pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + index := mocks.NewMockSectorIndex(mockCtrl) + + if tc.storeFnc != nil { + tc.storeFnc(lstore) + } + if tc.pfFunc != nil { + tc.pfFunc(pfhandler) + } + + if tc.needHttpServer { + // run http server + ts := httptest.NewServer(&mockHttpServer{ + expectedSectorName: storiface.SectorName(sectorRef.ID), + expectedFileType: ft.String(), + expectedOffset: fmt.Sprintf("%d", offset.Unpadded()), + expectedSize: fmt.Sprintf("%d", size.Unpadded()), + expectedSectorType: fmt.Sprintf("%d", sectorRef.ProofType), + + getAllocatedReturnCode: tc.getAllocatedReturnCode, + getSectorReturnCode: tc.getSectorReturnCode, + getSectorBytes: tc.expectedSectorBytes, + }) + defer ts.Close() + tc.serverUrl = fmt.Sprintf("%s/remote/%s/%s", ts.URL, ft.String(), storiface.SectorName(sectorRef.ID)) + } + if tc.indexFnc != nil { + tc.indexFnc(index, tc.serverUrl) + } + + remoteStore := stores.NewRemote(lstore, index, nil, 6000, pfhandler) + + rd, err := remoteStore.Reader(ctx, sectorRef, offset, size) + + if tc.errStr != "" { + require.Error(t, err) + require.Nil(t, rd) + require.Contains(t, err.Error(), tc.errStr) + } else { + require.NoError(t, err) + } + + if !tc.expectedNonNilReader { + require.Nil(t, rd) + } else { + require.NotNil(t, rd) + defer func() { + require.NoError(t, rd.Close()) + }() + + if f, ok := rd.(*os.File); ok { + require.NoError(t, os.Remove(f.Name())) + } + + bz, err := ioutil.ReadAll(rd) + require.NoError(t, err) + require.Equal(t, tc.expectedSectorBytes, bz) + } + + }) + } +} + +func TestCheckIsUnsealed(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + pfPath := "path" + ft := storiface.FTUnsealed + emptyPartialFile := &partialfile.PartialFile{} + + sectorRef := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 123, + Number: 123, + }, + ProofType: 1, + } + sectorSize := abi.SealProofInfos[1].SectorSize + + offset 
:= abi.PaddedPieceSize(100) + size := abi.PaddedPieceSize(1000) + ctx := context.Background() + + tcs := map[string]struct { + storeFnc func(s *mocks.MockStore) + pfFunc func(s *mocks.MockpartialFileHandler) + indexFnc func(s *mocks.MockSectorIndex, serverURL string) + + needHttpServer bool + + getAllocatedReturnCode int + + serverUrl string + + // expectation + errStr string + expectedIsUnealed bool + }{ + + // -------- have the unsealed file locally + "fails when error while acquiring unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, xerrors.New("acquire error")) + }, + + errStr: "acquire error", + }, + + "fails when error while opening local partial (unsealed) file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, xerrors.New("pf open error")) + }, + errStr: "pf open error", + }, + + "fails when error while checking if local unsealed file has piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + true, xerrors.New("piece check error")) + }, + + errStr: "piece check error", + }, + + "fails when error while closing local unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + pf.EXPECT().Close(emptyPartialFile).Return(xerrors.New("close error")).Times(1) + }, + errStr: "close error", + }, + + // ------------------- don't have the unsealed file locally + + "fails when error while finding sector": { + storeFnc: func(l *mocks.MockStore) { + 
mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, _ string) { + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return(nil, xerrors.New("find sector error")) + }, + errStr: "find sector error", + }, + + "false when no worker has unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, _ string) { + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return(nil, nil) + }, + }, + + // false when local unsealed file does NOT have unsealed piece + "false when local unsealed file does not have the piece and remote sector too dosen't have the piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 500, + }, + + "false when none of the worker has the unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 500, + }, + + // ---- Success for 
local unsealed file + "true when local unsealed file has the piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + true, nil) + pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + + }, + + expectedIsUnealed: true, + }, + + // --- Success for remote unsealed file + "true if we have a remote unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 200, + expectedIsUnealed: true, + }, + + "true when local unsealed file does NOT have the unsealed Piece but remote sector has the unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 200, + expectedIsUnealed: true, + }, + } + + for name, tc := range tcs { + tc := tc + t.Run(name, func(t *testing.T) { + // create go mock controller here + mockCtrl := gomock.NewController(t) + // when test is done, assert 
expectations on all mock objects. + defer mockCtrl.Finish() + + // create them mocks + lstore := mocks.NewMockStore(mockCtrl) + pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + index := mocks.NewMockSectorIndex(mockCtrl) + + if tc.storeFnc != nil { + tc.storeFnc(lstore) + } + if tc.pfFunc != nil { + tc.pfFunc(pfhandler) + } + + if tc.needHttpServer { + // run http server + ts := httptest.NewServer(&mockHttpServer{ + expectedSectorName: storiface.SectorName(sectorRef.ID), + expectedFileType: ft.String(), + expectedOffset: fmt.Sprintf("%d", offset.Unpadded()), + expectedSize: fmt.Sprintf("%d", size.Unpadded()), + expectedSectorType: fmt.Sprintf("%d", sectorRef.ProofType), + + getAllocatedReturnCode: tc.getAllocatedReturnCode, + }) + defer ts.Close() + tc.serverUrl = fmt.Sprintf("%s/remote/%s/%s", ts.URL, ft.String(), storiface.SectorName(sectorRef.ID)) + } + if tc.indexFnc != nil { + tc.indexFnc(index, tc.serverUrl) + } + + remoteStore := stores.NewRemote(lstore, index, nil, 6000, pfhandler) + + isUnsealed, err := remoteStore.CheckIsUnsealed(ctx, sectorRef, offset, size) + + if tc.errStr != "" { + require.Error(t, err) + require.False(t, isUnsealed) + require.Contains(t, err.Error(), tc.errStr) + } else { + require.NoError(t, err) + } + + require.Equal(t, tc.expectedIsUnealed, isUnsealed) + + }) + } +} + +func mockSectorAcquire(l *mocks.MockStore, sectorRef storage.SectorRef, pfPath string, err error) { + l.EXPECT().AcquireSector(gomock.Any(), sectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, err).Times(1) +} + +func mockPartialFileOpen(pf *mocks.MockpartialFileHandler, sectorSize abi.SectorSize, pfPath string, err error) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(&partialfile.PartialFile{}, + err).Times(1) +} + +func mockCheckAllocation(pf *mocks.MockpartialFileHandler, offset, size 
abi.PaddedPieceSize, file *partialfile.PartialFile, + out bool, err error) { + pf.EXPECT().HasAllocated(file, storiface.UnpaddedByteIndex(offset.Unpadded()), + size.Unpadded()).Return(out, err).Times(1) +} + +func mockPfReader(pf *mocks.MockpartialFileHandler, file *partialfile.PartialFile, offset, size abi.PaddedPieceSize, + outFile *os.File, err error) { + pf.EXPECT().Reader(file, storiface.PaddedByteIndex(offset), size).Return(outFile, err) +} + +type mockHttpServer struct { + expectedSectorName string + expectedFileType string + expectedOffset string + expectedSize string + expectedSectorType string + + getAllocatedReturnCode int + + getSectorReturnCode int + getSectorBytes []byte +} + +func (m *mockHttpServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + mux := mux.NewRouter() + mux.HandleFunc("/remote/{type}/{id}", m.getSector).Methods("GET") + mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", m.getAllocated).Methods("GET") + mux.ServeHTTP(w, r) +} + +func (m *mockHttpServer) getAllocated(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + + if vars["id"] != m.expectedSectorName { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["type"] != m.expectedFileType { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["spt"] != m.expectedSectorType { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["offset"] != m.expectedOffset { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["size"] != m.expectedSize { + w.WriteHeader(http.StatusBadRequest) + return + } + + w.WriteHeader(m.getAllocatedReturnCode) +} + +func (m *mockHttpServer) getSector(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + + if vars["id"] != m.expectedSectorName { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["type"] != m.expectedFileType { + w.WriteHeader(http.StatusBadRequest) + return + } + + w.WriteHeader(m.getSectorReturnCode) + _, _ = w.Write(m.getSectorBytes) +} 
diff --git a/extern/sector-storage/stores/util_unix.go b/extern/sector-storage/stores/util_unix.go index 2b057468d95..943681b498c 100644 --- a/extern/sector-storage/stores/util_unix.go +++ b/extern/sector-storage/stores/util_unix.go @@ -2,8 +2,10 @@ package stores import ( "bytes" + "os" "os/exec" "path/filepath" + "runtime" "strings" "github.com/mitchellh/go-homedir" @@ -33,7 +35,18 @@ func move(from, to string) error { // can do better var errOut bytes.Buffer - cmd := exec.Command("/usr/bin/env", "mv", "-t", toDir, from) // nolint + + var cmd *exec.Cmd + if runtime.GOOS == "darwin" { + if err := os.MkdirAll(toDir, 0777); err != nil { + return xerrors.Errorf("failed exec MkdirAll: %s", err) + } + + cmd = exec.Command("/usr/bin/env", "mv", from, toDir) // nolint + } else { + cmd = exec.Command("/usr/bin/env", "mv", "-t", toDir, from) // nolint + } + cmd.Stderr = &errOut if err := cmd.Run(); err != nil { return xerrors.Errorf("exec mv (stderr: %s): %w", strings.TrimSpace(errOut.String()), err) diff --git a/extern/sector-storage/storiface/cbor_gen.go b/extern/sector-storage/storiface/cbor_gen.go index 0efbc125b72..a8ade7d1037 100644 --- a/extern/sector-storage/storiface/cbor_gen.go +++ b/extern/sector-storage/storiface/cbor_gen.go @@ -5,12 +5,16 @@ package storiface import ( "fmt" "io" + "sort" + cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf +var _ = cid.Undef +var _ = sort.Sort func (t *CallID) MarshalCBOR(w io.Writer) error { if t == nil { @@ -134,7 +138,8 @@ func (t *CallID) UnmarshalCBOR(r io.Reader) error { } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) } } diff --git a/extern/sector-storage/storiface/ffi.go b/extern/sector-storage/storiface/ffi.go index 95d400e5248..2b6df667a68 100644 --- a/extern/sector-storage/storiface/ffi.go +++ 
b/extern/sector-storage/storiface/ffi.go @@ -1,8 +1,12 @@ package storiface import ( + "context" "errors" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-state-types/abi" ) @@ -14,4 +18,14 @@ func (i UnpaddedByteIndex) Padded() PaddedByteIndex { return PaddedByteIndex(abi.UnpaddedPieceSize(i).Padded()) } +func (i UnpaddedByteIndex) Valid() error { + if i%127 != 0 { + return xerrors.Errorf("unpadded byte index must be a multiple of 127") + } + + return nil +} + type PaddedByteIndex uint64 + +type RGetter func(ctx context.Context, id abi.SectorID) (cid.Cid, error) diff --git a/extern/sector-storage/storiface/filetype.go b/extern/sector-storage/storiface/filetype.go index 3f7c7455ebc..2e099902272 100644 --- a/extern/sector-storage/storiface/filetype.go +++ b/extern/sector-storage/storiface/filetype.go @@ -73,6 +73,24 @@ func (t SectorFileType) SealSpaceUse(ssize abi.SectorSize) (uint64, error) { return need, nil } +func (t SectorFileType) StoreSpaceUse(ssize abi.SectorSize) (uint64, error) { + var need uint64 + for _, pathType := range PathTypes { + if !t.Has(pathType) { + continue + } + + oh, ok := FsOverheadFinalized[pathType] + if !ok { + return 0, xerrors.Errorf("no finalized overhead info for %s", pathType) + } + + need += uint64(oh) * uint64(ssize) / FSOverheadDen + } + + return need, nil +} + func (t SectorFileType) All() [FileTypes]bool { var out [FileTypes]bool diff --git a/extern/sector-storage/storiface/worker.go b/extern/sector-storage/storiface/worker.go index bbc9ca55482..d1373f4c541 100644 --- a/extern/sector-storage/storiface/worker.go +++ b/extern/sector-storage/storiface/worker.go @@ -2,8 +2,8 @@ package storiface import ( "context" + "errors" "fmt" - "io" "time" "github.com/google/uuid" @@ -18,7 +18,12 @@ import ( type WorkerInfo struct { Hostname string - Resources WorkerResources + // IgnoreResources indicates whether the worker's available resources should + // be used ignored (true) or used (false) for the 
purposes of scheduling and + // task assignment. Only supported on local workers. Used for testing. + // Default should be false (zero value, i.e. resources taken into account). + IgnoreResources bool + Resources WorkerResources } type WorkerResources struct { @@ -41,13 +46,26 @@ type WorkerStats struct { CpuUse uint64 // nolint } +const ( + RWRetWait = -1 + RWReturned = -2 + RWRetDone = -3 +) + type WorkerJob struct { ID CallID Sector abi.SectorID Task sealtasks.TaskType - RunWait int // -1 - ret-wait, 0 - running, 1+ - assigned + // 1+ - assigned + // 0 - running + // -1 - ret-wait + // -2 - returned + // -3 - ret-done + RunWait int Start time.Time + + Hostname string `json:",omitempty"` // optional, set for ret-wait jobs } type CallID struct { @@ -64,29 +82,68 @@ var _ fmt.Stringer = &CallID{} var UndefCall CallID type WorkerCalls interface { - AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (CallID, error) - SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (CallID, error) - SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (CallID, error) - SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (CallID, error) - SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (CallID, error) - FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (CallID, error) - ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (CallID, error) - MoveStorage(ctx context.Context, sector abi.SectorID, types SectorFileType) (CallID, error) - UnsealPiece(context.Context, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error) - ReadPiece(context.Context, 
io.Writer, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize) (CallID, error) - Fetch(context.Context, abi.SectorID, SectorFileType, PathType, AcquireMode) (CallID, error) + AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (CallID, error) + SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (CallID, error) + SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (CallID, error) + SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (CallID, error) + SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (CallID, error) + FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (CallID, error) + ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (CallID, error) + MoveStorage(ctx context.Context, sector storage.SectorRef, types SectorFileType) (CallID, error) + UnsealPiece(context.Context, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error) + Fetch(context.Context, storage.SectorRef, SectorFileType, PathType, AcquireMode) (CallID, error) +} + +type ErrorCode int + +const ( + ErrUnknown ErrorCode = iota +) + +const ( + // Temp Errors + ErrTempUnknown ErrorCode = iota + 100 + ErrTempWorkerRestart + ErrTempAllocateSpace +) + +type CallError struct { + Code ErrorCode + Message string + sub error +} + +func (c *CallError) Error() string { + return fmt.Sprintf("storage call error %d: %s", c.Code, c.Message) +} + +func (c *CallError) Unwrap() error { + if c.sub != nil { + return c.sub + } + + return errors.New(c.Message) +} + +func Err(code ErrorCode, sub error) *CallError { + return &CallError{ + Code: 
code, + Message: sub.Error(), + + sub: sub, + } } type WorkerReturn interface { - ReturnAddPiece(ctx context.Context, callID CallID, pi abi.PieceInfo, err string) error - ReturnSealPreCommit1(ctx context.Context, callID CallID, p1o storage.PreCommit1Out, err string) error - ReturnSealPreCommit2(ctx context.Context, callID CallID, sealed storage.SectorCids, err string) error - ReturnSealCommit1(ctx context.Context, callID CallID, out storage.Commit1Out, err string) error - ReturnSealCommit2(ctx context.Context, callID CallID, proof storage.Proof, err string) error - ReturnFinalizeSector(ctx context.Context, callID CallID, err string) error - ReturnReleaseUnsealed(ctx context.Context, callID CallID, err string) error - ReturnMoveStorage(ctx context.Context, callID CallID, err string) error - ReturnUnsealPiece(ctx context.Context, callID CallID, err string) error - ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err string) error - ReturnFetch(ctx context.Context, callID CallID, err string) error + ReturnAddPiece(ctx context.Context, callID CallID, pi abi.PieceInfo, err *CallError) error + ReturnSealPreCommit1(ctx context.Context, callID CallID, p1o storage.PreCommit1Out, err *CallError) error + ReturnSealPreCommit2(ctx context.Context, callID CallID, sealed storage.SectorCids, err *CallError) error + ReturnSealCommit1(ctx context.Context, callID CallID, out storage.Commit1Out, err *CallError) error + ReturnSealCommit2(ctx context.Context, callID CallID, proof storage.Proof, err *CallError) error + ReturnFinalizeSector(ctx context.Context, callID CallID, err *CallError) error + ReturnReleaseUnsealed(ctx context.Context, callID CallID, err *CallError) error + ReturnMoveStorage(ctx context.Context, callID CallID, err *CallError) error + ReturnUnsealPiece(ctx context.Context, callID CallID, err *CallError) error + ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err *CallError) error + ReturnFetch(ctx context.Context, callID CallID, err 
*CallError) error } diff --git a/extern/sector-storage/teststorage_test.go b/extern/sector-storage/teststorage_test.go index 0c8a240a322..72b27b154dc 100644 --- a/extern/sector-storage/teststorage_test.go +++ b/extern/sector-storage/teststorage_test.go @@ -31,50 +31,50 @@ func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, panic("implement me") } -func (t *testExec) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { +func (t *testExec) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { panic("implement me") } -func (t *testExec) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) { +func (t *testExec) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storage.SectorCids, error) { panic("implement me") } -func (t *testExec) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { +func (t *testExec) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { panic("implement me") } -func (t *testExec) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) { +func (t *testExec) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storage.Proof, error) { panic("implement me") } -func (t *testExec) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { +func (t *testExec) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { 
panic("implement me") } -func (t *testExec) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { +func (t *testExec) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) error { panic("implement me") } -func (t *testExec) Remove(ctx context.Context, sector abi.SectorID) error { +func (t *testExec) Remove(ctx context.Context, sector storage.SectorRef) error { panic("implement me") } -func (t *testExec) NewSector(ctx context.Context, sector abi.SectorID) error { +func (t *testExec) NewSector(ctx context.Context, sector storage.SectorRef) error { panic("implement me") } -func (t *testExec) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { +func (t *testExec) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { resp := make(chan apres) t.apch <- resp ar := <-resp return ar.pi, ar.err } -func (t *testExec) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { +func (t *testExec) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { panic("implement me") } -func (t *testExec) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { +func (t *testExec) ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { panic("implement me") } diff --git a/extern/sector-storage/testworker_test.go b/extern/sector-storage/testworker_test.go 
index fda25643ab5..2fe99f3d4cc 100644 --- a/extern/sector-storage/testworker_test.go +++ b/extern/sector-storage/testworker_test.go @@ -2,14 +2,11 @@ package sectorstorage import ( "context" - "io" "sync" - "github.com/google/uuid" - "github.com/ipfs/go-cid" - "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" + "github.com/google/uuid" "github.com/filecoin-project/lotus/extern/sector-storage/mock" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" @@ -29,14 +26,11 @@ type testWorker struct { pc1wait *sync.WaitGroup session uuid.UUID + + Worker } func newTestWorker(wcfg WorkerConfig, lstor *stores.Local, ret storiface.WorkerReturn) *testWorker { - ssize, err := wcfg.SealProof.SectorSize() - if err != nil { - panic(err) - } - acceptTasks := map[sealtasks.TaskType]struct{}{} for _, taskType := range wcfg.TaskTypes { acceptTasks[taskType] = struct{}{} @@ -47,15 +41,15 @@ func newTestWorker(wcfg WorkerConfig, lstor *stores.Local, ret storiface.WorkerR lstor: lstor, ret: ret, - mockSeal: mock.NewMockSectorMgr(ssize, nil), + mockSeal: mock.NewMockSectorMgr(nil), session: uuid.New(), } } -func (t *testWorker) asyncCall(sector abi.SectorID, work func(ci storiface.CallID)) (storiface.CallID, error) { +func (t *testWorker) asyncCall(sector storage.SectorRef, work func(ci storiface.CallID)) (storiface.CallID, error) { ci := storiface.CallID{ - Sector: sector, + Sector: sector.ID, ID: uuid.New(), } @@ -64,28 +58,16 @@ func (t *testWorker) asyncCall(sector abi.SectorID, work func(ci storiface.CallI return ci, nil } -func (t *testWorker) NewSector(ctx context.Context, sector abi.SectorID) error { - panic("implement me") -} - -func (t *testWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { - panic("implement me") -} - -func (t *testWorker) ReadPiece(ctx context.Context, 
writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { - panic("implement me") -} - -func (t *testWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { +func (t *testWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { return t.asyncCall(sector, func(ci storiface.CallID) { p, err := t.mockSeal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData) - if err := t.ret.ReturnAddPiece(ctx, ci, p, errstr(err)); err != nil { + if err := t.ret.ReturnAddPiece(ctx, ci, p, toCallError(err)); err != nil { log.Error(err) } }) } -func (t *testWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { +func (t *testWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { return t.asyncCall(sector, func(ci storiface.CallID) { t.pc1s++ @@ -97,43 +79,15 @@ func (t *testWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ti defer t.pc1lk.Unlock() p1o, err := t.mockSeal.SealPreCommit1(ctx, sector, ticket, pieces) - if err := t.ret.ReturnSealPreCommit1(ctx, ci, p1o, errstr(err)); err != nil { + if err := t.ret.ReturnSealPreCommit1(ctx, ci, p1o, toCallError(err)); err != nil { log.Error(err) } }) } -func (t *testWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) { - panic("implement me") -} - -func (t *testWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, 
error) { - panic("implement me") -} - -func (t *testWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) { - panic("implement me") -} - -func (t *testWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) { - panic("implement me") -} - -func (t *testWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) (storiface.CallID, error) { - panic("implement me") -} - -func (t *testWorker) Remove(ctx context.Context, sector abi.SectorID) (storiface.CallID, error) { - panic("implement me") -} - -func (t *testWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) { - panic("implement me") -} - -func (t *testWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { +func (t *testWorker) Fetch(ctx context.Context, sector storage.SectorRef, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { return t.asyncCall(sector, func(ci storiface.CallID) { - if err := t.ret.ReturnFetch(ctx, ci, ""); err != nil { + if err := t.ret.ReturnFetch(ctx, ci, nil); err != nil { log.Error(err) } }) diff --git a/extern/sector-storage/worker_local.go b/extern/sector-storage/worker_local.go index cb1a43c531f..3e63f8659fc 100644 --- a/extern/sector-storage/worker_local.go +++ b/extern/sector-storage/worker_local.go @@ -8,6 +8,7 @@ import ( "reflect" "runtime" "sync" + "sync/atomic" "time" "github.com/elastic/go-sysinfo" @@ -19,7 +20,7 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-statestore" - storage2 "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/specs-storage/storage" 
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" @@ -30,16 +31,19 @@ import ( var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache} type WorkerConfig struct { - SealProof abi.RegisteredSealProof TaskTypes []sealtasks.TaskType NoSwap bool + + // IgnoreResourceFiltering enables task distribution to happen on this + // worker regardless of its currently available resources. Used in testing + // with the local worker. + IgnoreResourceFiltering bool } // used do provide custom proofs impl (mostly used in testing) type ExecutorFunc func() (ffiwrapper.Storage, error) type LocalWorker struct { - scfg *ffiwrapper.Config storage stores.Store localStore *stores.Local sindex stores.SectorIndex @@ -47,12 +51,17 @@ type LocalWorker struct { executor ExecutorFunc noSwap bool + // see equivalent field on WorkerConfig. + ignoreResources bool + ct *workerCallTracker acceptTasks map[sealtasks.TaskType]struct{} running sync.WaitGroup + taskLk sync.Mutex - session uuid.UUID - closing chan struct{} + session uuid.UUID + testDisable int64 + closing chan struct{} } func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker { @@ -62,9 +71,6 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store } w := &LocalWorker{ - scfg: &ffiwrapper.Config{ - SealProofType: wcfg.SealProof, - }, storage: store, localStore: local, sindex: sindex, @@ -73,12 +79,12 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store ct: &workerCallTracker{ st: cst, }, - acceptTasks: acceptTasks, - executor: executor, - noSwap: wcfg.NoSwap, - - session: uuid.New(), - closing: make(chan struct{}), + acceptTasks: acceptTasks, + executor: executor, + noSwap: wcfg.NoSwap, + ignoreResources: 
wcfg.IgnoreResourceFiltering, + session: uuid.New(), + closing: make(chan struct{}), } if w.executor == nil { @@ -93,7 +99,7 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store go func() { for _, call := range unfinished { - err := xerrors.Errorf("worker restarted") + err := storiface.Err(storiface.ErrTempWorkerRestart, xerrors.New("worker restarted")) // TODO: Handle restarting PC1 once support is merged @@ -117,18 +123,13 @@ type localWorkerPathProvider struct { op storiface.AcquireMode } -func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) { - ssize, err := l.w.scfg.SealProofType.SectorSize() +func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType) (storiface.SectorPaths, func(), error) { + paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, existing, allocate, sealing, l.op) if err != nil { return storiface.SectorPaths{}, nil, err } - paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, ssize, existing, allocate, sealing, l.op) - if err != nil { - return storiface.SectorPaths{}, nil, err - } - - releaseStorage, err := l.w.localStore.Reserve(ctx, sector, ssize, allocate, storageIDs, storiface.FSOverheadSeal) + releaseStorage, err := l.w.localStore.Reserve(ctx, sector, allocate, storageIDs, storiface.FSOverheadSeal) if err != nil { return storiface.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err) } @@ -145,7 +146,7 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi. 
sid := storiface.PathByType(storageIDs, fileType) - if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType, l.op == storiface.AcquireMove); err != nil { + if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector.ID, fileType, l.op == storiface.AcquireMove); err != nil { log.Errorf("declare sector error: %+v", err) } } @@ -153,22 +154,35 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi. } func (l *LocalWorker) ffiExec() (ffiwrapper.Storage, error) { - return ffiwrapper.New(&localWorkerPathProvider{w: l}, l.scfg) + return ffiwrapper.New(&localWorkerPathProvider{w: l}) } type ReturnType string +const ( + AddPiece ReturnType = "AddPiece" + SealPreCommit1 ReturnType = "SealPreCommit1" + SealPreCommit2 ReturnType = "SealPreCommit2" + SealCommit1 ReturnType = "SealCommit1" + SealCommit2 ReturnType = "SealCommit2" + FinalizeSector ReturnType = "FinalizeSector" + ReleaseUnsealed ReturnType = "ReleaseUnsealed" + MoveStorage ReturnType = "MoveStorage" + UnsealPiece ReturnType = "UnsealPiece" + Fetch ReturnType = "Fetch" +) + // in: func(WorkerReturn, context.Context, CallID, err string) // in: func(WorkerReturn, context.Context, CallID, ret T, err string) -func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, error) error { +func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, *storiface.CallError) error { rf := reflect.ValueOf(in) ft := rf.Type() withRet := ft.NumIn() == 5 - return func(ctx context.Context, ci storiface.CallID, wr storiface.WorkerReturn, i interface{}, err error) error { + return func(ctx context.Context, ci storiface.CallID, wr storiface.WorkerReturn, i interface{}, err *storiface.CallError) error { rctx := reflect.ValueOf(ctx) rwr := reflect.ValueOf(wr) - rerr := reflect.ValueOf(errstr(err)) + rerr := reflect.ValueOf(err) rci := reflect.ValueOf(ci) var ro []reflect.Value @@ -192,23 
+206,22 @@ func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.Wor } } -var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, error) error{ - "AddPiece": rfunc(storiface.WorkerReturn.ReturnAddPiece), - "SealPreCommit1": rfunc(storiface.WorkerReturn.ReturnSealPreCommit1), - "SealPreCommit2": rfunc(storiface.WorkerReturn.ReturnSealPreCommit2), - "SealCommit1": rfunc(storiface.WorkerReturn.ReturnSealCommit1), - "SealCommit2": rfunc(storiface.WorkerReturn.ReturnSealCommit2), - "FinalizeSector": rfunc(storiface.WorkerReturn.ReturnFinalizeSector), - "ReleaseUnsealed": rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), - "MoveStorage": rfunc(storiface.WorkerReturn.ReturnMoveStorage), - "UnsealPiece": rfunc(storiface.WorkerReturn.ReturnUnsealPiece), - "ReadPiece": rfunc(storiface.WorkerReturn.ReturnReadPiece), - "Fetch": rfunc(storiface.WorkerReturn.ReturnFetch), +var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, *storiface.CallError) error{ + AddPiece: rfunc(storiface.WorkerReturn.ReturnAddPiece), + SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1), + SealPreCommit2: rfunc(storiface.WorkerReturn.ReturnSealPreCommit2), + SealCommit1: rfunc(storiface.WorkerReturn.ReturnSealCommit1), + SealCommit2: rfunc(storiface.WorkerReturn.ReturnSealCommit2), + FinalizeSector: rfunc(storiface.WorkerReturn.ReturnFinalizeSector), + ReleaseUnsealed: rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), + MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage), + UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece), + Fetch: rfunc(storiface.WorkerReturn.ReturnFetch), } -func (l *LocalWorker) asyncCall(ctx context.Context, sector abi.SectorID, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) (storiface.CallID, error) { +func (l *LocalWorker) asyncCall(ctx context.Context, sector 
storage.SectorRef, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) (storiface.CallID, error) { ci := storiface.CallID{ - Sector: sector, + Sector: sector.ID, ID: uuid.New(), } @@ -239,7 +252,7 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector abi.SectorID, rt Ret } } - if doReturn(ctx, rt, ci, l.ret, res, err) { + if doReturn(ctx, rt, ci, l.ret, res, toCallError(err)) { if err := l.ct.onReturned(ci); err != nil { log.Errorf("tracking call (done): %+v", err) } @@ -249,8 +262,17 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector abi.SectorID, rt Ret return ci, nil } +func toCallError(err error) *storiface.CallError { + var serr *storiface.CallError + if err != nil && !xerrors.As(err, &serr) { + serr = storiface.Err(storiface.ErrUnknown, err) + } + + return serr +} + // doReturn tries to send the result to manager, returns true if successful -func doReturn(ctx context.Context, rt ReturnType, ci storiface.CallID, ret storiface.WorkerReturn, res interface{}, rerr error) bool { +func doReturn(ctx context.Context, rt ReturnType, ci storiface.CallID, ret storiface.WorkerReturn, res interface{}, rerr *storiface.CallError) bool { for { err := returnFunc[rt](ctx, ci, ret, res, rerr) if err == nil { @@ -273,15 +295,7 @@ func doReturn(ctx context.Context, rt ReturnType, ci storiface.CallID, ret stori return true } -func errstr(err error) string { - if err != nil { - return err.Error() - } - - return "" -} - -func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error { +func (l *LocalWorker) NewSector(ctx context.Context, sector storage.SectorRef) error { sb, err := l.executor() if err != nil { return err @@ -290,19 +304,19 @@ func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error return sb.NewSector(ctx, sector) } -func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) 
(storiface.CallID, error) { +func (l *LocalWorker) AddPiece(ctx context.Context, sector storage.SectorRef, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (storiface.CallID, error) { sb, err := l.executor() if err != nil { return storiface.UndefCall, err } - return l.asyncCall(ctx, sector, "AddPiece", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return l.asyncCall(ctx, sector, AddPiece, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { return sb.AddPiece(ctx, sector, epcs, sz, r) }) } -func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { - return l.asyncCall(ctx, sector, "Fetch", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { +func (l *LocalWorker) Fetch(ctx context.Context, sector storage.SectorRef, fileType storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { + return l.asyncCall(ctx, sector, Fetch, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { _, done, err := (&localWorkerPathProvider{w: l, op: am}).AcquireSector(ctx, sector, fileType, storiface.FTNone, ptype) if err == nil { done() @@ -312,16 +326,16 @@ func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType s }) } -func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { - return l.asyncCall(ctx, sector, "SealPreCommit1", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { +func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { + return l.asyncCall(ctx, sector, SealPreCommit1, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { { // cleanup previous 
failed attempts if they exist - if err := l.storage.Remove(ctx, sector, storiface.FTSealed, true); err != nil { + if err := l.storage.Remove(ctx, sector.ID, storiface.FTSealed, true); err != nil { return nil, xerrors.Errorf("cleaning up sealed data: %w", err) } - if err := l.storage.Remove(ctx, sector, storiface.FTCache, true); err != nil { + if err := l.storage.Remove(ctx, sector.ID, storiface.FTCache, true); err != nil { return nil, xerrors.Errorf("cleaning up cache data: %w", err) } } @@ -335,52 +349,52 @@ func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, t }) } -func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.PreCommit1Out) (storiface.CallID, error) { +func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storiface.CallID, error) { sb, err := l.executor() if err != nil { return storiface.UndefCall, err } - return l.asyncCall(ctx, sector, "SealPreCommit2", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return l.asyncCall(ctx, sector, SealPreCommit2, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { return sb.SealPreCommit2(ctx, sector, phase1Out) }) } -func (l *LocalWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage2.SectorCids) (storiface.CallID, error) { +func (l *LocalWorker) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { sb, err := l.executor() if err != nil { return storiface.UndefCall, err } - return l.asyncCall(ctx, sector, "SealCommit1", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return l.asyncCall(ctx, sector, SealCommit1, func(ctx context.Context, ci storiface.CallID) (interface{}, 
error) { return sb.SealCommit1(ctx, sector, ticket, seed, pieces, cids) }) } -func (l *LocalWorker) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.Commit1Out) (storiface.CallID, error) { +func (l *LocalWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.Commit1Out) (storiface.CallID, error) { sb, err := l.executor() if err != nil { return storiface.UndefCall, err } - return l.asyncCall(ctx, sector, "SealCommit2", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return l.asyncCall(ctx, sector, SealCommit2, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { return sb.SealCommit2(ctx, sector, phase1Out) }) } -func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage2.Range) (storiface.CallID, error) { +func (l *LocalWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) { sb, err := l.executor() if err != nil { return storiface.UndefCall, err } - return l.asyncCall(ctx, sector, "FinalizeSector", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return l.asyncCall(ctx, sector, FinalizeSector, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { if err := sb.FinalizeSector(ctx, sector, keepUnsealed); err != nil { return nil, xerrors.Errorf("finalizing sector: %w", err) } if len(keepUnsealed) == 0 { - if err := l.storage.Remove(ctx, sector, storiface.FTUnsealed, true); err != nil { + if err := l.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true); err != nil { return nil, xerrors.Errorf("removing unsealed data: %w", err) } } @@ -389,7 +403,7 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, k }) } -func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage2.Range) (storiface.CallID, error) { +func (l *LocalWorker) ReleaseUnsealed(ctx 
context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) { return storiface.UndefCall, xerrors.Errorf("implement me") } @@ -409,53 +423,59 @@ func (l *LocalWorker) Remove(ctx context.Context, sector abi.SectorID) error { return err } -func (l *LocalWorker) MoveStorage(ctx context.Context, sector abi.SectorID, types storiface.SectorFileType) (storiface.CallID, error) { - return l.asyncCall(ctx, sector, "MoveStorage", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { - ssize, err := l.scfg.SealProofType.SectorSize() - if err != nil { - return nil, err - } - - return nil, l.storage.MoveStorage(ctx, sector, ssize, types) +func (l *LocalWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) { + return l.asyncCall(ctx, sector, MoveStorage, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return nil, l.storage.MoveStorage(ctx, sector, types) }) } -func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { +func (l *LocalWorker) UnsealPiece(ctx context.Context, sector storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { sb, err := l.executor() if err != nil { return storiface.UndefCall, err } - return l.asyncCall(ctx, sector, "UnsealPiece", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return l.asyncCall(ctx, sector, UnsealPiece, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + log.Debugf("worker will unseal piece now, sector=%+v", sector.ID) if err = sb.UnsealPiece(ctx, sector, index, size, randomness, cid); err != nil { return nil, xerrors.Errorf("unsealing sector: %w", err) } - if err = l.storage.RemoveCopies(ctx, sector, 
storiface.FTSealed); err != nil { + if err = l.storage.RemoveCopies(ctx, sector.ID, storiface.FTSealed); err != nil { return nil, xerrors.Errorf("removing source data: %w", err) } - if err = l.storage.RemoveCopies(ctx, sector, storiface.FTCache); err != nil { + if err = l.storage.RemoveCopies(ctx, sector.ID, storiface.FTCache); err != nil { return nil, xerrors.Errorf("removing source data: %w", err) } + log.Debugf("worker has unsealed piece, sector=%+v", sector.ID) + return nil, nil }) } -func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { - sb, err := l.executor() - if err != nil { - return storiface.UndefCall, err - } +func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) { + l.taskLk.Lock() + defer l.taskLk.Unlock() - return l.asyncCall(ctx, sector, "ReadPiece", func(ctx context.Context, ci storiface.CallID) (interface{}, error) { - return sb.ReadPiece(ctx, writer, sector, index, size) - }) + return l.acceptTasks, nil } -func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) { - return l.acceptTasks, nil +func (l *LocalWorker) TaskDisable(ctx context.Context, tt sealtasks.TaskType) error { + l.taskLk.Lock() + defer l.taskLk.Unlock() + + delete(l.acceptTasks, tt) + return nil +} + +func (l *LocalWorker) TaskEnable(ctx context.Context, tt sealtasks.TaskType) error { + l.taskLk.Lock() + defer l.taskLk.Unlock() + + l.acceptTasks[tt] = struct{}{} + return nil } func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { @@ -489,7 +509,8 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { } return storiface.WorkerInfo{ - Hostname: hostname, + Hostname: hostname, + IgnoreResources: l.ignoreResources, Resources: storiface.WorkerResources{ MemPhysical: mem.Total, MemSwap: memSwap, @@ -501,6 +522,10 @@ func (l *LocalWorker) 
Info(context.Context) (storiface.WorkerInfo, error) { } func (l *LocalWorker) Session(ctx context.Context) (uuid.UUID, error) { + if atomic.LoadInt64(&l.testDisable) == 1 { + return uuid.UUID{}, xerrors.Errorf("disabled") + } + select { case <-l.closing: return ClosedWorkerID, nil diff --git a/extern/sector-storage/worker_tracked.go b/extern/sector-storage/worker_tracked.go index 4a22fcca714..2160dd8e6a8 100644 --- a/extern/sector-storage/worker_tracked.go +++ b/extern/sector-storage/worker_tracked.go @@ -2,22 +2,25 @@ package sectorstorage import ( "context" - "io" "sync" "time" "github.com/ipfs/go-cid" + "go.opencensus.io/stats" + "go.opencensus.io/tag" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/metrics" ) type trackedWork struct { - job storiface.WorkerJob - worker WorkerID + job storiface.WorkerJob + worker WorkerID + workerHostname string } type workTracker struct { @@ -29,20 +32,31 @@ type workTracker struct { // TODO: done, aggregate stats, queue stats, scheduler feedback } -func (wt *workTracker) onDone(callID storiface.CallID) { +func (wt *workTracker) onDone(ctx context.Context, callID storiface.CallID) { wt.lk.Lock() defer wt.lk.Unlock() - _, ok := wt.running[callID] + t, ok := wt.running[callID] if !ok { wt.done[callID] = struct{}{} + + stats.Record(ctx, metrics.WorkerUntrackedCallsReturned.M(1)) return } + took := metrics.SinceInMilliseconds(t.job.Start) + + ctx, _ = tag.New( + ctx, + tag.Upsert(metrics.TaskType, string(t.job.Task)), + tag.Upsert(metrics.WorkerHostname, t.workerHostname), + ) + stats.Record(ctx, metrics.WorkerCallsReturnedCount.M(1), metrics.WorkerCallsReturnedDuration.M(took)) + delete(wt.running, callID) } -func (wt *workTracker) track(wid WorkerID, sid abi.SectorID, task sealtasks.TaskType) 
func(storiface.CallID, error) (storiface.CallID, error) { +func (wt *workTracker) track(ctx context.Context, wid WorkerID, wi storiface.WorkerInfo, sid storage.SectorRef, task sealtasks.TaskType) func(storiface.CallID, error) (storiface.CallID, error) { return func(callID storiface.CallID, err error) (storiface.CallID, error) { if err != nil { return callID, err @@ -60,21 +74,30 @@ func (wt *workTracker) track(wid WorkerID, sid abi.SectorID, task sealtasks.Task wt.running[callID] = trackedWork{ job: storiface.WorkerJob{ ID: callID, - Sector: sid, + Sector: sid.ID, Task: task, Start: time.Now(), }, - worker: wid, + worker: wid, + workerHostname: wi.Hostname, } + ctx, _ = tag.New( + ctx, + tag.Upsert(metrics.TaskType, string(task)), + tag.Upsert(metrics.WorkerHostname, wi.Hostname), + ) + stats.Record(ctx, metrics.WorkerCallsStarted.M(1)) + return callID, err } } -func (wt *workTracker) worker(wid WorkerID, w Worker) Worker { +func (wt *workTracker) worker(wid WorkerID, wi storiface.WorkerInfo, w Worker) Worker { return &trackedWorker{ - Worker: w, - wid: wid, + Worker: w, + wid: wid, + workerInfo: wi, tracker: wt, } @@ -94,45 +117,42 @@ func (wt *workTracker) Running() []trackedWork { type trackedWorker struct { Worker - wid WorkerID + wid WorkerID + workerInfo storiface.WorkerInfo tracker *workTracker } -func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { - return t.tracker.track(t.wid, sector, sealtasks.TTPreCommit1)(t.Worker.SealPreCommit1(ctx, sector, ticket, pieces)) -} - -func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storiface.CallID, error) { - return t.tracker.track(t.wid, sector, sealtasks.TTPreCommit2)(t.Worker.SealPreCommit2(ctx, sector, pc1o)) +func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) 
(storiface.CallID, error) { + return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTPreCommit1)(t.Worker.SealPreCommit1(ctx, sector, ticket, pieces)) } -func (t *trackedWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { - return t.tracker.track(t.wid, sector, sealtasks.TTCommit1)(t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids)) +func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) { + return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTPreCommit2)(t.Worker.SealPreCommit2(ctx, sector, pc1o)) } -func (t *trackedWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storiface.CallID, error) { - return t.tracker.track(t.wid, sector, sealtasks.TTCommit2)(t.Worker.SealCommit2(ctx, sector, c1o)) +func (t *trackedWorker) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { + return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTCommit1)(t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids)) } -func (t *trackedWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) (storiface.CallID, error) { - return t.tracker.track(t.wid, sector, sealtasks.TTFinalize)(t.Worker.FinalizeSector(ctx, sector, keepUnsealed)) +func (t *trackedWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) { + return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTCommit2)(t.Worker.SealCommit2(ctx, sector, c1o)) } -func (t *trackedWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes 
[]abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { - return t.tracker.track(t.wid, sector, sealtasks.TTAddPiece)(t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)) +func (t *trackedWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) { + return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTFinalize)(t.Worker.FinalizeSector(ctx, sector, keepUnsealed)) } -func (t *trackedWorker) Fetch(ctx context.Context, s abi.SectorID, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { - return t.tracker.track(t.wid, s, sealtasks.TTFetch)(t.Worker.Fetch(ctx, s, ft, ptype, am)) +func (t *trackedWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { + return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTAddPiece)(t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)) } -func (t *trackedWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { - return t.tracker.track(t.wid, id, sealtasks.TTUnseal)(t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid)) +func (t *trackedWorker) Fetch(ctx context.Context, s storage.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { + return t.tracker.track(ctx, t.wid, t.workerInfo, s, sealtasks.TTFetch)(t.Worker.Fetch(ctx, s, ft, ptype, am)) } -func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { - return t.tracker.track(t.wid, id, 
sealtasks.TTReadUnsealed)(t.Worker.ReadPiece(ctx, writer, id, index, size)) +func (t *trackedWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { + return t.tracker.track(ctx, t.wid, t.workerInfo, id, sealtasks.TTUnseal)(t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid)) } var _ Worker = &trackedWorker{} diff --git a/extern/sector-storage/zerocomm/zerocomm.go b/extern/sector-storage/zerocomm/zerocomm.go deleted file mode 100644 index 9855a582176..00000000000 --- a/extern/sector-storage/zerocomm/zerocomm.go +++ /dev/null @@ -1,56 +0,0 @@ -package zerocomm - -import ( - "math/bits" - - commcid "github.com/filecoin-project/go-fil-commcid" - "github.com/filecoin-project/go-state-types/abi" - "github.com/ipfs/go-cid" -) - -const Levels = 37 -const Skip = 2 // can't generate for 32, 64b - -var PieceComms = [Levels - Skip][32]byte{ - {0x37, 0x31, 0xbb, 0x99, 0xac, 0x68, 0x9f, 0x66, 0xee, 0xf5, 0x97, 0x3e, 0x4a, 0x94, 0xda, 0x18, 0x8f, 0x4d, 0xdc, 0xae, 0x58, 0x7, 0x24, 0xfc, 0x6f, 0x3f, 0xd6, 0xd, 0xfd, 0x48, 0x83, 0x33}, - {0x64, 0x2a, 0x60, 0x7e, 0xf8, 0x86, 0xb0, 0x4, 0xbf, 0x2c, 0x19, 0x78, 0x46, 0x3a, 0xe1, 0xd4, 0x69, 0x3a, 0xc0, 0xf4, 0x10, 0xeb, 0x2d, 0x1b, 0x7a, 0x47, 0xfe, 0x20, 0x5e, 0x5e, 0x75, 0xf}, - {0x57, 0xa2, 0x38, 0x1a, 0x28, 0x65, 0x2b, 0xf4, 0x7f, 0x6b, 0xef, 0x7a, 0xca, 0x67, 0x9b, 0xe4, 0xae, 0xde, 0x58, 0x71, 0xab, 0x5c, 0xf3, 0xeb, 0x2c, 0x8, 0x11, 0x44, 0x88, 0xcb, 0x85, 0x26}, - {0x1f, 0x7a, 0xc9, 0x59, 0x55, 0x10, 0xe0, 0x9e, 0xa4, 0x1c, 0x46, 0xb, 0x17, 0x64, 0x30, 0xbb, 0x32, 0x2c, 0xd6, 0xfb, 0x41, 0x2e, 0xc5, 0x7c, 0xb1, 0x7d, 0x98, 0x9a, 0x43, 0x10, 0x37, 0x2f}, - {0xfc, 0x7e, 0x92, 0x82, 0x96, 0xe5, 0x16, 0xfa, 0xad, 0xe9, 0x86, 0xb2, 0x8f, 0x92, 0xd4, 0x4a, 0x4f, 0x24, 0xb9, 0x35, 0x48, 0x52, 0x23, 0x37, 0x6a, 0x79, 0x90, 0x27, 0xbc, 0x18, 0xf8, 0x33}, - {0x8, 0xc4, 0x7b, 0x38, 0xee, 
0x13, 0xbc, 0x43, 0xf4, 0x1b, 0x91, 0x5c, 0xe, 0xed, 0x99, 0x11, 0xa2, 0x60, 0x86, 0xb3, 0xed, 0x62, 0x40, 0x1b, 0xf9, 0xd5, 0x8b, 0x8d, 0x19, 0xdf, 0xf6, 0x24}, - {0xb2, 0xe4, 0x7b, 0xfb, 0x11, 0xfa, 0xcd, 0x94, 0x1f, 0x62, 0xaf, 0x5c, 0x75, 0xf, 0x3e, 0xa5, 0xcc, 0x4d, 0xf5, 0x17, 0xd5, 0xc4, 0xf1, 0x6d, 0xb2, 0xb4, 0xd7, 0x7b, 0xae, 0xc1, 0xa3, 0x2f}, - {0xf9, 0x22, 0x61, 0x60, 0xc8, 0xf9, 0x27, 0xbf, 0xdc, 0xc4, 0x18, 0xcd, 0xf2, 0x3, 0x49, 0x31, 0x46, 0x0, 0x8e, 0xae, 0xfb, 0x7d, 0x2, 0x19, 0x4d, 0x5e, 0x54, 0x81, 0x89, 0x0, 0x51, 0x8}, - {0x2c, 0x1a, 0x96, 0x4b, 0xb9, 0xb, 0x59, 0xeb, 0xfe, 0xf, 0x6d, 0xa2, 0x9a, 0xd6, 0x5a, 0xe3, 0xe4, 0x17, 0x72, 0x4a, 0x8f, 0x7c, 0x11, 0x74, 0x5a, 0x40, 0xca, 0xc1, 0xe5, 0xe7, 0x40, 0x11}, - {0xfe, 0xe3, 0x78, 0xce, 0xf1, 0x64, 0x4, 0xb1, 0x99, 0xed, 0xe0, 0xb1, 0x3e, 0x11, 0xb6, 0x24, 0xff, 0x9d, 0x78, 0x4f, 0xbb, 0xed, 0x87, 0x8d, 0x83, 0x29, 0x7e, 0x79, 0x5e, 0x2, 0x4f, 0x2}, - {0x8e, 0x9e, 0x24, 0x3, 0xfa, 0x88, 0x4c, 0xf6, 0x23, 0x7f, 0x60, 0xdf, 0x25, 0xf8, 0x3e, 0xe4, 0xd, 0xca, 0x9e, 0xd8, 0x79, 0xeb, 0x6f, 0x63, 0x52, 0xd1, 0x50, 0x84, 0xf5, 0xad, 0xd, 0x3f}, - {0x75, 0x2d, 0x96, 0x93, 0xfa, 0x16, 0x75, 0x24, 0x39, 0x54, 0x76, 0xe3, 0x17, 0xa9, 0x85, 0x80, 0xf0, 0x9, 0x47, 0xaf, 0xb7, 0xa3, 0x5, 0x40, 0xd6, 0x25, 0xa9, 0x29, 0x1c, 0xc1, 0x2a, 0x7}, - {0x70, 0x22, 0xf6, 0xf, 0x7e, 0xf6, 0xad, 0xfa, 0x17, 0x11, 0x7a, 0x52, 0x61, 0x9e, 0x30, 0xce, 0xa8, 0x2c, 0x68, 0x7, 0x5a, 0xdf, 0x1c, 0x66, 0x77, 0x86, 0xec, 0x50, 0x6e, 0xef, 0x2d, 0x19}, - {0xd9, 0x98, 0x87, 0xb9, 0x73, 0x57, 0x3a, 0x96, 0xe1, 0x13, 0x93, 0x64, 0x52, 0x36, 0xc1, 0x7b, 0x1f, 0x4c, 0x70, 0x34, 0xd7, 0x23, 0xc7, 0xa9, 0x9f, 0x70, 0x9b, 0xb4, 0xda, 0x61, 0x16, 0x2b}, - {0xd0, 0xb5, 0x30, 0xdb, 0xb0, 0xb4, 0xf2, 0x5c, 0x5d, 0x2f, 0x2a, 0x28, 0xdf, 0xee, 0x80, 0x8b, 0x53, 0x41, 0x2a, 0x2, 0x93, 0x1f, 0x18, 0xc4, 0x99, 0xf5, 0xa2, 0x54, 0x8, 0x6b, 0x13, 0x26}, - {0x84, 0xc0, 0x42, 0x1b, 0xa0, 0x68, 0x5a, 0x1, 0xbf, 0x79, 0x5a, 0x23, 0x44, 0x6, 0x4f, 
0xe4, 0x24, 0xbd, 0x52, 0xa9, 0xd2, 0x43, 0x77, 0xb3, 0x94, 0xff, 0x4c, 0x4b, 0x45, 0x68, 0xe8, 0x11}, - {0x65, 0xf2, 0x9e, 0x5d, 0x98, 0xd2, 0x46, 0xc3, 0x8b, 0x38, 0x8c, 0xfc, 0x6, 0xdb, 0x1f, 0x6b, 0x2, 0x13, 0x3, 0xc5, 0xa2, 0x89, 0x0, 0xb, 0xdc, 0xe8, 0x32, 0xa9, 0xc3, 0xec, 0x42, 0x1c}, - {0xa2, 0x24, 0x75, 0x8, 0x28, 0x58, 0x50, 0x96, 0x5b, 0x7e, 0x33, 0x4b, 0x31, 0x27, 0xb0, 0xc0, 0x42, 0xb1, 0xd0, 0x46, 0xdc, 0x54, 0x40, 0x21, 0x37, 0x62, 0x7c, 0xd8, 0x79, 0x9c, 0xe1, 0x3a}, - {0xda, 0xfd, 0xab, 0x6d, 0xa9, 0x36, 0x44, 0x53, 0xc2, 0x6d, 0x33, 0x72, 0x6b, 0x9f, 0xef, 0xe3, 0x43, 0xbe, 0x8f, 0x81, 0x64, 0x9e, 0xc0, 0x9, 0xaa, 0xd3, 0xfa, 0xff, 0x50, 0x61, 0x75, 0x8}, - {0xd9, 0x41, 0xd5, 0xe0, 0xd6, 0x31, 0x4a, 0x99, 0x5c, 0x33, 0xff, 0xbd, 0x4f, 0xbe, 0x69, 0x11, 0x8d, 0x73, 0xd4, 0xe5, 0xfd, 0x2c, 0xd3, 0x1f, 0xf, 0x7c, 0x86, 0xeb, 0xdd, 0x14, 0xe7, 0x6}, - {0x51, 0x4c, 0x43, 0x5c, 0x3d, 0x4, 0xd3, 0x49, 0xa5, 0x36, 0x5f, 0xbd, 0x59, 0xff, 0xc7, 0x13, 0x62, 0x91, 0x11, 0x78, 0x59, 0x91, 0xc1, 0xa3, 0xc5, 0x3a, 0xf2, 0x20, 0x79, 0x74, 0x1a, 0x2f}, - {0xad, 0x6, 0x85, 0x39, 0x69, 0xd3, 0x7d, 0x34, 0xff, 0x8, 0xe0, 0x9f, 0x56, 0x93, 0xa, 0x4a, 0xd1, 0x9a, 0x89, 0xde, 0xf6, 0xc, 0xbf, 0xee, 0x7e, 0x1d, 0x33, 0x81, 0xc1, 0xe7, 0x1c, 0x37}, - {0x39, 0x56, 0xe, 0x7b, 0x13, 0xa9, 0x3b, 0x7, 0xa2, 0x43, 0xfd, 0x27, 0x20, 0xff, 0xa7, 0xcb, 0x3e, 0x1d, 0x2e, 0x50, 0x5a, 0xb3, 0x62, 0x9e, 0x79, 0xf4, 0x63, 0x13, 0x51, 0x2c, 0xda, 0x6}, - {0xcc, 0xc3, 0xc0, 0x12, 0xf5, 0xb0, 0x5e, 0x81, 0x1a, 0x2b, 0xbf, 0xdd, 0xf, 0x68, 0x33, 0xb8, 0x42, 0x75, 0xb4, 0x7b, 0xf2, 0x29, 0xc0, 0x5, 0x2a, 0x82, 0x48, 0x4f, 0x3c, 0x1a, 0x5b, 0x3d}, - {0x7d, 0xf2, 0x9b, 0x69, 0x77, 0x31, 0x99, 0xe8, 0xf2, 0xb4, 0xb, 0x77, 0x91, 0x9d, 0x4, 0x85, 0x9, 0xee, 0xd7, 0x68, 0xe2, 0xc7, 0x29, 0x7b, 0x1f, 0x14, 0x37, 0x3, 0x4f, 0xc3, 0xc6, 0x2c}, - {0x66, 0xce, 0x5, 0xa3, 0x66, 0x75, 0x52, 0xcf, 0x45, 0xc0, 0x2b, 0xcc, 0x4e, 0x83, 0x92, 0x91, 0x9b, 0xde, 0xac, 0x35, 0xde, 0x2f, 0xf5, 0x62, 0x71, 
0x84, 0x8e, 0x9f, 0x7b, 0x67, 0x51, 0x7}, - {0xd8, 0x61, 0x2, 0x18, 0x42, 0x5a, 0xb5, 0xe9, 0x5b, 0x1c, 0xa6, 0x23, 0x9d, 0x29, 0xa2, 0xe4, 0x20, 0xd7, 0x6, 0xa9, 0x6f, 0x37, 0x3e, 0x2f, 0x9c, 0x9a, 0x91, 0xd7, 0x59, 0xd1, 0x9b, 0x1}, - {0x6d, 0x36, 0x4b, 0x1e, 0xf8, 0x46, 0x44, 0x1a, 0x5a, 0x4a, 0x68, 0x86, 0x23, 0x14, 0xac, 0xc0, 0xa4, 0x6f, 0x1, 0x67, 0x17, 0xe5, 0x34, 0x43, 0xe8, 0x39, 0xee, 0xdf, 0x83, 0xc2, 0x85, 0x3c}, - {0x7, 0x7e, 0x5f, 0xde, 0x35, 0xc5, 0xa, 0x93, 0x3, 0xa5, 0x50, 0x9, 0xe3, 0x49, 0x8a, 0x4e, 0xbe, 0xdf, 0xf3, 0x9c, 0x42, 0xb7, 0x10, 0xb7, 0x30, 0xd8, 0xec, 0x7a, 0xc7, 0xaf, 0xa6, 0x3e}, - {0xe6, 0x40, 0x5, 0xa6, 0xbf, 0xe3, 0x77, 0x79, 0x53, 0xb8, 0xad, 0x6e, 0xf9, 0x3f, 0xf, 0xca, 0x10, 0x49, 0xb2, 0x4, 0x16, 0x54, 0xf2, 0xa4, 0x11, 0xf7, 0x70, 0x27, 0x99, 0xce, 0xce, 0x2}, - {0x25, 0x9d, 0x3d, 0x6b, 0x1f, 0x4d, 0x87, 0x6d, 0x11, 0x85, 0xe1, 0x12, 0x3a, 0xf6, 0xf5, 0x50, 0x1a, 0xf0, 0xf6, 0x7c, 0xf1, 0x5b, 0x52, 0x16, 0x25, 0x5b, 0x7b, 0x17, 0x8d, 0x12, 0x5, 0x1d}, - {0x3f, 0x9a, 0x4d, 0x41, 0x1d, 0xa4, 0xef, 0x1b, 0x36, 0xf3, 0x5f, 0xf0, 0xa1, 0x95, 0xae, 0x39, 0x2a, 0xb2, 0x3f, 0xee, 0x79, 0x67, 0xb7, 0xc4, 0x1b, 0x3, 0xd1, 0x61, 0x3f, 0xc2, 0x92, 0x39}, - {0xfe, 0x4e, 0xf3, 0x28, 0xc6, 0x1a, 0xa3, 0x9c, 0xfd, 0xb2, 0x48, 0x4e, 0xaa, 0x32, 0xa1, 0x51, 0xb1, 0xfe, 0x3d, 0xfd, 0x1f, 0x96, 0xdd, 0x8c, 0x97, 0x11, 0xfd, 0x86, 0xd6, 0xc5, 0x81, 0x13}, - {0xf5, 0x5d, 0x68, 0x90, 0xe, 0x2d, 0x83, 0x81, 0xec, 0xcb, 0x81, 0x64, 0xcb, 0x99, 0x76, 0xf2, 0x4b, 0x2d, 0xe0, 0xdd, 0x61, 0xa3, 0x1b, 0x97, 0xce, 0x6e, 0xb2, 0x38, 0x50, 0xd5, 0xe8, 0x19}, - {0xaa, 0xaa, 0x8c, 0x4c, 0xb4, 0xa, 0xac, 0xee, 0x1e, 0x2, 0xdc, 0x65, 0x42, 0x4b, 0x2a, 0x6c, 0x8e, 0x99, 0xf8, 0x3, 0xb7, 0x2f, 0x79, 0x29, 0xc4, 0x10, 0x1d, 0x7f, 0xae, 0x6b, 0xff, 0x32}, -} - -func ZeroPieceCommitment(sz abi.UnpaddedPieceSize) cid.Cid { - level := bits.TrailingZeros64(uint64(sz.Padded())) - Skip - 5 // 2^5 = 32 - commP, _ := commcid.PieceCommitmentV1ToCID(PieceComms[level][:]) - 
return commP -} diff --git a/extern/sector-storage/zerocomm/zerocomm_test.go b/extern/sector-storage/zerocomm/zerocomm_test.go deleted file mode 100644 index 393f61d64f0..00000000000 --- a/extern/sector-storage/zerocomm/zerocomm_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package zerocomm_test - -import ( - "bytes" - "fmt" - "io" - "testing" - - commcid "github.com/filecoin-project/go-fil-commcid" - abi "github.com/filecoin-project/go-state-types/abi" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" -) - -func TestComms(t *testing.T) { - t.Skip("don't have enough ram") // no, but seriously, currently this needs like 3tb of /tmp - - var expPieceComms [zerocomm.Levels - zerocomm.Skip]cid.Cid - - { - l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 127)), 127) - if err != nil { - t.Fatal(err) - } - expPieceComms[0] = l2 - } - - for i := 1; i < zerocomm.Levels-2; i++ { - var err error - sz := abi.UnpaddedPieceSize(127 << uint(i)) - fmt.Println(i, sz) - r := io.LimitReader(&NullReader{}, int64(sz)) - - expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, r, sz) - if err != nil { - t.Fatal(err) - } - } - - for i, comm := range expPieceComms { - c, err := commcid.CIDToPieceCommitmentV1(comm) - if err != nil { - t.Fatal(err) - } - if string(c) != string(zerocomm.PieceComms[i][:]) { - t.Errorf("zero commitment %d didn't match", i) - } - } - - for _, comm := range expPieceComms { // Could do codegen, but this is good enough - fmt.Printf("%#v,\n", comm) - } -} - -func TestCommsSmall(t *testing.T) { - var expPieceComms [8]cid.Cid - lvls := len(expPieceComms) + zerocomm.Skip - - { - l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 127)), 127) - if err != nil { - t.Fatal(err) - } 
- expPieceComms[0] = l2 - } - - for i := 1; i < lvls-2; i++ { - var err error - sz := abi.UnpaddedPieceSize(127 << uint(i)) - fmt.Println(i, sz) - r := io.LimitReader(&NullReader{}, int64(sz)) - - expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, r, sz) - if err != nil { - t.Fatal(err) - } - } - - for i, comm := range expPieceComms { - c, err := commcid.CIDToPieceCommitmentV1(comm) - if err != nil { - t.Fatal(err) - } - if string(c) != string(zerocomm.PieceComms[i][:]) { - t.Errorf("zero commitment %d didn't match", i) - } - } - - for _, comm := range expPieceComms { // Could do codegen, but this is good enough - fmt.Printf("%#v,\n", comm) - } -} - -func TestForSise(t *testing.T) { - exp, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 1016)), 1016) - if err != nil { - return - } - - actual := zerocomm.ZeroPieceCommitment(1016) - if !exp.Equals(actual) { - t.Errorf("zero commitment didn't match") - } -} - -type NullReader struct{} - -func (NullReader) Read(out []byte) (int, error) { - for i := range out { - out[i] = 0 - } - return len(out), nil -} diff --git a/extern/storage-sealing/cbor_gen.go b/extern/storage-sealing/cbor_gen.go index 78765d7b4ec..b71c2863cff 100644 --- a/extern/storage-sealing/cbor_gen.go +++ b/extern/storage-sealing/cbor_gen.go @@ -5,14 +5,19 @@ package sealing import ( "fmt" "io" + "sort" abi "github.com/filecoin-project/go-state-types/abi" + api "github.com/filecoin-project/lotus/api" miner "github.com/filecoin-project/specs-actors/actors/builtin/miner" + cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf +var _ = cid.Undef +var _ = sort.Sort func (t *Piece) MarshalCBOR(w io.Writer) error { if t == nil { @@ -41,7 +46,7 @@ func (t *Piece) MarshalCBOR(w io.Writer) error { return err } - // t.DealInfo (sealing.DealInfo) (struct) + // t.DealInfo 
(api.PieceDealInfo) (struct) if len("DealInfo") > cbg.MaxLength { return xerrors.Errorf("Value in field \"DealInfo\" was too long") } @@ -102,7 +107,7 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error { } } - // t.DealInfo (sealing.DealInfo) (struct) + // t.DealInfo (api.PieceDealInfo) (struct) case "DealInfo": { @@ -115,7 +120,7 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error { if err := br.UnreadByte(); err != nil { return err } - t.DealInfo = new(DealInfo) + t.DealInfo = new(api.PieceDealInfo) if err := t.DealInfo.UnmarshalCBOR(br); err != nil { return xerrors.Errorf("unmarshaling t.DealInfo pointer: %w", err) } @@ -124,347 +129,8 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error { } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) - } - } - - return nil -} -func (t *DealInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{164}); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.PublishCid (cid.Cid) (struct) - if len("PublishCid") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"PublishCid\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("PublishCid")); err != nil { - return err - } - - if t.PublishCid == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil { - return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) - } - } - - // t.DealID (abi.DealID) (uint64) - if len("DealID") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealID\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("DealID")); err != nil { - return err 
- } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { - return err - } - - // t.DealSchedule (sealing.DealSchedule) (struct) - if len("DealSchedule") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealSchedule\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealSchedule"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("DealSchedule")); err != nil { - return err - } - - if err := t.DealSchedule.MarshalCBOR(w); err != nil { - return err - } - - // t.KeepUnsealed (bool) (bool) - if len("KeepUnsealed") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil { - return err - } - - if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil { - return err - } - return nil -} - -func (t *DealInfo) UnmarshalCBOR(r io.Reader) error { - *t = DealInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("DealInfo: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.PublishCid (cid.Cid) (struct) - case "PublishCid": - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field 
t.PublishCid: %w", err) - } - - t.PublishCid = &c - } - - } - // t.DealID (abi.DealID) (uint64) - case "DealID": - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.DealID = abi.DealID(extra) - - } - // t.DealSchedule (sealing.DealSchedule) (struct) - case "DealSchedule": - - { - - if err := t.DealSchedule.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err) - } - - } - // t.KeepUnsealed (bool) (bool) - case "KeepUnsealed": - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.KeepUnsealed = false - case 21: - t.KeepUnsealed = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - - default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) - } - } - - return nil -} -func (t *DealSchedule) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{162}); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.StartEpoch (abi.ChainEpoch) (int64) - if len("StartEpoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"StartEpoch\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartEpoch"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("StartEpoch")); err != nil { - return err - } - - if t.StartEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { - return err - } - } - - // t.EndEpoch 
(abi.ChainEpoch) (int64) - if len("EndEpoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"EndEpoch\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("EndEpoch"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("EndEpoch")); err != nil { - return err - } - - if t.EndEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { - return err - } - } - return nil -} - -func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error { - *t = DealSchedule{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("DealSchedule: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.StartEpoch (abi.ChainEpoch) (int64) - case "StartEpoch": - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.StartEpoch = abi.ChainEpoch(extraI) - } - // t.EndEpoch (abi.ChainEpoch) (int64) - case "EndEpoch": - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if 
err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.EndEpoch = abi.ChainEpoch(extraI) - } - - default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) } } @@ -475,7 +141,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{183}); err != nil { + if _, err := w.Write([]byte{184, 26}); err != nil { return err } @@ -542,6 +208,28 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } } + // t.CreationTime (int64) (int64) + if len("CreationTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CreationTime\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CreationTime"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CreationTime")); err != nil { + return err + } + + if t.CreationTime >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CreationTime)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.CreationTime-1)); err != nil { + return err + } + } + // t.Pieces ([]sealing.Piece) (slice) if len("Pieces") > cbg.MaxLength { return xerrors.Errorf("Value in field \"Pieces\" was too long") @@ -928,6 +616,50 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } + // t.TerminateMessage (cid.Cid) (struct) + if len("TerminateMessage") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TerminateMessage\" was too long") + } + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TerminateMessage"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TerminateMessage")); err != nil { + return err + } + + if t.TerminateMessage == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.TerminateMessage); err != nil { + return xerrors.Errorf("failed to write cid field t.TerminateMessage: %w", err) + } + } + + // t.TerminatedAt (abi.ChainEpoch) (int64) + if len("TerminatedAt") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TerminatedAt\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TerminatedAt"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TerminatedAt")); err != nil { + return err + } + + if t.TerminatedAt >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TerminatedAt)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.TerminatedAt-1)); err != nil { + return err + } + } + // t.LastErr (string) (string) if len("LastErr") > cbg.MaxLength { return xerrors.Errorf("Value in field \"LastErr\" was too long") @@ -1063,6 +795,32 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) error { t.SectorType = abi.RegisteredSealProof(extraI) } + // t.CreationTime (int64) (int64) + case "CreationTime": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.CreationTime = 
int64(extraI) + } // t.Pieces ([]sealing.Piece) (slice) case "Pieces": @@ -1441,6 +1199,55 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) error { t.Return = ReturnState(sval) } + // t.TerminateMessage (cid.Cid) (struct) + case "TerminateMessage": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.TerminateMessage: %w", err) + } + + t.TerminateMessage = &c + } + + } + // t.TerminatedAt (abi.ChainEpoch) (int64) + case "TerminatedAt": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.TerminatedAt = abi.ChainEpoch(extraI) + } // t.LastErr (string) (string) case "LastErr": @@ -1483,7 +1290,8 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) error { } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) } } @@ -1670,7 +1478,8 @@ func (t *Log) UnmarshalCBOR(r io.Reader) error { } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) } } diff --git a/extern/storage-sealing/checks.go b/extern/storage-sealing/checks.go index ed7a691ef1d..5ba23026d04 100644 --- a/extern/storage-sealing/checks.go +++ b/extern/storage-sealing/checks.go @@ -4,7 +4,6 @@ import ( "bytes" "context" - "github.com/filecoin-project/lotus/chain/actors" 
"github.com/filecoin-project/lotus/chain/actors/policy" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" @@ -12,10 +11,9 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" ) // TODO: For now we handle this by halting state execution, when we get jsonrpc reconnecting @@ -54,7 +52,7 @@ func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api continue } - proposal, err := api.StateMarketStorageDeal(ctx, p.DealInfo.DealID, tok) + proposal, err := api.StateMarketStorageDealProposal(ctx, p.DealInfo.DealID, tok) if err != nil { return &ErrInvalidDeals{xerrors.Errorf("getting deal %d for piece %d: %w", p.DealInfo.DealID, i, err)} } @@ -64,7 +62,7 @@ func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api } if proposal.PieceCID != p.Piece.PieceCID { - return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %x != %x", i, len(si.Pieces), si.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID)} + return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(si.Pieces), si.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID)} } if p.Piece.Size != proposal.PieceSize { @@ -95,14 +93,9 @@ func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, t return &ErrBadCommD{xerrors.Errorf("on chain CommD differs from sector: %s != %s", commD, si.CommD)} } - nv, err := api.StateNetworkVersion(ctx, tok) - if err != nil { - return &ErrApi{xerrors.Errorf("calling StateNetworkVersion: %w", err)} - } - - msd := 
policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), si.SectorType) + ticketEarliest := height - policy.MaxPreCommitRandomnessLookback - if height-(si.TicketEpoch+policy.SealRandomnessLookback) > msd { + if si.TicketEpoch < ticketEarliest { return &ErrExpiredTicket{xerrors.Errorf("ticket expired: seal height: %d, head: %d", si.TicketEpoch+policy.SealRandomnessLookback, height)} } @@ -166,23 +159,14 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte, return &ErrBadSeed{xerrors.Errorf("seed has changed")} } - ss, err := m.api.StateMinerSectorSize(ctx, m.maddr, tok) - if err != nil { - return &ErrApi{err} - } - spt, err := ffiwrapper.SealProofTypeFromSectorSize(ss) - if err != nil { - return err - } - if *si.CommR != pci.Info.SealedCID { log.Warn("on-chain sealed CID doesn't match!") } ok, err := m.verif.VerifySeal(proof2.SealVerifyInfo{ - SectorID: m.minerSector(si.SectorNumber), + SectorID: m.minerSectorID(si.SectorNumber), SealedCID: pci.Info.SealedCID, - SealProof: spt, + SealProof: pci.Info.SealProof, Proof: proof, Randomness: si.TicketValue, InteractiveRandomness: si.SeedValue, diff --git a/extern/storage-sealing/commit_batch.go b/extern/storage-sealing/commit_batch.go new file mode 100644 index 00000000000..e9ace820e29 --- /dev/null +++ b/extern/storage-sealing/commit_batch.go @@ -0,0 +1,602 @@ +package sealing + +import ( + "bytes" + "context" + "sort" + "sync" + "time" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + 
"github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/node/config" +) + +const arp = abi.RegisteredAggregationProof_SnarkPackV1 + +var aggFeeNum = big.NewInt(110) +var aggFeeDen = big.NewInt(100) + +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_commit_batcher.go -package=mocks . CommitBatcherApi + +type CommitBatcherApi interface { + SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) + StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) + ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error) + ChainBaseFee(context.Context, TipSetToken) (abi.TokenAmount, error) + + StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorPreCommitOnChainInfo, error) + StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) + StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error) + StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) +} + +type AggregateInput struct { + Spt abi.RegisteredSealProof + Info proof5.AggregateSealVerifyInfo + Proof []byte +} + +type CommitBatcher struct { + api CommitBatcherApi + maddr address.Address + mctx context.Context + addrSel AddrSel + feeCfg config.MinerFeeConfig + getConfig GetSealingConfigFunc + prover ffiwrapper.Prover + + cutoffs map[abi.SectorNumber]time.Time + todo map[abi.SectorNumber]AggregateInput + waiting 
map[abi.SectorNumber][]chan sealiface.CommitBatchRes + + notify, stop, stopped chan struct{} + force chan chan []sealiface.CommitBatchRes + lk sync.Mutex +} + +func NewCommitBatcher(mctx context.Context, maddr address.Address, api CommitBatcherApi, addrSel AddrSel, feeCfg config.MinerFeeConfig, getConfig GetSealingConfigFunc, prov ffiwrapper.Prover) *CommitBatcher { + b := &CommitBatcher{ + api: api, + maddr: maddr, + mctx: mctx, + addrSel: addrSel, + feeCfg: feeCfg, + getConfig: getConfig, + prover: prov, + + cutoffs: map[abi.SectorNumber]time.Time{}, + todo: map[abi.SectorNumber]AggregateInput{}, + waiting: map[abi.SectorNumber][]chan sealiface.CommitBatchRes{}, + + notify: make(chan struct{}, 1), + force: make(chan chan []sealiface.CommitBatchRes), + stop: make(chan struct{}), + stopped: make(chan struct{}), + } + + go b.run() + + return b +} + +func (b *CommitBatcher) run() { + var forceRes chan []sealiface.CommitBatchRes + var lastMsg []sealiface.CommitBatchRes + + cfg, err := b.getConfig() + if err != nil { + panic(err) + } + + timer := time.NewTimer(b.batchWait(cfg.CommitBatchWait, cfg.CommitBatchSlack)) + for { + if forceRes != nil { + forceRes <- lastMsg + forceRes = nil + } + lastMsg = nil + + // indicates whether we should only start a batch if we have reached or exceeded cfg.MaxCommitBatch + var sendAboveMax bool + select { + case <-b.stop: + close(b.stopped) + return + case <-b.notify: + sendAboveMax = true + case <-timer.C: + // do nothing + case fr := <-b.force: // user triggered + forceRes = fr + } + + var err error + lastMsg, err = b.maybeStartBatch(sendAboveMax) + if err != nil { + log.Warnw("CommitBatcher processBatch error", "error", err) + } + + if !timer.Stop() { + select { + case <-timer.C: + default: + } + } + + timer.Reset(b.batchWait(cfg.CommitBatchWait, cfg.CommitBatchSlack)) + } +} + +func (b *CommitBatcher) batchWait(maxWait, slack time.Duration) time.Duration { + now := time.Now() + + b.lk.Lock() + defer b.lk.Unlock() + + if 
len(b.todo) == 0 { + return maxWait + } + + var cutoff time.Time + for sn := range b.todo { + sectorCutoff := b.cutoffs[sn] + if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) { + cutoff = sectorCutoff + } + } + for sn := range b.waiting { + sectorCutoff := b.cutoffs[sn] + if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) { + cutoff = sectorCutoff + } + } + + if cutoff.IsZero() { + return maxWait + } + + cutoff = cutoff.Add(-slack) + if cutoff.Before(now) { + return time.Nanosecond // can't return 0 + } + + wait := cutoff.Sub(now) + if wait > maxWait { + wait = maxWait + } + + return wait +} + +func (b *CommitBatcher) maybeStartBatch(notif bool) ([]sealiface.CommitBatchRes, error) { + b.lk.Lock() + defer b.lk.Unlock() + + total := len(b.todo) + if total == 0 { + return nil, nil // nothing to do + } + + cfg, err := b.getConfig() + if err != nil { + return nil, xerrors.Errorf("getting config: %w", err) + } + + if notif && total < cfg.MaxCommitBatch { + return nil, nil + } + + var res []sealiface.CommitBatchRes + + individual := (total < cfg.MinCommitBatch) || (total < miner5.MinAggregatedSectors) + + if !individual && !cfg.AggregateAboveBaseFee.Equals(big.Zero()) { + tok, _, err := b.api.ChainHead(b.mctx) + if err != nil { + return nil, err + } + + bf, err := b.api.ChainBaseFee(b.mctx, tok) + if err != nil { + return nil, xerrors.Errorf("couldn't get base fee: %w", err) + } + + if bf.LessThan(cfg.AggregateAboveBaseFee) { + individual = true + } + } + + if individual { + res, err = b.processIndividually(cfg) + } else { + res, err = b.processBatch(cfg) + } + if err != nil && len(res) == 0 { + return nil, err + } + + for _, r := range res { + if err != nil { + r.Error = err.Error() + } + + for _, sn := range r.Sectors { + for _, ch := range b.waiting[sn] { + ch <- r // buffered + } + + delete(b.waiting, sn) + delete(b.todo, sn) + delete(b.cutoffs, sn) + } + } + + return res, nil +} + +func (b *CommitBatcher) 
processBatch(cfg sealiface.Config) ([]sealiface.CommitBatchRes, error) { + tok, _, err := b.api.ChainHead(b.mctx) + if err != nil { + return nil, err + } + + total := len(b.todo) + + res := sealiface.CommitBatchRes{ + FailedSectors: map[abi.SectorNumber]string{}, + } + + params := miner5.ProveCommitAggregateParams{ + SectorNumbers: bitfield.New(), + } + + proofs := make([][]byte, 0, total) + infos := make([]proof5.AggregateSealVerifyInfo, 0, total) + collateral := big.Zero() + + for id, p := range b.todo { + if len(infos) >= cfg.MaxCommitBatch { + log.Infow("commit batch full") + break + } + + res.Sectors = append(res.Sectors, id) + + sc, err := b.getSectorCollateral(id, tok) + if err != nil { + res.FailedSectors[id] = err.Error() + continue + } + + collateral = big.Add(collateral, sc) + + params.SectorNumbers.Set(uint64(id)) + infos = append(infos, p.Info) + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Number < infos[j].Number + }) + + for _, info := range infos { + proofs = append(proofs, b.todo[info.Number].Proof) + } + + mid, err := address.IDFromAddress(b.maddr) + if err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting miner id: %w", err) + } + + params.AggregateProof, err = b.prover.AggregateSealProofs(proof5.AggregateSealVerifyProofAndInfos{ + Miner: abi.ActorID(mid), + SealProof: b.todo[infos[0].Number].Spt, + AggregateProof: arp, + Infos: infos, + }, proofs) + if err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("aggregating proofs: %w", err) + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't serialize ProveCommitAggregateParams: %w", err) + } + + mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil) + if err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't get miner info: %w", err) + } + + maxFee := b.feeCfg.MaxCommitBatchGasFee.FeeForSectors(len(infos)) + + bf, err 
:= b.api.ChainBaseFee(b.mctx, tok) + if err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't get base fee: %w", err) + } + + nv, err := b.api.StateNetworkVersion(b.mctx, tok) + if err != nil { + log.Errorf("getting network version: %s", err) + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting network version: %s", err) + } + + aggFee := big.Div(big.Mul(policy.AggregateNetworkFee(nv, len(infos), bf), aggFeeNum), aggFeeDen) + + needFunds := big.Add(collateral, aggFee) + needFunds, err = collateralSendAmount(b.mctx, b.api, b.maddr, cfg, needFunds) + if err != nil { + return []sealiface.CommitBatchRes{res}, err + } + + goodFunds := big.Add(maxFee, needFunds) + + from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, goodFunds, needFunds) + if err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("no good address found: %w", err) + } + + mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitAggregate, needFunds, maxFee, enc.Bytes()) + if err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("sending message failed: %w", err) + } + + res.Msg = &mcid + + log.Infow("Sent ProveCommitAggregate message", "cid", mcid, "from", from, "todo", total, "sectors", len(infos)) + + return []sealiface.CommitBatchRes{res}, nil +} + +func (b *CommitBatcher) processIndividually(cfg sealiface.Config) ([]sealiface.CommitBatchRes, error) { + mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil) + if err != nil { + return nil, xerrors.Errorf("couldn't get miner info: %w", err) + } + + avail := types.TotalFilecoinInt + + if cfg.CollateralFromMinerBalance && !cfg.DisableCollateralFallback { + avail, err = b.api.StateMinerAvailableBalance(b.mctx, b.maddr, nil) + if err != nil { + return nil, xerrors.Errorf("getting available miner balance: %w", err) + } + + avail = big.Sub(avail, cfg.AvailableBalanceBuffer) + if avail.LessThan(big.Zero()) { + avail = big.Zero() + } + } + + tok, _, err := 
b.api.ChainHead(b.mctx) + if err != nil { + return nil, err + } + + var res []sealiface.CommitBatchRes + + for sn, info := range b.todo { + r := sealiface.CommitBatchRes{ + Sectors: []abi.SectorNumber{sn}, + FailedSectors: map[abi.SectorNumber]string{}, + } + + mcid, err := b.processSingle(cfg, mi, &avail, sn, info, tok) + if err != nil { + log.Errorf("process single error: %+v", err) // todo: return to user + r.FailedSectors[sn] = err.Error() + } else { + r.Msg = &mcid + } + + res = append(res, r) + } + + return res, nil +} + +func (b *CommitBatcher) processSingle(cfg sealiface.Config, mi miner.MinerInfo, avail *abi.TokenAmount, sn abi.SectorNumber, info AggregateInput, tok TipSetToken) (cid.Cid, error) { + enc := new(bytes.Buffer) + params := &miner.ProveCommitSectorParams{ + SectorNumber: sn, + Proof: info.Proof, + } + + if err := params.MarshalCBOR(enc); err != nil { + return cid.Undef, xerrors.Errorf("marshaling commit params: %w", err) + } + + collateral, err := b.getSectorCollateral(sn, tok) + if err != nil { + return cid.Undef, err + } + + if cfg.CollateralFromMinerBalance { + c := big.Sub(collateral, *avail) + *avail = big.Sub(*avail, collateral) + collateral = c + + if collateral.LessThan(big.Zero()) { + collateral = big.Zero() + } + if (*avail).LessThan(big.Zero()) { + *avail = big.Zero() + } + } + + goodFunds := big.Add(collateral, big.Int(b.feeCfg.MaxCommitGasFee)) + + from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, goodFunds, collateral) + if err != nil { + return cid.Undef, xerrors.Errorf("no good address to send commit message from: %w", err) + } + + mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.ProveCommitSector, collateral, big.Int(b.feeCfg.MaxCommitGasFee), enc.Bytes()) + if err != nil { + return cid.Undef, xerrors.Errorf("pushing message to mpool: %w", err) + } + + return mcid, nil +} + +// register commit, wait for batch message, return message CID +func (b *CommitBatcher) AddCommit(ctx context.Context, s SectorInfo, in 
AggregateInput) (res sealiface.CommitBatchRes, err error) { + sn := s.SectorNumber + + cu, err := b.getCommitCutoff(s) + if err != nil { + return sealiface.CommitBatchRes{}, err + } + + b.lk.Lock() + b.cutoffs[sn] = cu + b.todo[sn] = in + + sent := make(chan sealiface.CommitBatchRes, 1) + b.waiting[sn] = append(b.waiting[sn], sent) + + select { + case b.notify <- struct{}{}: + default: // already have a pending notification, don't need more + } + b.lk.Unlock() + + select { + case r := <-sent: + return r, nil + case <-ctx.Done(): + return sealiface.CommitBatchRes{}, ctx.Err() + } +} + +func (b *CommitBatcher) Flush(ctx context.Context) ([]sealiface.CommitBatchRes, error) { + resCh := make(chan []sealiface.CommitBatchRes, 1) + select { + case b.force <- resCh: + select { + case res := <-resCh: + return res, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (b *CommitBatcher) Pending(ctx context.Context) ([]abi.SectorID, error) { + b.lk.Lock() + defer b.lk.Unlock() + + mid, err := address.IDFromAddress(b.maddr) + if err != nil { + return nil, err + } + + res := make([]abi.SectorID, 0) + for _, s := range b.todo { + res = append(res, abi.SectorID{ + Miner: abi.ActorID(mid), + Number: s.Info.Number, + }) + } + + sort.Slice(res, func(i, j int) bool { + if res[i].Miner != res[j].Miner { + return res[i].Miner < res[j].Miner + } + + return res[i].Number < res[j].Number + }) + + return res, nil +} + +func (b *CommitBatcher) Stop(ctx context.Context) error { + close(b.stop) + + select { + case <-b.stopped: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// TODO: If this returned epochs, it would make testing much easier +func (b *CommitBatcher) getCommitCutoff(si SectorInfo) (time.Time, error) { + tok, curEpoch, err := b.api.ChainHead(b.mctx) + if err != nil { + return time.Now(), xerrors.Errorf("getting chain head: %s", err) + } + + nv, err := b.api.StateNetworkVersion(b.mctx, tok) + if err != nil 
{ + log.Errorf("getting network version: %s", err) + return time.Now(), xerrors.Errorf("getting network version: %s", err) + } + + pci, err := b.api.StateSectorPreCommitInfo(b.mctx, b.maddr, si.SectorNumber, tok) + if err != nil { + log.Errorf("getting precommit info: %s", err) + return time.Now(), err + } + + cutoffEpoch := pci.PreCommitEpoch + policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), si.SectorType) + + for _, p := range si.Pieces { + if p.DealInfo == nil { + continue + } + + startEpoch := p.DealInfo.DealSchedule.StartEpoch + if startEpoch < cutoffEpoch { + cutoffEpoch = startEpoch + } + } + + if cutoffEpoch <= curEpoch { + return time.Now(), nil + } + + return time.Now().Add(time.Duration(cutoffEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second), nil +} + +func (b *CommitBatcher) getSectorCollateral(sn abi.SectorNumber, tok TipSetToken) (abi.TokenAmount, error) { + pci, err := b.api.StateSectorPreCommitInfo(b.mctx, b.maddr, sn, tok) + if err != nil { + return big.Zero(), xerrors.Errorf("getting precommit info: %w", err) + } + if pci == nil { + return big.Zero(), xerrors.Errorf("precommit info not found on chain") + } + + collateral, err := b.api.StateMinerInitialPledgeCollateral(b.mctx, b.maddr, pci.Info, tok) + if err != nil { + return big.Zero(), xerrors.Errorf("getting initial pledge collateral: %w", err) + } + + collateral = big.Sub(collateral, pci.PreCommitDeposit) + if collateral.LessThan(big.Zero()) { + collateral = big.Zero() + } + + return collateral, nil +} diff --git a/extern/storage-sealing/commit_batch_test.go b/extern/storage-sealing/commit_batch_test.go new file mode 100644 index 00000000000..aea6d455ebc --- /dev/null +++ b/extern/storage-sealing/commit_batch_test.go @@ -0,0 +1,383 @@ +package sealing_test + +import ( + "bytes" + "context" + "sort" + "sync" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + 
"github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/extern/storage-sealing/mocks" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" +) + +func TestCommitBatcher(t *testing.T) { + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + ctx := context.Background() + + as := func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { + return t0123, big.Zero(), nil + } + + maxBatch := miner5.MaxAggregatedSectors + minBatch := miner5.MinAggregatedSectors + + cfg := func() (sealiface.Config, error) { + return sealiface.Config{ + MaxWaitDealsSectors: 2, + MaxSealingSectors: 0, + MaxSealingSectorsForDeals: 0, + WaitDealsDelay: time.Hour * 6, + AlwaysKeepUnsealedCopy: true, + + BatchPreCommits: true, + MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, + PreCommitBatchWait: 24 * time.Hour, + PreCommitBatchSlack: 3 * time.Hour, + + AggregateCommits: true, + MinCommitBatch: minBatch, + MaxCommitBatch: maxBatch, + CommitBatchWait: 24 * time.Hour, + CommitBatchSlack: 1 * time.Hour, + + AggregateAboveBaseFee: types.BigMul(types.PicoFil, types.NewInt(150)), // 0.15 nFIL + + TerminateBatchMin: 1, + TerminateBatchMax: 100, + TerminateBatchWait: 5 * time.Minute, + }, nil + } + + type promise func(t *testing.T) + type action func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb 
*sealing.CommitBatcher) promise + + actions := func(as ...action) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + var ps []promise + for _, a := range as { + p := a(t, s, pcb) + if p != nil { + ps = append(ps, p) + } + } + + if len(ps) > 0 { + return func(t *testing.T) { + for _, p := range ps { + p(t) + } + } + } + return nil + } + } + + addSector := func(sn abi.SectorNumber) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + var pcres sealiface.CommitBatchRes + var pcerr error + done := sync.Mutex{} + done.Lock() + + si := sealing.SectorInfo{ + SectorNumber: sn, + } + + s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) + s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version13, nil) + s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&miner.SectorPreCommitOnChainInfo{ + PreCommitDeposit: big.Zero(), + }, nil) + + go func() { + defer done.Unlock() + pcres, pcerr = pcb.AddCommit(ctx, si, sealing.AggregateInput{ + Info: proof5.AggregateSealVerifyInfo{ + Number: sn, + }, + }) + }() + + return func(t *testing.T) { + done.Lock() + require.NoError(t, pcerr) + require.Empty(t, pcres.Error) + require.Contains(t, pcres.Sectors, si.SectorNumber) + } + } + } + + addSectors := func(sectors []abi.SectorNumber) action { + as := make([]action, len(sectors)) + for i, sector := range sectors { + as[i] = addSector(sector) + } + return actions(as...) 
+ } + + waitPending := func(n int) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + require.Eventually(t, func() bool { + p, err := pcb.Pending(ctx) + require.NoError(t, err) + return len(p) == n + }, time.Second*5, 10*time.Millisecond) + + return nil + } + } + + expectSend := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(miner.MinerInfo{Owner: t0123, Worker: t0123}, nil) + + ti := len(expect) + batch := false + if ti >= minBatch { + batch = true + ti = 1 + } + + basefee := types.PicoFil + if aboveBalancer { + basefee = types.NanoFil + } + + if batch { + s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) + s.EXPECT().ChainBaseFee(gomock.Any(), gomock.Any()).Return(basefee, nil) + } + + if !aboveBalancer { + batch = false + ti = len(expect) + } + + s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) + + pciC := len(expect) + if failOnePCI { + s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), abi.SectorNumber(1), gomock.Any()).Return(nil, nil).Times(1) // not found + pciC = len(expect) - 1 + if !batch { + ti-- + } + } + s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&miner.SectorPreCommitOnChainInfo{ + PreCommitDeposit: big.Zero(), + }, nil).Times(pciC) + s.EXPECT().StateMinerInitialPledgeCollateral(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(big.Zero(), nil).Times(pciC) + + if batch { + s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version13, nil) + s.EXPECT().ChainBaseFee(gomock.Any(), gomock.Any()).Return(basefee, nil) + } + + s.EXPECT().SendMsg(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), funMatcher(func(i interface{}) 
bool { + b := i.([]byte) + if batch { + var params miner5.ProveCommitAggregateParams + require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b))) + for _, number := range expect { + set, err := params.SectorNumbers.IsSet(uint64(number)) + require.NoError(t, err) + require.True(t, set) + } + } else { + var params miner5.ProveCommitSectorParams + require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b))) + } + return true + })).Times(ti) + return nil + } + } + + flush := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + _ = expectSend(expect, aboveBalancer, failOnePCI)(t, s, pcb) + + batch := len(expect) >= minBatch && aboveBalancer + + r, err := pcb.Flush(ctx) + require.NoError(t, err) + if batch { + require.Len(t, r, 1) + require.Empty(t, r[0].Error) + sort.Slice(r[0].Sectors, func(i, j int) bool { + return r[0].Sectors[i] < r[0].Sectors[j] + }) + require.Equal(t, expect, r[0].Sectors) + if !failOnePCI { + require.Len(t, r[0].FailedSectors, 0) + } else { + require.Len(t, r[0].FailedSectors, 1) + _, found := r[0].FailedSectors[1] + require.True(t, found) + } + } else { + require.Len(t, r, len(expect)) + for _, res := range r { + require.Len(t, res.Sectors, 1) + require.Empty(t, res.Error) + } + sort.Slice(r, func(i, j int) bool { + return r[i].Sectors[0] < r[j].Sectors[0] + }) + for i, res := range r { + require.Equal(t, abi.SectorNumber(i), res.Sectors[0]) + if failOnePCI && res.Sectors[0] == 1 { + require.Len(t, res.FailedSectors, 1) + _, found := res.FailedSectors[1] + require.True(t, found) + } else { + require.Empty(t, res.FailedSectors) + } + } + } + + return nil + } + } + + getSectors := func(n int) []abi.SectorNumber { + out := make([]abi.SectorNumber, n) + for i := range out { + out[i] = abi.SectorNumber(i) + } + return out + } + + tcs := map[string]struct { + actions []action + }{ + "addSingle-aboveBalancer": { + actions: []action{ 
+ addSector(0), + waitPending(1), + flush([]abi.SectorNumber{0}, true, false), + }, + }, + "addTwo-aboveBalancer": { + actions: []action{ + addSectors(getSectors(2)), + waitPending(2), + flush(getSectors(2), true, false), + }, + }, + "addAte-aboveBalancer": { + actions: []action{ + addSectors(getSectors(8)), + waitPending(8), + flush(getSectors(8), true, false), + }, + }, + "addMax-aboveBalancer": { + actions: []action{ + expectSend(getSectors(maxBatch), true, false), + addSectors(getSectors(maxBatch)), + }, + }, + "addSingle-belowBalancer": { + actions: []action{ + addSector(0), + waitPending(1), + flush([]abi.SectorNumber{0}, false, false), + }, + }, + "addTwo-belowBalancer": { + actions: []action{ + addSectors(getSectors(2)), + waitPending(2), + flush(getSectors(2), false, false), + }, + }, + "addAte-belowBalancer": { + actions: []action{ + addSectors(getSectors(8)), + waitPending(8), + flush(getSectors(8), false, false), + }, + }, + "addMax-belowBalancer": { + actions: []action{ + expectSend(getSectors(maxBatch), false, false), + addSectors(getSectors(maxBatch)), + }, + }, + + "addAte-aboveBalancer-failOne": { + actions: []action{ + addSectors(getSectors(8)), + waitPending(8), + flush(getSectors(8), true, true), + }, + }, + "addAte-belowBalancer-failOne": { + actions: []action{ + addSectors(getSectors(8)), + waitPending(8), + flush(getSectors(8), false, true), + }, + }, + } + + for name, tc := range tcs { + tc := tc + + t.Run(name, func(t *testing.T) { + // create go mock controller here + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. 
+ defer mockCtrl.Finish() + + // create them mocks + pcapi := mocks.NewMockCommitBatcherApi(mockCtrl) + + pcb := sealing.NewCommitBatcher(ctx, t0123, pcapi, as, fc, cfg, &fakeProver{}) + + var promises []promise + + for _, a := range tc.actions { + p := a(t, pcapi, pcb) + if p != nil { + promises = append(promises, p) + } + } + + for _, p := range promises { + p(t) + } + + err := pcb.Stop(ctx) + require.NoError(t, err) + }) + } +} + +type fakeProver struct{} + +func (f fakeProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { + return []byte("Trust me, I'm a proof"), nil +} + +var _ ffiwrapper.Prover = &fakeProver{} diff --git a/extern/storage-sealing/currentdealinfo.go b/extern/storage-sealing/currentdealinfo.go new file mode 100644 index 00000000000..ed93512c28a --- /dev/null +++ b/extern/storage-sealing/currentdealinfo.go @@ -0,0 +1,213 @@ +package sealing + +import ( + "bytes" + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/types" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" +) + +type CurrentDealInfoAPI interface { + ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) + StateLookupID(context.Context, address.Address, TipSetToken) (address.Address, error) + StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (*api.MarketDeal, error) + StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error) +} + +type CurrentDealInfo struct { + DealID abi.DealID + MarketDeal *api.MarketDeal + PublishMsgTipSet TipSetToken +} + +type CurrentDealInfoManager struct { + CDAPI CurrentDealInfoAPI +} + +// GetCurrentDealInfo gets the current deal 
state and deal ID. +// Note that the deal ID is assigned when the deal is published, so it may +// have changed if there was a reorg after the deal was published. +func (mgr *CurrentDealInfoManager) GetCurrentDealInfo(ctx context.Context, tok TipSetToken, proposal *market.DealProposal, publishCid cid.Cid) (CurrentDealInfo, error) { + // Lookup the deal ID by comparing the deal proposal to the proposals in + // the publish deals message, and indexing into the message return value + dealID, pubMsgTok, err := mgr.dealIDFromPublishDealsMsg(ctx, tok, proposal, publishCid) + if err != nil { + return CurrentDealInfo{}, err + } + + // Lookup the deal state by deal ID + marketDeal, err := mgr.CDAPI.StateMarketStorageDeal(ctx, dealID, tok) + if err == nil && proposal != nil { + // Make sure the retrieved deal proposal matches the target proposal + equal, err := mgr.CheckDealEquality(ctx, tok, *proposal, marketDeal.Proposal) + if err != nil { + return CurrentDealInfo{}, err + } + if !equal { + return CurrentDealInfo{}, xerrors.Errorf("Deal proposals for publish message %s did not match", publishCid) + } + } + return CurrentDealInfo{DealID: dealID, MarketDeal: marketDeal, PublishMsgTipSet: pubMsgTok}, err +} + +// dealIDFromPublishDealsMsg looks up the publish deals message by cid, and finds the deal ID +// by looking at the message return value +func (mgr *CurrentDealInfoManager) dealIDFromPublishDealsMsg(ctx context.Context, tok TipSetToken, proposal *market.DealProposal, publishCid cid.Cid) (abi.DealID, TipSetToken, error) { + dealID := abi.DealID(0) + + // Get the return value of the publish deals message + lookup, err := mgr.CDAPI.StateSearchMsg(ctx, publishCid) + if err != nil { + return dealID, nil, xerrors.Errorf("looking for publish deal message %s: search msg failed: %w", publishCid, err) + } + + if lookup == nil { + return dealID, nil, xerrors.Errorf("looking for publish deal message %s: not found", publishCid) + } + + if lookup.Receipt.ExitCode != exitcode.Ok { + 
return dealID, nil, xerrors.Errorf("looking for publish deal message %s: non-ok exit code: %s", publishCid, lookup.Receipt.ExitCode) + } + + var retval market.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(lookup.Receipt.Return)); err != nil { + return dealID, nil, xerrors.Errorf("looking for publish deal message %s: unmarshalling message return: %w", publishCid, err) + } + + // Previously, publish deals messages contained a single deal, and the + // deal proposal was not included in the sealing deal info. + // So check if the proposal is nil and check the number of deals published + // in the message. + if proposal == nil { + if len(retval.IDs) > 1 { + return dealID, nil, xerrors.Errorf( + "getting deal ID from publish deal message %s: "+ + "no deal proposal supplied but message return value has more than one deal (%d deals)", + publishCid, len(retval.IDs)) + } + + // There is a single deal in this publish message and no deal proposal + // was supplied, so we have nothing to compare against. Just assume + // the deal ID is correct. 
+ return retval.IDs[0], lookup.TipSetTok, nil + } + + // Get the parameters to the publish deals message + pubmsg, err := mgr.CDAPI.ChainGetMessage(ctx, publishCid) + if err != nil { + return dealID, nil, xerrors.Errorf("getting publish deal message %s: %w", publishCid, err) + } + + var pubDealsParams market2.PublishStorageDealsParams + if err := pubDealsParams.UnmarshalCBOR(bytes.NewReader(pubmsg.Params)); err != nil { + return dealID, nil, xerrors.Errorf("unmarshalling publish deal message params for message %s: %w", publishCid, err) + } + + // Scan through the deal proposals in the message parameters to find the + // index of the target deal proposal + dealIdx := -1 + for i, paramDeal := range pubDealsParams.Deals { + eq, err := mgr.CheckDealEquality(ctx, tok, *proposal, market.DealProposal(paramDeal.Proposal)) + if err != nil { + return dealID, nil, xerrors.Errorf("comparing publish deal message %s proposal to deal proposal: %w", publishCid, err) + } + if eq { + dealIdx = i + break + } + } + + if dealIdx == -1 { + return dealID, nil, xerrors.Errorf("could not find deal in publish deals message %s", publishCid) + } + + if dealIdx >= len(retval.IDs) { + return dealID, nil, xerrors.Errorf( + "deal index %d out of bounds of deals (len %d) in publish deals message %s", + dealIdx, len(retval.IDs), publishCid) + } + + return retval.IDs[dealIdx], lookup.TipSetTok, nil +} + +func (mgr *CurrentDealInfoManager) CheckDealEquality(ctx context.Context, tok TipSetToken, p1, p2 market.DealProposal) (bool, error) { + p1ClientID, err := mgr.CDAPI.StateLookupID(ctx, p1.Client, tok) + if err != nil { + return false, err + } + p2ClientID, err := mgr.CDAPI.StateLookupID(ctx, p2.Client, tok) + if err != nil { + return false, err + } + return p1.PieceCID.Equals(p2.PieceCID) && + p1.PieceSize == p2.PieceSize && + p1.VerifiedDeal == p2.VerifiedDeal && + p1.Label == p2.Label && + p1.StartEpoch == p2.StartEpoch && + p1.EndEpoch == p2.EndEpoch && + 
p1.StoragePricePerEpoch.Equals(p2.StoragePricePerEpoch) && + p1.ProviderCollateral.Equals(p2.ProviderCollateral) && + p1.ClientCollateral.Equals(p2.ClientCollateral) && + p1.Provider == p2.Provider && + p1ClientID == p2ClientID, nil +} + +type CurrentDealInfoTskAPI interface { + ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) + StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) + StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) + StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) +} + +type CurrentDealInfoAPIAdapter struct { + CurrentDealInfoTskAPI +} + +func (c *CurrentDealInfoAPIAdapter) StateLookupID(ctx context.Context, a address.Address, tok TipSetToken) (address.Address, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return address.Undef, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) + } + + return c.CurrentDealInfoTskAPI.StateLookupID(ctx, a, tsk) +} + +func (c *CurrentDealInfoAPIAdapter) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, tok TipSetToken) (*api.MarketDeal, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return nil, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) + } + + return c.CurrentDealInfoTskAPI.StateMarketStorageDeal(ctx, dealID, tsk) +} + +func (c *CurrentDealInfoAPIAdapter) StateSearchMsg(ctx context.Context, k cid.Cid) (*MsgLookup, error) { + wmsg, err := c.CurrentDealInfoTskAPI.StateSearchMsg(ctx, types.EmptyTSK, k, api.LookbackNoLimit, true) + if err != nil { + return nil, err + } + + if wmsg == nil { + return nil, nil + } + + return &MsgLookup{ + Receipt: MessageReceipt{ + ExitCode: wmsg.Receipt.ExitCode, + Return: wmsg.Receipt.Return, + GasUsed: wmsg.Receipt.GasUsed, + }, + TipSetTok: wmsg.TipSet.Bytes(), + Height: 
wmsg.Height, + }, nil +} + +var _ CurrentDealInfoAPI = (*CurrentDealInfoAPIAdapter)(nil) diff --git a/extern/storage-sealing/currentdealinfo_test.go b/extern/storage-sealing/currentdealinfo_test.go new file mode 100644 index 00000000000..b28dd461abd --- /dev/null +++ b/extern/storage-sealing/currentdealinfo_test.go @@ -0,0 +1,316 @@ +package sealing + +import ( + "bytes" + "errors" + "math/rand" + "sort" + "testing" + "time" + + "golang.org/x/net/context" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + evtmock "github.com/filecoin-project/lotus/chain/events/state/mock" + "github.com/filecoin-project/lotus/chain/types" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + tutils "github.com/filecoin-project/specs-actors/v2/support/testing" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" +) + +var errNotFound = errors.New("could not find") + +func TestGetCurrentDealInfo(t *testing.T) { + ctx := context.Background() + dummyCid, _ := cid.Parse("bafkqaaa") + dummyCid2, _ := cid.Parse("bafkqaab") + zeroDealID := abi.DealID(0) + earlierDealID := abi.DealID(9) + successDealID := abi.DealID(10) + proposal := market.DealProposal{ + PieceCID: dummyCid, + PieceSize: abi.PaddedPieceSize(100), + Client: tutils.NewActorAddr(t, "client"), + Provider: tutils.NewActorAddr(t, "provider"), + StoragePricePerEpoch: abi.NewTokenAmount(1), + ProviderCollateral: abi.NewTokenAmount(1), + ClientCollateral: abi.NewTokenAmount(1), + Label: "success", + } + otherProposal := market.DealProposal{ + PieceCID: dummyCid2, + PieceSize: abi.PaddedPieceSize(100), + Client: tutils.NewActorAddr(t, "client"), + Provider: tutils.NewActorAddr(t, "provider"), + 
StoragePricePerEpoch: abi.NewTokenAmount(1), + ProviderCollateral: abi.NewTokenAmount(1), + ClientCollateral: abi.NewTokenAmount(1), + Label: "other", + } + successDeal := &api.MarketDeal{ + Proposal: proposal, + State: market.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + }, + } + earlierDeal := &api.MarketDeal{ + Proposal: otherProposal, + State: market.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + }, + } + + type testCaseData struct { + searchMessageLookup *MsgLookup + searchMessageErr error + marketDeals map[abi.DealID]*api.MarketDeal + publishCid cid.Cid + targetProposal *market.DealProposal + expectedDealID abi.DealID + expectedMarketDeal *api.MarketDeal + expectedError error + } + testCases := map[string]testCaseData{ + "deal lookup succeeds": { + publishCid: dummyCid, + searchMessageLookup: &MsgLookup{ + Receipt: MessageReceipt{ + ExitCode: exitcode.Ok, + Return: makePublishDealsReturnBytes(t, []abi.DealID{successDealID}), + }, + }, + marketDeals: map[abi.DealID]*api.MarketDeal{ + successDealID: successDeal, + }, + targetProposal: &proposal, + expectedDealID: successDealID, + expectedMarketDeal: successDeal, + }, + "deal lookup succeeds two return values": { + publishCid: dummyCid, + searchMessageLookup: &MsgLookup{ + Receipt: MessageReceipt{ + ExitCode: exitcode.Ok, + Return: makePublishDealsReturnBytes(t, []abi.DealID{earlierDealID, successDealID}), + }, + }, + marketDeals: map[abi.DealID]*api.MarketDeal{ + earlierDealID: earlierDeal, + successDealID: successDeal, + }, + targetProposal: &proposal, + expectedDealID: successDealID, + expectedMarketDeal: successDeal, + }, + "deal lookup fails proposal mis-match": { + publishCid: dummyCid, + searchMessageLookup: &MsgLookup{ + Receipt: MessageReceipt{ + ExitCode: exitcode.Ok, + Return: makePublishDealsReturnBytes(t, []abi.DealID{earlierDealID}), + }, + }, + marketDeals: map[abi.DealID]*api.MarketDeal{ + earlierDealID: earlierDeal, + }, + targetProposal: &proposal, + expectedDealID: 
zeroDealID, + expectedError: xerrors.Errorf("could not find deal in publish deals message %s", dummyCid), + }, + "deal lookup fails mismatch count of deals and return values": { + publishCid: dummyCid, + searchMessageLookup: &MsgLookup{ + Receipt: MessageReceipt{ + ExitCode: exitcode.Ok, + Return: makePublishDealsReturnBytes(t, []abi.DealID{earlierDealID}), + }, + }, + marketDeals: map[abi.DealID]*api.MarketDeal{ + earlierDealID: earlierDeal, + successDealID: successDeal, + }, + targetProposal: &proposal, + expectedDealID: zeroDealID, + expectedError: xerrors.Errorf("deal index 1 out of bounds of deals (len 1) in publish deals message %s", dummyCid), + }, + "deal lookup succeeds, target proposal nil, single deal in message": { + publishCid: dummyCid, + searchMessageLookup: &MsgLookup{ + Receipt: MessageReceipt{ + ExitCode: exitcode.Ok, + Return: makePublishDealsReturnBytes(t, []abi.DealID{successDealID}), + }, + }, + marketDeals: map[abi.DealID]*api.MarketDeal{ + successDealID: successDeal, + }, + targetProposal: nil, + expectedDealID: successDealID, + expectedMarketDeal: successDeal, + }, + "deal lookup fails, multiple deals in return value but target proposal nil": { + publishCid: dummyCid, + searchMessageLookup: &MsgLookup{ + Receipt: MessageReceipt{ + ExitCode: exitcode.Ok, + Return: makePublishDealsReturnBytes(t, []abi.DealID{earlierDealID, successDealID}), + }, + }, + marketDeals: map[abi.DealID]*api.MarketDeal{ + earlierDealID: earlierDeal, + successDealID: successDeal, + }, + targetProposal: nil, + expectedDealID: zeroDealID, + expectedError: xerrors.Errorf("getting deal ID from publish deal message %s: no deal proposal supplied but message return value has more than one deal (2 deals)", dummyCid), + }, + "search message fails": { + publishCid: dummyCid, + searchMessageErr: errors.New("something went wrong"), + targetProposal: &proposal, + expectedDealID: zeroDealID, + expectedError: xerrors.Errorf("looking for publish deal message %s: search msg failed: 
something went wrong", dummyCid), + }, + "search message not found": { + publishCid: dummyCid, + targetProposal: &proposal, + expectedDealID: zeroDealID, + expectedError: xerrors.Errorf("looking for publish deal message %s: not found", dummyCid), + }, + "return code not ok": { + publishCid: dummyCid, + searchMessageLookup: &MsgLookup{ + Receipt: MessageReceipt{ + ExitCode: exitcode.ErrIllegalState, + }, + }, + targetProposal: &proposal, + expectedDealID: zeroDealID, + expectedError: xerrors.Errorf("looking for publish deal message %s: non-ok exit code: %s", dummyCid, exitcode.ErrIllegalState), + }, + "unable to unmarshal params": { + publishCid: dummyCid, + searchMessageLookup: &MsgLookup{ + Receipt: MessageReceipt{ + ExitCode: exitcode.Ok, + Return: []byte("applesauce"), + }, + }, + targetProposal: &proposal, + expectedDealID: zeroDealID, + expectedError: xerrors.Errorf("looking for publish deal message %s: unmarshalling message return: cbor input should be of type array", dummyCid), + }, + } + runTestCase := func(testCase string, data testCaseData) { + t.Run(testCase, func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + ts, err := evtmock.MockTipset(address.TestAddress, rand.Uint64()) + require.NoError(t, err) + marketDeals := make(map[marketDealKey]*api.MarketDeal) + for dealID, deal := range data.marketDeals { + marketDeals[marketDealKey{dealID, ts.Key()}] = deal + } + mockApi := &CurrentDealInfoMockAPI{ + SearchMessageLookup: data.searchMessageLookup, + SearchMessageErr: data.searchMessageErr, + MarketDeals: marketDeals, + } + dealInfoMgr := CurrentDealInfoManager{mockApi} + + res, err := dealInfoMgr.GetCurrentDealInfo(ctx, ts.Key().Bytes(), data.targetProposal, data.publishCid) + require.Equal(t, data.expectedDealID, res.DealID) + require.Equal(t, data.expectedMarketDeal, res.MarketDeal) + if data.expectedError == nil { + require.NoError(t, err) + } else { + require.EqualError(t, err, data.expectedError.Error()) + } 
+ }) + } + for testCase, data := range testCases { + runTestCase(testCase, data) + } +} + +type marketDealKey struct { + abi.DealID + types.TipSetKey +} + +type CurrentDealInfoMockAPI struct { + SearchMessageLookup *MsgLookup + SearchMessageErr error + + MarketDeals map[marketDealKey]*api.MarketDeal +} + +func (mapi *CurrentDealInfoMockAPI) ChainGetMessage(ctx context.Context, c cid.Cid) (*types.Message, error) { + var dealIDs []abi.DealID + var deals []market2.ClientDealProposal + for k, dl := range mapi.MarketDeals { + dealIDs = append(dealIDs, k.DealID) + deals = append(deals, market2.ClientDealProposal{ + Proposal: market2.DealProposal(dl.Proposal), + ClientSignature: crypto.Signature{ + Data: []byte("foo bar cat dog"), + Type: crypto.SigTypeBLS, + }, + }) + } + sort.SliceStable(deals, func(i, j int) bool { + return dealIDs[i] < dealIDs[j] + }) + buf := new(bytes.Buffer) + params := market2.PublishStorageDealsParams{Deals: deals} + err := params.MarshalCBOR(buf) + if err != nil { + panic(err) + } + return &types.Message{ + Params: buf.Bytes(), + }, nil +} + +func (mapi *CurrentDealInfoMockAPI) StateLookupID(ctx context.Context, addr address.Address, token TipSetToken) (address.Address, error) { + return addr, nil +} + +func (mapi *CurrentDealInfoMockAPI) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, tok TipSetToken) (*api.MarketDeal, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return nil, err + } + deal, ok := mapi.MarketDeals[marketDealKey{dealID, tsk}] + if !ok { + return nil, errNotFound + } + return deal, nil +} + +func (mapi *CurrentDealInfoMockAPI) StateSearchMsg(ctx context.Context, c cid.Cid) (*MsgLookup, error) { + if mapi.SearchMessageLookup == nil { + return mapi.SearchMessageLookup, mapi.SearchMessageErr + } + + return mapi.SearchMessageLookup, mapi.SearchMessageErr +} + +func makePublishDealsReturnBytes(t *testing.T, dealIDs []abi.DealID) []byte { + buf := new(bytes.Buffer) + dealsReturn := 
market.PublishStorageDealsReturn{ + IDs: dealIDs, + } + err := dealsReturn.MarshalCBOR(buf) + require.NoError(t, err) + return buf.Bytes() +} diff --git a/extern/storage-sealing/fsm.go b/extern/storage-sealing/fsm.go index 3a5931c8bf8..d04aef7904f 100644 --- a/extern/storage-sealing/fsm.go +++ b/extern/storage-sealing/fsm.go @@ -37,14 +37,23 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto // Sealing UndefinedSectorState: planOne( - on(SectorStart{}, Empty), + on(SectorStart{}, WaitDeals), on(SectorStartCC{}, Packing), ), - Empty: planOne(on(SectorAddPiece{}, WaitDeals)), + Empty: planOne( // deprecated + on(SectorAddPiece{}, AddPiece), + on(SectorStartPacking{}, Packing), + ), WaitDeals: planOne( - on(SectorAddPiece{}, WaitDeals), + on(SectorAddPiece{}, AddPiece), on(SectorStartPacking{}, Packing), ), + AddPiece: planOne( + on(SectorPieceAdded{}, WaitDeals), + apply(SectorStartPacking{}), + apply(SectorAddPiece{}), + on(SectorAddPieceFailed{}, AddPieceFailed), + ), Packing: planOne(on(SectorPacked{}, GetTicket)), GetTicket: planOne( on(SectorTicket{}, PreCommit1), @@ -63,13 +72,27 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), ), PreCommitting: planOne( - on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), + on(SectorPreCommitBatch{}, SubmitPreCommitBatch), on(SectorPreCommitted{}, PreCommitWait), + on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), on(SectorChainPreCommitFailed{}, PreCommitFailed), on(SectorPreCommitLanded{}, WaitSeed), on(SectorDealsExpired{}, DealsExpired), on(SectorInvalidDealIDs{}, RecoverDealIDs), ), + SubmitPreCommitBatch: planOne( + on(SectorPreCommitBatchSent{}, PreCommitBatchWait), + on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), + on(SectorChainPreCommitFailed{}, PreCommitFailed), + on(SectorPreCommitLanded{}, WaitSeed), + on(SectorDealsExpired{}, DealsExpired), + on(SectorInvalidDealIDs{}, 
RecoverDealIDs), + ), + PreCommitBatchWait: planOne( + on(SectorChainPreCommitFailed{}, PreCommitFailed), + on(SectorPreCommitLanded{}, WaitSeed), + on(SectorRetryPreCommit{}, PreCommitting), + ), PreCommitWait: planOne( on(SectorChainPreCommitFailed{}, PreCommitFailed), on(SectorPreCommitLanded{}, WaitSeed), @@ -80,15 +103,30 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorChainPreCommitFailed{}, PreCommitFailed), ), Committing: planCommitting, + CommitFinalize: planOne( + on(SectorFinalized{}, SubmitCommit), + on(SectorFinalizeFailed{}, CommitFinalizeFailed), + ), SubmitCommit: planOne( on(SectorCommitSubmitted{}, CommitWait), + on(SectorSubmitCommitAggregate{}, SubmitCommitAggregate), + on(SectorCommitFailed{}, CommitFailed), + ), + SubmitCommitAggregate: planOne( + on(SectorCommitAggregateSent{}, CommitWait), on(SectorCommitFailed{}, CommitFailed), + on(SectorRetrySubmitCommit{}, SubmitCommit), ), CommitWait: planOne( on(SectorProving{}, FinalizeSector), on(SectorCommitFailed{}, CommitFailed), on(SectorRetrySubmitCommit{}, SubmitCommit), ), + CommitAggregateWait: planOne( + on(SectorProving{}, FinalizeSector), + on(SectorCommitFailed{}, CommitFailed), + on(SectorRetrySubmitCommit{}, SubmitCommit), + ), FinalizeSector: planOne( on(SectorFinalized{}, Proving), @@ -97,6 +135,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto // Sealing errors + AddPieceFailed: planOne(), SealPreCommit1Failed: planOne( on(SectorRetrySealPreCommit1{}, PreCommit1), ), @@ -106,6 +145,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto ), PreCommitFailed: planOne( on(SectorRetryPreCommit{}, PreCommitting), + on(SectorRetryPreCommitWait{}, PreCommitWait), on(SectorRetryWaitSeed{}, WaitSeed), on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), on(SectorPreCommitLanded{}, WaitSeed), @@ -116,6 +156,9 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, 
state *Secto on(SectorRetryComputeProof{}, Committing), on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), ), + CommitFinalizeFailed: planOne( + on(SectorRetryFinalize{}, CommitFinalize), + ), CommitFailed: planOne( on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), on(SectorRetryWaitSeed{}, WaitSeed), @@ -125,6 +168,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorChainPreCommitFailed{}, PreCommitFailed), on(SectorRetryPreCommit{}, PreCommitting), on(SectorRetryCommitWait{}, CommitWait), + on(SectorRetrySubmitCommit{}, SubmitCommit), on(SectorDealsExpired{}, DealsExpired), on(SectorInvalidDealIDs{}, RecoverDealIDs), on(SectorTicketExpired{}, Removing), @@ -146,6 +190,21 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorFaultReported{}, FaultReported), on(SectorFaulty{}, Faulty), ), + Terminating: planOne( + on(SectorTerminating{}, TerminateWait), + on(SectorTerminateFailed{}, TerminateFailed), + ), + TerminateWait: planOne( + on(SectorTerminated{}, TerminateFinality), + on(SectorTerminateFailed{}, TerminateFailed), + ), + TerminateFinality: planOne( + on(SectorTerminateFailed{}, TerminateFailed), + // SectorRemove (global) + ), + TerminateFailed: planOne( + // SectorTerminating (global) + ), Removing: planOne( on(SectorRemoved{}, Removed), on(SectorRemoveFailed{}, RemoveFailed), @@ -157,21 +216,32 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorFaultReported{}, FaultReported), ), + FaultReported: final, // not really supported right now + FaultedFinal: final, Removed: final, -} -func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(statemachine.Context, SectorInfo) error, uint64, error) { - ///// - // First process all events + FailedUnrecoverable: final, +} +func (m *Sealing) logEvents(events []statemachine.Event, state *SectorInfo) { for _, event := range events { + log.Debugw("sector event", 
"sector", state.SectorNumber, "type", fmt.Sprintf("%T", event.User), "event", event.User) + e, err := json.Marshal(event) if err != nil { log.Errorf("marshaling event for logging: %+v", err) continue } + if event.User == (SectorRestart{}) { + continue // don't log on every fsm restart + } + + if len(e) > 8000 { + e = []byte(string(e[:8000]) + "... truncated") + } + l := Log{ Timestamp: uint64(time.Now().Unix()), Message: string(e), @@ -190,11 +260,18 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta Kind: fmt.Sprintf("truncate"), } - state.Log = append(state.Log[:2000], state.Log[:6000]...) + state.Log = append(state.Log[:2000], state.Log[6000:]...) } state.Log = append(state.Log, l) } +} + +func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(statemachine.Context, SectorInfo) error, uint64, error) { + ///// + // First process all events + + m.logEvents(events, state) if m.notifee != nil { defer func(before SectorInfo) { @@ -204,7 +281,15 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta p := fsmPlanners[state.State] if p == nil { - return nil, 0, xerrors.Errorf("planner for state %s not found", state.State) + if len(events) == 1 { + if _, ok := events[0].User.(globalMutator); ok { + p = planOne() // in case we're in a really weird state, allow restart / update state / remove + } + } + + if p == nil { + return nil, 0, xerrors.Errorf("planner for state %s not found", state.State) + } } processed, err := p(events, state) @@ -217,12 +302,11 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta /* - * Empty <- incoming deals - | | - | v - *<- WaitDeals <- incoming deals - | | - | v + UndefinedSectorState (start) + v | + *<- WaitDeals <-> AddPiece | + | | /--------------------/ + | v v *<- Packing <- incoming committed capacity | | | v @@ -261,20 +345,20 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta v 
FailedUnrecoverable - UndefinedSectorState <- ¯\_(ツ)_/¯ - | ^ - *---------------------/ - */ - m.stats.updateSector(m.minerSector(state.SectorNumber), state.State) + if err := m.onUpdateSector(context.TODO(), state); err != nil { + log.Errorw("update sector stats", "error", err) + } switch state.State { // Happy path case Empty: fallthrough case WaitDeals: - log.Infof("Waiting for deals %d", state.SectorNumber) + return m.handleWaitDeals, processed, nil + case AddPiece: + return m.handleAddPiece, processed, nil case Packing: return m.handlePacking, processed, nil case GetTicket: @@ -285,6 +369,10 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta return m.handlePreCommit2, processed, nil case PreCommitting: return m.handlePreCommitting, processed, nil + case SubmitPreCommitBatch: + return m.handleSubmitPreCommitBatch, processed, nil + case PreCommitBatchWait: + fallthrough case PreCommitWait: return m.handlePreCommitWait, processed, nil case WaitSeed: @@ -293,8 +381,14 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta return m.handleCommitting, processed, nil case SubmitCommit: return m.handleSubmitCommit, processed, nil + case SubmitCommitAggregate: + return m.handleSubmitCommitAggregate, processed, nil + case CommitAggregateWait: + fallthrough case CommitWait: return m.handleCommitWait, processed, nil + case CommitFinalize: + fallthrough case FinalizeSector: return m.handleFinalizeSector, processed, nil @@ -309,6 +403,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta return m.handleComputeProofFailed, processed, nil case CommitFailed: return m.handleCommitFailed, processed, nil + case CommitFinalizeFailed: + fallthrough case FinalizeFailed: return m.handleFinalizeFailed, processed, nil case PackingFailed: // DEPRECATED: remove this for the next reset @@ -322,6 +418,14 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta // 
Post-seal case Proving: return m.handleProvingSector, processed, nil + case Terminating: + return m.handleTerminating, processed, nil + case TerminateWait: + return m.handleTerminateWait, processed, nil + case TerminateFinality: + return m.handleTerminateFinality, processed, nil + case TerminateFailed: + return m.handleTerminateFailed, processed, nil case Removing: return m.handleRemoving, processed, nil case Removed: @@ -348,6 +452,38 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta return nil, processed, nil } +func (m *Sealing) onUpdateSector(ctx context.Context, state *SectorInfo) error { + if m.getConfig == nil { + return nil // tests + } + + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting config: %w", err) + } + + shouldUpdateInput := m.stats.updateSector(cfg, m.minerSectorID(state.SectorNumber), state.State) + + // trigger more input processing when we've dipped below max sealing limits + if shouldUpdateInput { + sp, err := m.currentSealProof(ctx) + if err != nil { + return xerrors.Errorf("getting seal proof type: %w", err) + } + + go func() { + m.inputLk.Lock() + defer m.inputLk.Unlock() + + if err := m.updateInput(ctx, sp); err != nil { + log.Errorf("%+v", err) + } + }() + } + + return nil +} + func planCommitting(events []statemachine.Event, state *SectorInfo) (uint64, error) { for i, event := range events { switch e := event.User.(type) { @@ -358,6 +494,9 @@ func planCommitting(events []statemachine.Event, state *SectorInfo) (uint64, err case SectorCommitted: // the normal case e.apply(state) state.State = SubmitCommit + case SectorProofReady: // early finalize + e.apply(state) + state.State = CommitFinalize case SectorSeedReady: // seed changed :/ if e.SeedEpoch == state.SeedEpoch && bytes.Equal(e.SeedValue, state.SeedValue) { log.Warnf("planCommitting: got SectorSeedReady, but the seed didn't change") @@ -384,53 +523,17 @@ func planCommitting(events []statemachine.Event, state *SectorInfo) 
(uint64, err } func (m *Sealing) restartSectors(ctx context.Context) error { + defer m.startupWait.Done() + trackedSectors, err := m.ListSectors() if err != nil { log.Errorf("loading sector list: %+v", err) } - cfg, err := m.getConfig() - if err != nil { - return xerrors.Errorf("getting the sealing delay: %w", err) - } - - m.unsealedInfoMap.lk.Lock() - defer m.unsealedInfoMap.lk.Unlock() for _, sector := range trackedSectors { if err := m.sectors.Send(uint64(sector.SectorNumber), SectorRestart{}); err != nil { log.Errorf("restarting sector %d: %+v", sector.SectorNumber, err) } - - if sector.State == WaitDeals { - - // put the sector in the unsealedInfoMap - if _, ok := m.unsealedInfoMap.infos[sector.SectorNumber]; ok { - // something's funky here, but probably safe to move on - log.Warnf("sector %v was already in the unsealedInfoMap when restarting", sector.SectorNumber) - } else { - ui := UnsealedSectorInfo{} - for _, p := range sector.Pieces { - if p.DealInfo != nil { - ui.numDeals++ - } - ui.stored += p.Piece.Size - ui.pieceSizes = append(ui.pieceSizes, p.Piece.Size.Unpadded()) - } - - m.unsealedInfoMap.infos[sector.SectorNumber] = ui - } - - // start a fresh timer for the sector - if cfg.WaitDealsDelay > 0 { - timer := time.NewTimer(cfg.WaitDealsDelay) - go func() { - <-timer.C - if err := m.StartPacking(sector.SectorNumber); err != nil { - log.Errorf("starting sector %d: %+v", sector.SectorNumber, err) - } - }() - } - } } // TODO: Grab on-chain sector set and diff with trackedSectors @@ -439,63 +542,90 @@ func (m *Sealing) restartSectors(ctx context.Context) error { } func (m *Sealing) ForceSectorState(ctx context.Context, id abi.SectorNumber, state SectorState) error { + m.startupWait.Wait() return m.sectors.Send(id, SectorForceState{state}) } func final(events []statemachine.Event, state *SectorInfo) (uint64, error) { + if len(events) > 0 { + if gm, ok := events[0].User.(globalMutator); ok { + gm.applyGlobal(state) + return 1, nil + } + } + return 0, 
xerrors.Errorf("didn't expect any events in state %s, got %+v", state.State, events) } -func on(mut mutator, next SectorState) func() (mutator, func(*SectorInfo) error) { - return func() (mutator, func(*SectorInfo) error) { - return mut, func(state *SectorInfo) error { +func on(mut mutator, next SectorState) func() (mutator, func(*SectorInfo) (bool, error)) { + return func() (mutator, func(*SectorInfo) (bool, error)) { + return mut, func(state *SectorInfo) (bool, error) { state.State = next - return nil + return false, nil } } } -func onReturning(mut mutator) func() (mutator, func(*SectorInfo) error) { - return func() (mutator, func(*SectorInfo) error) { - return mut, func(state *SectorInfo) error { +// like `on`, but doesn't change state +func apply(mut mutator) func() (mutator, func(*SectorInfo) (bool, error)) { + return func() (mutator, func(*SectorInfo) (bool, error)) { + return mut, func(state *SectorInfo) (bool, error) { + return true, nil + } + } +} + +func onReturning(mut mutator) func() (mutator, func(*SectorInfo) (bool, error)) { + return func() (mutator, func(*SectorInfo) (bool, error)) { + return mut, func(state *SectorInfo) (bool, error) { if state.Return == "" { - return xerrors.Errorf("return state not set") + return false, xerrors.Errorf("return state not set") } state.State = SectorState(state.Return) state.Return = "" - return nil + return false, nil } } } -func planOne(ts ...func() (mut mutator, next func(*SectorInfo) error)) func(events []statemachine.Event, state *SectorInfo) (uint64, error) { +func planOne(ts ...func() (mut mutator, next func(*SectorInfo) (more bool, err error))) func(events []statemachine.Event, state *SectorInfo) (uint64, error) { return func(events []statemachine.Event, state *SectorInfo) (uint64, error) { - if gm, ok := events[0].User.(globalMutator); ok { - gm.applyGlobal(state) - return 1, nil - } + eloop: + for i, event := range events { + if gm, ok := event.User.(globalMutator); ok { + gm.applyGlobal(state) + return 
uint64(i + 1), nil + } - for _, t := range ts { - mut, next := t() + for _, t := range ts { + mut, next := t() - if reflect.TypeOf(events[0].User) != reflect.TypeOf(mut) { - continue - } + if reflect.TypeOf(event.User) != reflect.TypeOf(mut) { + continue + } - if err, iserr := events[0].User.(error); iserr { - log.Warnf("sector %d got error event %T: %+v", state.SectorNumber, events[0].User, err) + if err, iserr := event.User.(error); iserr { + log.Warnf("sector %d got error event %T: %+v", state.SectorNumber, event.User, err) + } + + event.User.(mutator).apply(state) + more, err := next(state) + if err != nil || !more { + return uint64(i + 1), err + } + + continue eloop } - events[0].User.(mutator).apply(state) - return 1, next(state) - } + _, ok := event.User.(Ignorable) + if ok { + continue + } - _, ok := events[0].User.(Ignorable) - if ok { - return 1, nil + return uint64(i + 1), xerrors.Errorf("planner for state %s received unexpected event %T (%+v)", state.State, event.User, event) } - return 0, xerrors.Errorf("planner for state %s received unexpected event %T (%+v)", state.State, events[0].User, events[0]) + return uint64(len(events)), nil } } diff --git a/extern/storage-sealing/fsm_events.go b/extern/storage-sealing/fsm_events.go index 59f5e77e68c..3dab6d40356 100644 --- a/extern/storage-sealing/fsm_events.go +++ b/extern/storage-sealing/fsm_events.go @@ -1,13 +1,16 @@ package sealing import ( - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "time" + "github.com/ipfs/go-cid" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" ) type mutator interface { @@ -67,23 +70,34 @@ func (evt SectorStart) apply(state *SectorInfo) { type SectorStartCC struct { ID abi.SectorNumber SectorType abi.RegisteredSealProof - Pieces []Piece } func (evt SectorStartCC) 
apply(state *SectorInfo) { state.SectorNumber = evt.ID - state.Pieces = evt.Pieces state.SectorType = evt.SectorType } -type SectorAddPiece struct { - NewPiece Piece -} +type SectorAddPiece struct{} func (evt SectorAddPiece) apply(state *SectorInfo) { - state.Pieces = append(state.Pieces, evt.NewPiece) + if state.CreationTime == 0 { + state.CreationTime = time.Now().Unix() + } } +type SectorPieceAdded struct { + NewPieces []Piece +} + +func (evt SectorPieceAdded) apply(state *SectorInfo) { + state.Pieces = append(state.Pieces, evt.NewPieces...) +} + +type SectorAddPieceFailed struct{ error } + +func (evt SectorAddPieceFailed) FormatError(xerrors.Printer) (next error) { return evt.error } +func (evt SectorAddPieceFailed) apply(si *SectorInfo) {} + type SectorStartPacking struct{} func (evt SectorStartPacking) apply(*SectorInfo) {} @@ -136,6 +150,18 @@ func (evt SectorPreCommit2) apply(state *SectorInfo) { state.CommR = &commr } +type SectorPreCommitBatch struct{} + +func (evt SectorPreCommitBatch) apply(*SectorInfo) {} + +type SectorPreCommitBatchSent struct { + Message cid.Cid +} + +func (evt SectorPreCommitBatchSent) apply(state *SectorInfo) { + state.PreCommitMessage = &evt.Message +} + type SectorPreCommitLanded struct { TipSet TipSetToken } @@ -219,6 +245,19 @@ func (evt SectorCommitted) apply(state *SectorInfo) { state.Proof = evt.Proof } +// like SectorCommitted, but finalizes before sending the proof to the chain +type SectorProofReady struct { + Proof []byte +} + +func (evt SectorProofReady) apply(state *SectorInfo) { + state.Proof = evt.Proof +} + +type SectorSubmitCommitAggregate struct{} + +func (evt SectorSubmitCommitAggregate) apply(*SectorInfo) {} + type SectorCommitSubmitted struct { Message cid.Cid } @@ -227,6 +266,14 @@ func (evt SectorCommitSubmitted) apply(state *SectorInfo) { state.CommitMessage = &evt.Message } +type SectorCommitAggregateSent struct { + Message cid.Cid +} + +func (evt SectorCommitAggregateSent) apply(state *SectorInfo) { + 
state.CommitMessage = &evt.Message +} + type SectorProving struct{} func (evt SectorProving) apply(*SectorInfo) {} @@ -314,6 +361,32 @@ func (evt SectorFaultReported) apply(state *SectorInfo) { type SectorFaultedFinal struct{} +// Terminating + +type SectorTerminate struct{} + +func (evt SectorTerminate) applyGlobal(state *SectorInfo) bool { + state.State = Terminating + return true +} + +type SectorTerminating struct{ Message *cid.Cid } + +func (evt SectorTerminating) apply(state *SectorInfo) { + state.TerminateMessage = evt.Message +} + +type SectorTerminated struct{ TerminatedAt abi.ChainEpoch } + +func (evt SectorTerminated) apply(state *SectorInfo) { + state.TerminatedAt = evt.TerminatedAt +} + +type SectorTerminateFailed struct{ error } + +func (evt SectorTerminateFailed) FormatError(xerrors.Printer) (next error) { return evt.error } +func (evt SectorTerminateFailed) apply(*SectorInfo) {} + // External events type SectorRemove struct{} diff --git a/extern/storage-sealing/fsm_test.go b/extern/storage-sealing/fsm_test.go index 5b4541f7516..1d2df27846e 100644 --- a/extern/storage-sealing/fsm_test.go +++ b/extern/storage-sealing/fsm_test.go @@ -87,6 +87,112 @@ func TestHappyPath(t *testing.T) { } } +func TestHappyPathFinalizeEarly(t *testing.T) { + var notif []struct{ before, after SectorInfo } + ma, _ := address.NewIDAddress(55151) + m := test{ + s: &Sealing{ + maddr: ma, + stats: SectorStats{ + bySector: map[abi.SectorID]statSectorState{}, + }, + notifee: func(before, after SectorInfo) { + notif = append(notif, struct{ before, after SectorInfo }{before, after}) + }, + }, + t: t, + state: &SectorInfo{State: Packing}, + } + + m.planSingle(SectorPacked{}) + require.Equal(m.t, m.state.State, GetTicket) + + m.planSingle(SectorTicket{}) + require.Equal(m.t, m.state.State, PreCommit1) + + m.planSingle(SectorPreCommit1{}) + require.Equal(m.t, m.state.State, PreCommit2) + + m.planSingle(SectorPreCommit2{}) + require.Equal(m.t, m.state.State, PreCommitting) + + 
m.planSingle(SectorPreCommitted{}) + require.Equal(m.t, m.state.State, PreCommitWait) + + m.planSingle(SectorPreCommitLanded{}) + require.Equal(m.t, m.state.State, WaitSeed) + + m.planSingle(SectorSeedReady{}) + require.Equal(m.t, m.state.State, Committing) + + m.planSingle(SectorProofReady{}) + require.Equal(m.t, m.state.State, CommitFinalize) + + m.planSingle(SectorFinalized{}) + require.Equal(m.t, m.state.State, SubmitCommit) + + m.planSingle(SectorSubmitCommitAggregate{}) + require.Equal(m.t, m.state.State, SubmitCommitAggregate) + + m.planSingle(SectorCommitAggregateSent{}) + require.Equal(m.t, m.state.State, CommitWait) + + m.planSingle(SectorProving{}) + require.Equal(m.t, m.state.State, FinalizeSector) + + m.planSingle(SectorFinalized{}) + require.Equal(m.t, m.state.State, Proving) + + expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, CommitFinalize, SubmitCommit, SubmitCommitAggregate, CommitWait, FinalizeSector, Proving} + for i, n := range notif { + if n.before.State != expected[i] { + t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State) + } + if n.after.State != expected[i+1] { + t.Fatalf("expected after state: %s, got: %s", expected[i+1], n.after.State) + } + } +} + +func TestCommitFinalizeFailed(t *testing.T) { + var notif []struct{ before, after SectorInfo } + ma, _ := address.NewIDAddress(55151) + m := test{ + s: &Sealing{ + maddr: ma, + stats: SectorStats{ + bySector: map[abi.SectorID]statSectorState{}, + }, + notifee: func(before, after SectorInfo) { + notif = append(notif, struct{ before, after SectorInfo }{before, after}) + }, + }, + t: t, + state: &SectorInfo{State: Committing}, + } + + m.planSingle(SectorProofReady{}) + require.Equal(m.t, m.state.State, CommitFinalize) + + m.planSingle(SectorFinalizeFailed{}) + require.Equal(m.t, m.state.State, CommitFinalizeFailed) + + m.planSingle(SectorRetryFinalize{}) + require.Equal(m.t, m.state.State, 
CommitFinalize) + + m.planSingle(SectorFinalized{}) + require.Equal(m.t, m.state.State, SubmitCommit) + + expected := []SectorState{Committing, CommitFinalize, CommitFinalizeFailed, CommitFinalize, SubmitCommit} + for i, n := range notif { + if n.before.State != expected[i] { + t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State) + } + if n.after.State != expected[i+1] { + t.Fatalf("expected after state: %s, got: %s", expected[i+1], n.after.State) + } + } +} func TestSeedRevert(t *testing.T) { ma, _ := address.NewIDAddress(55151) m := test{ @@ -160,3 +266,93 @@ func TestPlanCommittingHandlesSectorCommitFailed(t *testing.T) { require.Equal(t, CommitFailed, m.state.State) } + +func TestPlannerList(t *testing.T) { + for state := range ExistSectorStateList { + _, ok := fsmPlanners[state] + require.True(t, ok, "state %s", state) + } + + for state := range fsmPlanners { + if state == UndefinedSectorState { + continue + } + _, ok := ExistSectorStateList[state] + require.True(t, ok, "state %s", state) + } +} + +func TestBrokenState(t *testing.T) { + var notif []struct{ before, after SectorInfo } + ma, _ := address.NewIDAddress(55151) + m := test{ + s: &Sealing{ + maddr: ma, + stats: SectorStats{ + bySector: map[abi.SectorID]statSectorState{}, + }, + notifee: func(before, after SectorInfo) { + notif = append(notif, struct{ before, after SectorInfo }{before, after}) + }, + }, + t: t, + state: &SectorInfo{State: "not a state"}, + } + + _, _, err := m.s.plan([]statemachine.Event{{User: SectorPacked{}}}, m.state) + require.Error(t, err) + require.Equal(m.t, m.state.State, SectorState("not a state")) + + m.planSingle(SectorRemove{}) + require.Equal(m.t, m.state.State, Removing) + + expected := []SectorState{"not a state", "not a state", Removing} + for i, n := range notif { + if n.before.State != expected[i] { + t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State) + } + if n.after.State != expected[i+1] { + t.Fatalf("expected after 
state: %s, got: %s", expected[i+1], n.after.State) + } + } +} + +func TestTicketExpired(t *testing.T) { + var notif []struct{ before, after SectorInfo } + ma, _ := address.NewIDAddress(55151) + m := test{ + s: &Sealing{ + maddr: ma, + stats: SectorStats{ + bySector: map[abi.SectorID]statSectorState{}, + }, + notifee: func(before, after SectorInfo) { + notif = append(notif, struct{ before, after SectorInfo }{before, after}) + }, + }, + t: t, + state: &SectorInfo{State: Packing}, + } + + m.planSingle(SectorPacked{}) + require.Equal(m.t, m.state.State, GetTicket) + + m.planSingle(SectorTicket{}) + require.Equal(m.t, m.state.State, PreCommit1) + + expired := checkTicketExpired(0, MaxTicketAge+1) + require.True(t, expired) + + m.planSingle(SectorOldTicket{}) + require.Equal(m.t, m.state.State, GetTicket) + + expected := []SectorState{Packing, GetTicket, PreCommit1, GetTicket} + for i, n := range notif { + if n.before.State != expected[i] { + t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State) + } + if n.after.State != expected[i+1] { + t.Fatalf("expected after state: %s, got: %s", expected[i+1], n.after.State) + } + } +} diff --git a/extern/storage-sealing/garbage.go b/extern/storage-sealing/garbage.go index caf371806dd..d429b5b438d 100644 --- a/extern/storage-sealing/garbage.go +++ b/extern/storage-sealing/garbage.go @@ -5,79 +5,39 @@ import ( "golang.org/x/xerrors" - "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" ) -func (m *Sealing) pledgeSector(ctx context.Context, sectorID abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]abi.PieceInfo, error) { - if len(sizes) == 0 { - return nil, nil - } - - log.Infof("Pledge %d, contains %+v", sectorID, existingPieceSizes) - - out := make([]abi.PieceInfo, len(sizes)) - for i, size := range sizes { - ppi, err := m.sealer.AddPiece(ctx, sectorID, existingPieceSizes, size, NewNullReader(size)) - if err != nil { 
- return nil, xerrors.Errorf("add piece: %w", err) - } - - existingPieceSizes = append(existingPieceSizes, size) +func (m *Sealing) PledgeSector(ctx context.Context) (storage.SectorRef, error) { + m.startupWait.Wait() - out[i] = ppi - } - - return out, nil -} + m.inputLk.Lock() + defer m.inputLk.Unlock() -func (m *Sealing) PledgeSector() error { cfg, err := m.getConfig() if err != nil { - return xerrors.Errorf("getting config: %w", err) + return storage.SectorRef{}, xerrors.Errorf("getting config: %w", err) } if cfg.MaxSealingSectors > 0 { - if m.stats.curSealing() > cfg.MaxSealingSectors { - return xerrors.Errorf("too many sectors sealing (curSealing: %d, max: %d)", m.stats.curSealing(), cfg.MaxSealingSectors) + if m.stats.curSealing() >= cfg.MaxSealingSectors { + return storage.SectorRef{}, xerrors.Errorf("too many sectors sealing (curSealing: %d, max: %d)", m.stats.curSealing(), cfg.MaxSealingSectors) } } - go func() { - ctx := context.TODO() // we can't use the context from command which invokes - // this, as we run everything here async, and it's cancelled when the - // command exits - - size := abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded() - - sid, err := m.sc.Next() - if err != nil { - log.Errorf("%+v", err) - return - } - err = m.sealer.NewSector(ctx, m.minerSector(sid)) - if err != nil { - log.Errorf("%+v", err) - return - } - - pieces, err := m.pledgeSector(ctx, m.minerSector(sid), []abi.UnpaddedPieceSize{}, size) - if err != nil { - log.Errorf("%+v", err) - return - } + spt, err := m.currentSealProof(ctx) + if err != nil { + return storage.SectorRef{}, xerrors.Errorf("getting seal proof type: %w", err) + } - ps := make([]Piece, len(pieces)) - for idx := range ps { - ps[idx] = Piece{ - Piece: pieces[idx], - DealInfo: nil, - } - } + sid, err := m.createSector(ctx, cfg, spt) + if err != nil { + return storage.SectorRef{}, err + } - if err := m.newSectorCC(sid, ps); err != nil { - log.Errorf("%+v", err) - return - } - }() - return nil + 
log.Infof("Creating CC sector %d", sid) + return m.minerSector(spt, sid), m.sectors.Send(uint64(sid), SectorStartCC{ + ID: sid, + SectorType: spt, + }) } diff --git a/extern/storage-sealing/gen/main.go b/extern/storage-sealing/gen/main.go index 97c2bacd5bd..825ce8d284b 100644 --- a/extern/storage-sealing/gen/main.go +++ b/extern/storage-sealing/gen/main.go @@ -12,8 +12,6 @@ import ( func main() { err := gen.WriteMapEncodersToFile("./cbor_gen.go", "sealing", sealing.Piece{}, - sealing.DealInfo{}, - sealing.DealSchedule{}, sealing.SectorInfo{}, sealing.Log{}, ) diff --git a/extern/storage-sealing/input.go b/extern/storage-sealing/input.go new file mode 100644 index 00000000000..1a0b7bf1e8b --- /dev/null +++ b/extern/storage-sealing/input.go @@ -0,0 +1,466 @@ +package sealing + +import ( + "context" + "sort" + "time" + + "golang.org/x/xerrors" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-padreader" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-statemachine" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/api" + sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" +) + +func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) error { + var used abi.UnpaddedPieceSize + for _, piece := range sector.Pieces { + used += piece.Piece.Size.Unpadded() + } + + m.inputLk.Lock() + + if m.creating != nil && *m.creating == sector.SectorNumber { + m.creating = nil + } + + sid := m.minerSectorID(sector.SectorNumber) + + if len(m.assignedPieces[sid]) > 0 { + m.inputLk.Unlock() + // got assigned more pieces in the AddPiece state + return ctx.Send(SectorAddPiece{}) + } + + started, err := m.maybeStartSealing(ctx, sector, used) + if err != nil || started { + delete(m.openSectors, 
m.minerSectorID(sector.SectorNumber)) + + m.inputLk.Unlock() + + return err + } + + if _, has := m.openSectors[sid]; !has { + m.openSectors[sid] = &openSector{ + used: used, + maybeAccept: func(cid cid.Cid) error { + // todo check deal start deadline (configurable) + m.assignedPieces[sid] = append(m.assignedPieces[sid], cid) + + return ctx.Send(SectorAddPiece{}) + }, + } + } + + go func() { + defer m.inputLk.Unlock() + if err := m.updateInput(ctx.Context(), sector.SectorType); err != nil { + log.Errorf("%+v", err) + } + }() + + return nil +} + +func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo, used abi.UnpaddedPieceSize) (bool, error) { + now := time.Now() + st := m.sectorTimers[m.minerSectorID(sector.SectorNumber)] + if st != nil { + if !st.Stop() { // timer expired, SectorStartPacking was/is being sent + // we send another SectorStartPacking in case one was sent in the handleAddPiece state + log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timeout") + return true, ctx.Send(SectorStartPacking{}) + } + } + + ssize, err := sector.SectorType.SectorSize() + if err != nil { + return false, xerrors.Errorf("getting sector size") + } + + maxDeals, err := getDealPerSectorLimit(ssize) + if err != nil { + return false, xerrors.Errorf("getting per-sector deal limit: %w", err) + } + + if len(sector.dealIDs()) >= maxDeals { + // can't accept more deals + log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "maxdeals") + return true, ctx.Send(SectorStartPacking{}) + } + + if used.Padded() == abi.PaddedPieceSize(ssize) { + // sector full + log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "filled") + return true, ctx.Send(SectorStartPacking{}) + } + + if sector.CreationTime != 0 { + cfg, err := m.getConfig() + if err != nil { + return false, xerrors.Errorf("getting storage config: %w", err) + } + + // todo check deal age, start sealing if any 
deal has less than X (configurable) to start deadline + sealTime := time.Unix(sector.CreationTime, 0).Add(cfg.WaitDealsDelay) + + if now.After(sealTime) { + log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timeout") + return true, ctx.Send(SectorStartPacking{}) + } + + m.sectorTimers[m.minerSectorID(sector.SectorNumber)] = time.AfterFunc(sealTime.Sub(now), func() { + log.Infow("starting to seal deal sector", "sector", sector.SectorNumber, "trigger", "wait-timer") + + if err := ctx.Send(SectorStartPacking{}); err != nil { + log.Errorw("sending SectorStartPacking event failed", "sector", sector.SectorNumber, "error", err) + } + }) + } + + return false, nil +} + +func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) error { + ssize, err := sector.SectorType.SectorSize() + if err != nil { + return err + } + + res := SectorPieceAdded{} + + m.inputLk.Lock() + + pending, ok := m.assignedPieces[m.minerSectorID(sector.SectorNumber)] + if ok { + delete(m.assignedPieces, m.minerSectorID(sector.SectorNumber)) + } + m.inputLk.Unlock() + if !ok { + // nothing to do here (might happen after a restart in AddPiece) + return ctx.Send(res) + } + + var offset abi.UnpaddedPieceSize + pieceSizes := make([]abi.UnpaddedPieceSize, len(sector.Pieces)) + for i, p := range sector.Pieces { + pieceSizes[i] = p.Piece.Size.Unpadded() + offset += p.Piece.Size.Unpadded() + } + + maxDeals, err := getDealPerSectorLimit(ssize) + if err != nil { + return xerrors.Errorf("getting per-sector deal limit: %w", err) + } + + for i, piece := range pending { + m.inputLk.Lock() + deal, ok := m.pendingPieces[piece] + m.inputLk.Unlock() + if !ok { + return xerrors.Errorf("piece %s assigned to sector %d not found", piece, sector.SectorNumber) + } + + if len(sector.dealIDs())+(i+1) > maxDeals { + // todo: this is rather unlikely to happen, but in case it does, return the deal to waiting queue instead of failing it + deal.accepted(sector.SectorNumber, 
offset, xerrors.Errorf("too many deals assigned to sector %d, dropping deal", sector.SectorNumber)) + continue + } + + pads, padLength := ffiwrapper.GetRequiredPadding(offset.Padded(), deal.size.Padded()) + + if offset.Padded()+padLength+deal.size.Padded() > abi.PaddedPieceSize(ssize) { + // todo: this is rather unlikely to happen, but in case it does, return the deal to waiting queue instead of failing it + deal.accepted(sector.SectorNumber, offset, xerrors.Errorf("piece %s assigned to sector %d with not enough space", piece, sector.SectorNumber)) + continue + } + + offset += padLength.Unpadded() + + for _, p := range pads { + ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx.Context(), DealSectorPriority), + m.minerSector(sector.SectorType, sector.SectorNumber), + pieceSizes, + p.Unpadded(), + NewNullReader(p.Unpadded())) + if err != nil { + err = xerrors.Errorf("writing padding piece: %w", err) + deal.accepted(sector.SectorNumber, offset, err) + return ctx.Send(SectorAddPieceFailed{err}) + } + + pieceSizes = append(pieceSizes, p.Unpadded()) + res.NewPieces = append(res.NewPieces, Piece{ + Piece: ppi, + }) + } + + ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx.Context(), DealSectorPriority), + m.minerSector(sector.SectorType, sector.SectorNumber), + pieceSizes, + deal.size, + deal.data) + if err != nil { + err = xerrors.Errorf("writing piece: %w", err) + deal.accepted(sector.SectorNumber, offset, err) + return ctx.Send(SectorAddPieceFailed{err}) + } + + log.Infow("deal added to a sector", "deal", deal.deal.DealID, "sector", sector.SectorNumber, "piece", ppi.PieceCID) + + deal.accepted(sector.SectorNumber, offset, nil) + + offset += deal.size + pieceSizes = append(pieceSizes, deal.size) + + res.NewPieces = append(res.NewPieces, Piece{ + Piece: ppi, + DealInfo: &deal.deal, + }) + } + + return ctx.Send(res) +} + +func (m *Sealing) handleAddPieceFailed(ctx statemachine.Context, sector SectorInfo) error { + log.Errorf("No recovery plan for 
AddPiece failing") + // todo: cleanup sector / just go retry (requires adding offset param to AddPiece in sector-storage for this to be safe) + return nil +} + +func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, data storage.Data, deal api.PieceDealInfo) (api.SectorOffset, error) { + log.Infof("Adding piece for deal %d (publish msg: %s)", deal.DealID, deal.PublishCid) + if (padreader.PaddedSize(uint64(size))) != size { + return api.SectorOffset{}, xerrors.Errorf("cannot allocate unpadded piece") + } + + sp, err := m.currentSealProof(ctx) + if err != nil { + return api.SectorOffset{}, xerrors.Errorf("getting current seal proof type: %w", err) + } + + ssize, err := sp.SectorSize() + if err != nil { + return api.SectorOffset{}, err + } + + if size > abi.PaddedPieceSize(ssize).Unpadded() { + return api.SectorOffset{}, xerrors.Errorf("piece cannot fit into a sector") + } + + if _, err := deal.DealProposal.Cid(); err != nil { + return api.SectorOffset{}, xerrors.Errorf("getting proposal CID: %w", err) + } + + m.inputLk.Lock() + if _, exist := m.pendingPieces[proposalCID(deal)]; exist { + m.inputLk.Unlock() + return api.SectorOffset{}, xerrors.Errorf("piece for deal %s already pending", proposalCID(deal)) + } + + resCh := make(chan struct { + sn abi.SectorNumber + offset abi.UnpaddedPieceSize + err error + }, 1) + + m.pendingPieces[proposalCID(deal)] = &pendingPiece{ + size: size, + deal: deal, + data: data, + assigned: false, + accepted: func(sn abi.SectorNumber, offset abi.UnpaddedPieceSize, err error) { + resCh <- struct { + sn abi.SectorNumber + offset abi.UnpaddedPieceSize + err error + }{sn: sn, offset: offset, err: err} + }, + } + + go func() { + defer m.inputLk.Unlock() + if err := m.updateInput(ctx, sp); err != nil { + log.Errorf("%+v", err) + } + }() + + res := <-resCh + + return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err +} + +// called with m.inputLk +func (m *Sealing) updateInput(ctx 
context.Context, sp abi.RegisteredSealProof) error { + ssize, err := sp.SectorSize() + if err != nil { + return err + } + + type match struct { + sector abi.SectorID + deal cid.Cid + + size abi.UnpaddedPieceSize + padding abi.UnpaddedPieceSize + } + + var matches []match + toAssign := map[cid.Cid]struct{}{} // used to maybe create new sectors + + // todo: this is distinctly O(n^2), may need to be optimized for tiny deals and large scale miners + // (unlikely to be a problem now) + for proposalCid, piece := range m.pendingPieces { + if piece.assigned { + continue // already assigned to a sector, skip + } + + toAssign[proposalCid] = struct{}{} + + for id, sector := range m.openSectors { + avail := abi.PaddedPieceSize(ssize).Unpadded() - sector.used + + if piece.size <= avail { // (note: if we have enough space for the piece, we also have enough space for inter-piece padding) + matches = append(matches, match{ + sector: id, + deal: proposalCid, + + size: piece.size, + padding: avail % piece.size, + }) + } + } + } + sort.Slice(matches, func(i, j int) bool { + if matches[i].padding != matches[j].padding { // less padding is better + return matches[i].padding < matches[j].padding + } + + if matches[i].size != matches[j].size { // larger pieces are better + return matches[i].size < matches[j].size + } + + return matches[i].sector.Number < matches[j].sector.Number // prefer older sectors + }) + + var assigned int + for _, mt := range matches { + if m.pendingPieces[mt.deal].assigned { + assigned++ + continue + } + + if _, found := m.openSectors[mt.sector]; !found { + continue + } + + avail := abi.PaddedPieceSize(ssize).Unpadded() - m.openSectors[mt.sector].used + + if mt.size > avail { + continue + } + + err := m.openSectors[mt.sector].maybeAccept(mt.deal) + if err != nil { + m.pendingPieces[mt.deal].accepted(mt.sector.Number, 0, err) // non-error case in handleAddPiece + } + + m.openSectors[mt.sector].used += mt.padding + mt.size + + m.pendingPieces[mt.deal].assigned = 
true + delete(toAssign, mt.deal) + + if err != nil { + log.Errorf("sector %d rejected deal %s: %+v", mt.sector, mt.deal, err) + continue + } + } + + if len(toAssign) > 0 { + if err := m.tryCreateDealSector(ctx, sp); err != nil { + log.Errorw("Failed to create a new sector for deals", "error", err) + } + } + + return nil +} + +func (m *Sealing) tryCreateDealSector(ctx context.Context, sp abi.RegisteredSealProof) error { + m.startupWait.Wait() + + if m.creating != nil { + return nil // new sector is being created right now + } + + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting storage config: %w", err) + } + + if cfg.MaxSealingSectorsForDeals > 0 && m.stats.curSealing() >= cfg.MaxSealingSectorsForDeals { + return nil + } + + if cfg.MaxWaitDealsSectors > 0 && m.stats.curStaging() >= cfg.MaxWaitDealsSectors { + return nil + } + + sid, err := m.createSector(ctx, cfg, sp) + if err != nil { + return err + } + + m.creating = &sid + + log.Infow("Creating sector", "number", sid, "type", "deal", "proofType", sp) + return m.sectors.Send(uint64(sid), SectorStart{ + ID: sid, + SectorType: sp, + }) +} + +// call with m.inputLk +func (m *Sealing) createSector(ctx context.Context, cfg sealiface.Config, sp abi.RegisteredSealProof) (abi.SectorNumber, error) { + // Now actually create a new sector + + sid, err := m.sc.Next() + if err != nil { + return 0, xerrors.Errorf("getting sector number: %w", err) + } + + err = m.sealer.NewSector(ctx, m.minerSector(sp, sid)) + if err != nil { + return 0, xerrors.Errorf("initializing sector: %w", err) + } + + // update stats early, fsm planner would do that async + m.stats.updateSector(cfg, m.minerSectorID(sid), UndefinedSectorState) + + return sid, nil +} + +func (m *Sealing) StartPacking(sid abi.SectorNumber) error { + m.startupWait.Wait() + + log.Infow("starting to seal deal sector", "sector", sid, "trigger", "user") + return m.sectors.Send(uint64(sid), SectorStartPacking{}) +} + +func proposalCID(deal 
api.PieceDealInfo) cid.Cid { + pc, err := deal.DealProposal.Cid() + if err != nil { + log.Errorf("DealProposal.Cid error: %+v", err) + return cid.Undef + } + + return pc +} diff --git a/extern/storage-sealing/mocks/mock_commit_batcher.go b/extern/storage-sealing/mocks/mock_commit_batcher.go new file mode 100644 index 00000000000..061121899c8 --- /dev/null +++ b/extern/storage-sealing/mocks/mock_commit_batcher.go @@ -0,0 +1,164 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/lotus/extern/storage-sealing (interfaces: CommitBatcherApi) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + network "github.com/filecoin-project/go-state-types/network" + miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" +) + +// MockCommitBatcherApi is a mock of CommitBatcherApi interface. +type MockCommitBatcherApi struct { + ctrl *gomock.Controller + recorder *MockCommitBatcherApiMockRecorder +} + +// MockCommitBatcherApiMockRecorder is the mock recorder for MockCommitBatcherApi. +type MockCommitBatcherApiMockRecorder struct { + mock *MockCommitBatcherApi +} + +// NewMockCommitBatcherApi creates a new mock instance. +func NewMockCommitBatcherApi(ctrl *gomock.Controller) *MockCommitBatcherApi { + mock := &MockCommitBatcherApi{ctrl: ctrl} + mock.recorder = &MockCommitBatcherApiMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockCommitBatcherApi) EXPECT() *MockCommitBatcherApiMockRecorder { + return m.recorder +} + +// ChainBaseFee mocks base method. +func (m *MockCommitBatcherApi) ChainBaseFee(arg0 context.Context, arg1 sealing.TipSetToken) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainBaseFee", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainBaseFee indicates an expected call of ChainBaseFee. +func (mr *MockCommitBatcherApiMockRecorder) ChainBaseFee(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainBaseFee", reflect.TypeOf((*MockCommitBatcherApi)(nil).ChainBaseFee), arg0, arg1) +} + +// ChainHead mocks base method. +func (m *MockCommitBatcherApi) ChainHead(arg0 context.Context) (sealing.TipSetToken, abi.ChainEpoch, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHead", arg0) + ret0, _ := ret[0].(sealing.TipSetToken) + ret1, _ := ret[1].(abi.ChainEpoch) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ChainHead indicates an expected call of ChainHead. +func (mr *MockCommitBatcherApiMockRecorder) ChainHead(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockCommitBatcherApi)(nil).ChainHead), arg0) +} + +// SendMsg mocks base method. +func (m *MockCommitBatcherApi) SendMsg(arg0 context.Context, arg1, arg2 address.Address, arg3 abi.MethodNum, arg4, arg5 big.Int, arg6 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SendMsg indicates an expected call of SendMsg. 
+func (mr *MockCommitBatcherApiMockRecorder) SendMsg(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockCommitBatcherApi)(nil).SendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// StateMinerAvailableBalance mocks base method. +func (m *MockCommitBatcherApi) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance. +func (mr *MockCommitBatcherApiMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateMinerAvailableBalance), arg0, arg1, arg2) +} + +// StateMinerInfo mocks base method. +func (m *MockCommitBatcherApi) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (miner.MinerInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2) + ret0, _ := ret[0].(miner.MinerInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInfo indicates an expected call of StateMinerInfo. +func (mr *MockCommitBatcherApiMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateMinerInfo), arg0, arg1, arg2) +} + +// StateMinerInitialPledgeCollateral mocks base method. 
+func (m *MockCommitBatcherApi) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 sealing.TipSetToken) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral. +func (mr *MockCommitBatcherApiMockRecorder) StateMinerInitialPledgeCollateral(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInitialPledgeCollateral", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateMinerInitialPledgeCollateral), arg0, arg1, arg2, arg3) +} + +// StateNetworkVersion mocks base method. +func (m *MockCommitBatcherApi) StateNetworkVersion(arg0 context.Context, arg1 sealing.TipSetToken) (network.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateNetworkVersion", arg0, arg1) + ret0, _ := ret[0].(network.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateNetworkVersion indicates an expected call of StateNetworkVersion. +func (mr *MockCommitBatcherApiMockRecorder) StateNetworkVersion(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkVersion", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateNetworkVersion), arg0, arg1) +} + +// StateSectorPreCommitInfo mocks base method. 
+func (m *MockCommitBatcherApi) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 sealing.TipSetToken) (*miner.SectorPreCommitOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorPreCommitOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo. +func (mr *MockCommitBatcherApiMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3) +} diff --git a/extern/storage-sealing/mocks/mock_precommit_batcher.go b/extern/storage-sealing/mocks/mock_precommit_batcher.go new file mode 100644 index 00000000000..ed97229b405 --- /dev/null +++ b/extern/storage-sealing/mocks/mock_precommit_batcher.go @@ -0,0 +1,102 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/lotus/extern/storage-sealing (interfaces: PreCommitBatcherApi) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" +) + +// MockPreCommitBatcherApi is a mock of PreCommitBatcherApi interface. 
+type MockPreCommitBatcherApi struct { + ctrl *gomock.Controller + recorder *MockPreCommitBatcherApiMockRecorder +} + +// MockPreCommitBatcherApiMockRecorder is the mock recorder for MockPreCommitBatcherApi. +type MockPreCommitBatcherApiMockRecorder struct { + mock *MockPreCommitBatcherApi +} + +// NewMockPreCommitBatcherApi creates a new mock instance. +func NewMockPreCommitBatcherApi(ctrl *gomock.Controller) *MockPreCommitBatcherApi { + mock := &MockPreCommitBatcherApi{ctrl: ctrl} + mock.recorder = &MockPreCommitBatcherApiMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPreCommitBatcherApi) EXPECT() *MockPreCommitBatcherApiMockRecorder { + return m.recorder +} + +// ChainHead mocks base method. +func (m *MockPreCommitBatcherApi) ChainHead(arg0 context.Context) (sealing.TipSetToken, abi.ChainEpoch, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHead", arg0) + ret0, _ := ret[0].(sealing.TipSetToken) + ret1, _ := ret[1].(abi.ChainEpoch) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ChainHead indicates an expected call of ChainHead. +func (mr *MockPreCommitBatcherApiMockRecorder) ChainHead(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).ChainHead), arg0) +} + +// SendMsg mocks base method. +func (m *MockPreCommitBatcherApi) SendMsg(arg0 context.Context, arg1, arg2 address.Address, arg3 abi.MethodNum, arg4, arg5 big.Int, arg6 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SendMsg indicates an expected call of SendMsg. 
+func (mr *MockPreCommitBatcherApiMockRecorder) SendMsg(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).SendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// StateMinerAvailableBalance mocks base method. +func (m *MockPreCommitBatcherApi) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance. +func (mr *MockPreCommitBatcherApiMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).StateMinerAvailableBalance), arg0, arg1, arg2) +} + +// StateMinerInfo mocks base method. +func (m *MockPreCommitBatcherApi) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (miner.MinerInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2) + ret0, _ := ret[0].(miner.MinerInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInfo indicates an expected call of StateMinerInfo. 
+func (mr *MockPreCommitBatcherApiMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).StateMinerInfo), arg0, arg1, arg2) +} diff --git a/extern/storage-sealing/precommit_batch.go b/extern/storage-sealing/precommit_batch.go new file mode 100644 index 00000000000..719455b909f --- /dev/null +++ b/extern/storage-sealing/precommit_batch.go @@ -0,0 +1,371 @@ +package sealing + +import ( + "bytes" + "context" + "sort" + "sync" + "time" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/node/config" +) + +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_precommit_batcher.go -package=mocks . 
PreCommitBatcherApi + +type PreCommitBatcherApi interface { + SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) + StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) + StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) + ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error) +} + +type preCommitEntry struct { + deposit abi.TokenAmount + pci *miner0.SectorPreCommitInfo +} + +type PreCommitBatcher struct { + api PreCommitBatcherApi + maddr address.Address + mctx context.Context + addrSel AddrSel + feeCfg config.MinerFeeConfig + getConfig GetSealingConfigFunc + + cutoffs map[abi.SectorNumber]time.Time + todo map[abi.SectorNumber]*preCommitEntry + waiting map[abi.SectorNumber][]chan sealiface.PreCommitBatchRes + + notify, stop, stopped chan struct{} + force chan chan []sealiface.PreCommitBatchRes + lk sync.Mutex +} + +func NewPreCommitBatcher(mctx context.Context, maddr address.Address, api PreCommitBatcherApi, addrSel AddrSel, feeCfg config.MinerFeeConfig, getConfig GetSealingConfigFunc) *PreCommitBatcher { + b := &PreCommitBatcher{ + api: api, + maddr: maddr, + mctx: mctx, + addrSel: addrSel, + feeCfg: feeCfg, + getConfig: getConfig, + + cutoffs: map[abi.SectorNumber]time.Time{}, + todo: map[abi.SectorNumber]*preCommitEntry{}, + waiting: map[abi.SectorNumber][]chan sealiface.PreCommitBatchRes{}, + + notify: make(chan struct{}, 1), + force: make(chan chan []sealiface.PreCommitBatchRes), + stop: make(chan struct{}), + stopped: make(chan struct{}), + } + + go b.run() + + return b +} + +func (b *PreCommitBatcher) run() { + var forceRes chan []sealiface.PreCommitBatchRes + var lastRes []sealiface.PreCommitBatchRes + + cfg, err := b.getConfig() + if err != nil { + panic(err) + } + + timer := time.NewTimer(b.batchWait(cfg.PreCommitBatchWait, cfg.PreCommitBatchSlack)) + for { + if forceRes != nil { + 
forceRes <- lastRes + forceRes = nil + } + lastRes = nil + + var sendAboveMax bool + select { + case <-b.stop: + close(b.stopped) + return + case <-b.notify: + sendAboveMax = true + case <-timer.C: + // do nothing + case fr := <-b.force: // user triggered + forceRes = fr + } + + var err error + lastRes, err = b.maybeStartBatch(sendAboveMax) + if err != nil { + log.Warnw("PreCommitBatcher processBatch error", "error", err) + } + + if !timer.Stop() { + select { + case <-timer.C: + default: + } + } + + timer.Reset(b.batchWait(cfg.PreCommitBatchWait, cfg.PreCommitBatchSlack)) + } +} + +func (b *PreCommitBatcher) batchWait(maxWait, slack time.Duration) time.Duration { + now := time.Now() + + b.lk.Lock() + defer b.lk.Unlock() + + if len(b.todo) == 0 { + return maxWait + } + + var cutoff time.Time + for sn := range b.todo { + sectorCutoff := b.cutoffs[sn] + if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) { + cutoff = sectorCutoff + } + } + for sn := range b.waiting { + sectorCutoff := b.cutoffs[sn] + if cutoff.IsZero() || (!sectorCutoff.IsZero() && sectorCutoff.Before(cutoff)) { + cutoff = sectorCutoff + } + } + + if cutoff.IsZero() { + return maxWait + } + + cutoff = cutoff.Add(-slack) + if cutoff.Before(now) { + return time.Nanosecond // can't return 0 + } + + wait := cutoff.Sub(now) + if wait > maxWait { + wait = maxWait + } + + return wait +} + +func (b *PreCommitBatcher) maybeStartBatch(notif bool) ([]sealiface.PreCommitBatchRes, error) { + b.lk.Lock() + defer b.lk.Unlock() + + total := len(b.todo) + if total == 0 { + return nil, nil // nothing to do + } + + cfg, err := b.getConfig() + if err != nil { + return nil, xerrors.Errorf("getting config: %w", err) + } + + if notif && total < cfg.MaxPreCommitBatch { + return nil, nil + } + + // todo support multiple batches + res, err := b.processBatch(cfg) + if err != nil && len(res) == 0 { + return nil, err + } + + for _, r := range res { + if err != nil { + r.Error = err.Error() + } + + for _, 
sn := range r.Sectors { + for _, ch := range b.waiting[sn] { + ch <- r // buffered + } + + delete(b.waiting, sn) + delete(b.todo, sn) + delete(b.cutoffs, sn) + } + } + + return res, nil +} + +func (b *PreCommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.PreCommitBatchRes, error) { + params := miner5.PreCommitSectorBatchParams{} + deposit := big.Zero() + var res sealiface.PreCommitBatchRes + + for _, p := range b.todo { + if len(params.Sectors) >= cfg.MaxPreCommitBatch { + log.Infow("precommit batch full") + break + } + + res.Sectors = append(res.Sectors, p.pci.SectorNumber) + params.Sectors = append(params.Sectors, *p.pci) + deposit = big.Add(deposit, p.deposit) + } + + deposit, err := collateralSendAmount(b.mctx, b.api, b.maddr, cfg, deposit) + if err != nil { + return []sealiface.PreCommitBatchRes{res}, err + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("couldn't serialize PreCommitSectorBatchParams: %w", err) + } + + mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil) + if err != nil { + return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("couldn't get miner info: %w", err) + } + + maxFee := b.feeCfg.MaxPreCommitBatchGasFee.FeeForSectors(len(params.Sectors)) + goodFunds := big.Add(deposit, maxFee) + + from, _, err := b.addrSel(b.mctx, mi, api.PreCommitAddr, goodFunds, deposit) + if err != nil { + return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("no good address found: %w", err) + } + + mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.PreCommitSectorBatch, deposit, maxFee, enc.Bytes()) + if err != nil { + return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("sending message failed: %w", err) + } + + res.Msg = &mcid + + log.Infow("Sent PreCommitSectorBatch message", "cid", mcid, "from", from, "sectors", len(b.todo)) + + return []sealiface.PreCommitBatchRes{res}, nil +} + +// register PreCommit, wait for batch message, 
return message CID +func (b *PreCommitBatcher) AddPreCommit(ctx context.Context, s SectorInfo, deposit abi.TokenAmount, in *miner0.SectorPreCommitInfo) (res sealiface.PreCommitBatchRes, err error) { + _, curEpoch, err := b.api.ChainHead(b.mctx) + if err != nil { + log.Errorf("getting chain head: %s", err) + return sealiface.PreCommitBatchRes{}, err + } + + sn := s.SectorNumber + + b.lk.Lock() + b.cutoffs[sn] = getPreCommitCutoff(curEpoch, s) + b.todo[sn] = &preCommitEntry{ + deposit: deposit, + pci: in, + } + + sent := make(chan sealiface.PreCommitBatchRes, 1) + b.waiting[sn] = append(b.waiting[sn], sent) + + select { + case b.notify <- struct{}{}: + default: // already have a pending notification, don't need more + } + b.lk.Unlock() + + select { + case c := <-sent: + return c, nil + case <-ctx.Done(): + return sealiface.PreCommitBatchRes{}, ctx.Err() + } +} + +func (b *PreCommitBatcher) Flush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) { + resCh := make(chan []sealiface.PreCommitBatchRes, 1) + select { + case b.force <- resCh: + select { + case res := <-resCh: + return res, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (b *PreCommitBatcher) Pending(ctx context.Context) ([]abi.SectorID, error) { + b.lk.Lock() + defer b.lk.Unlock() + + mid, err := address.IDFromAddress(b.maddr) + if err != nil { + return nil, err + } + + res := make([]abi.SectorID, 0) + for _, s := range b.todo { + res = append(res, abi.SectorID{ + Miner: abi.ActorID(mid), + Number: s.pci.SectorNumber, + }) + } + + sort.Slice(res, func(i, j int) bool { + if res[i].Miner != res[j].Miner { + return res[i].Miner < res[j].Miner + } + + return res[i].Number < res[j].Number + }) + + return res, nil +} + +func (b *PreCommitBatcher) Stop(ctx context.Context) error { + close(b.stop) + + select { + case <-b.stopped: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// TODO: If this returned epochs, it would make 
testing much easier +func getPreCommitCutoff(curEpoch abi.ChainEpoch, si SectorInfo) time.Time { + cutoffEpoch := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback + for _, p := range si.Pieces { + if p.DealInfo == nil { + continue + } + + startEpoch := p.DealInfo.DealSchedule.StartEpoch + if startEpoch < cutoffEpoch { + cutoffEpoch = startEpoch + } + } + + if cutoffEpoch <= curEpoch { + return time.Now() + } + + return time.Now().Add(time.Duration(cutoffEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second) +} diff --git a/extern/storage-sealing/precommit_batch_test.go b/extern/storage-sealing/precommit_batch_test.go new file mode 100644 index 00000000000..b6c35362e02 --- /dev/null +++ b/extern/storage-sealing/precommit_batch_test.go @@ -0,0 +1,257 @@ +package sealing_test + +import ( + "bytes" + "context" + "sort" + "sync" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/extern/storage-sealing/mocks" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/node/config" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" +) + +var fc = config.MinerFeeConfig{ + MaxPreCommitGasFee: types.FIL(types.FromFil(1)), + MaxCommitGasFee: types.FIL(types.FromFil(1)), + MaxTerminateGasFee: types.FIL(types.FromFil(1)), + MaxPreCommitBatchGasFee: config.BatchFeeConfig{Base: types.FIL(types.FromFil(3)), PerSector: types.FIL(types.FromFil(1))}, + 
MaxCommitBatchGasFee: config.BatchFeeConfig{Base: types.FIL(types.FromFil(3)), PerSector: types.FIL(types.FromFil(1))}, +} + +func TestPrecommitBatcher(t *testing.T) { + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + ctx := context.Background() + + as := func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { + return t0123, big.Zero(), nil + } + + maxBatch := miner5.PreCommitSectorBatchMaxSize + + cfg := func() (sealiface.Config, error) { + return sealiface.Config{ + MaxWaitDealsSectors: 2, + MaxSealingSectors: 0, + MaxSealingSectorsForDeals: 0, + WaitDealsDelay: time.Hour * 6, + AlwaysKeepUnsealedCopy: true, + + BatchPreCommits: true, + MaxPreCommitBatch: maxBatch, + PreCommitBatchWait: 24 * time.Hour, + PreCommitBatchSlack: 3 * time.Hour, + + AggregateCommits: true, + MinCommitBatch: miner5.MinAggregatedSectors, + MaxCommitBatch: miner5.MaxAggregatedSectors, + CommitBatchWait: 24 * time.Hour, + CommitBatchSlack: 1 * time.Hour, + + TerminateBatchMin: 1, + TerminateBatchMax: 100, + TerminateBatchWait: 5 * time.Minute, + }, nil + } + + type promise func(t *testing.T) + type action func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise + + actions := func(as ...action) action { + return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { + var ps []promise + for _, a := range as { + p := a(t, s, pcb) + if p != nil { + ps = append(ps, p) + } + } + + if len(ps) > 0 { + return func(t *testing.T) { + for _, p := range ps { + p(t) + } + } + } + return nil + } + } + + addSector := func(sn abi.SectorNumber) action { + return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { + var pcres sealiface.PreCommitBatchRes + var pcerr error + done := sync.Mutex{} + done.Lock() + + si := sealing.SectorInfo{ + SectorNumber: sn, + } + + 
s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) + + go func() { + defer done.Unlock() + pcres, pcerr = pcb.AddPreCommit(ctx, si, big.Zero(), &miner0.SectorPreCommitInfo{ + SectorNumber: si.SectorNumber, + SealedCID: fakePieceCid(t), + DealIDs: nil, + Expiration: 0, + }) + }() + + return func(t *testing.T) { + done.Lock() + require.NoError(t, pcerr) + require.Empty(t, pcres.Error) + require.Contains(t, pcres.Sectors, si.SectorNumber) + } + } + } + + addSectors := func(sectors []abi.SectorNumber) action { + as := make([]action, len(sectors)) + for i, sector := range sectors { + as[i] = addSector(sector) + } + return actions(as...) + } + + waitPending := func(n int) action { + return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { + require.Eventually(t, func() bool { + p, err := pcb.Pending(ctx) + require.NoError(t, err) + return len(p) == n + }, time.Second*5, 10*time.Millisecond) + + return nil + } + } + + expectSend := func(expect []abi.SectorNumber) action { + return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { + s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(miner.MinerInfo{Owner: t0123, Worker: t0123}, nil) + s.EXPECT().SendMsg(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), funMatcher(func(i interface{}) bool { + b := i.([]byte) + var params miner5.PreCommitSectorBatchParams + require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b))) + for s, number := range expect { + require.Equal(t, number, params.Sectors[s].SectorNumber) + } + return true + })) + return nil + } + } + + flush := func(expect []abi.SectorNumber) action { + return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { + _ = expectSend(expect)(t, s, pcb) + + r, err := pcb.Flush(ctx) + require.NoError(t, err) + require.Len(t, r, 1) + require.Empty(t, r[0].Error) + 
sort.Slice(r[0].Sectors, func(i, j int) bool { + return r[0].Sectors[i] < r[0].Sectors[j] + }) + require.Equal(t, expect, r[0].Sectors) + + return nil + } + } + + getSectors := func(n int) []abi.SectorNumber { + out := make([]abi.SectorNumber, n) + for i := range out { + out[i] = abi.SectorNumber(i) + } + return out + } + + tcs := map[string]struct { + actions []action + }{ + "addSingle": { + actions: []action{ + addSector(0), + waitPending(1), + flush([]abi.SectorNumber{0}), + }, + }, + "addTwo": { + actions: []action{ + addSectors(getSectors(2)), + waitPending(2), + flush(getSectors(2)), + }, + }, + "addMax": { + actions: []action{ + expectSend(getSectors(maxBatch)), + addSectors(getSectors(maxBatch)), + }, + }, + } + + for name, tc := range tcs { + tc := tc + + t.Run(name, func(t *testing.T) { + // create go mock controller here + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. + defer mockCtrl.Finish() + + // create them mocks + pcapi := mocks.NewMockPreCommitBatcherApi(mockCtrl) + + pcb := sealing.NewPreCommitBatcher(ctx, t0123, pcapi, as, fc, cfg) + + var promises []promise + + for _, a := range tc.actions { + p := a(t, pcapi, pcb) + if p != nil { + promises = append(promises, p) + } + } + + for _, p := range promises { + p(t) + } + + err := pcb.Stop(ctx) + require.NoError(t, err) + }) + } +} + +type funMatcher func(interface{}) bool + +func (funMatcher) Matches(interface{}) bool { + return true +} + +func (funMatcher) String() string { + return "fun" +} diff --git a/extern/storage-sealing/precommit_policy.go b/extern/storage-sealing/precommit_policy.go index 0b774b56ff7..a6add56930f 100644 --- a/extern/storage-sealing/precommit_policy.go +++ b/extern/storage-sealing/precommit_policy.go @@ -40,7 +40,10 @@ type BasicPreCommitPolicy struct { duration abi.ChainEpoch } -// NewBasicPreCommitPolicy produces a BasicPreCommitPolicy +// NewBasicPreCommitPolicy produces a BasicPreCommitPolicy. 
+// +// The provided duration is used as the default sector expiry when the sector +// contains no deals. The proving boundary is used to adjust/align the sector's expiration. func NewBasicPreCommitPolicy(api Chain, duration abi.ChainEpoch, provingBoundary abi.ChainEpoch) BasicPreCommitPolicy { return BasicPreCommitPolicy{ api: api, diff --git a/extern/storage-sealing/precommit_policy_test.go b/extern/storage-sealing/precommit_policy_test.go index 52814167a57..a6c17d3fdc5 100644 --- a/extern/storage-sealing/precommit_policy_test.go +++ b/extern/storage-sealing/precommit_policy_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/filecoin-project/go-state-types/network" + api "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/ipfs/go-cid" @@ -58,9 +59,9 @@ func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) { Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &sealing.DealInfo{ + DealInfo: &api.PieceDealInfo{ DealID: abi.DealID(42), - DealSchedule: sealing.DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: abi.ChainEpoch(70), EndEpoch: abi.ChainEpoch(75), }, @@ -71,9 +72,9 @@ func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) { Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &sealing.DealInfo{ + DealInfo: &api.PieceDealInfo{ DealID: abi.DealID(43), - DealSchedule: sealing.DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: abi.ChainEpoch(80), EndEpoch: abi.ChainEpoch(100), }, @@ -98,9 +99,9 @@ func TestBasicPolicyIgnoresExistingScheduleIfExpired(t *testing.T) { Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &sealing.DealInfo{ + DealInfo: &api.PieceDealInfo{ DealID: abi.DealID(44), - DealSchedule: sealing.DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: abi.ChainEpoch(1), EndEpoch: abi.ChainEpoch(10), }, @@ -125,9 +126,9 @@ func TestMissingDealIsIgnored(t *testing.T) { Size: abi.PaddedPieceSize(1024), 
PieceCID: fakePieceCid(t), }, - DealInfo: &sealing.DealInfo{ + DealInfo: &api.PieceDealInfo{ DealID: abi.DealID(44), - DealSchedule: sealing.DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: abi.ChainEpoch(1), EndEpoch: abi.ChainEpoch(10), }, diff --git a/extern/storage-sealing/sealiface/batching.go b/extern/storage-sealing/sealiface/batching.go new file mode 100644 index 00000000000..d0e6d4178c0 --- /dev/null +++ b/extern/storage-sealing/sealiface/batching.go @@ -0,0 +1,23 @@ +package sealiface + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" +) + +type CommitBatchRes struct { + Sectors []abi.SectorNumber + + FailedSectors map[abi.SectorNumber]string + + Msg *cid.Cid + Error string // if set, means that all sectors are failed, implies Msg==nil +} + +type PreCommitBatchRes struct { + Sectors []abi.SectorNumber + + Msg *cid.Cid + Error string // if set, means that all sectors are failed, implies Msg==nil +} diff --git a/extern/storage-sealing/sealiface/config.go b/extern/storage-sealing/sealiface/config.go index 945565562bd..e33b3626319 100644 --- a/extern/storage-sealing/sealiface/config.go +++ b/extern/storage-sealing/sealiface/config.go @@ -1,6 +1,10 @@ package sealiface -import "time" +import ( + "time" + + "github.com/filecoin-project/go-state-types/abi" +) // this has to be in a separate package to not make lotus API depend on filecoin-ffi @@ -15,4 +19,29 @@ type Config struct { MaxSealingSectorsForDeals uint64 WaitDealsDelay time.Duration + + AlwaysKeepUnsealedCopy bool + + FinalizeEarly bool + + CollateralFromMinerBalance bool + AvailableBalanceBuffer abi.TokenAmount + DisableCollateralFallback bool + + BatchPreCommits bool + MaxPreCommitBatch int + PreCommitBatchWait time.Duration + PreCommitBatchSlack time.Duration + + AggregateCommits bool + MinCommitBatch int + MaxCommitBatch int + CommitBatchWait time.Duration + CommitBatchSlack time.Duration + + AggregateAboveBaseFee abi.TokenAmount + + 
TerminateBatchMax uint64 + TerminateBatchMin uint64 + TerminateBatchWait time.Duration } diff --git a/extern/storage-sealing/sealing.go b/extern/storage-sealing/sealing.go index d9953eee009..3e40d10f396 100644 --- a/extern/storage-sealing/sealing.go +++ b/extern/storage-sealing/sealing.go @@ -3,12 +3,9 @@ package sealing import ( "context" "errors" - "io" - "math" "sync" "time" - "github.com/filecoin-project/go-state-types/network" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" @@ -16,15 +13,22 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - padreader "github.com/filecoin-project/go-padreader" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" statemachine "github.com/filecoin-project/go-statemachine" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/node/config" ) const SectorStorePrefix = "/sectors" @@ -49,15 +53,23 @@ type SealingAPI interface { StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorPreCommitOnChainInfo, error) StateSectorGetInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorOnChainInfo, error) StateSectorPartition(ctx context.Context, maddr address.Address, 
sectorNumber abi.SectorNumber, tok TipSetToken) (*SectorLocation, error) + StateLookupID(context.Context, address.Address, TipSetToken) (address.Address, error) StateMinerSectorSize(context.Context, address.Address, TipSetToken) (abi.SectorSize, error) StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok TipSetToken) (address.Address, error) StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) + StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) + StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error) - StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error) + StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (*api.MarketDeal, error) + StateMarketStorageDealProposal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error) StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error) + StateMinerProvingDeadline(context.Context, address.Address, TipSetToken) (*dline.Info, error) + StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tok TipSetToken) ([]api.Partition, error) SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error) + ChainBaseFee(context.Context, TipSetToken) (abi.TokenAmount, error) + ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) ChainGetRandomnessFromBeacon(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) 
(abi.Randomness, error) ChainGetRandomnessFromTickets(ctx context.Context, tok TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) ChainReadObj(context.Context, cid.Cid) ([]byte, error) @@ -65,49 +77,63 @@ type SealingAPI interface { type SectorStateNotifee func(before, after SectorInfo) +type AddrSel func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) + type Sealing struct { api SealingAPI - feeCfg FeeConfig + feeCfg config.MinerFeeConfig events Events + startupWait sync.WaitGroup + maddr address.Address sealer sectorstorage.SectorManager sectors *statemachine.StateGroup sc SectorIDCounter verif ffiwrapper.Verifier + pcp PreCommitPolicy - pcp PreCommitPolicy - unsealedInfoMap UnsealedSectorMap + inputLk sync.Mutex + openSectors map[abi.SectorID]*openSector + sectorTimers map[abi.SectorID]*time.Timer + pendingPieces map[cid.Cid]*pendingPiece + assignedPieces map[abi.SectorID][]cid.Cid + creating *abi.SectorNumber // used to prevent a race where we could create a new sector more than once upgradeLk sync.Mutex toUpgrade map[abi.SectorNumber]struct{} notifee SectorStateNotifee + addrSel AddrSel stats SectorStats + terminator *TerminateBatcher + precommiter *PreCommitBatcher + commiter *CommitBatcher + getConfig GetSealingConfigFunc + dealInfo *CurrentDealInfoManager } -type FeeConfig struct { - MaxPreCommitGasFee abi.TokenAmount - MaxCommitGasFee abi.TokenAmount -} +type openSector struct { + used abi.UnpaddedPieceSize // change to bitfield/rle when AddPiece gains offset support to better fill sectors -type UnsealedSectorMap struct { - infos map[abi.SectorNumber]UnsealedSectorInfo - lk sync.Mutex + maybeAccept func(cid.Cid) error // called with inputLk } -type UnsealedSectorInfo struct { - numDeals uint64 - // stored should always equal sum of pieceSizes.Padded() - stored abi.PaddedPieceSize - pieceSizes 
[]abi.UnpaddedPieceSize +type pendingPiece struct { + size abi.UnpaddedPieceSize + deal api.PieceDealInfo + + data storage.Data + + assigned bool // assigned to a sector? + accepted func(abi.SectorNumber, abi.UnpaddedPieceSize, error) } -func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee) *Sealing { +func New(mctx context.Context, api SealingAPI, fc config.MinerFeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sectorstorage.SectorManager, sc SectorIDCounter, verif ffiwrapper.Verifier, prov ffiwrapper.Prover, pcp PreCommitPolicy, gc GetSealingConfigFunc, notifee SectorStateNotifee, as AddrSel) *Sealing { s := &Sealing{ api: api, feeCfg: fc, @@ -118,21 +144,28 @@ func New(api SealingAPI, fc FeeConfig, events Events, maddr address.Address, ds sc: sc, verif: verif, pcp: pcp, - unsealedInfoMap: UnsealedSectorMap{ - infos: make(map[abi.SectorNumber]UnsealedSectorInfo), - lk: sync.Mutex{}, - }, - toUpgrade: map[abi.SectorNumber]struct{}{}, + openSectors: map[abi.SectorID]*openSector{}, + sectorTimers: map[abi.SectorID]*time.Timer{}, + pendingPieces: map[cid.Cid]*pendingPiece{}, + assignedPieces: map[abi.SectorID][]cid.Cid{}, + toUpgrade: map[abi.SectorNumber]struct{}{}, notifee: notifee, + addrSel: as, + + terminator: NewTerminationBatcher(mctx, maddr, api, as, fc, gc), + precommiter: NewPreCommitBatcher(mctx, maddr, api, as, fc, gc), + commiter: NewCommitBatcher(mctx, maddr, api, as, fc, gc, prov), getConfig: gc, + dealInfo: &CurrentDealInfoManager{api}, stats: SectorStats{ bySector: map[abi.SectorID]statSectorState{}, }, } + s.startupWait.Add(1) s.sectors = statemachine.New(namespace.Wrap(ds, datastore.NewKey(SectorStorePrefix)), s, SectorInfo{}) @@ -149,256 +182,74 @@ func (m *Sealing) Run(ctx context.Context) error { } func (m *Sealing) 
Stop(ctx context.Context) error { - return m.sectors.Stop(ctx) -} -func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { - log.Infof("Adding piece for deal %d (publish msg: %s)", d.DealID, d.PublishCid) - if (padreader.PaddedSize(uint64(size))) != size { - return 0, 0, xerrors.Errorf("cannot allocate unpadded piece") - } - - if size > abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded() { - return 0, 0, xerrors.Errorf("piece cannot fit into a sector") - } - - m.unsealedInfoMap.lk.Lock() - - sid, pads, err := m.getSectorAndPadding(size) - if err != nil { - m.unsealedInfoMap.lk.Unlock() - return 0, 0, xerrors.Errorf("getting available sector: %w", err) - } - - for _, p := range pads { - err = m.addPiece(ctx, sid, p.Unpadded(), NewNullReader(p.Unpadded()), nil) - if err != nil { - m.unsealedInfoMap.lk.Unlock() - return 0, 0, xerrors.Errorf("writing pads: %w", err) - } - } - - offset := m.unsealedInfoMap.infos[sid].stored - err = m.addPiece(ctx, sid, size, r, &d) - - if err != nil { - m.unsealedInfoMap.lk.Unlock() - return 0, 0, xerrors.Errorf("adding piece to sector: %w", err) - } - - startPacking := m.unsealedInfoMap.infos[sid].numDeals >= getDealPerSectorLimit(m.sealer.SectorSize()) - - m.unsealedInfoMap.lk.Unlock() - - if startPacking { - if err := m.StartPacking(sid); err != nil { - return 0, 0, xerrors.Errorf("start packing: %w", err) - } - } - - return sid, offset, nil -} - -// Caller should hold m.unsealedInfoMap.lk -func (m *Sealing) addPiece(ctx context.Context, sectorID abi.SectorNumber, size abi.UnpaddedPieceSize, r io.Reader, di *DealInfo) error { - log.Infof("Adding piece to sector %d", sectorID) - ppi, err := m.sealer.AddPiece(sectorstorage.WithPriority(ctx, DealSectorPriority), m.minerSector(sectorID), m.unsealedInfoMap.infos[sectorID].pieceSizes, size, r) - if err != nil { - return xerrors.Errorf("writing piece: %w", err) - } - piece := Piece{ - 
Piece: ppi, - DealInfo: di, - } - - err = m.sectors.Send(uint64(sectorID), SectorAddPiece{NewPiece: piece}) - if err != nil { + if err := m.terminator.Stop(ctx); err != nil { return err } - ui := m.unsealedInfoMap.infos[sectorID] - num := m.unsealedInfoMap.infos[sectorID].numDeals - if di != nil { - num = num + 1 - } - m.unsealedInfoMap.infos[sectorID] = UnsealedSectorInfo{ - numDeals: num, - stored: ui.stored + piece.Piece.Size, - pieceSizes: append(ui.pieceSizes, piece.Piece.Size.Unpadded()), + if err := m.sectors.Stop(ctx); err != nil { + return err } - return nil } func (m *Sealing) Remove(ctx context.Context, sid abi.SectorNumber) error { + m.startupWait.Wait() + return m.sectors.Send(uint64(sid), SectorRemove{}) } -// Caller should NOT hold m.unsealedInfoMap.lk -func (m *Sealing) StartPacking(sectorID abi.SectorNumber) error { - // locking here ensures that when the SectorStartPacking event is sent, the sector won't be picked up anywhere else - m.unsealedInfoMap.lk.Lock() - defer m.unsealedInfoMap.lk.Unlock() - - // cannot send SectorStartPacking to sectors that have already been packed, otherwise it will cause the state machine to exit - if _, ok := m.unsealedInfoMap.infos[sectorID]; !ok { - log.Warnf("call start packing, but sector %v not in unsealedInfoMap.infos, maybe have called", sectorID) - return nil - } - log.Infof("Starting packing sector %d", sectorID) - err := m.sectors.Send(uint64(sectorID), SectorStartPacking{}) - if err != nil { - return err - } - log.Infof("send Starting packing event success sector %d", sectorID) - - delete(m.unsealedInfoMap.infos, sectorID) +func (m *Sealing) Terminate(ctx context.Context, sid abi.SectorNumber) error { + m.startupWait.Wait() - return nil + return m.sectors.Send(uint64(sid), SectorTerminate{}) } -// Caller should hold m.unsealedInfoMap.lk -func (m *Sealing) getSectorAndPadding(size abi.UnpaddedPieceSize) (abi.SectorNumber, []abi.PaddedPieceSize, error) { - ss := abi.PaddedPieceSize(m.sealer.SectorSize()) - 
for k, v := range m.unsealedInfoMap.infos { - pads, padLength := ffiwrapper.GetRequiredPadding(v.stored, size.Padded()) - if v.stored+size.Padded()+padLength <= ss { - return k, pads, nil - } - } - - ns, err := m.newDealSector() - if err != nil { - return 0, nil, err - } - - m.unsealedInfoMap.infos[ns] = UnsealedSectorInfo{ - numDeals: 0, - stored: 0, - pieceSizes: nil, - } - - return ns, nil, nil +func (m *Sealing) TerminateFlush(ctx context.Context) (*cid.Cid, error) { + return m.terminator.Flush(ctx) } -// newDealSector creates a new sector for deal storage -func (m *Sealing) newDealSector() (abi.SectorNumber, error) { - // First make sure we don't have too many 'open' sectors - - cfg, err := m.getConfig() - if err != nil { - return 0, xerrors.Errorf("getting config: %w", err) - } - - if cfg.MaxSealingSectorsForDeals > 0 { - if m.stats.curSealing() > cfg.MaxSealingSectorsForDeals { - return 0, ErrTooManySectorsSealing - } - } - - if cfg.MaxWaitDealsSectors > 0 { - // run in a loop because we have to drop the map lock here for a bit - tries := 0 - - // we have to run in a loop as we're dropping unsealedInfoMap.lk - // to actually call StartPacking. When we do that, another entry can - // get added to unsealedInfoMap. - for uint64(len(m.unsealedInfoMap.infos)) >= cfg.MaxWaitDealsSectors { - if tries > 10 { - // whatever... 
- break - } - - if tries > 0 { - m.unsealedInfoMap.lk.Unlock() - time.Sleep(time.Second) - m.unsealedInfoMap.lk.Lock() - } - - tries++ - var mostStored abi.PaddedPieceSize = math.MaxUint64 - var best abi.SectorNumber = math.MaxUint64 - - for sn, info := range m.unsealedInfoMap.infos { - if info.stored+1 > mostStored+1 { // 18446744073709551615 + 1 = 0 - best = sn - } - } - - if best == math.MaxUint64 { - // probably not possible, but who knows - break - } - - m.unsealedInfoMap.lk.Unlock() - if err := m.StartPacking(best); err != nil { - log.Error("newDealSector StartPacking error: %+v", err) - continue // let's pretend this is fine - } - m.unsealedInfoMap.lk.Lock() - } - } - - // Now actually create a new sector +func (m *Sealing) TerminatePending(ctx context.Context) ([]abi.SectorID, error) { + return m.terminator.Pending(ctx) +} - sid, err := m.sc.Next() - if err != nil { - return 0, xerrors.Errorf("getting sector number: %w", err) - } +func (m *Sealing) SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) { + return m.precommiter.Flush(ctx) +} - err = m.sealer.NewSector(context.TODO(), m.minerSector(sid)) - if err != nil { - return 0, xerrors.Errorf("initializing sector: %w", err) - } +func (m *Sealing) SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) { + return m.precommiter.Pending(ctx) +} - rt, err := ffiwrapper.SealProofTypeFromSectorSize(m.sealer.SectorSize()) - if err != nil { - return 0, xerrors.Errorf("bad sector size: %w", err) - } +func (m *Sealing) CommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) { + return m.commiter.Flush(ctx) +} - log.Infof("Creating sector %d", sid) - err = m.sectors.Send(uint64(sid), SectorStart{ - ID: sid, - SectorType: rt, - }) +func (m *Sealing) CommitPending(ctx context.Context) ([]abi.SectorID, error) { + return m.commiter.Pending(ctx) +} +func (m *Sealing) currentSealProof(ctx context.Context) (abi.RegisteredSealProof, error) { + mi, err := 
m.api.StateMinerInfo(ctx, m.maddr, nil) if err != nil { - return 0, xerrors.Errorf("starting the sector fsm: %w", err) + return 0, err } - cf, err := m.getConfig() + ver, err := m.api.StateNetworkVersion(ctx, nil) if err != nil { - return 0, xerrors.Errorf("getting the sealing delay: %w", err) + return 0, err } - if cf.WaitDealsDelay > 0 { - timer := time.NewTimer(cf.WaitDealsDelay) - go func() { - <-timer.C - if err := m.StartPacking(sid); err != nil { - log.Errorf("starting sector %d: %+v", sid, err) - } - }() - } - - return sid, nil + return miner.PreferredSealProofTypeFromWindowPoStType(ver, mi.WindowPoStProofType) } -// newSectorCC accepts a slice of pieces with no deal (junk data) -func (m *Sealing) newSectorCC(sid abi.SectorNumber, pieces []Piece) error { - rt, err := ffiwrapper.SealProofTypeFromSectorSize(m.sealer.SectorSize()) - if err != nil { - return xerrors.Errorf("bad sector size: %w", err) +func (m *Sealing) minerSector(spt abi.RegisteredSealProof, num abi.SectorNumber) storage.SectorRef { + return storage.SectorRef{ + ID: m.minerSectorID(num), + ProofType: spt, } - - log.Infof("Creating CC sector %d", sid) - return m.sectors.Send(uint64(sid), SectorStartCC{ - ID: sid, - Pieces: pieces, - SectorType: rt, - }) } -func (m *Sealing) minerSector(num abi.SectorNumber) abi.SectorID { +func (m *Sealing) minerSectorID(num abi.SectorNumber) abi.SectorID { mid, err := address.IDFromAddress(m.maddr) if err != nil { panic(err) @@ -414,9 +265,9 @@ func (m *Sealing) Address() address.Address { return m.maddr } -func getDealPerSectorLimit(size abi.SectorSize) uint64 { +func getDealPerSectorLimit(size abi.SectorSize) (int, error) { if size < 64<<30 { - return 256 + return 256, nil } - return 512 + return 512, nil } diff --git a/extern/storage-sealing/sector_state.go b/extern/storage-sealing/sector_state.go index 8b0bff24a70..deb5e9f28e6 100644 --- a/extern/storage-sealing/sector_state.go +++ b/extern/storage-sealing/sector_state.go @@ -3,57 +3,85 @@ package sealing 
type SectorState string var ExistSectorStateList = map[SectorState]struct{}{ - Empty: {}, - WaitDeals: {}, - Packing: {}, - PreCommit1: {}, - PreCommit2: {}, - PreCommitting: {}, - PreCommitWait: {}, - WaitSeed: {}, - Committing: {}, - SubmitCommit: {}, - CommitWait: {}, - FinalizeSector: {}, - Proving: {}, - FailedUnrecoverable: {}, - SealPreCommit1Failed: {}, - SealPreCommit2Failed: {}, - PreCommitFailed: {}, - ComputeProofFailed: {}, - CommitFailed: {}, - PackingFailed: {}, - FinalizeFailed: {}, - DealsExpired: {}, - RecoverDealIDs: {}, - Faulty: {}, - FaultReported: {}, - FaultedFinal: {}, - Removing: {}, - RemoveFailed: {}, - Removed: {}, + Empty: {}, + WaitDeals: {}, + Packing: {}, + AddPiece: {}, + AddPieceFailed: {}, + GetTicket: {}, + PreCommit1: {}, + PreCommit2: {}, + PreCommitting: {}, + PreCommitWait: {}, + SubmitPreCommitBatch: {}, + PreCommitBatchWait: {}, + WaitSeed: {}, + Committing: {}, + CommitFinalize: {}, + CommitFinalizeFailed: {}, + SubmitCommit: {}, + CommitWait: {}, + SubmitCommitAggregate: {}, + CommitAggregateWait: {}, + FinalizeSector: {}, + Proving: {}, + FailedUnrecoverable: {}, + SealPreCommit1Failed: {}, + SealPreCommit2Failed: {}, + PreCommitFailed: {}, + ComputeProofFailed: {}, + CommitFailed: {}, + PackingFailed: {}, + FinalizeFailed: {}, + DealsExpired: {}, + RecoverDealIDs: {}, + Faulty: {}, + FaultReported: {}, + FaultedFinal: {}, + Terminating: {}, + TerminateWait: {}, + TerminateFinality: {}, + TerminateFailed: {}, + Removing: {}, + RemoveFailed: {}, + Removed: {}, } const ( UndefinedSectorState SectorState = "" // happy path - Empty SectorState = "Empty" - WaitDeals SectorState = "WaitDeals" // waiting for more pieces (deals) to be added to the sector - Packing SectorState = "Packing" // sector not in sealStore, and not on chain - GetTicket SectorState = "GetTicket" // generate ticket - PreCommit1 SectorState = "PreCommit1" // do PreCommit1 - PreCommit2 SectorState = "PreCommit2" // do PreCommit2 - PreCommitting SectorState 
= "PreCommitting" // on chain pre-commit - PreCommitWait SectorState = "PreCommitWait" // waiting for precommit to land on chain - WaitSeed SectorState = "WaitSeed" // waiting for seed - Committing SectorState = "Committing" // compute PoRep - SubmitCommit SectorState = "SubmitCommit" // send commit message to the chain - CommitWait SectorState = "CommitWait" // wait for the commit message to land on chain + Empty SectorState = "Empty" // deprecated + WaitDeals SectorState = "WaitDeals" // waiting for more pieces (deals) to be added to the sector + AddPiece SectorState = "AddPiece" // put deal data (and padding if required) into the sector + Packing SectorState = "Packing" // sector not in sealStore, and not on chain + GetTicket SectorState = "GetTicket" // generate ticket + PreCommit1 SectorState = "PreCommit1" // do PreCommit1 + PreCommit2 SectorState = "PreCommit2" // do PreCommit2 + + PreCommitting SectorState = "PreCommitting" // on chain pre-commit + PreCommitWait SectorState = "PreCommitWait" // waiting for precommit to land on chain + + SubmitPreCommitBatch SectorState = "SubmitPreCommitBatch" + PreCommitBatchWait SectorState = "PreCommitBatchWait" + + WaitSeed SectorState = "WaitSeed" // waiting for seed + Committing SectorState = "Committing" // compute PoRep + CommitFinalize SectorState = "CommitFinalize" // cleanup sector metadata before submitting the proof (early finalize) + CommitFinalizeFailed SectorState = "CommitFinalizeFailed" + + // single commit + SubmitCommit SectorState = "SubmitCommit" // send commit message to the chain + CommitWait SectorState = "CommitWait" // wait for the commit message to land on chain + + SubmitCommitAggregate SectorState = "SubmitCommitAggregate" + CommitAggregateWait SectorState = "CommitAggregateWait" + FinalizeSector SectorState = "FinalizeSector" Proving SectorState = "Proving" // error modes FailedUnrecoverable SectorState = "FailedUnrecoverable" + AddPieceFailed SectorState = "AddPieceFailed" 
SealPreCommit1Failed SectorState = "SealPreCommit1Failed" SealPreCommit2Failed SectorState = "SealPreCommit2Failed" PreCommitFailed SectorState = "PreCommitFailed" @@ -68,6 +96,11 @@ const ( FaultReported SectorState = "FaultReported" // sector has been declared as a fault on chain FaultedFinal SectorState = "FaultedFinal" // fault declared on chain + Terminating SectorState = "Terminating" + TerminateWait SectorState = "TerminateWait" + TerminateFinality SectorState = "TerminateFinality" + TerminateFailed SectorState = "TerminateFailed" + Removing SectorState = "Removing" RemoveFailed SectorState = "RemoveFailed" Removed SectorState = "Removed" @@ -75,9 +108,11 @@ const ( func toStatState(st SectorState) statSectorState { switch st { - case Empty, WaitDeals, Packing, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, CommitWait, FinalizeSector: + case UndefinedSectorState, Empty, WaitDeals, AddPiece: + return sstStaging + case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector: return sstSealing - case Proving, Removed, Removing: + case Proving, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed: return sstProving } diff --git a/extern/storage-sealing/states_failed.go b/extern/storage-sealing/states_failed.go index b583701aea9..bd5f489b40e 100644 --- a/extern/storage-sealing/states_failed.go +++ b/extern/storage-sealing/states_failed.go @@ -1,9 +1,9 @@ package sealing import ( - "bytes" "time" + "github.com/hashicorp/go-multierror" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/actors/builtin/market" @@ -13,7 +13,7 @@ import ( "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-statemachine" - "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" + 
"github.com/filecoin-project/go-commp-utils/zerocomm" ) const minRetryTime = 1 * time.Minute @@ -77,6 +77,34 @@ func (m *Sealing) handlePreCommitFailed(ctx statemachine.Context, sector SectorI return nil } + if sector.PreCommitMessage != nil { + mw, err := m.api.StateSearchMsg(ctx.Context(), *sector.PreCommitMessage) + if err != nil { + // API error + if err := failedCooldown(ctx, sector); err != nil { + return err + } + + return ctx.Send(SectorRetryPreCommitWait{}) + } + + if mw == nil { + // API error in precommit + return ctx.Send(SectorRetryPreCommitWait{}) + } + + switch mw.Receipt.ExitCode { + case exitcode.Ok: + // API error in PreCommitWait + return ctx.Send(SectorRetryPreCommitWait{}) + case exitcode.SysErrOutOfGas: + // API error in PreCommitWait AND gas estimator guessed a wrong number in PreCommit + return ctx.Send(SectorRetryPreCommit{}) + default: + // something else went wrong + } + } + if err := checkPrecommit(ctx.Context(), m.Address(), sector, tok, height, m.api); err != nil { switch err.(type) { case *ErrApi: @@ -109,12 +137,12 @@ func (m *Sealing) handlePreCommitFailed(ctx statemachine.Context, sector SectorI if pci, is := m.checkPreCommitted(ctx, sector); is && pci != nil { if sector.PreCommitMessage == nil { - log.Warn("sector %d is precommitted on chain, but we don't have precommit message", sector.SectorNumber) + log.Warnf("sector %d is precommitted on chain, but we don't have precommit message", sector.SectorNumber) return ctx.Send(SectorPreCommitLanded{TipSet: tok}) } if pci.Info.SealedCID != *sector.CommR { - log.Warn("sector %d is precommitted on chain, with different CommR: %x != %x", sector.SectorNumber, pci.Info.SealedCID, sector.CommR) + log.Warnf("sector %d is precommitted on chain, with different CommR: %s != %s", sector.SectorNumber, pci.Info.SealedCID, sector.CommR) return nil // TODO: remove when the actor allows re-precommit } @@ -154,36 +182,37 @@ func (m *Sealing) handleComputeProofFailed(ctx statemachine.Context, sector Sect 
} func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo) error { - tok, height, err := m.api.ChainHead(ctx.Context()) + tok, _, err := m.api.ChainHead(ctx.Context()) if err != nil { log.Errorf("handleCommitting: api error, not proceeding: %+v", err) return nil } - if err := checkPrecommit(ctx.Context(), m.maddr, sector, tok, height, m.api); err != nil { - switch err.(type) { - case *ErrApi: - log.Errorf("handleCommitFailed: api error, not proceeding: %+v", err) - return nil - case *ErrBadCommD: - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad CommD error: %w", err)}) - case *ErrExpiredTicket: - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired error: %w", err)}) - case *ErrBadTicket: - return ctx.Send(SectorTicketExpired{xerrors.Errorf("expired ticket: %w", err)}) - case *ErrInvalidDeals: - log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err) - return ctx.Send(SectorInvalidDealIDs{Return: RetCommitFailed}) - case *ErrExpiredDeals: - return ctx.Send(SectorDealsExpired{xerrors.Errorf("sector deals expired: %w", err)}) - case nil: - return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("no precommit: %w", err)}) - case *ErrPrecommitOnChain: - // noop, this is expected - case *ErrSectorNumberAllocated: - // noop, already committed? 
+	if sector.CommitMessage != nil {
+		mw, err := m.api.StateSearchMsg(ctx.Context(), *sector.CommitMessage)
+		if err != nil {
+			// API error
+			if err := failedCooldown(ctx, sector); err != nil {
+				return err
+			}
+
+			return ctx.Send(SectorRetryCommitWait{})
+		}
+
+		if mw == nil {
+			// API error in commit
+			return ctx.Send(SectorRetryCommitWait{})
+		}
+
+		switch mw.Receipt.ExitCode {
+		case exitcode.Ok:
+			// API error in CommitWait
+			return ctx.Send(SectorRetryCommitWait{})
+		case exitcode.SysErrOutOfGas:
+			// API error in CommitWait AND gas estimator guessed a wrong number in SubmitCommit
+			return ctx.Send(SectorRetrySubmitCommit{})
 		default:
-			return xerrors.Errorf("checkPrecommit sanity check error (%T): %w", err, err)
+			// something else went wrong
 		}
 	}
 
@@ -253,6 +282,22 @@ func (m *Sealing) handleRemoveFailed(ctx statemachine.Context, sector SectorInfo
 	return ctx.Send(SectorRemove{})
 }
 
+func (m *Sealing) handleTerminateFailed(ctx statemachine.Context, sector SectorInfo) error {
+	// ignoring error as it's most likely an API error - `pci` will be nil, and we'll go back to
+	// the Terminating state after cooldown. If the API is still failing, we'll get back to here
+	// with the error in SectorInfo log.
+ pci, _ := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, nil) + if pci != nil { + return nil // pause the fsm, needs manual user action + } + + if err := failedCooldown(ctx, sector); err != nil { + return err + } + + return ctx.Send(SectorTerminate{}) +} + func (m *Sealing) handleDealsExpired(ctx statemachine.Context, sector SectorInfo) error { // First make vary sure the sector isn't committed si, err := m.api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, nil) @@ -281,6 +326,7 @@ func (m *Sealing) handleRecoverDealIDs(ctx statemachine.Context, sector SectorIn } var toFix []int + paddingPieces := 0 for i, p := range sector.Pieces { // if no deal is associated with the piece, ensure that we added it as @@ -290,10 +336,11 @@ func (m *Sealing) handleRecoverDealIDs(ctx statemachine.Context, sector SectorIn if !p.Piece.PieceCID.Equals(exp) { return xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece.PieceCID) } + paddingPieces++ continue } - proposal, err := m.api.StateMarketStorageDeal(ctx.Context(), p.DealInfo.DealID, tok) + proposal, err := m.api.StateMarketStorageDealProposal(ctx.Context(), p.DealInfo.DealID, tok) if err != nil { log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo.DealID, i, err) toFix = append(toFix, i) @@ -307,7 +354,7 @@ func (m *Sealing) handleRecoverDealIDs(ctx statemachine.Context, sector SectorIn } if proposal.PieceCID != p.Piece.PieceCID { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %x != %x", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID) + log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID) toFix = append(toFix, i) continue } @@ -325,37 +372,44 @@ func (m *Sealing) handleRecoverDealIDs(ctx statemachine.Context, sector 
SectorIn } } + failed := map[int]error{} updates := map[int]abi.DealID{} for _, i := range toFix { p := sector.Pieces[i] if p.DealInfo.PublishCid == nil { // TODO: check if we are in an early enough state try to remove this piece - log.Error("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID) + log.Errorf("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID) // Not much to do here (and this can only happen for old spacerace sectors) return ctx.Send(SectorRemove{}) } - ml, err := m.api.StateSearchMsg(ctx.Context(), *p.DealInfo.PublishCid) + var dp *market.DealProposal + if p.DealInfo.DealProposal != nil { + mdp := market.DealProposal(*p.DealInfo.DealProposal) + dp = &mdp + } + res, err := m.dealInfo.GetCurrentDealInfo(ctx.Context(), tok, dp, *p.DealInfo.PublishCid) if err != nil { - return xerrors.Errorf("looking for publish deal message %s (sector %d, piece %d): %w", *p.DealInfo.PublishCid, sector.SectorNumber, i, err) + failed[i] = xerrors.Errorf("getting current deal info for piece %d: %w", i, err) } - if ml.Receipt.ExitCode != exitcode.Ok { - return xerrors.Errorf("looking for publish deal message %s (sector %d, piece %d): non-ok exit code: %s", *p.DealInfo.PublishCid, sector.SectorNumber, i, ml.Receipt.ExitCode) - } + updates[i] = res.DealID + } - var retval market.PublishStorageDealsReturn - if err := retval.UnmarshalCBOR(bytes.NewReader(ml.Receipt.Return)); err != nil { - return xerrors.Errorf("looking for publish deal message: unmarshaling message return: %w", err) + if len(failed) > 0 { + var merr error + for _, e := range failed { + merr = multierror.Append(merr, e) } - if len(retval.IDs) != 1 { - // market currently only ever sends messages with 1 deal - return xerrors.Errorf("can't recover dealIDs from publish deal message with 
more than 1 deal") + if len(failed)+paddingPieces == len(sector.Pieces) { + log.Errorf("removing sector %d: all deals expired or unrecoverable: %+v", sector.SectorNumber, merr) + return ctx.Send(SectorRemove{}) } - updates[i] = retval.IDs[0] + // todo: try to remove bad pieces (hard; see the todo above) + return xerrors.Errorf("failed to recover some deals: %w", merr) } // Not much to do here, we can't go back in time to commit this sector diff --git a/extern/storage-sealing/states_proving.go b/extern/storage-sealing/states_proving.go index 6684c714d2d..5e613b20b46 100644 --- a/extern/storage-sealing/states_proving.go +++ b/extern/storage-sealing/states_proving.go @@ -1,9 +1,14 @@ package sealing import ( + "time" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-statemachine" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/policy" ) func (m *Sealing) handleFaulty(ctx statemachine.Context, sector SectorInfo) error { @@ -31,10 +36,112 @@ func (m *Sealing) handleFaultReported(ctx statemachine.Context, sector SectorInf return ctx.Send(SectorFaultedFinal{}) } +func (m *Sealing) handleTerminating(ctx statemachine.Context, sector SectorInfo) error { + // First step of sector termination + // * See if sector is live + // * If not, goto removing + // * Add to termination queue + // * Wait for message to land on-chain + // * Check for correct termination + // * wait for expiration (+winning lookback?) 
+ + si, err := m.api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, nil) + if err != nil { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("getting sector info: %w", err)}) + } + + if si == nil { + // either already terminated or not committed yet + + pci, err := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, nil) + if err != nil { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("checking precommit presence: %w", err)}) + } + if pci != nil { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("sector was precommitted but not proven, remove instead of terminating")}) + } + + return ctx.Send(SectorRemove{}) + } + + termCid, terminated, err := m.terminator.AddTermination(ctx.Context(), m.minerSectorID(sector.SectorNumber)) + if err != nil { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("queueing termination: %w", err)}) + } + + if terminated { + return ctx.Send(SectorTerminating{Message: nil}) + } + + return ctx.Send(SectorTerminating{Message: &termCid}) +} + +func (m *Sealing) handleTerminateWait(ctx statemachine.Context, sector SectorInfo) error { + if sector.TerminateMessage == nil { + return xerrors.New("entered TerminateWait with nil TerminateMessage") + } + + mw, err := m.api.StateWaitMsg(ctx.Context(), *sector.TerminateMessage) + if err != nil { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("waiting for terminate message to land on chain: %w", err)}) + } + + if mw.Receipt.ExitCode != exitcode.Ok { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("terminate message failed to execute: exit %d: %w", mw.Receipt.ExitCode, err)}) + } + + return ctx.Send(SectorTerminated{TerminatedAt: mw.Height}) +} + +func (m *Sealing) handleTerminateFinality(ctx statemachine.Context, sector SectorInfo) error { + for { + tok, epoch, err := m.api.ChainHead(ctx.Context()) + if err != nil { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("getting chain head: %w", err)}) + } + + nv, err := 
m.api.StateNetworkVersion(ctx.Context(), tok) + if err != nil { + return ctx.Send(SectorTerminateFailed{xerrors.Errorf("getting network version: %w", err)}) + } + + if epoch >= sector.TerminatedAt+policy.GetWinningPoStSectorSetLookback(nv) { + return ctx.Send(SectorRemove{}) + } + + toWait := time.Duration(epoch-sector.TerminatedAt+policy.GetWinningPoStSectorSetLookback(nv)) * time.Duration(build.BlockDelaySecs) * time.Second + select { + case <-time.After(toWait): + continue + case <-ctx.Context().Done(): + return ctx.Context().Err() + } + } +} + func (m *Sealing) handleRemoving(ctx statemachine.Context, sector SectorInfo) error { - if err := m.sealer.Remove(ctx.Context(), m.minerSector(sector.SectorNumber)); err != nil { + if err := m.sealer.Remove(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil { return ctx.Send(SectorRemoveFailed{err}) } return ctx.Send(SectorRemoved{}) } + +func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInfo) error { + // TODO: track sector health / expiration + log.Infof("Proving sector %d", sector.SectorNumber) + + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting sealing config: %w", err) + } + + if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(true, cfg.AlwaysKeepUnsealedCopy)); err != nil { + log.Error(err) + } + + // TODO: Watch termination + // TODO: Auto-extend if set + + return nil +} diff --git a/extern/storage-sealing/states_sealing.go b/extern/storage-sealing/states_sealing.go index d4e5535a2ee..5334fc72e74 100644 --- a/extern/storage-sealing/states_sealing.go +++ b/extern/storage-sealing/states_sealing.go @@ -11,19 +11,39 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" 
"github.com/filecoin-project/go-statemachine" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" ) var DealSectorPriority = 1024 -var MaxTicketAge = abi.ChainEpoch(builtin0.EpochsInDay * 2) +var MaxTicketAge = policy.MaxPreCommitRandomnessLookback func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) error { + m.inputLk.Lock() + // make sure we not accepting deals into this sector + for _, c := range m.assignedPieces[m.minerSectorID(sector.SectorNumber)] { + pp := m.pendingPieces[c] + delete(m.pendingPieces, c) + if pp == nil { + log.Errorf("nil assigned pending piece %s", c) + continue + } + + // todo: return to the sealing queue (this is extremely unlikely to happen) + pp.accepted(sector.SectorNumber, 0, xerrors.Errorf("sector %d entered packing state early", sector.SectorNumber)) + } + + delete(m.openSectors, m.minerSectorID(sector.SectorNumber)) + delete(m.assignedPieces, m.minerSectorID(sector.SectorNumber)) + m.inputLk.Unlock() + log.Infow("performing filling up rest of the sector...", "sector", sector.SectorNumber) var allocated abi.UnpaddedPieceSize @@ -31,7 +51,12 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err allocated += piece.Piece.Size.Unpadded() } - ubytes := abi.PaddedPieceSize(m.sealer.SectorSize()).Unpadded() + ssize, err := sector.SectorType.SectorSize() + if err != nil { + return err + } + + ubytes := abi.PaddedPieceSize(ssize).Unpadded() if allocated > ubytes { return xerrors.Errorf("too much data in sector: %d > %d", allocated, ubytes) @@ -46,7 +71,7 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err 
log.Warnf("Creating %d filler pieces for sector %d", len(fillerSizes), sector.SectorNumber) } - fillerPieces, err := m.pledgeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.existingPieceSizes(), fillerSizes...) + fillerPieces, err := m.padSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.existingPieceSizes(), fillerSizes...) if err != nil { return xerrors.Errorf("filling up the sector (%v): %w", fillerSizes, err) } @@ -54,44 +79,92 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err return ctx.Send(SectorPacked{FillerPieces: fillerPieces}) } -func (m *Sealing) getTicket(ctx statemachine.Context, sector SectorInfo) (abi.SealRandomness, abi.ChainEpoch, error) { +func (m *Sealing) padSector(ctx context.Context, sectorID storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, sizes ...abi.UnpaddedPieceSize) ([]abi.PieceInfo, error) { + if len(sizes) == 0 { + return nil, nil + } + + log.Infof("Pledge %d, contains %+v", sectorID, existingPieceSizes) + + out := make([]abi.PieceInfo, len(sizes)) + for i, size := range sizes { + ppi, err := m.sealer.AddPiece(ctx, sectorID, existingPieceSizes, size, NewNullReader(size)) + if err != nil { + return nil, xerrors.Errorf("add piece: %w", err) + } + + existingPieceSizes = append(existingPieceSizes, size) + + out[i] = ppi + } + + return out, nil +} + +func checkTicketExpired(ticket, head abi.ChainEpoch) bool { + return head-ticket > MaxTicketAge // TODO: allow configuring expected seal durations +} + +func checkProveCommitExpired(preCommitEpoch, msd abi.ChainEpoch, currEpoch abi.ChainEpoch) bool { + return currEpoch > preCommitEpoch+msd +} + +func (m *Sealing) getTicket(ctx statemachine.Context, sector SectorInfo) (abi.SealRandomness, abi.ChainEpoch, bool, error) { tok, epoch, err := m.api.ChainHead(ctx.Context()) if err != nil { - log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err) - 
return nil, 0, nil + log.Errorf("getTicket: api error, not proceeding: %+v", err) + return nil, 0, false, nil + } + + // the reason why the StateMinerSectorAllocated function is placed here, if it is outside, + // if the MarshalCBOR function and StateSectorPreCommitInfo function return err, it will be executed + allocated, aerr := m.api.StateMinerSectorAllocated(ctx.Context(), m.maddr, sector.SectorNumber, nil) + if aerr != nil { + log.Errorf("getTicket: api error, checking if sector is allocated: %+v", aerr) + return nil, 0, false, nil } ticketEpoch := epoch - policy.SealRandomnessLookback buf := new(bytes.Buffer) if err := m.maddr.MarshalCBOR(buf); err != nil { - return nil, 0, err + return nil, 0, allocated, err } pci, err := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, tok) if err != nil { - return nil, 0, xerrors.Errorf("getting precommit info: %w", err) + return nil, 0, allocated, xerrors.Errorf("getting precommit info: %w", err) } if pci != nil { ticketEpoch = pci.Info.SealRandEpoch + + nv, err := m.api.StateNetworkVersion(ctx.Context(), tok) + if err != nil { + return nil, 0, allocated, xerrors.Errorf("getTicket: StateNetworkVersion: api error, not proceeding: %+v", err) + } + + msd := policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), sector.SectorType) + + if checkProveCommitExpired(pci.PreCommitEpoch, msd, epoch) { + return nil, 0, allocated, xerrors.Errorf("ticket expired for precommitted sector") + } + } + + if pci == nil && allocated { // allocated is true, sector precommitted but expired, will SectorCommitFailed or SectorRemove + return nil, 0, allocated, xerrors.Errorf("sector %s precommitted but expired", sector.SectorNumber) } rand, err := m.api.ChainGetRandomnessFromTickets(ctx.Context(), tok, crypto.DomainSeparationTag_SealRandomness, ticketEpoch, buf.Bytes()) if err != nil { - return nil, 0, err + return nil, 0, allocated, err } - return abi.SealRandomness(rand), ticketEpoch, nil + return 
abi.SealRandomness(rand), ticketEpoch, allocated, nil } func (m *Sealing) handleGetTicket(ctx statemachine.Context, sector SectorInfo) error { - ticketValue, ticketEpoch, err := m.getTicket(ctx, sector) + ticketValue, ticketEpoch, allocated, err := m.getTicket(ctx, sector) if err != nil { - allocated, aerr := m.api.StateMinerSectorAllocated(ctx.Context(), m.maddr, sector.SectorNumber, nil) - if aerr == nil { - log.Errorf("error checking if sector is allocated: %+v", err) - } - if allocated { if sector.CommitMessage != nil { // Some recovery paths with unfortunate timing lead here @@ -127,17 +200,38 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) } } - _, height, err := m.api.ChainHead(ctx.Context()) + tok, height, err := m.api.ChainHead(ctx.Context()) if err != nil { log.Errorf("handlePreCommit1: api error, not proceeding: %+v", err) return nil } - if height-sector.TicketEpoch > MaxTicketAge { - return ctx.Send(SectorOldTicket{}) + if checkTicketExpired(sector.TicketEpoch, height) { + pci, err := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, tok) + if err != nil { + log.Errorf("handlePreCommit1: StateSectorPreCommitInfo: api error, not proceeding: %+v", err) + return nil + } + + if pci == nil { + return ctx.Send(SectorOldTicket{}) // go get new ticket + } + + nv, err := m.api.StateNetworkVersion(ctx.Context(), tok) + if err != nil { + log.Errorf("handlePreCommit1: StateNetworkVersion: api error, not proceeding: %+v", err) + return nil + } + + msd := policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), sector.SectorType) + + // if height > PreCommitEpoch + msd, there is no need to recalculate + if checkProveCommitExpired(pci.PreCommitEpoch, msd, height) { + return ctx.Send(SectorOldTicket{}) // will be removed + } } - pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.TicketValue, sector.pieceInfos()) + pc1o, err := 
m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.pieceInfos()) if err != nil { return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("seal pre commit(1) failed: %w", err)}) } @@ -148,7 +242,7 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) } func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo) error { - cids, err := m.sealer.SealPreCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.PreCommit1Out) + cids, err := m.sealer.SealPreCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.PreCommit1Out) if err != nil { return ctx.Send(SectorSealPreCommit2Failed{xerrors.Errorf("seal pre commit(2) failed: %w", err)}) } @@ -171,61 +265,55 @@ func (m *Sealing) remarkForUpgrade(sid abi.SectorNumber) { } } -func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInfo) error { +func (m *Sealing) preCommitParams(ctx statemachine.Context, sector SectorInfo) (*miner.SectorPreCommitInfo, big.Int, TipSetToken, error) { tok, height, err := m.api.ChainHead(ctx.Context()) if err != nil { log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err) - return nil - } - - waddr, err := m.api.StateMinerWorkerAddress(ctx.Context(), m.maddr, tok) - if err != nil { - log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err) - return nil + return nil, big.Zero(), nil, nil } if err := checkPrecommit(ctx.Context(), m.Address(), sector, tok, height, m.api); err != nil { switch err := err.(type) { case *ErrApi: log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err) - return nil + return nil, big.Zero(), nil, nil case *ErrBadCommD: // TODO: Should this just back to packing? 
(not really needed since handlePreCommit1 will do that too) - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad CommD error: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad CommD error: %w", err)}) case *ErrExpiredTicket: - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("ticket expired: %w", err)}) case *ErrBadTicket: - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad ticket: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("bad ticket: %w", err)}) case *ErrInvalidDeals: log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err) - return ctx.Send(SectorInvalidDealIDs{Return: RetPreCommitting}) + return nil, big.Zero(), nil, ctx.Send(SectorInvalidDealIDs{Return: RetPreCommitting}) case *ErrExpiredDeals: - return ctx.Send(SectorDealsExpired{xerrors.Errorf("sector deals expired: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorDealsExpired{xerrors.Errorf("sector deals expired: %w", err)}) case *ErrPrecommitOnChain: - return ctx.Send(SectorPreCommitLanded{TipSet: tok}) // we re-did precommit + return nil, big.Zero(), nil, ctx.Send(SectorPreCommitLanded{TipSet: tok}) // we re-did precommit case *ErrSectorNumberAllocated: log.Errorf("handlePreCommitFailed: sector number already allocated, not proceeding: %+v", err) // TODO: check if the sector is committed (not sure how we'd end up here) - return nil + return nil, big.Zero(), nil, nil default: - return xerrors.Errorf("checkPrecommit sanity check error: %w", err) + return nil, big.Zero(), nil, xerrors.Errorf("checkPrecommit sanity check error: %w", err) } } expiration, err := m.pcp.Expiration(ctx.Context(), sector.Pieces...) 
if err != nil { - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("handlePreCommitting: failed to compute pre-commit expiry: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("handlePreCommitting: failed to compute pre-commit expiry: %w", err)}) } // Sectors must last _at least_ MinSectorExpiration + MaxSealDuration. // TODO: The "+10" allows the pre-commit to take 10 blocks to be accepted. nv, err := m.api.StateNetworkVersion(ctx.Context(), tok) if err != nil { - return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get network version: %w", err)}) + return nil, big.Zero(), nil, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get network version: %w", err)}) } msd := policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), sector.SectorType) - if minExpiration := height + msd + miner.MinSectorExpiration + 10; expiration < minExpiration { + if minExpiration := sector.TicketEpoch + policy.MaxPreCommitRandomnessLookback + msd + miner.MinSectorExpiration; expiration < minExpiration { expiration = minExpiration } // TODO: enforce a reasonable _maximum_ sector lifetime? 
@@ -242,20 +330,66 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf depositMinimum := m.tryUpgradeSector(ctx.Context(), params) + collateral, err := m.api.StateMinerPreCommitDepositForPower(ctx.Context(), m.maddr, *params, tok) + if err != nil { + return nil, big.Zero(), nil, xerrors.Errorf("getting initial pledge collateral: %w", err) + } + + deposit := big.Max(depositMinimum, collateral) + + return params, deposit, tok, nil +} + +func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInfo) error { + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting config: %w", err) + } + + if cfg.BatchPreCommits { + nv, err := m.api.StateNetworkVersion(ctx.Context(), nil) + if err != nil { + return xerrors.Errorf("getting network version: %w", err) + } + + if nv >= network.Version13 { + return ctx.Send(SectorPreCommitBatch{}) + } + } + + params, pcd, tok, err := m.preCommitParams(ctx, sector) + if err != nil { + return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("preCommitParams: %w", err)}) + } + if params == nil { + return nil // event was sent in preCommitParams + } + + deposit, err := collateralSendAmount(ctx.Context(), m.api, m.maddr, cfg, pcd) + if err != nil { + return err + } + enc := new(bytes.Buffer) if err := params.MarshalCBOR(enc); err != nil { return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("could not serialize pre-commit sector parameters: %w", err)}) } - collateral, err := m.api.StateMinerPreCommitDepositForPower(ctx.Context(), m.maddr, *params, tok) + mi, err := m.api.StateMinerInfo(ctx.Context(), m.maddr, tok) if err != nil { - return xerrors.Errorf("getting initial pledge collateral: %w", err) + log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err) + return nil } - deposit := big.Max(depositMinimum, collateral) + goodFunds := big.Add(deposit, big.Int(m.feeCfg.MaxPreCommitGasFee)) + + from, _, err := m.addrSel(ctx.Context(), mi, 
api.PreCommitAddr, goodFunds, deposit) + if err != nil { + return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("no good address to send precommit message from: %w", err)}) + } log.Infof("submitting precommit for sector %d (deposit: %s): ", sector.SectorNumber, deposit) - mcid, err := m.api.SendMsg(ctx.Context(), waddr, m.maddr, miner.Methods.PreCommitSector, deposit, m.feeCfg.MaxPreCommitGasFee, enc.Bytes()) + mcid, err := m.api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.PreCommitSector, deposit, big.Int(m.feeCfg.MaxPreCommitGasFee), enc.Bytes()) if err != nil { if params.ReplaceCapacity { m.remarkForUpgrade(params.ReplaceSectorNumber) @@ -263,7 +397,36 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)}) } - return ctx.Send(SectorPreCommitted{Message: mcid, PreCommitDeposit: deposit, PreCommitInfo: *params}) + return ctx.Send(SectorPreCommitted{Message: mcid, PreCommitDeposit: pcd, PreCommitInfo: *params}) +} + +func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector SectorInfo) error { + if sector.CommD == nil || sector.CommR == nil { + return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("sector had nil commR or commD")}) + } + + params, deposit, _, err := m.preCommitParams(ctx, sector) + if err != nil { + return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("preCommitParams: %w", err)}) + } + if params == nil { + return nil // event was sent in preCommitParams + } + + res, err := m.precommiter.AddPreCommit(ctx.Context(), sector, deposit, params) + if err != nil { + return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("queuing precommit batch failed: %w", err)}) + } + + if res.Error != "" { + return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("precommit batch error: %s", res.Error)}) + } + + if res.Msg == nil { + return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("batch 
message was nil")}) + } + + return ctx.Send(SectorPreCommitBatchSent{*res.Msg}) } func (m *Sealing) handlePreCommitWait(ctx statemachine.Context, sector SectorInfo) error { @@ -281,8 +444,10 @@ func (m *Sealing) handlePreCommitWait(ctx statemachine.Context, sector SectorInf switch mw.Receipt.ExitCode { case exitcode.Ok: // this is what we expect + case exitcode.SysErrInsufficientFunds: + fallthrough case exitcode.SysErrOutOfGas: - // gas estimator guessed a wrong number + // gas estimator guessed a wrong number / out of funds: return ctx.Send(SectorRetryPreCommit{}) default: log.Error("sector precommit failed: ", mw.Receipt.ExitCode) @@ -363,9 +528,14 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) } } + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting config: %w", err) + } + log.Info("scheduling seal proof computation...") - log.Infof("KOMIT %d %x(%d); %x(%d); %v; r:%x; d:%x", sector.SectorNumber, sector.TicketValue, sector.TicketEpoch, sector.SeedValue, sector.SeedEpoch, sector.pieceInfos(), sector.CommR, sector.CommD) + log.Infof("KOMIT %d %x(%d); %x(%d); %v; r:%s; d:%s", sector.SectorNumber, sector.TicketValue, sector.TicketEpoch, sector.SeedValue, sector.SeedEpoch, sector.pieceInfos(), sector.CommR, sector.CommD) if sector.CommD == nil || sector.CommR == nil { return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")}) @@ -375,25 +545,59 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) Unsealed: *sector.CommD, Sealed: *sector.CommR, } - c2in, err := m.sealer.SealCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.TicketValue, sector.SeedValue, sector.pieceInfos(), cids) + c2in, err := m.sealer.SealCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.SeedValue, sector.pieceInfos(), cids) if err != nil { return 
ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(1): %w", err)}) } - proof, err := m.sealer.SealCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), c2in) + proof, err := m.sealer.SealCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), c2in) if err != nil { return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(2): %w", err)}) } + { + tok, _, err := m.api.ChainHead(ctx.Context()) + if err != nil { + log.Errorf("handleCommitting: api error, not proceeding: %+v", err) + return nil + } + + if err := m.checkCommit(ctx.Context(), sector, proof, tok); err != nil { + return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("commit check error: %w", err)}) + } + } + + if cfg.FinalizeEarly { + return ctx.Send(SectorProofReady{ + Proof: proof, + }) + } + return ctx.Send(SectorCommitted{ Proof: proof, }) } func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo) error { + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting config: %w", err) + } + + if cfg.AggregateCommits { + nv, err := m.api.StateNetworkVersion(ctx.Context(), nil) + if err != nil { + return xerrors.Errorf("getting network version: %w", err) + } + + if nv >= network.Version13 { + return ctx.Send(SectorSubmitCommitAggregate{}) + } + } + tok, _, err := m.api.ChainHead(ctx.Context()) if err != nil { - log.Errorf("handleCommitting: api error, not proceeding: %+v", err) + log.Errorf("handleSubmitCommit: api error, not proceeding: %+v", err) return nil } @@ -411,7 +615,7 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo return ctx.Send(SectorCommitFailed{xerrors.Errorf("could not serialize commit sector parameters: %w", err)}) } - waddr, err := m.api.StateMinerWorkerAddress(ctx.Context(), m.maddr, tok) + mi, err := m.api.StateMinerInfo(ctx.Context(), m.maddr, tok) if err != nil { 
log.Errorf("handleCommitting: api error, not proceeding: %+v", err) return nil @@ -435,8 +639,20 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo collateral = big.Zero() } + collateral, err = collateralSendAmount(ctx.Context(), m.api, m.maddr, cfg, collateral) + if err != nil { + return err + } + + goodFunds := big.Add(collateral, big.Int(m.feeCfg.MaxCommitGasFee)) + + from, _, err := m.addrSel(ctx.Context(), mi, api.CommitAddr, goodFunds, collateral) + if err != nil { + return ctx.Send(SectorCommitFailed{xerrors.Errorf("no good address to send commit message from: %w", err)}) + } + // TODO: check seed / ticket / deals are up to date - mcid, err := m.api.SendMsg(ctx.Context(), waddr, m.maddr, miner.Methods.ProveCommitSector, collateral, m.feeCfg.MaxCommitGasFee, enc.Bytes()) + mcid, err := m.api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveCommitSector, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) if err != nil { return ctx.Send(SectorCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)}) } @@ -446,6 +662,51 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo }) } +func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector SectorInfo) error { + if sector.CommD == nil || sector.CommR == nil { + return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")}) + } + + res, err := m.commiter.AddCommit(ctx.Context(), sector, AggregateInput{ + Info: proof.AggregateSealVerifyInfo{ + Number: sector.SectorNumber, + Randomness: sector.TicketValue, + InteractiveRandomness: sector.SeedValue, + SealedCID: *sector.CommR, + UnsealedCID: *sector.CommD, + }, + Proof: sector.Proof, // todo: this correct?? 
+ Spt: sector.SectorType, + }) + if err != nil { + return ctx.Send(SectorRetrySubmitCommit{}) + } + + if res.Error != "" { + tok, _, err := m.api.ChainHead(ctx.Context()) + if err != nil { + log.Errorf("handleSubmitCommit: api error, not proceeding: %+v", err) + return nil + } + + if err := m.checkCommit(ctx.Context(), sector, sector.Proof, tok); err != nil { + return ctx.Send(SectorCommitFailed{xerrors.Errorf("commit check error: %w", err)}) + } + + return ctx.Send(SectorRetrySubmitCommit{}) + } + + if e, found := res.FailedSectors[sector.SectorNumber]; found { + return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector failed in aggregate processing: %s", e)}) + } + + if res.Msg == nil { + return ctx.Send(SectorCommitFailed{xerrors.Errorf("aggregate message was nil")}) + } + + return ctx.Send(SectorCommitAggregateSent{*res.Msg}) +} + func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo) error { if sector.CommitMessage == nil { log.Errorf("sector %d entered commit wait state without a message cid", sector.SectorNumber) @@ -460,8 +721,10 @@ func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo) switch mw.Receipt.ExitCode { case exitcode.Ok: // this is what we expect + case exitcode.SysErrInsufficientFunds: + fallthrough case exitcode.SysErrOutOfGas: - // gas estimator guessed a wrong number + // gas estimator guessed a wrong number / out of funds return ctx.Send(SectorRetrySubmitCommit{}) default: return ctx.Send(SectorCommitFailed{xerrors.Errorf("submitting sector proof failed (exit=%d, msg=%s) (t:%x; s:%x(%d); p:%x)", mw.Receipt.ExitCode, sector.CommitMessage, sector.TicketValue, sector.SeedValue, sector.SeedEpoch, sector.Proof)}) @@ -481,23 +744,14 @@ func (m *Sealing) handleCommitWait(ctx statemachine.Context, sector SectorInfo) func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorInfo) error { // TODO: Maybe wait for some finality - if err := 
m.sealer.FinalizeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorNumber), sector.keepUnsealedRanges(false)); err != nil { - return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)}) + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting sealing config: %w", err) } - return ctx.Send(SectorFinalized{}) -} - -func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInfo) error { - // TODO: track sector health / expiration - log.Infof("Proving sector %d", sector.SectorNumber) - - if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorNumber), sector.keepUnsealedRanges(true)); err != nil { - log.Error(err) + if err := m.sealer.FinalizeSector(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(false, cfg.AlwaysKeepUnsealedCopy)); err != nil { + return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)}) } - // TODO: Watch termination - // TODO: Auto-extend if set - - return nil + return ctx.Send(SectorFinalized{}) } diff --git a/extern/storage-sealing/stats.go b/extern/storage-sealing/stats.go index 78630c216be..2688d849405 100644 --- a/extern/storage-sealing/stats.go +++ b/extern/storage-sealing/stats.go @@ -4,12 +4,14 @@ import ( "sync" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" ) type statSectorState int const ( - sstSealing statSectorState = iota + sstStaging statSectorState = iota + sstSealing sstFailed sstProving nsst @@ -22,10 +24,14 @@ type SectorStats struct { totals [nsst]uint64 } -func (ss *SectorStats) updateSector(id abi.SectorID, st SectorState) { +func (ss *SectorStats) updateSector(cfg sealiface.Config, id abi.SectorID, st SectorState) (updateInput bool) { ss.lk.Lock() defer ss.lk.Unlock() + preSealing := ss.curSealingLocked() + preStaging := ss.curStagingLocked() + + // update totals 
oldst, found := ss.bySector[id] if found { ss.totals[oldst]-- @@ -34,6 +40,34 @@ func (ss *SectorStats) updateSector(id abi.SectorID, st SectorState) { sst := toStatState(st) ss.bySector[id] = sst ss.totals[sst]++ + + // check if we may need be able to process more deals + sealing := ss.curSealingLocked() + staging := ss.curStagingLocked() + + log.Debugw("sector stats", "sealing", sealing, "staging", staging) + + if cfg.MaxSealingSectorsForDeals > 0 && // max sealing deal sector limit set + preSealing >= cfg.MaxSealingSectorsForDeals && // we were over limit + sealing < cfg.MaxSealingSectorsForDeals { // and we're below the limit now + updateInput = true + } + + if cfg.MaxWaitDealsSectors > 0 && // max waiting deal sector limit set + preStaging >= cfg.MaxWaitDealsSectors && // we were over limit + staging < cfg.MaxWaitDealsSectors { // and we're below the limit now + updateInput = true + } + + return updateInput +} + +func (ss *SectorStats) curSealingLocked() uint64 { + return ss.totals[sstStaging] + ss.totals[sstSealing] + ss.totals[sstFailed] +} + +func (ss *SectorStats) curStagingLocked() uint64 { + return ss.totals[sstStaging] } // return the number of sectors currently in the sealing pipeline @@ -41,5 +75,13 @@ func (ss *SectorStats) curSealing() uint64 { ss.lk.Lock() defer ss.lk.Unlock() - return ss.totals[sstSealing] + ss.totals[sstFailed] + return ss.curSealingLocked() +} + +// return the number of sectors waiting to enter the sealing pipeline +func (ss *SectorStats) curStaging() uint64 { + ss.lk.Lock() + defer ss.lk.Unlock() + + return ss.curStagingLocked() } diff --git a/extern/storage-sealing/terminate_batch.go b/extern/storage-sealing/terminate_batch.go new file mode 100644 index 00000000000..13fa281c3ee --- /dev/null +++ b/extern/storage-sealing/terminate_batch.go @@ -0,0 +1,367 @@ +package sealing + +import ( + "bytes" + "context" + "sort" + "sync" + "time" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + 
"github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/dline" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/node/config" +) + +type TerminateBatcherApi interface { + StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*SectorLocation, error) + SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) + StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) + StateMinerProvingDeadline(context.Context, address.Address, TipSetToken) (*dline.Info, error) + StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tok TipSetToken) ([]api.Partition, error) +} + +type TerminateBatcher struct { + api TerminateBatcherApi + maddr address.Address + mctx context.Context + addrSel AddrSel + feeCfg config.MinerFeeConfig + getConfig GetSealingConfigFunc + + todo map[SectorLocation]*bitfield.BitField // MinerSectorLocation -> BitField + + waiting map[abi.SectorNumber][]chan cid.Cid + + notify, stop, stopped chan struct{} + force chan chan *cid.Cid + lk sync.Mutex +} + +func NewTerminationBatcher(mctx context.Context, maddr address.Address, api TerminateBatcherApi, addrSel AddrSel, feeCfg config.MinerFeeConfig, getConfig GetSealingConfigFunc) *TerminateBatcher { + b := &TerminateBatcher{ + api: api, + maddr: maddr, + mctx: mctx, + addrSel: addrSel, + feeCfg: feeCfg, + getConfig: getConfig, + + todo: map[SectorLocation]*bitfield.BitField{}, + waiting: map[abi.SectorNumber][]chan cid.Cid{}, + + notify: make(chan struct{}, 1), + force: make(chan 
chan *cid.Cid),
+		stop:    make(chan struct{}),
+		stopped: make(chan struct{}),
+	}
+
+	go b.run()
+
+	return b
+}
+
+func (b *TerminateBatcher) run() {
+	var forceRes chan *cid.Cid
+	var lastMsg *cid.Cid
+
+	for {
+		if forceRes != nil {
+			forceRes <- lastMsg
+			forceRes = nil
+		}
+		lastMsg = nil
+
+		cfg, err := b.getConfig()
+		if err != nil {
+			log.Warnw("TerminateBatcher getconfig error", "error", err)
+		}
+
+		var sendAboveMax, sendAboveMin bool
+		select {
+		case <-b.stop:
+			close(b.stopped)
+			return
+		case <-b.notify:
+			sendAboveMax = true
+		case <-time.After(cfg.TerminateBatchWait):
+			sendAboveMin = true
+		case fr := <-b.force: // user triggered
+			forceRes = fr
+		}
+
+		lastMsg, err = b.processBatch(sendAboveMax, sendAboveMin)
+		if err != nil {
+			log.Warnw("TerminateBatcher processBatch error", "error", err)
+		}
+	}
+}
+
+func (b *TerminateBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
+	dl, err := b.api.StateMinerProvingDeadline(b.mctx, b.maddr, nil)
+	if err != nil {
+		return nil, xerrors.Errorf("getting proving deadline info failed: %w", err)
+	}
+
+	cfg, err := b.getConfig()
+	if err != nil {
+		return nil, xerrors.Errorf("getting sealing config: %w", err)
+	}
+
+	b.lk.Lock()
+	defer b.lk.Unlock()
+	params := miner2.TerminateSectorsParams{}
+
+	var total uint64
+	for loc, sectors := range b.todo {
+		n, err := sectors.Count()
+		if err != nil {
+			log.Errorw("TerminateBatcher: failed to count sectors to terminate", "deadline", loc.Deadline, "partition", loc.Partition, "error", err)
+			continue
+		}
+
+		// don't send terminations for currently challenged sectors
+		if loc.Deadline == (dl.Index+1)%miner.WPoStPeriodDeadlines || // not in next (in case the terminate message takes a while to get on chain)
+			loc.Deadline == dl.Index || // not in current
+			(loc.Deadline+1)%miner.WPoStPeriodDeadlines == dl.Index { // not in previous
+			continue
+		}
+
+		if n < 1 {
+			log.Warnw("TerminateBatcher: zero sectors in bucket", "deadline", loc.Deadline, "partition",
loc.Partition) + continue + } + + toTerminate, err := sectors.Copy() + if err != nil { + log.Warnw("TerminateBatcher: copy sectors bitfield", "deadline", loc.Deadline, "partition", loc.Partition, "error", err) + continue + } + + ps, err := b.api.StateMinerPartitions(b.mctx, b.maddr, loc.Deadline, nil) + if err != nil { + log.Warnw("TerminateBatcher: getting miner partitions", "deadline", loc.Deadline, "partition", loc.Partition, "error", err) + continue + } + + toTerminate, err = bitfield.IntersectBitField(ps[loc.Partition].LiveSectors, toTerminate) + if err != nil { + log.Warnw("TerminateBatcher: intersecting liveSectors and toTerminate bitfields", "deadline", loc.Deadline, "partition", loc.Partition, "error", err) + continue + } + + if total+n > uint64(miner.AddressedSectorsMax) { + n = uint64(miner.AddressedSectorsMax) - total + + toTerminate, err = toTerminate.Slice(0, n) + if err != nil { + log.Warnw("TerminateBatcher: slice toTerminate bitfield", "deadline", loc.Deadline, "partition", loc.Partition, "error", err) + continue + } + + s, err := bitfield.SubtractBitField(*sectors, toTerminate) + if err != nil { + log.Warnw("TerminateBatcher: sectors-toTerminate", "deadline", loc.Deadline, "partition", loc.Partition, "error", err) + continue + } + *sectors = s + } + + total += n + + params.Terminations = append(params.Terminations, miner2.TerminationDeclaration{ + Deadline: loc.Deadline, + Partition: loc.Partition, + Sectors: toTerminate, + }) + + if total >= uint64(miner.AddressedSectorsMax) || total >= cfg.TerminateBatchMax { + break + } + + if len(params.Terminations) >= miner.DeclarationsMax { + break + } + } + + if len(params.Terminations) == 0 { + return nil, nil // nothing to do + } + + if notif && total < cfg.TerminateBatchMax { + return nil, nil + } + + if after && total < cfg.TerminateBatchMin { + return nil, nil + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + return nil, xerrors.Errorf("couldn't serialize 
TerminateSectors params: %w", err) + } + + mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil) + if err != nil { + return nil, xerrors.Errorf("couldn't get miner info: %w", err) + } + + from, _, err := b.addrSel(b.mctx, mi, api.TerminateSectorsAddr, big.Int(b.feeCfg.MaxTerminateGasFee), big.Int(b.feeCfg.MaxTerminateGasFee)) + if err != nil { + return nil, xerrors.Errorf("no good address found: %w", err) + } + + mcid, err := b.api.SendMsg(b.mctx, from, b.maddr, miner.Methods.TerminateSectors, big.Zero(), big.Int(b.feeCfg.MaxTerminateGasFee), enc.Bytes()) + if err != nil { + return nil, xerrors.Errorf("sending message failed: %w", err) + } + log.Infow("Sent TerminateSectors message", "cid", mcid, "from", from, "terminations", len(params.Terminations)) + + for _, t := range params.Terminations { + delete(b.todo, SectorLocation{ + Deadline: t.Deadline, + Partition: t.Partition, + }) + + err := t.Sectors.ForEach(func(sn uint64) error { + for _, ch := range b.waiting[abi.SectorNumber(sn)] { + ch <- mcid // buffered + } + delete(b.waiting, abi.SectorNumber(sn)) + + return nil + }) + if err != nil { + return nil, xerrors.Errorf("sectors foreach: %w", err) + } + } + + return &mcid, nil +} + +// register termination, wait for batch message, return message CID +// can return cid.Undef,true if the sector is already terminated on-chain +func (b *TerminateBatcher) AddTermination(ctx context.Context, s abi.SectorID) (mcid cid.Cid, terminated bool, err error) { + maddr, err := address.NewIDAddress(uint64(s.Miner)) + if err != nil { + return cid.Undef, false, err + } + + loc, err := b.api.StateSectorPartition(ctx, maddr, s.Number, nil) + if err != nil { + return cid.Undef, false, xerrors.Errorf("getting sector location: %w", err) + } + if loc == nil { + return cid.Undef, false, xerrors.New("sector location not found") + } + + { + // check if maybe already terminated + parts, err := b.api.StateMinerPartitions(ctx, maddr, loc.Deadline, nil) + if err != nil { + return cid.Cid{}, 
false, xerrors.Errorf("getting partitions: %w", err) + } + live, err := parts[loc.Partition].LiveSectors.IsSet(uint64(s.Number)) + if err != nil { + return cid.Cid{}, false, xerrors.Errorf("checking if sector is in live set: %w", err) + } + if !live { + // already terminated + return cid.Undef, true, nil + } + } + + b.lk.Lock() + bf, ok := b.todo[*loc] + if !ok { + n := bitfield.New() + bf = &n + b.todo[*loc] = bf + } + bf.Set(uint64(s.Number)) + + sent := make(chan cid.Cid, 1) + b.waiting[s.Number] = append(b.waiting[s.Number], sent) + + select { + case b.notify <- struct{}{}: + default: // already have a pending notification, don't need more + } + b.lk.Unlock() + + select { + case c := <-sent: + return c, false, nil + case <-ctx.Done(): + return cid.Undef, false, ctx.Err() + } +} + +func (b *TerminateBatcher) Flush(ctx context.Context) (*cid.Cid, error) { + resCh := make(chan *cid.Cid, 1) + select { + case b.force <- resCh: + select { + case res := <-resCh: + return res, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (b *TerminateBatcher) Pending(ctx context.Context) ([]abi.SectorID, error) { + b.lk.Lock() + defer b.lk.Unlock() + + mid, err := address.IDFromAddress(b.maddr) + if err != nil { + return nil, err + } + + res := make([]abi.SectorID, 0) + for _, bf := range b.todo { + err := bf.ForEach(func(id uint64) error { + res = append(res, abi.SectorID{ + Miner: abi.ActorID(mid), + Number: abi.SectorNumber(id), + }) + return nil + }) + if err != nil { + return nil, err + } + } + + sort.Slice(res, func(i, j int) bool { + if res[i].Miner != res[j].Miner { + return res[i].Miner < res[j].Miner + } + + return res[i].Number < res[j].Number + }) + + return res, nil +} + +func (b *TerminateBatcher) Stop(ctx context.Context) error { + close(b.stop) + + select { + case <-b.stopped: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/extern/storage-sealing/types.go 
b/extern/storage-sealing/types.go index 8f3e82a0b76..c5aed505a65 100644 --- a/extern/storage-sealing/types.go +++ b/extern/storage-sealing/types.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" @@ -19,29 +20,13 @@ import ( // Piece is a tuple of piece and deal info type PieceWithDealInfo struct { Piece abi.PieceInfo - DealInfo DealInfo + DealInfo api.PieceDealInfo } // Piece is a tuple of piece info and optional deal type Piece struct { Piece abi.PieceInfo - DealInfo *DealInfo // nil for pieces which do not appear in deals (e.g. filler pieces) -} - -// DealInfo is a tuple of deal identity and its schedule -type DealInfo struct { - PublishCid *cid.Cid - DealID abi.DealID - DealSchedule DealSchedule - KeepUnsealed bool -} - -// DealSchedule communicates the time interval of a storage deal. The deal must -// appear in a sealed (proven) sector no later than StartEpoch, otherwise it -// is invalid. -type DealSchedule struct { - StartEpoch abi.ChainEpoch - EndEpoch abi.ChainEpoch + DealInfo *api.PieceDealInfo // nil for pieces which do not appear in deals (e.g. 
filler pieces) } type Log struct { @@ -70,7 +55,8 @@ type SectorInfo struct { SectorType abi.RegisteredSealProof // Packing - Pieces []Piece + CreationTime int64 // unix seconds + Pieces []Piece // PreCommit1 TicketValue abi.SealRandomness @@ -103,6 +89,10 @@ type SectorInfo struct { // Recovery Return ReturnState + // Termination + TerminateMessage *cid.Cid + TerminatedAt abi.ChainEpoch + // Debug LastErr string @@ -159,7 +149,7 @@ func (t *SectorInfo) sealingCtx(ctx context.Context) context.Context { // Returns list of offset/length tuples of sector data ranges which clients // requested to keep unsealed -func (t *SectorInfo) keepUnsealedRanges(invert bool) []storage.Range { +func (t *SectorInfo) keepUnsealedRanges(invert, alwaysKeep bool) []storage.Range { var out []storage.Range var at abi.UnpaddedPieceSize @@ -170,7 +160,10 @@ func (t *SectorInfo) keepUnsealedRanges(invert bool) []storage.Range { if piece.DealInfo == nil { continue } - if piece.DealInfo.KeepUnsealed == invert { + + keep := piece.DealInfo.KeepUnsealed || alwaysKeep + + if keep == invert { continue } diff --git a/extern/storage-sealing/types_test.go b/extern/storage-sealing/types_test.go index 0b3c9703212..68e2b1111c8 100644 --- a/extern/storage-sealing/types_test.go +++ b/extern/storage-sealing/types_test.go @@ -4,26 +4,42 @@ import ( "bytes" "testing" + "github.com/ipfs/go-cid" + "gotest.tools/assert" cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/go-state-types/abi" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + api "github.com/filecoin-project/lotus/api" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + tutils "github.com/filecoin-project/specs-actors/v2/support/testing" ) -func TestSectorInfoSelialization(t *testing.T) { +func TestSectorInfoSerialization(t *testing.T) { d := abi.DealID(1234) - dealInfo := DealInfo{ + dummyCid, err := cid.Parse("bafkqaaa") + if err != nil { + t.Fatal(err) + } + + 
dealInfo := api.PieceDealInfo{ DealID: d, - DealSchedule: DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: 0, EndEpoch: 100, }, + DealProposal: &market2.DealProposal{ + PieceCID: dummyCid, + PieceSize: 5, + Client: tutils.NewActorAddr(t, "client"), + Provider: tutils.NewActorAddr(t, "provider"), + StoragePricePerEpoch: abi.NewTokenAmount(10), + ProviderCollateral: abi.NewTokenAmount(20), + ClientCollateral: abi.NewTokenAmount(15), + }, } - dummyCid := builtin2.AccountActorCodeID - si := &SectorInfo{ State: "stateful", SectorNumber: 234, @@ -53,18 +69,18 @@ func TestSectorInfoSelialization(t *testing.T) { } var si2 SectorInfo - if err := cborutil.ReadCborRPC(bytes.NewReader(b), &si); err != nil { + if err := cborutil.ReadCborRPC(bytes.NewReader(b), &si2); err != nil { + t.Fatal(err) return } assert.Equal(t, si.State, si2.State) assert.Equal(t, si.SectorNumber, si2.SectorNumber) - assert.Equal(t, si.Pieces, si2.Pieces) - assert.Equal(t, si.CommD, si2.CommD) - assert.Equal(t, si.TicketValue, si2.TicketValue) + assert.Equal(t, si.Pieces[0].DealInfo.DealID, si2.Pieces[0].DealInfo.DealID) + assert.Equal(t, si.Pieces[0].DealInfo.DealProposal.PieceCID, si2.Pieces[0].DealInfo.DealProposal.PieceCID) + assert.Equal(t, *si.CommD, *si2.CommD) + assert.DeepEqual(t, si.TicketValue, si2.TicketValue) + assert.Equal(t, si.TicketEpoch, si2.TicketEpoch) assert.Equal(t, si.TicketEpoch, si2.TicketEpoch) - - assert.Equal(t, si, si2) - } diff --git a/extern/storage-sealing/utils.go b/extern/storage-sealing/utils.go index dadef227d66..3dc4c4d1ea3 100644 --- a/extern/storage-sealing/utils.go +++ b/extern/storage-sealing/utils.go @@ -1,9 +1,16 @@ package sealing import ( + "context" "math/bits" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" ) func fillersFromRem(in abi.UnpaddedPieceSize) 
([]abi.UnpaddedPieceSize, error) { @@ -55,3 +62,30 @@ func (m *Sealing) GetSectorInfo(sid abi.SectorNumber) (SectorInfo, error) { err := m.sectors.Get(uint64(sid)).Get(&out) return out, err } + +func collateralSendAmount(ctx context.Context, api interface { + StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) +}, maddr address.Address, cfg sealiface.Config, collateral abi.TokenAmount) (abi.TokenAmount, error) { + if cfg.CollateralFromMinerBalance { + if cfg.DisableCollateralFallback { + return big.Zero(), nil + } + + avail, err := api.StateMinerAvailableBalance(ctx, maddr, nil) + if err != nil { + return big.Zero(), xerrors.Errorf("getting available miner balance: %w", err) + } + + avail = big.Sub(avail, cfg.AvailableBalanceBuffer) + if avail.LessThan(big.Zero()) { + avail = big.Zero() + } + + collateral = big.Sub(collateral, avail) + if collateral.LessThan(big.Zero()) { + collateral = big.Zero() + } + } + + return collateral, nil +} diff --git a/gateway/handler.go b/gateway/handler.go new file mode 100644 index 00000000000..3273c66db8b --- /dev/null +++ b/gateway/handler.go @@ -0,0 +1,48 @@ +package gateway + +import ( + "net/http" + + "contrib.go.opencensus.io/exporter/prometheus" + "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/metrics" + "github.com/gorilla/mux" + promclient "github.com/prometheus/client_golang/prometheus" +) + +// Handler returns a gateway http.Handler, to be mounted as-is on the server. +func Handler(a api.Gateway, opts ...jsonrpc.ServerOption) (http.Handler, error) { + m := mux.NewRouter() + + serveRpc := func(path string, hnd interface{}) { + rpcServer := jsonrpc.NewServer(opts...) 
+ rpcServer.Register("Filecoin", hnd) + m.Handle(path, rpcServer) + } + + ma := metrics.MetricedGatewayAPI(a) + + serveRpc("/rpc/v1", ma) + serveRpc("/rpc/v0", api.Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), ma)) + + registry := promclient.DefaultRegisterer.(*promclient.Registry) + exporter, err := prometheus.NewExporter(prometheus.Options{ + Registry: registry, + Namespace: "lotus_gw", + }) + if err != nil { + return nil, err + } + m.Handle("/debug/metrics", exporter) + m.PathPrefix("/").Handler(http.DefaultServeMux) + + /*ah := &auth.Handler{ + Verify: nodeApi.AuthVerify, + Next: mux.ServeHTTP, + }*/ + + return m, nil +} diff --git a/gateway/node.go b/gateway/node.go new file mode 100644 index 00000000000..3c7a67196a0 --- /dev/null +++ b/gateway/node.go @@ -0,0 +1,424 @@ +package gateway + +import ( + "context" + "fmt" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/sigs" + _ "github.com/filecoin-project/lotus/lib/sigs/bls" + _ "github.com/filecoin-project/lotus/lib/sigs/secp" + "github.com/filecoin-project/lotus/node/impl/full" + "github.com/ipfs/go-cid" +) + +const ( + DefaultLookbackCap = time.Hour * 24 + DefaultStateWaitLookbackLimit = abi.ChainEpoch(20) +) + +// TargetAPI defines the API methods that the Node depends on +// (to make it easy to mock for tests) +type TargetAPI interface { + Version(context.Context) (api.APIVersion, error) + ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) + ChainGetMessage(ctx 
context.Context, mc cid.Cid) (*types.Message, error) + ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) + ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) + ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) + ChainHasObj(context.Context, cid.Cid) (bool, error) + ChainHead(ctx context.Context) (*types.TipSet, error) + ChainNotify(context.Context) (<-chan []*api.HeadChange, error) + ChainReadObj(context.Context, cid.Cid) ([]byte, error) + GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) + MpoolPushUntrusted(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) + MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) + MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) + MsigGetPending(ctx context.Context, addr address.Address, ts types.TipSetKey) ([]*api.MsigTransaction, error) + StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) + StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) + StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) + StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) + StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) + StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) + StateSearchMsg(ctx 
context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) + StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) + StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) + StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) + StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) + StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) + StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) + StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) + StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) + StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) + StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) + StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) + StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (api.CirculatingSupply, error) + WalletBalance(context.Context, address.Address) (types.BigInt, error) //perm:read +} + +var _ TargetAPI = *new(api.FullNode) // gateway depends on latest + +type Node struct { + target TargetAPI + lookbackCap time.Duration + stateWaitLookbackLimit abi.ChainEpoch + errLookback error +} + +var ( + _ api.Gateway = (*Node)(nil) + _ full.ChainModuleAPI = (*Node)(nil) + _ full.GasModuleAPI = (*Node)(nil) + _ full.MpoolModuleAPI = (*Node)(nil) + _ full.StateModuleAPI = (*Node)(nil) +) + +// NewNode creates a new 
gateway node. +func NewNode(api TargetAPI, lookbackCap time.Duration, stateWaitLookbackLimit abi.ChainEpoch) *Node { + return &Node{ + target: api, + lookbackCap: lookbackCap, + stateWaitLookbackLimit: stateWaitLookbackLimit, + errLookback: fmt.Errorf("lookbacks of more than %s are disallowed", lookbackCap), + } +} + +func (gw *Node) checkTipsetKey(ctx context.Context, tsk types.TipSetKey) error { + if tsk.IsEmpty() { + return nil + } + + ts, err := gw.target.ChainGetTipSet(ctx, tsk) + if err != nil { + return err + } + + return gw.checkTipset(ts) +} + +func (gw *Node) checkTipset(ts *types.TipSet) error { + at := time.Unix(int64(ts.Blocks()[0].Timestamp), 0) + if err := gw.checkTimestamp(at); err != nil { + return fmt.Errorf("bad tipset: %w", err) + } + return nil +} + +func (gw *Node) checkTipsetHeight(ts *types.TipSet, h abi.ChainEpoch) error { + tsBlock := ts.Blocks()[0] + heightDelta := time.Duration(uint64(tsBlock.Height-h)*build.BlockDelaySecs) * time.Second + timeAtHeight := time.Unix(int64(tsBlock.Timestamp), 0).Add(-heightDelta) + + if err := gw.checkTimestamp(timeAtHeight); err != nil { + return fmt.Errorf("bad tipset height: %w", err) + } + return nil +} + +func (gw *Node) checkTimestamp(at time.Time) error { + if time.Since(at) > gw.lookbackCap { + return gw.errLookback + } + return nil +} + +func (gw *Node) Version(ctx context.Context) (api.APIVersion, error) { + return gw.target.Version(ctx) +} + +func (gw *Node) ChainGetBlockMessages(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) { + return gw.target.ChainGetBlockMessages(ctx, c) +} + +func (gw *Node) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { + return gw.target.ChainHasObj(ctx, c) +} + +func (gw *Node) ChainHead(ctx context.Context) (*types.TipSet, error) { + // TODO: cache and invalidate cache when timestamp is up (or have internal ChainNotify) + + return gw.target.ChainHead(ctx) +} + +func (gw *Node) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, 
error) { + return gw.target.ChainGetMessage(ctx, mc) +} + +func (gw *Node) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { + return gw.target.ChainGetTipSet(ctx, tsk) +} + +func (gw *Node) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { + var ts *types.TipSet + if tsk.IsEmpty() { + head, err := gw.target.ChainHead(ctx) + if err != nil { + return nil, err + } + ts = head + } else { + gts, err := gw.target.ChainGetTipSet(ctx, tsk) + if err != nil { + return nil, err + } + ts = gts + } + + // Check if the tipset key refers to gw tipset that's too far in the past + if err := gw.checkTipset(ts); err != nil { + return nil, err + } + + // Check if the height is too far in the past + if err := gw.checkTipsetHeight(ts, h); err != nil { + return nil, err + } + + return gw.target.ChainGetTipSetByHeight(ctx, h, tsk) +} + +func (gw *Node) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) { + return gw.target.ChainGetNode(ctx, p) +} + +func (gw *Node) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { + return gw.target.ChainNotify(ctx) +} + +func (gw *Node) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { + return gw.target.ChainReadObj(ctx, c) +} + +func (gw *Node) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + + return gw.target.GasEstimateMessageGas(ctx, msg, spec, tsk) +} + +func (gw *Node) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) { + // TODO: additional anti-spam checks + return gw.target.MpoolPushUntrusted(ctx, sm) +} + +func (gw *Node) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return types.NewInt(0), err + } 
+ + return gw.target.MsigGetAvailableBalance(ctx, addr, tsk) +} + +func (gw *Node) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) { + if err := gw.checkTipsetKey(ctx, start); err != nil { + return types.NewInt(0), err + } + if err := gw.checkTipsetKey(ctx, end); err != nil { + return types.NewInt(0), err + } + + return gw.target.MsigGetVested(ctx, addr, start, end) +} + +func (gw *Node) MsigGetPending(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.MsigTransaction, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + + return gw.target.MsigGetPending(ctx, addr, tsk) +} + +func (gw *Node) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return address.Undef, err + } + + return gw.target.StateAccountKey(ctx, addr, tsk) +} + +func (gw *Node) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return api.DealCollateralBounds{}, err + } + + return gw.target.StateDealProviderCollateralBounds(ctx, size, verified, tsk) +} + +func (gw *Node) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + + return gw.target.StateGetActor(ctx, actor, tsk) +} + +func (gw *Node) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + + return gw.target.StateListMiners(ctx, tsk) +} + +func (gw *Node) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + 
return address.Undef, err + } + + return gw.target.StateLookupID(ctx, addr, tsk) +} + +func (gw *Node) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return api.MarketBalance{}, err + } + + return gw.target.StateMarketBalance(ctx, addr, tsk) +} + +func (gw *Node) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + + return gw.target.StateMarketStorageDeal(ctx, dealId, tsk) +} + +func (gw *Node) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return network.VersionMax, err + } + + return gw.target.StateNetworkVersion(ctx, tsk) +} + +func (gw *Node) StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { + if limit == api.LookbackNoLimit { + limit = gw.stateWaitLookbackLimit + } + if gw.stateWaitLookbackLimit != api.LookbackNoLimit && limit > gw.stateWaitLookbackLimit { + limit = gw.stateWaitLookbackLimit + } + if err := gw.checkTipsetKey(ctx, from); err != nil { + return nil, err + } + + return gw.target.StateSearchMsg(ctx, from, msg, limit, allowReplaced) +} + +func (gw *Node) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { + if limit == api.LookbackNoLimit { + limit = gw.stateWaitLookbackLimit + } + if gw.stateWaitLookbackLimit != api.LookbackNoLimit && limit > gw.stateWaitLookbackLimit { + limit = gw.stateWaitLookbackLimit + } + + return gw.target.StateWaitMsg(ctx, msg, confidence, limit, allowReplaced) +} + +func (gw *Node) StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) { + if err := 
gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return gw.target.StateReadState(ctx, actor, tsk) +} + +func (gw *Node) StateMinerPower(ctx context.Context, m address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return gw.target.StateMinerPower(ctx, m, tsk) +} + +func (gw *Node) StateMinerFaults(ctx context.Context, m address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return bitfield.BitField{}, err + } + return gw.target.StateMinerFaults(ctx, m, tsk) +} +func (gw *Node) StateMinerRecoveries(ctx context.Context, m address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return bitfield.BitField{}, err + } + return gw.target.StateMinerRecoveries(ctx, m, tsk) +} + +func (gw *Node) StateMinerInfo(ctx context.Context, m address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return miner.MinerInfo{}, err + } + return gw.target.StateMinerInfo(ctx, m, tsk) +} + +func (gw *Node) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]api.Deadline, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return gw.target.StateMinerDeadlines(ctx, m, tsk) +} + +func (gw *Node) StateMinerAvailableBalance(ctx context.Context, m address.Address, tsk types.TipSetKey) (types.BigInt, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return types.BigInt{}, err + } + return gw.target.StateMinerAvailableBalance(ctx, m, tsk) +} + +func (gw *Node) StateMinerProvingDeadline(ctx context.Context, m address.Address, tsk types.TipSetKey) (*dline.Info, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return gw.target.StateMinerProvingDeadline(ctx, m, tsk) +} + +func (gw *Node) 
StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return types.BigInt{}, err + } + return gw.target.StateCirculatingSupply(ctx, tsk) +} + +func (gw *Node) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return gw.target.StateSectorGetInfo(ctx, maddr, n, tsk) +} + +func (gw *Node) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return gw.target.StateVerifiedClientStatus(ctx, addr, tsk) +} + +func (gw *Node) StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return api.CirculatingSupply{}, err + } + return gw.target.StateVMCirculatingSupplyInternal(ctx, tsk) +} + +func (gw *Node) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) (bool, error) { + return sigs.Verify(sig, k, msg) == nil, nil +} + +func (gw *Node) WalletBalance(ctx context.Context, k address.Address) (types.BigInt, error) { + return gw.target.WalletBalance(ctx, k) +} diff --git a/cmd/lotus-gateway/api_test.go b/gateway/node_test.go similarity index 91% rename from cmd/lotus-gateway/api_test.go rename to gateway/node_test.go index 23d2cbf3afa..68711cca688 100644 --- a/cmd/lotus-gateway/api_test.go +++ b/gateway/node_test.go @@ -1,4 +1,4 @@ -package main +package gateway import ( "context" @@ -6,26 +6,24 @@ import ( "testing" "time" - "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - - "github.com/filecoin-project/lotus/build" - + "github.com/ipfs/go-cid" 
"github.com/stretchr/testify/require" - "github.com/filecoin-project/lotus/chain/types/mock" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - "github.com/ipfs/go-cid" + "github.com/filecoin-project/lotus/chain/types/mock" ) func TestGatewayAPIChainGetTipSetByHeight(t *testing.T) { ctx := context.Background() - lookbackTimestamp := uint64(time.Now().Unix()) - uint64(LookbackCap.Seconds()) + lookbackTimestamp := uint64(time.Now().Unix()) - uint64(DefaultLookbackCap.Seconds()) type args struct { h abi.ChainEpoch tskh abi.ChainEpoch @@ -91,7 +89,7 @@ func TestGatewayAPIChainGetTipSetByHeight(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { mock := &mockGatewayDepsAPI{} - a := NewGatewayAPI(mock) + a := NewNode(mock, DefaultLookbackCap, DefaultStateWaitLookbackLimit) // Create tipsets from genesis up to tskh and return the highest ts := mock.createTipSets(tt.args.tskh, tt.args.genesisTS) @@ -111,7 +109,7 @@ type mockGatewayDepsAPI struct { lk sync.RWMutex tipsets []*types.TipSet - gatewayDepsAPI // satisfies all interface requirements but will panic if + TargetAPI // satisfies all interface requirements but will panic if // methods are called. 
easier than filling out with panic stubs IMO } @@ -235,3 +233,19 @@ func (m *mockGatewayDepsAPI) StateWaitMsgLimited(ctx context.Context, msg cid.Ci func (m *mockGatewayDepsAPI) StateReadState(ctx context.Context, act address.Address, ts types.TipSetKey) (*api.ActorState, error) { panic("implement me") } + +func (m *mockGatewayDepsAPI) Version(context.Context) (api.APIVersion, error) { + return api.APIVersion{ + APIVersion: api.FullAPIVersion1, + }, nil +} + +func TestGatewayVersion(t *testing.T) { + ctx := context.Background() + mock := &mockGatewayDepsAPI{} + a := NewNode(mock, DefaultLookbackCap, DefaultStateWaitLookbackLimit) + + v, err := a.Version(ctx) + require.NoError(t, err) + require.Equal(t, api.FullAPIVersion1, v.APIVersion) +} diff --git a/gen/api/proxygen.go b/gen/api/proxygen.go new file mode 100644 index 00000000000..71c2f414dd8 --- /dev/null +++ b/gen/api/proxygen.go @@ -0,0 +1,345 @@ +package main + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + "os" + "path/filepath" + "strings" + "text/template" + "unicode" + + "golang.org/x/xerrors" +) + +type methodMeta struct { + node ast.Node + ftype *ast.FuncType +} + +type Visitor struct { + Methods map[string]map[string]*methodMeta + Include map[string][]string +} + +func (v *Visitor) Visit(node ast.Node) ast.Visitor { + st, ok := node.(*ast.TypeSpec) + if !ok { + return v + } + + iface, ok := st.Type.(*ast.InterfaceType) + if !ok { + return v + } + if v.Methods[st.Name.Name] == nil { + v.Methods[st.Name.Name] = map[string]*methodMeta{} + } + for _, m := range iface.Methods.List { + switch ft := m.Type.(type) { + case *ast.Ident: + v.Include[st.Name.Name] = append(v.Include[st.Name.Name], ft.Name) + case *ast.FuncType: + v.Methods[st.Name.Name][m.Names[0].Name] = &methodMeta{ + node: m, + ftype: ft, + } + } + } + + return v +} + +func main() { + // latest (v1) + if err := generate("./api", "api", "api", "./api/proxy_gen.go"); err != nil { + fmt.Println("error: ", err) + } + + // v0 + if 
err := generate("./api/v0api", "v0api", "v0api", "./api/v0api/proxy_gen.go"); err != nil { + fmt.Println("error: ", err) + } +} + +func typeName(e ast.Expr, pkg string) (string, error) { + switch t := e.(type) { + case *ast.SelectorExpr: + return t.X.(*ast.Ident).Name + "." + t.Sel.Name, nil + case *ast.Ident: + pstr := t.Name + if !unicode.IsLower(rune(pstr[0])) && pkg != "api" { + pstr = "api." + pstr // todo src pkg name + } + return pstr, nil + case *ast.ArrayType: + subt, err := typeName(t.Elt, pkg) + if err != nil { + return "", err + } + return "[]" + subt, nil + case *ast.StarExpr: + subt, err := typeName(t.X, pkg) + if err != nil { + return "", err + } + return "*" + subt, nil + case *ast.MapType: + k, err := typeName(t.Key, pkg) + if err != nil { + return "", err + } + v, err := typeName(t.Value, pkg) + if err != nil { + return "", err + } + return "map[" + k + "]" + v, nil + case *ast.StructType: + if len(t.Fields.List) != 0 { + return "", xerrors.Errorf("can't struct") + } + return "struct{}", nil + case *ast.InterfaceType: + if len(t.Methods.List) != 0 { + return "", xerrors.Errorf("can't interface") + } + return "interface{}", nil + case *ast.ChanType: + subt, err := typeName(t.Value, pkg) + if err != nil { + return "", err + } + if t.Dir == ast.SEND { + subt = "->chan " + subt + } else { + subt = "<-chan " + subt + } + return subt, nil + default: + return "", xerrors.Errorf("unknown type") + } +} + +func generate(path, pkg, outpkg, outfile string) error { + fset := token.NewFileSet() + apiDir, err := filepath.Abs(path) + if err != nil { + return err + } + outfile, err = filepath.Abs(outfile) + if err != nil { + return err + } + pkgs, err := parser.ParseDir(fset, apiDir, nil, parser.AllErrors|parser.ParseComments) + if err != nil { + return err + } + + ap := pkgs[pkg] + + v := &Visitor{make(map[string]map[string]*methodMeta), map[string][]string{}} + ast.Walk(v, ap) + + type methodInfo struct { + Name string + node ast.Node + Tags map[string][]string 
+ NamedParams, ParamNames, Results, DefRes string + } + + type strinfo struct { + Name string + Methods map[string]*methodInfo + Include []string + } + + type meta struct { + Infos map[string]*strinfo + Imports map[string]string + OutPkg string + } + + m := &meta{ + OutPkg: outpkg, + Infos: map[string]*strinfo{}, + Imports: map[string]string{}, + } + + for fn, f := range ap.Files { + if strings.HasSuffix(fn, "gen.go") { + continue + } + + //fmt.Println("F:", fn) + cmap := ast.NewCommentMap(fset, f, f.Comments) + + for _, im := range f.Imports { + m.Imports[im.Path.Value] = im.Path.Value + if im.Name != nil { + m.Imports[im.Path.Value] = im.Name.Name + " " + m.Imports[im.Path.Value] + } + } + + for ifname, methods := range v.Methods { + if _, ok := m.Infos[ifname]; !ok { + m.Infos[ifname] = &strinfo{ + Name: ifname, + Methods: map[string]*methodInfo{}, + Include: v.Include[ifname], + } + } + info := m.Infos[ifname] + for mname, node := range methods { + filteredComments := cmap.Filter(node.node).Comments() + + if _, ok := info.Methods[mname]; !ok { + var params, pnames []string + for _, param := range node.ftype.Params.List { + pstr, err := typeName(param.Type, outpkg) + if err != nil { + return err + } + + c := len(param.Names) + if c == 0 { + c = 1 + } + + for i := 0; i < c; i++ { + pname := fmt.Sprintf("p%d", len(params)) + pnames = append(pnames, pname) + params = append(params, pname+" "+pstr) + } + } + + results := []string{} + for _, result := range node.ftype.Results.List { + rs, err := typeName(result.Type, outpkg) + if err != nil { + return err + } + results = append(results, rs) + } + + defRes := "" + if len(results) > 1 { + defRes = results[0] + switch { + case defRes[0] == '*' || defRes[0] == '<', defRes == "interface{}": + defRes = "nil" + case defRes == "bool": + defRes = "false" + case defRes == "string": + defRes = `""` + case defRes == "int", defRes == "int64", defRes == "uint64", defRes == "uint": + defRes = "0" + default: + defRes = "*new(" + 
defRes + ")" + } + defRes += ", " + } + + info.Methods[mname] = &methodInfo{ + Name: mname, + node: node.node, + Tags: map[string][]string{}, + NamedParams: strings.Join(params, ", "), + ParamNames: strings.Join(pnames, ", "), + Results: strings.Join(results, ", "), + DefRes: defRes, + } + } + + // try to parse tag info + if len(filteredComments) > 0 { + tagstr := filteredComments[len(filteredComments)-1].List[0].Text + tagstr = strings.TrimPrefix(tagstr, "//") + tl := strings.Split(strings.TrimSpace(tagstr), " ") + for _, ts := range tl { + tf := strings.Split(ts, ":") + if len(tf) != 2 { + continue + } + if tf[0] != "perm" { // todo: allow more tag types + continue + } + info.Methods[mname].Tags[tf[0]] = tf + } + } + } + } + } + + /*jb, err := json.MarshalIndent(Infos, "", " ") + if err != nil { + return err + } + fmt.Println(string(jb))*/ + + w, err := os.OpenFile(outfile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666) + if err != nil { + return err + } + + err = doTemplate(w, m, `// Code generated by github.com/filecoin-project/lotus/gen/api. DO NOT EDIT. + +package {{.OutPkg}} + +import ( +{{range .Imports}} {{.}} +{{end}} +) +`) + if err != nil { + return err + } + + err = doTemplate(w, m, ` +{{range .Infos}} +type {{.Name}}Struct struct { +{{range .Include}} + {{.}}Struct +{{end}} + Internal struct { +{{range .Methods}} + {{.Name}} func({{.NamedParams}}) ({{.Results}}) `+"`"+`{{range .Tags}}{{index . 0}}:"{{index . 
1}}"{{end}}`+"`"+` +{{end}} + } +} + +type {{.Name}}Stub struct { +{{range .Include}} + {{.}}Stub +{{end}} +} +{{end}} + +{{range .Infos}} +{{$name := .Name}} +{{range .Methods}} +func (s *{{$name}}Struct) {{.Name}}({{.NamedParams}}) ({{.Results}}) { + return s.Internal.{{.Name}}({{.ParamNames}}) +} + +func (s *{{$name}}Stub) {{.Name}}({{.NamedParams}}) ({{.Results}}) { + return {{.DefRes}}xerrors.New("method not supported") +} +{{end}} +{{end}} + +{{range .Infos}}var _ {{.Name}} = new({{.Name}}Struct) +{{end}} + +`) + return err +} + +func doTemplate(w io.Writer, info interface{}, templ string) error { + t := template.Must(template.New(""). + Funcs(template.FuncMap{}).Parse(templ)) + + return t.Execute(w, info) +} diff --git a/gen/main.go b/gen/main.go index c2a6d009ba4..0018b241d62 100644 --- a/gen/main.go +++ b/gen/main.go @@ -8,6 +8,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/exchange" + "github.com/filecoin-project/lotus/chain/market" "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" @@ -52,6 +53,8 @@ func main() { api.SealedRefs{}, api.SealTicket{}, api.SealSeed{}, + api.PieceDealInfo{}, + api.DealSchedule{}, ) if err != nil { fmt.Println(err) @@ -67,6 +70,14 @@ func main() { os.Exit(1) } + err = gen.WriteTupleEncodersToFile("./chain/market/cbor_gen.go", "market", + market.FundedAddressState{}, + ) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = gen.WriteTupleEncodersToFile("./chain/exchange/cbor_gen.go", "exchange", exchange.Request{}, exchange.Response{}, diff --git a/genesis/types.go b/genesis/types.go index db8d32a3bd7..d4c04113a0c 100644 --- a/genesis/types.go +++ b/genesis/types.go @@ -3,6 +3,8 @@ package genesis import ( "encoding/json" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/go-address" 
"github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" @@ -75,8 +77,9 @@ type Actor struct { } type Template struct { - Accounts []Actor - Miners []Miner + NetworkVersion network.Version + Accounts []Actor + Miners []Miner NetworkName string Timestamp uint64 `json:",omitempty"` diff --git a/go.mod b/go.mod index 723640930bd..198b862e6aa 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/filecoin-project/lotus -go 1.14 +go 1.16 require ( contrib.go.opencensus.io/exporter/jaeger v0.1.0 @@ -8,12 +8,14 @@ require ( github.com/BurntSushi/toml v0.3.1 github.com/GeertJohan/go.rice v1.0.0 github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee + github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d + github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07 - github.com/coreos/go-systemd/v22 v22.0.0 + github.com/coreos/go-systemd/v22 v22.1.0 github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/dgraph-io/badger/v2 v2.2007.2 github.com/docker/go-units v0.4.0 @@ -21,32 +23,40 @@ require ( github.com/drand/kyber v1.1.4 github.com/dustin/go-humanize v1.0.0 github.com/elastic/go-sysinfo v1.3.0 + github.com/elastic/gosigar v0.12.0 + github.com/etclabscore/go-openrpc-reflect v0.0.36 github.com/fatih/color v1.9.0 github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f - github.com/filecoin-project/go-address v0.0.4 + github.com/filecoin-project/go-address v0.0.5 github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect - github.com/filecoin-project/go-bitfield v0.2.1 + github.com/filecoin-project/go-bitfield v0.2.4 
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 + github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 - github.com/filecoin-project/go-data-transfer v0.9.0 - github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f - github.com/filecoin-project/go-fil-markets v1.0.0 - github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49 + github.com/filecoin-project/go-data-transfer v1.6.0 + github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a + github.com/filecoin-project/go-fil-markets v1.5.0 + github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec github.com/filecoin-project/go-multistore v0.0.3 github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 - github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 - github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f + github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498 + github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe - github.com/filecoin-project/go-statestore v0.1.0 + github.com/filecoin-project/go-statestore v0.1.1 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b - github.com/filecoin-project/specs-actors v0.9.12 - github.com/filecoin-project/specs-actors/v2 v2.2.0 - github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796 + github.com/filecoin-project/specs-actors v0.9.14 + github.com/filecoin-project/specs-actors/v2 v2.3.5 + github.com/filecoin-project/specs-actors/v3 v3.1.1 + github.com/filecoin-project/specs-actors/v4 v4.0.1 + github.com/filecoin-project/specs-actors/v5 v5.0.1 + github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 
github.com/filecoin-project/test-vectors/schema v0.0.5 github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 + github.com/gdamore/tcell/v2 v2.2.0 github.com/go-kit/kit v0.10.0 github.com/go-ole/go-ole v1.2.4 // indirect - github.com/google/uuid v1.1.1 + github.com/golang/mock v1.5.0 + github.com/google/uuid v1.1.2 github.com/gorilla/mux v1.7.4 github.com/gorilla/websocket v1.4.2 github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 @@ -55,9 +65,9 @@ require ( github.com/hashicorp/golang-lru v0.5.4 github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d github.com/ipfs/bbloom v0.0.4 - github.com/ipfs/go-bitswap v0.2.20 - github.com/ipfs/go-block-format v0.0.2 - github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834 + github.com/ipfs/go-bitswap v0.3.2 + github.com/ipfs/go-block-format v0.0.3 + github.com/ipfs/go-blockservice v0.1.4 github.com/ipfs/go-cid v0.0.7 github.com/ipfs/go-cidutil v0.0.2 github.com/ipfs/go-datastore v0.4.5 @@ -67,8 +77,8 @@ require ( github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 github.com/ipfs/go-filestore v1.0.0 github.com/ipfs/go-fs-lock v0.0.6 - github.com/ipfs/go-graphsync v0.3.1 - github.com/ipfs/go-ipfs-blockstore v1.0.1 + github.com/ipfs/go-graphsync v0.6.1 + github.com/ipfs/go-ipfs-blockstore v1.0.3 github.com/ipfs/go-ipfs-chunker v0.0.5 github.com/ipfs/go-ipfs-ds-help v1.0.0 github.com/ipfs/go-ipfs-exchange-interface v0.0.1 @@ -76,80 +86,84 @@ require ( github.com/ipfs/go-ipfs-files v0.0.8 github.com/ipfs/go-ipfs-http-client v0.0.5 github.com/ipfs/go-ipfs-routing v0.1.0 - github.com/ipfs/go-ipld-cbor v0.0.5-0.20200428170625-a0bd04d3cbdf + github.com/ipfs/go-ipfs-util v0.0.2 + github.com/ipfs/go-ipld-cbor v0.0.5 github.com/ipfs/go-ipld-format v0.2.0 - github.com/ipfs/go-log v1.0.4 - github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4 + github.com/ipfs/go-log/v2 v2.3.0 github.com/ipfs/go-merkledag v0.3.2 + github.com/ipfs/go-metrics-interface v0.0.1 
github.com/ipfs/go-metrics-prometheus v0.0.2 github.com/ipfs/go-path v0.0.7 github.com/ipfs/go-unixfs v0.2.4 github.com/ipfs/interface-go-ipfs-core v0.2.3 - github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4 - github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f + github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d + github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018 github.com/kelseyhightower/envconfig v1.4.0 github.com/lib/pq v1.7.0 + github.com/libp2p/go-buffer-pool v0.0.2 github.com/libp2p/go-eventbus v0.2.1 - github.com/libp2p/go-libp2p v0.11.0 + github.com/libp2p/go-libp2p v0.14.2 github.com/libp2p/go-libp2p-connmgr v0.2.4 - github.com/libp2p/go-libp2p-core v0.6.1 + github.com/libp2p/go-libp2p-core v0.8.5 github.com/libp2p/go-libp2p-discovery v0.5.0 - github.com/libp2p/go-libp2p-kad-dht v0.8.3 - github.com/libp2p/go-libp2p-mplex v0.2.4 - github.com/libp2p/go-libp2p-noise v0.1.2 - github.com/libp2p/go-libp2p-peerstore v0.2.6 - github.com/libp2p/go-libp2p-pubsub v0.3.6 - github.com/libp2p/go-libp2p-quic-transport v0.8.2 + github.com/libp2p/go-libp2p-kad-dht v0.11.0 + github.com/libp2p/go-libp2p-mplex v0.4.1 + github.com/libp2p/go-libp2p-noise v0.2.0 + github.com/libp2p/go-libp2p-peerstore v0.2.7 + github.com/libp2p/go-libp2p-pubsub v0.5.0 + github.com/libp2p/go-libp2p-quic-transport v0.10.0 github.com/libp2p/go-libp2p-record v0.1.3 github.com/libp2p/go-libp2p-routing-helpers v0.2.3 - github.com/libp2p/go-libp2p-swarm v0.2.8 + github.com/libp2p/go-libp2p-swarm v0.5.0 github.com/libp2p/go-libp2p-tls v0.1.3 - github.com/libp2p/go-libp2p-yamux v0.2.8 + github.com/libp2p/go-libp2p-yamux v0.5.4 github.com/libp2p/go-maddr-filter v0.1.0 github.com/mattn/go-colorable v0.1.6 // indirect + github.com/mattn/go-isatty v0.0.13 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.0.3 github.com/multiformats/go-multiaddr v0.3.1 - 
github.com/multiformats/go-multiaddr-dns v0.2.0 + github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multibase v0.0.3 github.com/multiformats/go-multihash v0.0.14 + github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 github.com/opentracing/opentracing-go v1.2.0 github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a github.com/prometheus/client_golang v1.6.0 github.com/raulk/clock v1.1.0 - github.com/stretchr/testify v1.6.1 - github.com/supranational/blst v0.1.1 + github.com/raulk/go-watchdog v1.0.1 + github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 + github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/testify v1.7.0 github.com/syndtr/goleveldb v1.0.0 github.com/urfave/cli/v2 v2.2.0 github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba - github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163 + github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2 github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 - github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d + github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325 github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 - go.opencensus.io v0.22.4 + go.opencensus.io v0.23.0 go.uber.org/dig v1.10.0 // indirect go.uber.org/fx v1.9.0 - go.uber.org/multierr v1.5.0 - go.uber.org/zap v1.15.0 - golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 - golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c + go.uber.org/multierr v1.6.0 + go.uber.org/zap v1.16.0 + golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sys v0.0.0-20210426080607-c94f62235c83 golang.org/x/time v0.0.0-20191024005414-555d28b269f0 + golang.org/x/tools v0.0.0-20210106214847-113979e3529a golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 
gopkg.in/cheggaaa/pb.v1 v1.0.28 gotest.tools v2.2.0+incompatible - launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect + honnef.co/go/tools v0.0.1-2020.1.3 // indirect ) -replace github.com/filecoin-project/lotus => ./ +replace github.com/libp2p/go-libp2p-yamux => github.com/libp2p/go-libp2p-yamux v0.5.1 -replace github.com/golangci/golangci-lint => github.com/golangci/golangci-lint v1.18.0 +replace github.com/filecoin-project/lotus => ./ replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi replace github.com/filecoin-project/test-vectors => ./extern/test-vectors - -replace github.com/supranational/blst => ./extern/fil-blst/blst - -replace github.com/filecoin-project/fil-blst => ./extern/fil-blst diff --git a/go.sum b/go.sum index 3ced99135ba..2624a0d3f3b 100644 --- a/go.sum +++ b/go.sum @@ -42,8 +42,17 @@ github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee h1:8doiS7ib3zi6/K1 github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa h1:1PPxEyGdIGVkX/kqMvLJ95a1dGS1Sz7tpNEgehEYYt0= +github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa/go.mod h1:WUmMvh9wMtqj1Xhf1hf3kp9RvL+y6odtdYxpyZjb90U= +github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1 
h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= @@ -58,12 +67,12 @@ github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBA github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 h1:T3+cD5fYvuH36h7EZq+TDpm+d8a6FSD4pQsbmuGGQ8o= +github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921/go.mod h1:/n6+1/DWPltRLWL/VKyUxg6tzsl5kHUCcraimt4vr60= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d 
h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -89,22 +98,27 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bep/debounce v1.2.0 h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo= +github.com/bep/debounce v1.2.0/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/briandowns/spinner v1.11.1 h1:OixPqDEcX3juo5AjQZAnFPbeUA0jvkp2qzB5gOZJ/L0= github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.21.0-beta 
h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= +github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 h1:gfAMKE626QEuKG3si0pdTRcr/YEbBoxY+3GOH3gWvl4= @@ -127,6 +141,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5O github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/clbanning/x2j 
v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -135,14 +150,15 @@ github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcK github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b h1:OKALTB609+19AM7wsO0k8yMwAqjEIppcnYvyIhA+ZlQ= github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07 h1:Cb2pZUCFXlLA8i7My+wrN51D41GeuhYOKa1dJeZt6NY= github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3 h1:2+dpIJzYMSbLi0587YXpi8tOJT52qCOI/1I0UNThc/I= github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 h1:7grrpcfCtbZLsjtB0DgMuzs1umsJmpzaHMZ6cO6iAWw= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= 
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -153,9 +169,10 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -173,8 +190,10 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod 
h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= -github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f h1:BOaYiTvg8p9vBUXpklC22XSK/mifLF7lG9jtmYYi3Tc= github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU= github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk= github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= @@ -195,7 +214,6 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczC github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/drand/bls12-381 v0.3.2 h1:RImU8Wckmx8XQx1tp1q04OV73J9Tj6mmpQLYDP7V1XE= github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= github.com/drand/drand v1.2.1 h1:KB7z+69YbnQ5z22AH/LMi0ObDR8DzYmrkS6vZXTR9jI= github.com/drand/drand v1.2.1/go.mod h1:j0P7RGmVaY7E/OuO2yQOcQj7OgeZCuhgu2gdv0JAm+g= @@ -217,83 +235,112 @@ github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmet github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= 
github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/elastic/gosigar v0.12.0 h1:AsdhYCJlTudhfOYQyFNgx+fIVTfrDO0V1ST0vHgiapU= +github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/etclabscore/go-jsonschema-walk v0.0.6 h1:DrNzoKWKd8f8XB5nFGBY00IcjakRE22OTI12k+2LkyY= +github.com/etclabscore/go-jsonschema-walk v0.0.6/go.mod h1:VdfDY72AFAiUhy0ZXEaWSpveGjMT5JcDIm903NGqFwQ= +github.com/etclabscore/go-openrpc-reflect v0.0.36 h1:kSqNB2U8RVoW4si+4fsv13NGNkRAQ5j78zTUx1qiehk= +github.com/etclabscore/go-openrpc-reflect v0.0.36/go.mod h1:0404Ky3igAasAOpyj1eESjstTyneBAIk5PgJFbK4s5E= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY= github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fd/go-nat 
v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E= -github.com/filecoin-project/go-address v0.0.3 h1:eVfbdjEbpbzIrbiSa+PiGUY+oDK9HnUn+M1R/ggoHf8= github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= -github.com/filecoin-project/go-address v0.0.4 h1:gSNMv0qWwH16fGQs7ycOUrDjY6YCSsgLUl0I0KLjo8w= -github.com/filecoin-project/go-address v0.0.4/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= -github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= +github.com/filecoin-project/go-address v0.0.5 h1:SSaFT/5aLfPXycUlFyemoHYhRgdyXClXCyDdNJKPlDM= +github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU= github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= -github.com/filecoin-project/go-bitfield v0.2.0 h1:gCtLcjskIPtdg4NfN7gQZSQF9yrBQ7mkT0qCJxzGI2Q= +github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= -github.com/filecoin-project/go-bitfield v0.2.1 h1:S6Uuqcspqu81sWJ0He4OAfFLm1tSwPdVjtKTkl5m/xQ= -github.com/filecoin-project/go-bitfield v0.2.1/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.4 
h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= +github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= +github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 h1:U9Z+76pHCKBmtdxFV7JFZJj7OVm12I6dEKwtMVbq5p0= +github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v0.9.0 h1:nTT8j7Hu3TM0wRWrGy83/ctawG7sleJGdFWtIsUsKgY= -github.com/filecoin-project/go-data-transfer v0.9.0/go.mod h1:i2CqUy7TMQGKukj9BgqIxiP8nDHDXU2VLd771KVaCaQ= +github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= +github.com/filecoin-project/go-data-transfer v1.6.0 h1:DHIzEc23ydRCCBwtFet3MfgO8gMpZEnw60Y+s71oX6o= +github.com/filecoin-project/go-data-transfer v1.6.0/go.mod h1:E3WW4mCEYwU2y65swPEajSZoFWFmfXt7uwGduoACZQc= github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= -github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod 
h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= -github.com/filecoin-project/go-fil-markets v1.0.0 h1:np9+tlnWXh9xYG4oZfha6HZFLYOaAZoMGR3V4w6DM48= -github.com/filecoin-project/go-fil-markets v1.0.0/go.mod h1:lXExJyYHwpMMddCqhEdNrc7euYJKNkp04K76NZqJLGg= +github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg= +github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= +github.com/filecoin-project/go-fil-markets v1.5.0 h1:3KEs01L8XFCEgujZ6ggFjr1XWjpjTQcmSSeo3I99I0k= +github.com/filecoin-project/go-fil-markets v1.5.0/go.mod h1:7be6zzFwaN8kxVeYZf/UUj/JilHC0ogPvWqE1TW8Ptk= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= -github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= -github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI= -github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49 h1:FSY245KeXFCUgyfFEu+bhrZNk8BGGJyfpSmQl2aiPU8= -github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= +github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod 
h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= +github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec h1:rGI5I7fdU4viManxmDdbk5deZO7afe6L1Wc04dAmlOM= +github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI= github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 h1:A256QonvzRaknIIAuWhe/M2dpV2otzs3NBhi5TWa/UA= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498 h1:G10ezOvpH1CLXQ19EA9VWNwyL0mg536ujSayjV0yg0k= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= -github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab h1:cEDC5Ei8UuT99hPWhCjA72SM9AuRtnpvdSTIYbnzN8I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f h1:TZDTu4MtBKSFLXWGKLy+cvC3nHfMFIrVgWLAz/+GgZQ= -github.com/filecoin-project/go-state-types 
v0.0.0-20201013222834-41ea465f274f/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 h1:Jc4OprDp3bRDxbsrXNHPwJabZJM3iDy+ri8/1e0ZnX4= +github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= -github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ= github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= +github.com/filecoin-project/go-statestore v0.1.1 h1:ufMFq00VqnT2CAuDpcGnwLnCX1I/c3OROw/kXVNSTZk= +github.com/filecoin-project/go-statestore v0.1.1/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= -github.com/filecoin-project/specs-actors v0.9.12 h1:iIvk58tuMtmloFNHhAOQHG+4Gci6Lui0n7DYQGi3cJk= github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/specs-actors v0.9.14 
h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY= +github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= -github.com/filecoin-project/specs-actors/v2 v2.2.0 h1:IyCICb0NHYeD0sdSqjVGwWydn/7r7xXuxdpvGAcRCGY= -github.com/filecoin-project/specs-actors/v2 v2.2.0/go.mod h1:rlv5Mx9wUhV8Qsz+vUezZNm+zL4tK08O0HreKKPB2Wc= -github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796 h1:dJsTPWpG2pcTeojO2pyn0c6l+x/3MZYCBgo/9d11JEk= -github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= +github.com/filecoin-project/specs-actors/v2 v2.3.2/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y= +github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= +github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc= +github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= +github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= +github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= +github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= +github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= +github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg= +github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= +github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= +github.com/filecoin-project/specs-actors/v5 v5.0.1 
h1:PrYm5AKdMlJ/55eRW5laWcnaX66gyyDYBWvH38kNAMo= +github.com/filecoin-project/specs-actors/v5 v5.0.1/go.mod h1:74euMDIXorusOBs/QL/LNkYsXZdDpLJwojWw6T03pdE= +github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= +github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -303,6 +350,10 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 h1:EzDjxMg43q1tA2c0MV3tNbaontnHLplHyFF6M5KiVP0= github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= +github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= 
+github.com/gdamore/tcell/v2 v2.2.0 h1:vSyEgKwraXPSOkvCk7IwOSyX+Pv3V2cV9CikJMXg4U4= +github.com/gdamore/tcell/v2 v2.2.0/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU= github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= @@ -323,6 +374,21 @@ github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.4 h1:3Vw+rh13uq2JFNxgnMTGE1rnoieU9FmyE1gvnyylsYg= +github.com/go-openapi/jsonreference v0.19.4/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/spec v0.19.7/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.11 h1:ogU5q8dtp3MMPn59a9VRrPKVxvJHEs5P7yNMR5sNnis= +github.com/go-openapi/spec v0.19.11/go.mod h1:vqK/dIdLGCosfvYsQV3WfC7N3TiZSnGY2RZKoFK7X28= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag 
v0.19.8/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.11 h1:RFTu/dlFySpyVvJDfp/7674JY4SDglYWKztbiIGFpmc= +github.com/go-openapi/swag v0.19.11/go.mod h1:Uc0gKkdR+ojzsEpjh39QChyu92vPgIr72POcgHMAgSY= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= @@ -339,8 +405,9 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= @@ -356,8 +423,10 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock 
v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -369,10 +438,10 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws= github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -381,24 +450,27 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= -github.com/google/gopacket v1.1.18 h1:lum7VRA9kdlvBi7/v2p7/zcbkduHaCH/SVVyurs7OpY= github.com/google/gopacket v1.1.18/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -474,7 +546,11 @@ github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324/go.mod h1:MZ2ZmwcBpvOo github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= +github.com/iancoleman/orderedmap v0.1.0 h1:2orAxZBJsvimgEBmMWfXaFlzSG2fbQil5qzP3F6cCkg= +github.com/iancoleman/orderedmap v0.1.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d h1:/WZQPMZNsjZ7IlCpsLGdQBINg5bxKQ1K1sh6awxLtkA= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= @@ -486,17 
+562,19 @@ github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3 github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= -github.com/ipfs/go-bitswap v0.2.20 h1:Zfi5jDUoqxDThORUznqdeL77DdGniAzlccNJ4vr+Itc= -github.com/ipfs/go-bitswap v0.2.20/go.mod h1:C7TwBgHnu89Q8sHsTJP7IhUqF9XYLe71P4tT5adgmYo= +github.com/ipfs/go-bitswap v0.3.2 h1:TdKx7lpidYe2dMAKfdeNS26y6Pc/AZX/i8doI1GV210= +github.com/ipfs/go-bitswap v0.3.2/go.mod h1:AyWWfN3moBzQX0banEtfKOfbXb3ZeoOeXnZGNPV9S6w= github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= -github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= +github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= github.com/ipfs/go-blockservice v0.0.3/go.mod h1:/NNihwTi6V2Yr6g8wBI+BSwPuURpBRMtYNGrlxZ8KuI= github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834 h1:hFJoI1D2a3MqiNkSb4nKwrdkhCngUxUTFNwVwovZX2s= github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-blockservice v0.1.4 h1:Vq+MlsH8000KbbUciRyYMEw/NNP8UAGmcqKi4uWmFGA= +github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-cid 
v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -518,7 +596,6 @@ github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRV github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.5 h1:cwOUcGMLdLPWgu3SlrCckCMznaGADbPqE0r8h768/Dg= github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= @@ -547,18 +624,18 @@ github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPi github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= -github.com/ipfs/go-graphsync v0.3.0 h1:I6Y20kSuCWkUvPoUWo4V3am704/9QjgDVVkf0zIV8+8= -github.com/ipfs/go-graphsync v0.3.0/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= -github.com/ipfs/go-graphsync v0.3.1 h1:dJLYrck4oyJDfMVhGEKiWHxaY8oYMWko4m2Fi+4bofo= -github.com/ipfs/go-graphsync v0.3.1/go.mod h1:bw4LiLM5Oq/uLdzEtih9LK8GrwSijv+XqYiWCTxHMqs= -github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk= +github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= +github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= +github.com/ipfs/go-graphsync v0.6.1 
h1:i9wN7YkBXWwIsUjVQeuaDxFB59yWZrG1xL564Nz7aGE= +github.com/ipfs/go-graphsync v0.6.1/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= -github.com/ipfs/go-ipfs-blockstore v1.0.1 h1:fnuVj4XdZp4yExhd0CnUwAiMNJHiPnfInhiuwz4lW1w= github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= +github.com/ipfs/go-ipfs-blockstore v1.0.3 h1:RDhK6fdg5YsonkpMuMpdvk/pRtOQlrIRIybuQfkvB2M= +github.com/ipfs/go-ipfs-blockstore v1.0.3/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= @@ -603,8 +680,8 @@ github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5-0.20200428170625-a0bd04d3cbdf h1:PRCy+w3GocY77CBEwTprp6hn7PLiEU1YToKe7B+1FVk= -github.com/ipfs/go-ipld-cbor v0.0.5-0.20200428170625-a0bd04d3cbdf/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.5 
h1:ovz4CHKogtG2KB/h1zUp5U0c/IzZrL435rCh5+K/5G8= +github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= @@ -624,11 +701,16 @@ github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBW github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= -github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4 h1:3bijxqzQ1O9yg7gd7Aqk80oaEvsJ+uXw0zSvi2qR3Jw= github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= +github.com/ipfs/go-log/v2 v2.3.0 h1:31Re/cPqFHpsRHgyVwjWADPoF0otB1WrjTy8ZFYwEZU= +github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA= github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY= 
github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= @@ -647,6 +729,7 @@ github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUn github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= github.com/ipfs/go-unixfs v0.0.4/go.mod h1:eIo/p9ADu/MFOuyxzwU+Th8D6xoxU//r590vUpWyfz8= github.com/ipfs/go-unixfs v0.2.1/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= +github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= @@ -657,14 +740,20 @@ github.com/ipfs/iptb v1.4.0 h1:YFYTrCkLMRwk/35IMyC6+yjoQSHTEcNcefBStLJzgvo= github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdmg= github.com/ipfs/iptb-plugins v0.2.1 h1:au4HWn9/pRPbkxA08pDx2oRAs4cnbgQWgV0teYXuuGA= github.com/ipfs/iptb-plugins v0.2.1/go.mod h1:QXMbtIWZ+jRsW8a4h13qAKU7jcM7qaittO8wOsTP0Rs= -github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4 h1:6phjU3kXvCEWOZpu+Ob0w6DzgPFZmDLgLPxJhD8RxEY= +github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= +github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d h1:iphSzTuPqyDgH7WUVZsdqUnQNzYgIblsVr1zhVNA33U= +github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ= +github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= -github.com/ipld/go-ipld-prime 
v0.5.1-0.20200828233916-988837377a7f h1:XpOuNQ5GbXxUcSukbQcW9jkE7REpaFGJU2/T00fo9kA= github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= +github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018 h1:RbRHv8epkmvBYA5cGfz68GUSbOgx5j/7ObLIl4Rsif0= +github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= +github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6 h1:6Mq+tZGSEMEoJJ1NbJRhddeelkXZcU8yfH/ZRYUo/Es= github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= +github.com/ipld/go-ipld-prime-proto v0.1.0 h1:j7gjqrfwbT4+gXpHwEx5iMssma3mnctC7YaCimsFP70= +github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jackpal/gateway v1.0.4/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= @@ -695,6 +784,8 @@ github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1 h1:qBCV/RLV02TSfQa7tFmxTihnG+u+7JXByOkhlkR5rmQ= github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= 
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= @@ -711,30 +802,29 @@ github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVY github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kabukky/httpscerts v0.0.0-20150320125433-617593d7dcb3 h1:Iy7Ifq2ysilWU4QlCx/97OoI4xT1IV7i8byT/EyIT/M= github.com/kabukky/httpscerts v0.0.0-20150320125433-617593d7dcb3/go.mod h1:BYpt4ufZiIGv2nXn4gMxnfKV306n3mWXgNu/d2TqdTU= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= -github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f h1:qET3Wx0v8tMtoTOQnsJXVvqvCopSf48qobR6tcJuDHo= github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= github.com/kilic/bls12-381 v0.0.0-20200731194930-64c428e1bff5/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391 h1:51kHw7l/dUDdOdW06AlUGT5jnpj6nqQSILebcsikSjA= github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod 
h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= @@ -747,12 +837,15 @@ github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwn github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= github.com/libp2p/go-buffer-pool v0.0.2 
h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40JQWnayTvNMgD/vyk= github.com/libp2p/go-conn-security-multistream v0.0.1/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= -github.com/libp2p/go-conn-security-multistream v0.2.0 h1:uNiDjS58vrvJTg9jO6bySd1rMKejieG7v45ekqHbZ1M= github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= +github.com/libp2p/go-conn-security-multistream v0.2.1 h1:ft6/POSK7F+vl/2qzegnHDaXFU0iWB4yVTYrioC6Zy0= +github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= @@ -775,8 +868,11 @@ github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qD github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= -github.com/libp2p/go-libp2p v0.11.0 h1:jb5mqdqYEBAybTEhD8io43Cz5LzVKuWxOK7znSN69jE= -github.com/libp2p/go-libp2p v0.11.0/go.mod h1:3/ogJDXsbbepEfqtZKBR/DedzxJXCeK17t2Z9RE9bEE= 
+github.com/libp2p/go-libp2p v0.12.0/go.mod h1:FpHZrfC1q7nA8jitvdjKBDF31hguaC676g/nT9PgQM0= +github.com/libp2p/go-libp2p v0.14.2 h1:qs0ABtjjNjS+RIXT1uM7sMJEvIc0pq2nKR0VQxFXhHI= +github.com/libp2p/go-libp2p v0.14.2/go.mod h1:0PQMADQEjCM2l8cSMYDpTgsb8gr6Zq7i4LUgq1mlW2E= +github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U= +github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= @@ -785,8 +881,9 @@ github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQ github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= -github.com/libp2p/go-libp2p-autonat v0.3.2 h1:OhDSwVVaq7liTaRIsFFYvsaPp0pn2yi0WazejZ4DUmo= -github.com/libp2p/go-libp2p-autonat v0.3.2/go.mod h1:0OzOi1/cVc7UcxfOddemYD5vzEqi4fwRbnZcJGLi68U= +github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.4.2 h1:YMp7StMi2dof+baaxkbxaizXjY1RPvU71CXfxExzcUU= +github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= @@ -804,8 
+901,8 @@ github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3 github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= -github.com/libp2p/go-libp2p-circuit v0.3.1 h1:69ENDoGnNN45BNDnBd+8SXSetDuw0eJFcGmOvvtOgBw= -github.com/libp2p/go-libp2p-circuit v0.3.1/go.mod h1:8RMIlivu1+RxhebipJwFDA45DasLx+kkrp4IlJj53F4= +github.com/libp2p/go-libp2p-circuit v0.4.0 h1:eqQ3sEYkGTtybWgr6JLqJY6QLtPWRErvFjFDfAOO1wc= +github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4= github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= @@ -832,10 +929,17 @@ github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqe github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.6.1 h1:XS+Goh+QegCDojUZp00CaPMfiEADCrLjNZskWE7pvqs= github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.2/go.mod 
h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.3/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.5 h1:aEgbIcPGsKy6zYcC+5AJivYFedhYa4sW7mIpWpUaLKw= +github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= +github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= github.com/libp2p/go-libp2p-daemon v0.2.2/go.mod h1:kyrpsLB2JeNYR2rvXSVWyY0iZuRIMhqzWR3im9BV6NQ= github.com/libp2p/go-libp2p-discovery v0.0.1/go.mod h1:ZkkF9xIFRLA1xCc7bstYFkd80gBGK8Fc1JqGoU2i+zI= @@ -853,11 +957,11 @@ github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= github.com/libp2p/go-libp2p-kad-dht v0.2.1/go.mod h1:k7ONOlup7HKzQ68dE6lSnp07cdxdkmnRa+6B4Fh9/w0= -github.com/libp2p/go-libp2p-kad-dht v0.8.3 h1:ceK5ML6s/I8UAcw6veoNsuEHdHvfo88leU/5uWOIFWs= -github.com/libp2p/go-libp2p-kad-dht v0.8.3/go.mod h1:HnYYy8taJWESkqiESd1ngb9XX/XGGsMA5G0Vj2HoSh4= +github.com/libp2p/go-libp2p-kad-dht v0.11.0 h1:ZLhlmDKsFiOkPhTzfEqBrMy/1Tqx+Dk6UgbHM5//IQM= +github.com/libp2p/go-libp2p-kad-dht v0.11.0/go.mod h1:5ojtR2acDPqh/jXf5orWy8YGb8bHQDS+qeDcoscL/PI= github.com/libp2p/go-libp2p-kbucket v0.2.1/go.mod h1:/Rtu8tqbJ4WQ2KTCOMJhggMukOLNLNPY1EtEWWLxUvc= -github.com/libp2p/go-libp2p-kbucket v0.4.2 h1:wg+VPpCtY61bCasGRexCuXOmEmdKjN+k1w+JtTwu9gA= -github.com/libp2p/go-libp2p-kbucket v0.4.2/go.mod h1:7sCeZx2GkNK1S6lQnGUW5JYZCFPnXzAZCCBBS70lytY= 
+github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= +github.com/libp2p/go-libp2p-kbucket v0.4.7/go.mod h1:XyVo99AfQH0foSf176k4jY1xUJ2+jUJIZCSDm7r2YKk= github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg= github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= @@ -867,8 +971,10 @@ github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3 github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= -github.com/libp2p/go-libp2p-mplex v0.2.4 h1:XFFXaN4jhqnIuJVjYOR3k6bnRj0mFfJOlIuDVww+4Zo= -github.com/libp2p/go-libp2p-mplex v0.2.4/go.mod h1:mI7iOezdWFOisvUwaYd3IDrJ4oVmgoXK8H331ui39CE= +github.com/libp2p/go-libp2p-mplex v0.3.0/go.mod h1:l9QWxRbbb5/hQMECEb908GbS9Sm2UAR2KFZKUJEynEs= +github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= +github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aDN3Fx1sc= +github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-nat v0.0.2/go.mod h1:QrjXQSD5Dj4IJOdEcjHRkWTSomyxRo6HnUkf/TfQpLQ= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= @@ -879,12 +985,12 @@ github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8 github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= github.com/libp2p/go-libp2p-netutil v0.1.0 
h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-noise v0.1.1 h1:vqYQWvnIcHpIoWJKC7Al4D6Hgj0H012TuXRhPwSMGpQ= github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= -github.com/libp2p/go-libp2p-noise v0.1.2 h1:IH9GRihQJTx56obm+GnpdPX4KeVIlvpXrP6xnJ0wxWk= -github.com/libp2p/go-libp2p-noise v0.1.2/go.mod h1:9B10b7ueo7TIxZHHcjcDCo5Hd6kfKT2m77by82SFRfE= +github.com/libp2p/go-libp2p-noise v0.2.0 h1:wmk5nhB9a2w2RxMOyvsoKjizgJOEaJdfAakr0jN8gds= +github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= +github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= github.com/libp2p/go-libp2p-peerstore v0.0.6/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= @@ -896,20 +1002,21 @@ github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRj github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.2.6 h1:2ACefBX23iMdJU9Ke+dcXt3w86MIryes9v7In4+Qq3U= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.7 h1:83JoLxyR9OYTnNfB5vvFqvMUv/xDNa6NoPHnENhBsGw= 
+github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= -github.com/libp2p/go-libp2p-pubsub v0.3.6 h1:9oO8W7qIWCYQYyz5z8nUsPcb3rrFehBlkbqvbSVjBxY= -github.com/libp2p/go-libp2p-pubsub v0.3.6/go.mod h1:DTMSVmZZfXodB/pvdTGrY2eHPZ9W2ev7hzTH83OKHrI= +github.com/libp2p/go-libp2p-pubsub v0.5.0 h1:OzcIuCWyJpOrWH0PTOfvxTzqFur4tiXpY5jXC8OxjyE= +github.com/libp2p/go-libp2p-pubsub v0.5.0/go.mod h1:MKnrsQkFgPcrQs1KVmOXy6Uz2RDQ1xO7dQo/P0Ba+ig= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= -github.com/libp2p/go-libp2p-quic-transport v0.8.2 h1:FDaXBCBJ1e5hY6gnWEJ4NbYyLk8eezr4J6AY3q3KqwM= -github.com/libp2p/go-libp2p-quic-transport v0.8.2/go.mod h1:L+e0q15ZNaYm3seHgbsXjWP8kXLEqz+elLWKk9l8DhM= +github.com/libp2p/go-libp2p-quic-transport v0.10.0 h1:koDCbWD9CCHwcHZL3/WEvP2A+e/o5/W5L3QS/2SPMA0= +github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= @@ 
-934,16 +1041,22 @@ github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaT github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h/GGZes8Wku/M5Y= github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= -github.com/libp2p/go-libp2p-swarm v0.2.8 h1:cIUUvytBzNQmGSjnXFlI6UpoBGsaud82mJPIJVfkDlg= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= +github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.3.1/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.4.3/go.mod h1:mmxP1pGBSc1Arw4F5DIjcpjFAmsRzA1KADuMtMuCT4g= +github.com/libp2p/go-libp2p-swarm v0.5.0 h1:HIK0z3Eqoo8ugmN8YqWAhD2RORgR+3iNXYG4U2PFd1E= +github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= -github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8 h1:v4dvk7YEW8buwCdIVWnhpv0Hp/AAJKRWIxBhmLRZrsk= github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= +github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= 
+github.com/libp2p/go-libp2p-testing v0.4.0 h1:PrwHRi0IGqOwVQWR3xzgigSlhlLfxgfXgkHxr77EghQ= +github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= @@ -953,17 +1066,11 @@ github.com/libp2p/go-libp2p-transport-upgrader v0.0.1/go.mod h1:NJpUAgQab/8K6K0m github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= -github.com/libp2p/go-libp2p-transport-upgrader v0.3.0 h1:q3ULhsknEQ34eVDhv4YwKS8iet69ffs9+Fir6a7weN4= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= -github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= -github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= -github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= -github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= -github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= -github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= -github.com/libp2p/go-libp2p-yamux v0.2.8 h1:0s3ELSLu2O7hWKfX1YjzudBKCP0kZ+m9e2+0veXzkn4= -github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= 
+github.com/libp2p/go-libp2p-transport-upgrader v0.4.2 h1:4JsnbfJzgZeRS9AWN7B9dPqn/LY/HoQTlO9gtdJTIYM= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= +github.com/libp2p/go-libp2p-yamux v0.5.1 h1:sX4WQPHMhRxJE5UZTfjEuBvlQWXB5Bo3A2JK9ZJ9EM0= +github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= @@ -974,8 +1081,10 @@ github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTW github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.1.2 h1:qOg1s+WdGLlpkrczDqmhYzyk3vCfsQ8+RxRTQjOZWwI= github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= +github.com/libp2p/go-mplex v0.3.0 h1:U1T+vmCYJaEoDJPV1aq31N56hS+lJgb397GsylNSgrU= +github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-msgio v0.0.1/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= @@ -987,8 +1096,9 @@ github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/ github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= github.com/libp2p/go-nat v0.0.5/go.mod 
h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.3 h1:1ngWRx61us/EpaKkdqkMjKk/ufr/JlIFYQAxV2XX8Ig= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.6 h1:ruPJStbYyXVYGQ81uzEDzuvbYRLKRrLvTYd33yomC38= +github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= @@ -1004,8 +1114,9 @@ github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2 github.com/libp2p/go-reuseport-transport v0.0.4 h1:OZGz0RB620QDGpv300n1zaOcKGGAoGVf8h9txtt/1uM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.0 h1:Y4s3/jNoryVRKEBrkJ576F17CPOaMIzUeCsg7dlTDj0= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ= +github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw= @@ -1027,55 +1138,59 @@ github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw github.com/libp2p/go-ws-transport v0.1.2/go.mod 
h1:dsh2Ld8F+XNmzpkaAijmg5Is+e9l6/1tK/6VFOdN69Y= github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.3.1 h1:ZX5rWB8nhRRJVaPO6tmkGI/Xx8XNboYX20PW5hXIscw= github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-ws-transport v0.4.0 h1:9tvtQ9xbws6cA5LvqdE6Ne3vcmGB4f1z9SByggk4s0k= +github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.6 h1:O5qcBXRcfqecvQ/My9NqDNHB3/5t58yuJYqthcKhhgE= github.com/libp2p/go-yamux v1.3.6/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.7 h1:v40A1eSPJDIZwz2AvrV3cxpTZEGDP11QJbukmEhYyQI= -github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux/v2 v2.0.0 h1:vSGhAy5u6iHBq11ZDcyHH4Blcf9xlBhT4WQDoOE90LU= +github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.11.2/go.mod 
h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= -github.com/lucas-clemente/quic-go v0.18.1 h1:DMR7guC0NtVS8zNZR3IO7NARZvZygkSC56GGtC6cyys= -github.com/lucas-clemente/quic-go v0.18.1/go.mod h1:yXttHsSNxQi8AWijC/vLP+OJczXqzHSOcJrM5ITUlCg= +github.com/lucas-clemente/quic-go v0.19.3 h1:eCDQqvGBB+kCTkA0XrAFtNe81FMa0/fn4QSoeAbmiF4= +github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac= +github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= -github.com/marten-seemann/qpack v0.2.0/go.mod 
h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc= github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= -github.com/marten-seemann/qtls-go1-15 v0.1.0 h1:i/YPXVxz8q9umso/5y474CNcHmTpA+5DH+mFPjx6PZg= -github.com/marten-seemann/qtls-go1-15 v0.1.0/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ= +github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty 
v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA= +github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= +github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -1092,6 +1207,8 @@ github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nr github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod 
h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= @@ -1138,8 +1255,9 @@ github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/94 github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.1.0/go.mod h1:01k2RAqtoXIuPa3DCavAE9/6jc6nM0H3EgZyfUhN2oY= -github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= +github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= +github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= @@ -1169,8 +1287,10 @@ github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wS github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.1.2 h1:knyamLYMPFPngQjGQ0lhnlys3jtVR/3xV6TREUJr+fE= -github.com/multiformats/go-multistream v0.1.2/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= 
+github.com/multiformats/go-multistream v0.2.2 h1:TCYu1BHTDr1F/Qm75qwYISQdzGcRdC21nFgQW7l7GBo= +github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= @@ -1186,6 +1306,7 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy8ALwOebjekYExl9HTT9urdawqC95tA= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28= github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg= @@ -1203,6 +1324,7 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega 
v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= @@ -1211,6 +1333,10 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 h1:CznVS40zms0Dj5he4ERo+fRPtO0qxUk8lA8Xu3ddet0= +github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333/go.mod h1:Ag6rSXkHIckQmjFBCweJEEt1mrTPBv8b9W4aU/NQWfI= +github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df h1:vdYtBU6zvL7v+Tr+0xFM/qhahw/EvY8DMMunZHKH6eE= github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= @@ -1288,9 +1414,13 @@ github.com/prometheus/procfs v0.1.0 h1:jhMy6QXfi3y2HEzFoyuCj40z4OZIIHHPtFyCMftmv github.com/prometheus/procfs v0.1.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= +github.com/raulk/go-watchdog v1.0.1 h1:qgm3DIJAeb+2byneLrQJ7kvmDLGxN2vy3apXyGaDKN4= +github.com/raulk/go-watchdog v1.0.1/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 
h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1337,8 +1467,9 @@ github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5k github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= @@ -1372,20 +1503,31 @@ github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod 
h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 h1:7z3LSn867ex6VSaahyKadf4WtSsJIgne6A1WLOAGM8A= +github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod 
h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= +github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc= +github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= +github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds= github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1397,8 +1539,9 @@ github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/ github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/urfave/cli/v2 v2.2.0 h1:JTTnM6wKzdA0Jqodd966MVj4vWbbquZykeX1sKbe2C4= github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= @@ -1408,7 +1551,6 @@ 
github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8W github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= @@ -1431,8 +1573,10 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:f github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= -github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163 h1:TtcUeY2XZSriVWR1pXyfCBWIf/NGC2iUdNw1lofUjUU= github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2 h1:bsUlNhdmbtlfdLVXAVfuvKQ01RnWAM09TVrJkI7NZs4= +github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f 
h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-ctrlnet v0.0.0-20180313164037-f564fbbdaa95/go.mod h1:SJqKCCPXRfBFCwXjfNT/skfsceF7+MBFLI2OrvuRA7g= @@ -1452,22 +1596,24 @@ github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84 github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= -github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d h1:wnjWu1N8UTNf2zzF5FWlEyNNbNw5GMVHaHaaLdvdTdA= -github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d/go.mod h1:g7ckxrjiFh8mi1AY7ox23PZD0g6QU/TxW3U3unX7I3A= +github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325 h1:++Zf4xQ7YrkE81gNHIjVqx5JZsn0nbMeHOkY1ILAIME= +github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325/go.mod h1:g7ckxrjiFh8mi1AY7ox23PZD0g6QU/TxW3U3unX7I3A= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/c-for-go v0.0.0-20201002084316-c134bfab968f h1:nMhj+x/m7ZQsHBz0L3gpytp0v6ogokdbrQDnhB8Kh7s= -github.com/xlab/c-for-go v0.0.0-20201002084316-c134bfab968f/go.mod 
h1:h/1PEBwj7Ym/8kOuMWvO2ujZ6Lt+TMbySEXNhjjR87I= +github.com/xlab/c-for-go v0.0.0-20201112171043-ea6dce5809cb h1:/7/dQyiKnxAOj9L69FhST7uMe17U015XPzX7cy+5ykM= +github.com/xlab/c-for-go v0.0.0-20201112171043-ea6dce5809cb/go.mod h1:pbNsDSxn1ICiNn9Ct4ZGNrwzfkkwYbx/lw8VuyutFIg= github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245 h1:Sw125DKxZhPUI4JLlWugkzsrlB50jR9v2khiD9FxuSo= github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245/go.mod h1:C+diUUz7pxhNY6KAoLgrTYARGWnt82zWTylZlxT92vk= github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZDpfMfWg7xk29yEOZiXmo/wZl+utTI8= github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8= github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU= @@ -1475,7 +1621,6 @@ github.com/zondax/ledger-go v0.12.1/go.mod h1:KatxXrVDzgWwbssUWsF5+cOJHXPvzQ09YS go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= go.dedis.ch/kyber/v3 v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= -go.dedis.ch/kyber/v3 v3.0.9 h1:i0ZbOQocHUjfFasBiUql5zVeC7u/vahFd96DFA8UOWk= go.dedis.ch/kyber/v3 v3.0.9/go.mod h1:rhNjUUg6ahf8HEg5HUvVBYoWY4boAafX8tYxX+PS+qg= go.dedis.ch/protobuf v1.0.5/go.mod 
h1:eIV4wicvi6JK0q/QnfIEGeSFNG0ZeB24kzut5+HaRLo= go.dedis.ch/protobuf v1.0.7/go.mod h1:pv5ysfkDX/EawiPqcW3ikOxsL5t+BqnV6xHSmE79KI4= @@ -1493,14 +1638,16 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/dig v1.10.0 h1:yLmDDj9/zuDjv3gz8GQGviXMs9TfysIUMUilCpgzUJY= go.uber.org/dig v1.10.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= go.uber.org/fx v1.9.0 h1:7OAz8ucp35AU8eydejpYG7QrbE8rLKzGhHbZlJi5LYY= @@ -1510,15 +1657,16 @@ go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= 
-go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= @@ -1545,17 +1693,20 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1577,8 +1728,9 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl 
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1589,6 +1741,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1624,12 +1777,19 @@ golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net 
v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201022231255-08b38378de70/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 h1:0PC75Fz/kyMGhL0e1QnypqK2kQMqKt9csD1GnMJR+Zk= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1645,9 +1805,12 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1708,16 +1871,25 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c h1:38q6VNPWR010vN82/SB121GujZNIfAUb4YttE2rhGuc= golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83 h1:kHSDPqCtsHZOg0nVylfTo20DDhE9gG4Y0jn7hKQ0QAM= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1752,6 +1924,7 @@ golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1760,9 +1933,12 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 h1:r3P/5xOq/dK1991B65Oy6E1fRF/2d/fSYZJ/fXGVfJc= golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1828,8 +2004,11 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1838,16 +2017,15 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.28 
h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -1880,16 +2058,19 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54= -launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0 h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE= +modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/golex v1.0.1 h1:EYKY1a3wStt0RzHaH8mdSRNg78Ub0OHxYfCRWw35YtM= +modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254= +modernc.org/lex v1.0.0/go.mod h1:G6rxMTy3cH2iA0iXL/HRRv4Znu8MK4higxph/lE7ypk= +modernc.org/lexer v1.0.0/go.mod 
h1:F/Dld0YKYdZCLQ7bD0USbWL4YKCyTDRDHiDTOs0q0vk= modernc.org/mathutil v1.1.1 h1:FeylZSVX8S+58VsyJlkEj2bcpdytmp9MmDKZkKx8OIE= modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc= diff --git a/itests/api_test.go b/itests/api_test.go new file mode 100644 index 00000000000..ba77701a245 --- /dev/null +++ b/itests/api_test.go @@ -0,0 +1,200 @@ +package itests + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +func TestAPI(t *testing.T) { + t.Run("direct", func(t *testing.T) { + runAPITest(t) + }) + t.Run("rpc", func(t *testing.T) { + runAPITest(t, kit.ThroughRPC()) + }) +} + +type apiSuite struct { + opts []interface{} +} + +// runAPITest is the entry point to API test suite +func runAPITest(t *testing.T, opts ...interface{}) { + ts := apiSuite{opts: opts} + + t.Run("version", ts.testVersion) + t.Run("id", ts.testID) + t.Run("testConnectTwo", ts.testConnectTwo) + t.Run("testMining", ts.testMining) + t.Run("testMiningReal", ts.testMiningReal) + t.Run("testSearchMsg", ts.testSearchMsg) + t.Run("testNonGenesisMiner", ts.testNonGenesisMiner) +} + +func (ts *apiSuite) testVersion(t *testing.T) { + lapi.RunningNodeType = lapi.NodeFull + t.Cleanup(func() { + lapi.RunningNodeType = lapi.NodeUnknown + }) + + full, _, _ := kit.EnsembleMinimal(t, ts.opts...) 
+ + v, err := full.Version(context.Background()) + require.NoError(t, err) + + versions := strings.Split(v.Version, "+") + require.NotZero(t, len(versions), "empty version") + require.Equal(t, versions[0], build.BuildVersion) +} + +func (ts *apiSuite) testID(t *testing.T) { + ctx := context.Background() + + full, _, _ := kit.EnsembleMinimal(t, ts.opts...) + + id, err := full.ID(ctx) + if err != nil { + t.Fatal(err) + } + require.Regexp(t, "^12", id.Pretty()) +} + +func (ts *apiSuite) testConnectTwo(t *testing.T) { + ctx := context.Background() + + one, two, _, ens := kit.EnsembleTwoOne(t, ts.opts...) + + p, err := one.NetPeers(ctx) + require.NoError(t, err) + require.Empty(t, p, "node one has peers") + + p, err = two.NetPeers(ctx) + require.NoError(t, err) + require.Empty(t, p, "node two has peers") + + ens.InterconnectAll() + + peers, err := one.NetPeers(ctx) + require.NoError(t, err) + require.Lenf(t, peers, 2, "node one doesn't have 2 peers") + + peers, err = two.NetPeers(ctx) + require.NoError(t, err) + require.Lenf(t, peers, 2, "node two doesn't have 2 peers") +} + +func (ts *apiSuite) testSearchMsg(t *testing.T) { + ctx := context.Background() + + full, _, ens := kit.EnsembleMinimal(t, ts.opts...) 
+ + senderAddr, err := full.WalletDefaultAddress(ctx) + require.NoError(t, err) + + msg := &types.Message{ + From: senderAddr, + To: senderAddr, + Value: big.Zero(), + } + + ens.BeginMining(100 * time.Millisecond) + + sm, err := full.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + res, err := full.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + + require.Equal(t, exitcode.Ok, res.Receipt.ExitCode, "message not successful") + + searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.NotNil(t, searchRes) + + require.Equalf(t, res.TipSet, searchRes.TipSet, "search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet) +} + +func (ts *apiSuite) testMining(t *testing.T) { + ctx := context.Background() + + full, miner, _ := kit.EnsembleMinimal(t, ts.opts...) + + newHeads, err := full.ChainNotify(ctx) + require.NoError(t, err) + initHead := (<-newHeads)[0] + baseHeight := initHead.Val.Height() + + h1, err := full.ChainHead(ctx) + require.NoError(t, err) + require.Equal(t, int64(h1.Height()), int64(baseHeight)) + + bm := kit.NewBlockMiner(t, miner) + bm.MineUntilBlock(ctx, full, nil) + require.NoError(t, err) + + <-newHeads + + h2, err := full.ChainHead(ctx) + require.NoError(t, err) + require.Greater(t, int64(h2.Height()), int64(h1.Height())) + + bm.MineUntilBlock(ctx, full, nil) + require.NoError(t, err) + + <-newHeads + + h3, err := full.ChainHead(ctx) + require.NoError(t, err) + require.Greater(t, int64(h3.Height()), int64(h2.Height())) +} + +func (ts *apiSuite) testMiningReal(t *testing.T) { + build.InsecurePoStValidation = false + defer func() { + build.InsecurePoStValidation = true + }() + + ts.testMining(t) +} + +func (ts *apiSuite) testNonGenesisMiner(t *testing.T) { + ctx := context.Background() + + full, genesisMiner, ens := kit.EnsembleMinimal(t, append(ts.opts, kit.MockProofs())...) 
+ ens.InterconnectAll().BeginMining(4 * time.Millisecond) + + time.Sleep(1 * time.Second) + + gaa, err := genesisMiner.ActorAddress(ctx) + require.NoError(t, err) + + _, err = full.StateMinerInfo(ctx, gaa, types.EmptyTSK) + require.NoError(t, err) + + var newMiner kit.TestMiner + ens.Miner(&newMiner, full, + kit.OwnerAddr(full.DefaultKey), + kit.ProofType(abi.RegisteredSealProof_StackedDrg2KiBV1), // we're using v0 actors with old proofs. + kit.WithAllSubsystems(), + ).Start().InterconnectAll() + + ta, err := newMiner.ActorAddress(ctx) + require.NoError(t, err) + + tid, err := address.IDFromAddress(ta) + require.NoError(t, err) + + require.Equal(t, uint64(1001), tid) +} diff --git a/itests/batch_deal_test.go b/itests/batch_deal_test.go new file mode 100644 index 00000000000..3881c917aa0 --- /dev/null +++ b/itests/batch_deal_test.go @@ -0,0 +1,129 @@ +package itests + +import ( + "context" + "fmt" + "sort" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/markets/storageadapter" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/stretchr/testify/require" +) + +func TestBatchDealInput(t *testing.T) { + kit.QuietMiningLogs() + + var ( + blockTime = 10 * time.Millisecond + + // For these tests where the block time is artificially short, just use + // a deal start epoch that is guaranteed to be far enough in the future + // so that the deal starts sealing in time + dealStartEpoch = abi.ChainEpoch(2 << 12) + ) + + run := func(piece, deals, expectSectors int) func(t *testing.T) { + return func(t *testing.T) { + ctx := context.Background() + + publishPeriod := 10 * time.Second + maxDealsPerMsg := uint64(deals) + + // Set max deals per publish deals message to maxDealsPerMsg + opts 
:= kit.ConstructorOpts(node.Options( + node.Override( + new(*storageadapter.DealPublisher), + storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ + Period: publishPeriod, + MaxDealsPerMsg: maxDealsPerMsg, + })), + node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) { + return func() (sealiface.Config, error) { + return sealiface.Config{ + MaxWaitDealsSectors: 2, + MaxSealingSectors: 1, + MaxSealingSectorsForDeals: 3, + AlwaysKeepUnsealedCopy: true, + WaitDealsDelay: time.Hour, + }, nil + }, nil + }), + )) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blockTime) + dh := kit.NewDealHarness(t, client, miner, miner) + + err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30) + require.NoError(t, err) + + checkNoPadding := func() { + sl, err := miner.SectorsList(ctx) + require.NoError(t, err) + + sort.Slice(sl, func(i, j int) bool { + return sl[i] < sl[j] + }) + + for _, snum := range sl { + si, err := miner.SectorsStatus(ctx, snum, false) + require.NoError(t, err) + + // fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State) + + for _, deal := range si.Deals { + if deal == 0 { + fmt.Printf("sector %d had a padding piece!\n", snum) + } + } + } + } + + // Starts a deal and waits until it's published + runDealTillSeal := func(rseed int) { + res, _, _, err := kit.CreateImportFile(ctx, client, rseed, piece) + require.NoError(t, err) + + deal := dh.StartDeal(ctx, res.Root, false, dealStartEpoch) + dh.WaitDealSealed(ctx, deal, false, true, checkNoPadding) + } + + // Run maxDealsPerMsg deals in parallel + done := make(chan struct{}, maxDealsPerMsg) + for rseed := 0; rseed < int(maxDealsPerMsg); rseed++ { + rseed := rseed + go func() { + runDealTillSeal(rseed) + done <- struct{}{} + }() + } + + // Wait for maxDealsPerMsg of the deals to be published + for i := 0; i < int(maxDealsPerMsg); i++ { + <-done + } + + checkNoPadding() + + sl, err := 
miner.SectorsList(ctx) + require.NoError(t, err) + require.Equal(t, len(sl), expectSectors) + } + } + + t.Run("4-p1600B", run(1600, 4, 4)) + t.Run("4-p513B", run(513, 4, 2)) + if !testing.Short() { + t.Run("32-p257B", run(257, 32, 8)) + t.Run("32-p10B", run(10, 32, 2)) + + // fixme: this appears to break data-transfer / markets in some really creative ways + // t.Run("128-p10B", run(10, 128, 8)) + } +} diff --git a/itests/ccupgrade_test.go b/itests/ccupgrade_test.go new file mode 100644 index 00000000000..dfd0144f21e --- /dev/null +++ b/itests/ccupgrade_test.go @@ -0,0 +1,105 @@ +package itests + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + + "github.com/stretchr/testify/require" +) + +func TestCCUpgrade(t *testing.T) { + kit.QuietMiningLogs() + + for _, height := range []abi.ChainEpoch{ + -1, // before + 162, // while sealing + 530, // after upgrade deal + 5000, // after + } { + height := height // make linters happy by copying + t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) { + runTestCCUpgrade(t, height) + }) + } +} + +func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) { + ctx := context.Background() + blockTime := 5 * time.Millisecond + + opts := kit.ConstructorOpts(kit.LatestActorsAt(upgradeHeight)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blockTime) + + maddr, err := miner.ActorAddress(ctx) + if err != nil { + t.Fatal(err) + } + + CC := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1) + Upgraded := CC + 1 + + miner.PledgeSectors(ctx, 1, 0, nil) + + sl, err := miner.SectorsList(ctx) + require.NoError(t, err) + require.Len(t, sl, 1, "expected 1 sector") + require.Equal(t, CC, sl[0], "unexpected sector number") + + { + si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK) + 
require.NoError(t, err) + require.Less(t, 50000, int(si.Expiration)) + } + + err = miner.SectorMarkForUpgrade(ctx, sl[0]) + require.NoError(t, err) + + dh := kit.NewDealHarness(t, client, miner, miner) + deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{ + Rseed: 6, + SuspendUntilCryptoeconStable: true, + }) + outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false) + kit.AssertFilesEqual(t, inPath, outPath) + + // Validate upgrade + + { + exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK) + if err != nil { + require.Contains(t, err.Error(), "failed to find sector 3") // already cleaned up + } else { + require.NoError(t, err) + require.NotNil(t, exp) + require.Greater(t, 50000, int(exp.OnTime)) + } + } + { + exp, err := client.StateSectorExpiration(ctx, maddr, Upgraded, types.EmptyTSK) + require.NoError(t, err) + require.Less(t, 50000, int(exp.OnTime)) + } + + dlInfo, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + // Sector should expire. + for { + // Wait for the sector to expire. + status, err := miner.SectorsStatus(ctx, CC, true) + require.NoError(t, err) + if status.OnTime == 0 && status.Early == 0 { + break + } + t.Log("waiting for sector to expire") + // wait one deadline per loop. + time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blockTime) + } +} diff --git a/itests/cli_test.go b/itests/cli_test.go new file mode 100644 index 00000000000..0bd1ec3b421 --- /dev/null +++ b/itests/cli_test.go @@ -0,0 +1,21 @@ +package itests + +import ( + "os" + "testing" + "time" + + "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/itests/kit" +) + +// TestClient does a basic test to exercise the client CLI commands. 
+func TestClient(t *testing.T) { + _ = os.Setenv("BELLMAN_NO_GPU", "1") + kit.QuietMiningLogs() + + blockTime := 5 * time.Millisecond + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC()) + ens.InterconnectAll().BeginMining(blockTime) + kit.RunClientTest(t, cli.Commands, client) +} diff --git a/itests/deadlines_test.go b/itests/deadlines_test.go new file mode 100644 index 00000000000..19b0a10dc3a --- /dev/null +++ b/itests/deadlines_test.go @@ -0,0 +1,357 @@ +package itests + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/extern/sector-storage/mock" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/node/impl" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/stretchr/testify/require" +) + +// TestDeadlineToggling: +// * spins up a v3 network (miner A) +// * creates an inactive miner (miner B) +// * creates another miner, pledges a sector, waits for power (miner C) +// +// * goes through v4 upgrade +// * goes through PP +// * creates minerD, minerE +// * makes sure that miner B/D are inactive, A/C still are +// * pledges sectors on miner B/D +// * precommits a sector on minerE +// * disables post on miner C +// * 
goes through PP 0.5PP +// * asserts that minerE is active +// * goes through rest of PP (1.5) +// * asserts that miner C loses power +// * asserts that miner B/D is active and has power +// * asserts that minerE is inactive +// * disables post on miner B +// * terminates sectors on miner D +// * goes through another PP +// * asserts that miner B loses power +// * asserts that miner D loses power, is inactive +func TestDeadlineToggling(t *testing.T) { + kit.Expensive(t) + + kit.QuietMiningLogs() + + const sectorsC, sectorsD, sectorsB = 10, 9, 8 + + var ( + upgradeH abi.ChainEpoch = 4000 + provingPeriod abi.ChainEpoch = 2880 + blocktime = 2 * time.Millisecond + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var ( + client kit.TestFullNode + minerA kit.TestMiner + minerB kit.TestMiner + minerC kit.TestMiner + minerD kit.TestMiner + minerE kit.TestMiner + ) + opts := []kit.NodeOpt{kit.ConstructorOpts(kit.NetworkUpgradeAt(network.Version12, upgradeH))} + opts = append(opts, kit.WithAllSubsystems()) + ens := kit.NewEnsemble(t, kit.MockProofs()). + FullNode(&client, opts...). + Miner(&minerA, &client, opts...). + Start(). + InterconnectAll() + ens.BeginMining(blocktime) + + opts = append(opts, kit.OwnerAddr(client.DefaultKey)) + ens.Miner(&minerB, &client, opts...). + Miner(&minerC, &client, opts...). 
+ Start() + + defaultFrom, err := client.WalletDefaultAddress(ctx) + require.NoError(t, err) + + maddrA, err := minerA.ActorAddress(ctx) + require.NoError(t, err) + + build.Clock.Sleep(time.Second) + + maddrB, err := minerB.ActorAddress(ctx) + require.NoError(t, err) + maddrC, err := minerC.ActorAddress(ctx) + require.NoError(t, err) + + ssz, err := minerC.ActorSectorSize(ctx, maddrC) + require.NoError(t, err) + + // pledge sectors on C, go through a PP, check for power + { + minerC.PledgeSectors(ctx, sectorsC, 0, nil) + + di, err := client.StateMinerProvingDeadline(ctx, maddrC, types.EmptyTSK) + require.NoError(t, err) + + t.Log("Running one proving period (miner C)") + t.Logf("End for head.Height > %d", di.PeriodStart+di.WPoStProvingPeriod*2) + + for { + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + if head.Height() > di.PeriodStart+provingPeriod*2 { + t.Logf("Now head.Height = %d", head.Height()) + break + } + build.Clock.Sleep(blocktime) + } + + expectedPower := types.NewInt(uint64(ssz) * sectorsC) + + p, err := client.StateMinerPower(ctx, maddrC, types.EmptyTSK) + require.NoError(t, err) + + // make sure it has gained power. + require.Equal(t, p.MinerPower.RawBytePower, expectedPower) + } + + // go through upgrade + PP + for { + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + if head.Height() > upgradeH+provingPeriod { + t.Logf("Now head.Height = %d", head.Height()) + break + } + build.Clock.Sleep(blocktime) + } + + checkMiner := func(ma address.Address, power abi.StoragePower, active, activeIfCron bool, tsk types.TipSetKey) { + p, err := client.StateMinerPower(ctx, ma, tsk) + require.NoError(t, err) + + // make sure it has the expected power. 
+ require.Equal(t, p.MinerPower.RawBytePower, power) + + mact, err := client.StateGetActor(ctx, ma, tsk) + require.NoError(t, err) + + mst, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(client))), mact) + require.NoError(t, err) + + act, err := mst.DeadlineCronActive() + require.NoError(t, err) + + if tsk != types.EmptyTSK { + ts, err := client.ChainGetTipSet(ctx, tsk) + require.NoError(t, err) + di, err := mst.DeadlineInfo(ts.Height()) + require.NoError(t, err) + + // cron happened on the same epoch some other condition would have happened + if di.Open == ts.Height() { + act, err := mst.DeadlineCronActive() + require.NoError(t, err) + require.Equal(t, activeIfCron, act) + return + } + } + + require.Equal(t, active, act) + } + + // check that just after the upgrade minerB was still active + { + uts, err := client.ChainGetTipSetByHeight(ctx, upgradeH+2, types.EmptyTSK) + require.NoError(t, err) + checkMiner(maddrB, types.NewInt(0), true, true, uts.Key()) + } + + nv, err := client.StateNetworkVersion(ctx, types.EmptyTSK) + require.NoError(t, err) + require.GreaterOrEqual(t, nv, network.Version12) + + ens.Miner(&minerD, &client, opts...). + Miner(&minerE, &client, opts...). 
+ Start() + + maddrD, err := minerD.ActorAddress(ctx) + require.NoError(t, err) + maddrE, err := minerE.ActorAddress(ctx) + require.NoError(t, err) + + // first round of miner checks + checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK) + checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, true, types.EmptyTSK) + + checkMiner(maddrB, types.NewInt(0), false, false, types.EmptyTSK) + checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK) + checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK) + + // pledge sectors on minerB/minerD, stop post on minerC + minerB.PledgeSectors(ctx, sectorsB, 0, nil) + checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK) + + minerD.PledgeSectors(ctx, sectorsD, 0, nil) + checkMiner(maddrD, types.NewInt(0), true, true, types.EmptyTSK) + + minerC.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail() + + // precommit a sector on minerE + { + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + cr, err := cid.Parse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz") + require.NoError(t, err) + + params := &miner.SectorPreCommitInfo{ + Expiration: 2880 * 300, + SectorNumber: 22, + SealProof: kit.TestSpt, + + SealedCID: cr, + SealRandEpoch: head.Height() - 200, + } + + enc := new(bytes.Buffer) + require.NoError(t, params.MarshalCBOR(enc)) + + m, err := client.MpoolPushMessage(ctx, &types.Message{ + To: maddrE, + From: defaultFrom, + Value: types.FromFil(1), + Method: miner.Methods.PreCommitSector, + Params: enc.Bytes(), + }, nil) + require.NoError(t, err) + + r, err := client.StateWaitMsg(ctx, m.Cid(), 2, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, r.Receipt.ExitCode) + } + + // go through 0.5 PP + for { + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + if head.Height() > upgradeH+provingPeriod+(provingPeriod/2) { + t.Logf("Now head.Height 
= %d", head.Height()) + break + } + build.Clock.Sleep(blocktime) + } + + checkMiner(maddrE, types.NewInt(0), true, true, types.EmptyTSK) + + // go through rest of the PP + for { + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + if head.Height() > upgradeH+(provingPeriod*3) { + t.Logf("Now head.Height = %d", head.Height()) + break + } + build.Clock.Sleep(blocktime) + } + + // second round of miner checks + checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK) + checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK) + checkMiner(maddrB, types.NewInt(uint64(ssz)*sectorsB), true, true, types.EmptyTSK) + checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, true, types.EmptyTSK) + checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK) + + // disable post on minerB + minerB.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail() + + // terminate sectors on minerD + { + var terminationDeclarationParams []miner2.TerminationDeclaration + secs, err := minerD.SectorsList(ctx) + require.NoError(t, err) + require.Len(t, secs, sectorsD) + + for _, sectorNum := range secs { + sectorbit := bitfield.New() + sectorbit.Set(uint64(sectorNum)) + + loca, err := client.StateSectorPartition(ctx, maddrD, sectorNum, types.EmptyTSK) + require.NoError(t, err) + + para := miner2.TerminationDeclaration{ + Deadline: loca.Deadline, + Partition: loca.Partition, + Sectors: sectorbit, + } + + terminationDeclarationParams = append(terminationDeclarationParams, para) + } + + terminateSectorParams := &miner2.TerminateSectorsParams{ + Terminations: terminationDeclarationParams, + } + + sp, aerr := actors.SerializeParams(terminateSectorParams) + require.NoError(t, aerr) + + smsg, err := client.MpoolPushMessage(ctx, &types.Message{ + From: defaultFrom, + To: maddrD, + Method: miner.Methods.TerminateSectors, + + Value: big.Zero(), + Params: sp, + }, nil) + require.NoError(t, err) + + 
t.Log("sent termination message:", smsg.Cid()) + + r, err := client.StateWaitMsg(ctx, smsg.Cid(), 2, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, r.Receipt.ExitCode) + + // assert inactive if the message landed in the tipset we run cron in + checkMiner(maddrD, types.NewInt(0), true, false, r.TipSet) + } + + // go through another PP + for { + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + if head.Height() > upgradeH+(provingPeriod*5) { + t.Logf("Now head.Height = %d", head.Height()) + break + } + build.Clock.Sleep(blocktime) + } + + checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK) + checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK) + checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK) + checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK) +} diff --git a/itests/deals_concurrent_test.go b/itests/deals_concurrent_test.go new file mode 100644 index 00000000000..d7932b896a5 --- /dev/null +++ b/itests/deals_concurrent_test.go @@ -0,0 +1,176 @@ +package itests + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" +) + +// TestDealWithMarketAndMinerNode is running concurrently a number of storage and retrieval deals towards a miner +// architecture where the `mining/sealing/proving` node is a separate process from the `markets` node +func TestDealWithMarketAndMinerNode(t *testing.T) { + if testing.Short() { + 
t.Skip("skipping test in short mode") + } + + kit.QuietMiningLogs() + + oldDelay := policy.GetPreCommitChallengeDelay() + policy.SetPreCommitChallengeDelay(5) + t.Cleanup(func() { + policy.SetPreCommitChallengeDelay(oldDelay) + }) + + // For these tests where the block time is artificially short, just use + // a deal start epoch that is guaranteed to be far enough in the future + // so that the deal starts sealing in time + startEpoch := abi.ChainEpoch(2 << 12) + + runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) { + api.RunningNodeType = api.NodeMiner // TODO(anteva): fix me + + client, main, market, _ := kit.EnsembleWithMinerAndMarketNodes(t, kit.ThroughRPC()) + + dh := kit.NewDealHarness(t, client, main, market) + + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ + N: n, + FastRetrieval: fastRetrieval, + CarExport: carExport, + StartEpoch: startEpoch, + }) + } + + // TODO: add 2, 4, 8, more when this graphsync issue is fixed: https://github.com/ipfs/go-graphsync/issues/175# + cycles := []int{1} + for _, n := range cycles { + n := n + ns := fmt.Sprintf("%d", n) + t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) }) + t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) }) + t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, false, true) }) + t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) }) + } +} + +func TestDealCyclesConcurrent(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + kit.QuietMiningLogs() + + blockTime := 10 * time.Millisecond + + // For these tests where the block time is artificially short, just use + // a deal start epoch that is guaranteed to be far enough in the future + // so that the deal starts sealing in time + startEpoch := abi.ChainEpoch(2 << 12) + + runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) { + client, miner, ens := kit.EnsembleMinimal(t, 
kit.MockProofs()) + ens.InterconnectAll().BeginMining(blockTime) + dh := kit.NewDealHarness(t, client, miner, miner) + + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ + N: n, + FastRetrieval: fastRetrieval, + CarExport: carExport, + StartEpoch: startEpoch, + }) + } + + // TODO: add 2, 4, 8, more when this graphsync issue is fixed: https://github.com/ipfs/go-graphsync/issues/175# + cycles := []int{1} + for _, n := range cycles { + n := n + ns := fmt.Sprintf("%d", n) + t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) }) + t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) }) + t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, false, true) }) + t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) }) + } +} + +func TestSimultenousTransferLimit(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + kit.QuietMiningLogs() + + blockTime := 10 * time.Millisecond + + // For these tests where the block time is artificially short, just use + // a deal start epoch that is guaranteed to be far enough in the future + // so that the deal starts sealing in time + startEpoch := abi.ChainEpoch(2 << 12) + + runTest := func(t *testing.T) { + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts( + node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(2))), + )) + ens.InterconnectAll().BeginMining(blockTime) + dh := kit.NewDealHarness(t, client, miner, miner) + + ctx, cancel := context.WithCancel(context.Background()) + + du, err := miner.MarketDataTransferUpdates(ctx) + require.NoError(t, err) + + var maxOngoing int + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + + ongoing := map[datatransfer.TransferID]struct{}{} + + for { + select { + case u := <-du: + t.Logf("%d - %s", u.TransferID, datatransfer.Statuses[u.Status]) + if u.Status == 
datatransfer.Ongoing { + ongoing[u.TransferID] = struct{}{} + } else { + delete(ongoing, u.TransferID) + } + + if len(ongoing) > maxOngoing { + maxOngoing = len(ongoing) + } + case <-ctx.Done(): + return + } + } + }() + + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ + N: 1, // TODO: set to 20 after https://github.com/ipfs/go-graphsync/issues/175 is fixed + FastRetrieval: true, + StartEpoch: startEpoch, + }) + + cancel() + wg.Wait() + + require.LessOrEqual(t, maxOngoing, 2) + } + + runTest(t) +} diff --git a/itests/deals_offline_test.go b/itests/deals_offline_test.go new file mode 100644 index 00000000000..ceae46fdf4f --- /dev/null +++ b/itests/deals_offline_test.go @@ -0,0 +1,101 @@ +package itests + +import ( + "context" + "path/filepath" + "testing" + "time" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +func TestOfflineDealFlow(t *testing.T) { + blocktime := 10 * time.Millisecond + + // For these tests where the block time is artificially short, just use + // a deal start epoch that is guaranteed to be far enough in the future + // so that the deal starts sealing in time + startEpoch := abi.ChainEpoch(2 << 12) + + runTest := func(t *testing.T, fastRet bool) { + ctx := context.Background() + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(blocktime) + + dh := kit.NewDealHarness(t, client, miner, miner) + + // Create a random file and import on the client. 
+ res, inFile := client.CreateImportFile(ctx, 1, 0) + + // Get the piece size and commP + rootCid := res.Root + pieceInfo, err := client.ClientDealPieceCID(ctx, rootCid) + require.NoError(t, err) + t.Log("FILE CID:", rootCid) + + // Create a storage deal with the miner + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + addr, err := client.WalletDefaultAddress(ctx) + require.NoError(t, err) + + // Manual storage deal (offline deal) + dataRef := &storagemarket.DataRef{ + TransferType: storagemarket.TTManual, + Root: rootCid, + PieceCid: &pieceInfo.PieceCID, + PieceSize: pieceInfo.PieceSize.Unpadded(), + } + + proposalCid, err := client.ClientStartDeal(ctx, &api.StartDealParams{ + Data: dataRef, + Wallet: addr, + Miner: maddr, + EpochPrice: types.NewInt(1000000), + DealStartEpoch: startEpoch, + MinBlocksDuration: uint64(build.MinDealDuration), + FastRetrieval: fastRet, + }) + require.NoError(t, err) + + // Wait for the deal to reach StorageDealCheckForAcceptance on the client + cd, err := client.ClientGetDealInfo(ctx, *proposalCid) + require.NoError(t, err) + require.Eventually(t, func() bool { + cd, _ := client.ClientGetDealInfo(ctx, *proposalCid) + return cd.State == storagemarket.StorageDealCheckForAcceptance + }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State]) + + // Create a CAR file from the raw file + carFileDir := t.TempDir() + carFilePath := filepath.Join(carFileDir, "out.car") + err = client.ClientGenCar(ctx, api.FileRef{Path: inFile}, carFilePath) + require.NoError(t, err) + + // Import the CAR file on the miner - this is the equivalent to + // transferring the file across the wire in a normal (non-offline) deal + err = miner.DealsImportData(ctx, *proposalCid, carFilePath) + require.NoError(t, err) + + // Wait for the deal to be published + dh.WaitDealPublished(ctx, proposalCid) + + t.Logf("deal published, retrieving") + + // Retrieve the deal + outFile := dh.PerformRetrieval(ctx, 
proposalCid, rootCid, false) + + kit.AssertFilesEqual(t, inFile, outFile) + + } + + t.Run("stdretrieval", func(t *testing.T) { runTest(t, false) }) + t.Run("fastretrieval", func(t *testing.T) { runTest(t, true) }) +} diff --git a/itests/deals_power_test.go b/itests/deals_power_test.go new file mode 100644 index 00000000000..16ad8ae6a87 --- /dev/null +++ b/itests/deals_power_test.go @@ -0,0 +1,61 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/lotus/itests/kit" +) + +func TestFirstDealEnablesMining(t *testing.T) { + // test making a deal with a fresh miner, and see if it starts to mine. + if testing.Short() { + t.Skip("skipping test in short mode") + } + + kit.QuietMiningLogs() + + var ( + client kit.TestFullNode + genMiner kit.TestMiner // bootstrap + provider kit.TestMiner // no sectors, will need to create one + ) + + ens := kit.NewEnsemble(t, kit.MockProofs()) + ens.FullNode(&client) + ens.Miner(&genMiner, &client, kit.WithAllSubsystems()) + ens.Miner(&provider, &client, kit.WithAllSubsystems(), kit.PresealSectors(0)) + ens.Start().InterconnectAll().BeginMining(50 * time.Millisecond) + + ctx := context.Background() + + dh := kit.NewDealHarness(t, &client, &provider, &provider) + + ref, _ := client.CreateImportFile(ctx, 5, 0) + + t.Log("FILE CID:", ref.Root) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // start a goroutine to monitor head changes from the client + // once the provider has mined a block, thanks to the power acquired from the deal, + // we pass the test. + providerMined := make(chan struct{}) + + go func() { + _ = client.WaitTillChain(ctx, kit.BlockMinedBy(provider.ActorAddr)) + close(providerMined) + }() + + // now perform the deal. 
+ deal := dh.StartDeal(ctx, ref.Root, false, 0) + + // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this + time.Sleep(time.Second) + + dh.WaitDealSealed(ctx, deal, false, false, nil) + + <-providerMined +} diff --git a/itests/deals_pricing_test.go b/itests/deals_pricing_test.go new file mode 100644 index 00000000000..eb28af0bd1e --- /dev/null +++ b/itests/deals_pricing_test.go @@ -0,0 +1,131 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +func TestQuotePriceForUnsealedRetrieval(t *testing.T) { + var ( + ctx = context.Background() + blocktime = 50 * time.Millisecond + ) + + kit.QuietMiningLogs() + + client, miner, ens := kit.EnsembleMinimal(t) + ens.InterconnectAll().BeginMining(blocktime) + + var ( + ppb = int64(1) + unsealPrice = int64(77) + ) + + // Set unsealed price to non-zero + ask, err := miner.MarketGetRetrievalAsk(ctx) + require.NoError(t, err) + ask.PricePerByte = abi.NewTokenAmount(ppb) + ask.UnsealPrice = abi.NewTokenAmount(unsealPrice) + err = miner.MarketSetRetrievalAsk(ctx, ask) + require.NoError(t, err) + + dh := kit.NewDealHarness(t, client, miner, miner) + + deal1, res1, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6}) + + // one more storage deal for the same data + _, res2, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6}) + require.Equal(t, res1.Root, res2.Root) + + // Retrieval + dealInfo, err := client.ClientGetDealInfo(ctx, *deal1) + require.NoError(t, err) + + // fetch quote -> zero for unsealed price since unsealed file already exists. 
+ offers, err := client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) + require.NoError(t, err) + require.Len(t, offers, 2) + require.Equal(t, offers[0], offers[1]) + require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64()) + require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64()) + + // remove ONLY one unsealed file + ss, err := miner.StorageList(context.Background()) + require.NoError(t, err) + _, err = miner.SectorsList(ctx) + require.NoError(t, err) + +iLoop: + for storeID, sd := range ss { + for _, sector := range sd { + err := miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed) + require.NoError(t, err) + break iLoop // remove ONLY one + } + } + + // get retrieval quote -> zero for unsealed price as unsealed file exists. + offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) + require.NoError(t, err) + require.Len(t, offers, 2) + require.Equal(t, offers[0], offers[1]) + require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64()) + require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64()) + + // remove the other unsealed file as well + ss, err = miner.StorageList(context.Background()) + require.NoError(t, err) + _, err = miner.SectorsList(ctx) + require.NoError(t, err) + for storeID, sd := range ss { + for _, sector := range sd { + require.NoError(t, miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed)) + } + } + + // fetch quote -> non-zero for unseal price as we no more unsealed files. 
+ offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) + require.NoError(t, err) + require.Len(t, offers, 2) + require.Equal(t, offers[0], offers[1]) + require.Equal(t, uint64(unsealPrice), offers[0].UnsealPrice.Uint64()) + total := (dealInfo.Size * uint64(ppb)) + uint64(unsealPrice) + require.Equal(t, total, offers[0].MinPrice.Uint64()) +} + +func TestZeroPricePerByteRetrieval(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + kit.QuietMiningLogs() + + var ( + blockTime = 10 * time.Millisecond + startEpoch = abi.ChainEpoch(2 << 12) + ) + + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(blockTime) + + ctx := context.Background() + + ask, err := miner.MarketGetRetrievalAsk(ctx) + require.NoError(t, err) + + ask.PricePerByte = abi.NewTokenAmount(0) + err = miner.MarketSetRetrievalAsk(ctx, ask) + require.NoError(t, err) + + dh := kit.NewDealHarness(t, client, miner, miner) + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ + N: 1, + StartEpoch: startEpoch, + }) +} diff --git a/itests/deals_publish_test.go b/itests/deals_publish_test.go new file mode 100644 index 00000000000..10592d8b426 --- /dev/null +++ b/itests/deals_publish_test.go @@ -0,0 +1,131 @@ +package itests + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/markets/storageadapter" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/storage" + market2 
"github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + "github.com/stretchr/testify/require" +) + +func TestPublishDealsBatching(t *testing.T) { + var ( + ctx = context.Background() + publishPeriod = 10 * time.Second + maxDealsPerMsg = uint64(2) // Set max deals per publish deals message to 2 + startEpoch = abi.ChainEpoch(2 << 12) + ) + + kit.QuietMiningLogs() + + publisherKey, err := wallet.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + opts := node.Options( + node.Override(new(*storageadapter.DealPublisher), + storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ + Period: publishPeriod, + MaxDealsPerMsg: maxDealsPerMsg, + }), + ), + node.Override(new(*storage.AddressSelector), modules.AddressSelector(&config.MinerAddressConfig{ + DealPublishControl: []string{ + publisherKey.Address.String(), + }, + DisableOwnerFallback: true, + DisableWorkerFallback: true, + })), + kit.LatestActorsAt(-1), + ) + + client, miner, ens := kit.EnsembleMinimal(t, kit.Account(publisherKey, types.FromFil(10)), kit.MockProofs(), kit.ConstructorOpts(opts)) + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + _, err = client.WalletImport(ctx, &publisherKey.KeyInfo) + require.NoError(t, err) + + miner.SetControlAddresses(publisherKey.Address) + + dh := kit.NewDealHarness(t, client, miner, miner) + + // Starts a deal and waits until it's published + runDealTillPublish := func(rseed int) { + res, _ := client.CreateImportFile(ctx, rseed, 0) + + upds, err := client.ClientGetDealUpdates(ctx) + require.NoError(t, err) + + dh.StartDeal(ctx, res.Root, false, startEpoch) + + // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this + time.Sleep(time.Second) + + done := make(chan struct{}) + go func() { + for upd := range upds { + if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit { + done <- struct{}{} + } + } + }() + <-done + } + + // Run three 
deals in parallel + done := make(chan struct{}, maxDealsPerMsg+1) + for rseed := 1; rseed <= 3; rseed++ { + rseed := rseed + go func() { + runDealTillPublish(rseed) + done <- struct{}{} + }() + } + + // Wait for two of the deals to be published + for i := 0; i < int(maxDealsPerMsg); i++ { + <-done + } + + // Expect a single PublishStorageDeals message that includes the first two deals + msgCids, err := client.StateListMessages(ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1) + require.NoError(t, err) + count := 0 + for _, msgCid := range msgCids { + msg, err := client.ChainGetMessage(ctx, msgCid) + require.NoError(t, err) + + if msg.Method == market.Methods.PublishStorageDeals { + count++ + var pubDealsParams market2.PublishStorageDealsParams + err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params)) + require.NoError(t, err) + require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg)) + require.Equal(t, publisherKey.Address.String(), msg.From.String()) + } + } + require.Equal(t, 1, count) + + // The third deal should be published once the publish period expires. + // Allow a little padding as it takes a moment for the state change to + // be noticed by the client. 
+ padding := 10 * time.Second + select { + case <-time.After(publishPeriod + padding): + require.Fail(t, "Expected 3rd deal to be published once publish period elapsed") + case <-done: // Success + } +} diff --git a/itests/deals_test.go b/itests/deals_test.go new file mode 100644 index 00000000000..f2e106f1f7c --- /dev/null +++ b/itests/deals_test.go @@ -0,0 +1,42 @@ +package itests + +import ( + "testing" + "time" + + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/itests/kit" +) + +func TestDealsWithSealingAndRPC(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + kit.QuietMiningLogs() + + oldDelay := policy.GetPreCommitChallengeDelay() + policy.SetPreCommitChallengeDelay(5) + t.Cleanup(func() { + policy.SetPreCommitChallengeDelay(oldDelay) + }) + + var blockTime = 50 * time.Millisecond + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.WithAllSubsystems()) // no mock proofs. + ens.InterconnectAll().BeginMining(blockTime) + dh := kit.NewDealHarness(t, client, miner, miner) + + t.Run("stdretrieval", func(t *testing.T) { + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1}) + }) + + t.Run("fastretrieval", func(t *testing.T) { + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true}) + }) + + t.Run("fastretrieval-twodeals-sequential", func(t *testing.T) { + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true}) + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true}) + }) +} diff --git a/itests/doc.go b/itests/doc.go new file mode 100644 index 00000000000..474e5727780 --- /dev/null +++ b/itests/doc.go @@ -0,0 +1,2 @@ +// Package itests contains integration tests for Lotus. 
+package itests diff --git a/cmd/lotus-gateway/endtoend_test.go b/itests/gateway_test.go similarity index 60% rename from cmd/lotus-gateway/endtoend_test.go rename to itests/gateway_test.go index 1e1e5e22931..f9e4a0fb6fd 100644 --- a/cmd/lotus-gateway/endtoend_test.go +++ b/itests/gateway_test.go @@ -1,55 +1,48 @@ -package main +package itests import ( "bytes" "context" "fmt" "math" - "os" + "net" "testing" "time" - "github.com/filecoin-project/lotus/cli" - clitest "github.com/filecoin-project/lotus/cli/test" - - init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" - multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" - - "github.com/stretchr/testify/require" - "golang.org/x/xerrors" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/api/test" - "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/gateway" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/itests/multisig" "github.com/filecoin-project/lotus/node" - builder "github.com/filecoin-project/lotus/node/test" -) -const maxLookbackCap = time.Duration(math.MaxInt64) -const maxStateWaitLookbackLimit = stmgr.LookbackNoLimit + init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" + multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" -func init() { - policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) - policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) - policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) -} + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + 
"golang.org/x/xerrors" +) + +const ( + maxLookbackCap = time.Duration(math.MaxInt64) + maxStateWaitLookbackLimit = stmgr.LookbackNoLimit +) -// TestWalletMsig tests that API calls to wallet and msig can be made on a lite +// TestGatewayWalletMsig tests that API calls to wallet and msig can be made on a lite // node that is connected through a gateway to a full API node -func TestWalletMsig(t *testing.T) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() +func TestGatewayWalletMsig(t *testing.T) { + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() nodes := startNodes(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) - defer nodes.closer() lite := nodes.lite full := nodes.full @@ -100,10 +93,30 @@ func TestWalletMsig(t *testing.T) { // Create an msig with three of the addresses and threshold of two sigs msigAddrs := walletAddrs[:3] amt := types.NewInt(1000) - addProposal, err := lite.MsigCreate(ctx, 2, msigAddrs, abi.ChainEpoch(50), amt, liteWalletAddr, types.NewInt(0)) + proto, err := lite.MsigCreate(ctx, 2, msigAddrs, abi.ChainEpoch(50), amt, liteWalletAddr, types.NewInt(0)) + require.NoError(t, err) + + doSend := func(proto *api.MessagePrototype) (cid.Cid, error) { + if proto.ValidNonce { + sm, err := lite.WalletSignMessage(ctx, proto.Message.From, &proto.Message) + if err != nil { + return cid.Undef, err + } + return lite.MpoolPush(ctx, sm) + } + + sm, err := lite.MpoolPushMessage(ctx, &proto.Message, nil) + if err != nil { + return cid.Undef, err + } + + return sm.Cid(), nil + } + + addProposal, err := doSend(proto) require.NoError(t, err) - res, err := lite.StateWaitMsg(ctx, addProposal, 1) + res, err := lite.StateWaitMsg(ctx, addProposal, 1, api.LookbackNoLimit, true) require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) @@ -120,10 +133,13 @@ func TestWalletMsig(t *testing.T) { require.Less(t, msigBalance.Int64(), amt.Int64()) // Propose to add a new address to the msig - 
addProposal, err = lite.MsigAddPropose(ctx, msig, walletAddrs[0], walletAddrs[3], false) + proto, err = lite.MsigAddPropose(ctx, msig, walletAddrs[0], walletAddrs[3], false) require.NoError(t, err) - res, err = lite.StateWaitMsg(ctx, addProposal, 1) + addProposal, err = doSend(proto) + require.NoError(t, err) + + res, err = lite.StateWaitMsg(ctx, addProposal, 1, api.LookbackNoLimit, true) require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) @@ -134,10 +150,13 @@ func TestWalletMsig(t *testing.T) { // Approve proposal (proposer is first (implicit) signer, approver is // second signer txnID := uint64(proposeReturn.TxnID) - approval1, err := lite.MsigAddApprove(ctx, msig, walletAddrs[1], txnID, walletAddrs[0], walletAddrs[3], false) + proto, err = lite.MsigAddApprove(ctx, msig, walletAddrs[1], txnID, walletAddrs[0], walletAddrs[3], false) + require.NoError(t, err) + + approval1, err := doSend(proto) require.NoError(t, err) - res, err = lite.StateWaitMsg(ctx, approval1, 1) + res, err = lite.StateWaitMsg(ctx, approval1, 1, api.LookbackNoLimit, true) require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) @@ -147,50 +166,55 @@ func TestWalletMsig(t *testing.T) { require.True(t, approveReturn.Applied) } -// TestMsigCLI tests that msig CLI calls can be made +// TestGatewayMsigCLI tests that msig CLI calls can be made // on a lite node that is connected through a gateway to a full API node -func TestMsigCLI(t *testing.T) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() +func TestGatewayMsigCLI(t *testing.T) { + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) - defer nodes.closer() lite := nodes.lite - clitest.RunMultisigTest(t, cli.Commands, lite) + multisig.RunMultisigTests(t, lite) } -func TestDealFlow(t *testing.T) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() +func 
TestGatewayDealFlow(t *testing.T) { + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) - defer nodes.closer() - test.MakeDeal(t, ctx, 6, nodes.lite, nodes.miner, false, false) + time.Sleep(5 * time.Second) + + // For these tests where the block time is artificially short, just use + // a deal start epoch that is guaranteed to be far enough in the future + // so that the deal starts sealing in time + dealStartEpoch := abi.ChainEpoch(2 << 12) + + dh := kit.NewDealHarness(t, nodes.lite, nodes.miner, nodes.miner) + dealCid, res, _ := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{ + Rseed: 6, + StartEpoch: dealStartEpoch, + }) + dh.PerformRetrieval(ctx, dealCid, res.Root, false) } -func TestCLIDealFlow(t *testing.T) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() +func TestGatewayCLIDealFlow(t *testing.T) { + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) - defer nodes.closer() - clitest.RunClientTest(t, cli.Commands, nodes.lite) + kit.RunClientTest(t, cli.Commands, nodes.lite) } type testNodes struct { - lite test.TestNode - full test.TestNode - miner test.TestStorageNode - closer jsonrpc.ClientCloser + lite *kit.TestFullNode + full *kit.TestFullNode + miner *kit.TestMiner } func startNodesWithFunds( @@ -206,8 +230,8 @@ func startNodesWithFunds( fullWalletAddr, err := nodes.full.WalletDefaultAddress(ctx) require.NoError(t, err) - // Create a wallet on the lite node - liteWalletAddr, err := nodes.lite.WalletNew(ctx, types.KTSecp256k1) + // Get the lite node default wallet address. 
+ liteWalletAddr, err := nodes.lite.WalletDefaultAddress(ctx) require.NoError(t, err) // Send some funds from the full node to the lite node @@ -226,63 +250,50 @@ func startNodes( ) *testNodes { var closer jsonrpc.ClientCloser - // Create one miner and two full nodes. + var ( + full *kit.TestFullNode + miner *kit.TestMiner + lite kit.TestFullNode + ) + + // - Create one full node and one lite node // - Put a gateway server in front of full node 1 // - Start full node 2 in lite mode // - Connect lite node -> gateway server -> full node - opts := append( - // Full node - test.OneFull, - // Lite node - test.FullNodeOpts{ - Lite: true, - Opts: func(nodes []test.TestNode) node.Option { - fullNode := nodes[0] - - // Create a gateway server in front of the full node - gapiImpl := newGatewayAPI(fullNode, lookbackCap, stateWaitLookbackLimit) - _, addr, err := builder.CreateRPCServer(gapiImpl) - require.NoError(t, err) - - // Create a gateway client API that connects to the gateway server - var gapi api.GatewayAPI - gapi, closer, err = client.NewGatewayRPC(ctx, addr, nil) - require.NoError(t, err) - - // Provide the gateway API to dependency injection - return node.Override(new(api.GatewayAPI), gapi) - }, - }, - ) - n, sn := builder.RPCMockSbBuilder(t, opts, test.OneMiner) - full := n[0] - lite := n[1] - miner := sn[0] + // create the full node and the miner. 
+ var ens *kit.Ensemble + full, miner, ens = kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(blocktime) - // Get the listener address for the full node - fullAddr, err := full.NetAddrsListen(ctx) + // Create a gateway server in front of the full node + gwapi := gateway.NewNode(full, lookbackCap, stateWaitLookbackLimit) + handler, err := gateway.Handler(gwapi) require.NoError(t, err) - // Connect the miner and the full node - err = miner.NetConnect(ctx, fullAddr) + l, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - // Connect the miner and the lite node (so that the lite node can send - // data to the miner) - liteAddr, err := lite.NetAddrsListen(ctx) - require.NoError(t, err) - err = miner.NetConnect(ctx, liteAddr) + srv, _ := kit.CreateRPCServer(t, handler, l) + + // Create a gateway client API that connects to the gateway server + var gapi api.Gateway + gapi, closer, err = client.NewGatewayRPCV1(ctx, "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil) require.NoError(t, err) + t.Cleanup(closer) - // Start mining blocks - bm := test.NewBlockMiner(ctx, t, miner, blocktime) - bm.MineBlocks() + ens.FullNode(&lite, + kit.LiteNode(), + kit.ThroughRPC(), + kit.ConstructorOpts( + node.Override(new(api.Gateway), gapi), + ), + ).Start().InterconnectAll() - return &testNodes{lite: lite, full: full, miner: miner, closer: closer} + return &testNodes{lite: &lite, full: full, miner: miner} } -func sendFunds(ctx context.Context, fromNode test.TestNode, fromAddr address.Address, toAddr address.Address, amt types.BigInt) error { +func sendFunds(ctx context.Context, fromNode *kit.TestFullNode, fromAddr address.Address, toAddr address.Address, amt types.BigInt) error { msg := &types.Message{ From: fromAddr, To: toAddr, @@ -294,7 +305,7 @@ func sendFunds(ctx context.Context, fromNode test.TestNode, fromAddr address.Add return err } - res, err := fromNode.StateWaitMsg(ctx, sm.Cid(), 1) + res, err := fromNode.StateWaitMsg(ctx, sm.Cid(), 
3, api.LookbackNoLimit, true) if err != nil { return err } diff --git a/itests/get_messages_in_ts_test.go b/itests/get_messages_in_ts_test.go new file mode 100644 index 00000000000..61219a316c3 --- /dev/null +++ b/itests/get_messages_in_ts_test.go @@ -0,0 +1,104 @@ +package itests + +import ( + "context" + "testing" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" + + "time" + + "github.com/filecoin-project/go-state-types/big" +) + +func TestChainGetMessagesInTs(t *testing.T) { + ctx := context.Background() + + kit.QuietMiningLogs() + + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + // create a new address where to send funds. + addr, err := client.WalletNew(ctx, types.KTBLS) + require.NoError(t, err) + + // get the existing balance from the default wallet to then split it. + bal, err := client.WalletBalance(ctx, client.DefaultKey.Address) + require.NoError(t, err) + + const iterations = 100 + + // we'll send half our balance (saving the other half for gas), + // in `iterations` increments. 
+ toSend := big.Div(bal, big.NewInt(2)) + each := big.Div(toSend, big.NewInt(iterations)) + + waitAllCh := make(chan struct{}) + go func() { + headChangeCh, err := client.ChainNotify(ctx) + require.NoError(t, err) + <-headChangeCh //skip hccurrent + + count := 0 + for { + select { + case headChanges := <-headChangeCh: + for _, change := range headChanges { + if change.Type == store.HCApply { + msgs, err := client.ChainGetMessagesInTipset(ctx, change.Val.Key()) + require.NoError(t, err) + count += len(msgs) + if count == iterations { + waitAllCh <- struct{}{} + } + } + } + } + } + }() + + var sms []*types.SignedMessage + for i := 0; i < iterations; i++ { + msg := &types.Message{ + From: client.DefaultKey.Address, + To: addr, + Value: each, + } + + sm, err := client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + require.EqualValues(t, i, sm.Message.Nonce) + + sms = append(sms, sm) + } + + select { + case <-waitAllCh: + case <-time.After(time.Minute): + t.Errorf("timeout to wait for pack messages") + } + + for _, sm := range sms { + msgLookup, err := client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + + ts, err := client.ChainGetTipSet(ctx, msgLookup.TipSet) + require.NoError(t, err) + + msgs, err := client.ChainGetMessagesInTipset(ctx, ts.Parents()) + require.NoError(t, err) + + var found bool + for _, msg := range msgs { + if msg.Cid == sm.Cid() { + found = true + } + } + require.EqualValues(t, true, found, "expect got message in tipset %v", msgLookup.TipSet) + } +} diff --git a/itests/kit/blockminer.go b/itests/kit/blockminer.go new file mode 100644 index 00000000000..2c9bd47c6cf --- /dev/null +++ b/itests/kit/blockminer.go @@ -0,0 +1,124 @@ +package kit + +import ( + "context" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/miner" + "github.com/stretchr/testify/require" +) + +// BlockMiner is a utility that makes a test 
miner Mine blocks on a timer. +type BlockMiner struct { + t *testing.T + miner *TestMiner + + nextNulls int64 + wg sync.WaitGroup + cancel context.CancelFunc +} + +func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner { + return &BlockMiner{ + t: t, + miner: miner, + cancel: func() {}, + } +} + +func (bm *BlockMiner) MineBlocks(ctx context.Context, blocktime time.Duration) { + time.Sleep(time.Second) + + // wrap context in a cancellable context. + ctx, bm.cancel = context.WithCancel(ctx) + + bm.wg.Add(1) + go func() { + defer bm.wg.Done() + + for { + select { + case <-time.After(blocktime): + case <-ctx.Done(): + return + } + + nulls := atomic.SwapInt64(&bm.nextNulls, 0) + err := bm.miner.MineOne(ctx, miner.MineReq{ + InjectNulls: abi.ChainEpoch(nulls), + Done: func(bool, abi.ChainEpoch, error) {}, + }) + switch { + case err == nil: // wrap around + case ctx.Err() != nil: // context fired. + return + default: // log error + bm.t.Error(err) + } + } + }() +} + +// InjectNulls injects the specified amount of null rounds in the next +// mining rounds. 
+func (bm *BlockMiner) InjectNulls(rounds abi.ChainEpoch) { + atomic.AddInt64(&bm.nextNulls, int64(rounds)) +} + +func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn *TestFullNode, cb func(abi.ChainEpoch)) { + for i := 0; i < 1000; i++ { + var ( + success bool + err error + epoch abi.ChainEpoch + wait = make(chan struct{}) + ) + + doneFn := func(win bool, ep abi.ChainEpoch, e error) { + success = win + err = e + epoch = ep + wait <- struct{}{} + } + + mineErr := bm.miner.MineOne(ctx, miner.MineReq{Done: doneFn}) + require.NoError(bm.t, mineErr) + <-wait + + require.NoError(bm.t, err) + + if success { + // Wait until it shows up on the given full nodes ChainHead + nloops := 200 + for i := 0; i < nloops; i++ { + ts, err := fn.ChainHead(ctx) + require.NoError(bm.t, err) + + if ts.Height() == epoch { + break + } + + require.NotEqual(bm.t, i, nloops-1, "block never managed to sync to node") + time.Sleep(time.Millisecond * 10) + } + + if cb != nil { + cb(epoch) + } + return + } + bm.t.Log("did not Mine block, trying again", i) + } + bm.t.Fatal("failed to Mine 1000 times in a row...") +} + +// Stop stops the block miner. 
+func (bm *BlockMiner) Stop() { + bm.t.Log("shutting down mining") + bm.cancel() + bm.wg.Wait() +} diff --git a/cli/test/client.go b/itests/kit/client.go similarity index 57% rename from cli/test/client.go rename to itests/kit/client.go index 95abd39c2b8..bd81e0c04e8 100644 --- a/cli/test/client.go +++ b/itests/kit/client.go @@ -1,9 +1,10 @@ -package test +package kit import ( "context" "fmt" "io/ioutil" + "math/rand" "os" "path/filepath" "regexp" @@ -11,9 +12,7 @@ import ( "testing" "time" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api/test" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -21,8 +20,8 @@ import ( lcli "github.com/urfave/cli/v2" ) -// RunClientTest exercises some of the client CLI commands -func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) { +// RunClientTest exercises some of the Client CLI commands +func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode *TestFullNode) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() @@ -30,7 +29,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) mockCLI := NewMockCLI(ctx, t, cmds) clientCLI := mockCLI.Client(clientNode.ListenAddr) - // Get the miner address + // Get the Miner address addrs, err := clientNode.StateListMiners(ctx, types.EmptyTSK) require.NoError(t, err) require.Len(t, addrs, 1) @@ -38,18 +37,20 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) minerAddr := addrs[0] fmt.Println("Miner:", minerAddr) - // client query-ask + // client query-ask out := clientCLI.RunCmd("client", "query-ask", minerAddr.String()) require.Regexp(t, regexp.MustCompile("Ask:"), out) // Create a deal (non-interactive) - // client deal 1000000attofil - res, _, err := test.CreateClientFile(ctx, clientNode, 1) + // 
client deal --start-epoch= 1000000attofil + res, _, _, err := CreateImportFile(ctx, clientNode, 1, 0) + require.NoError(t, err) + startEpoch := fmt.Sprintf("--start-epoch=%d", 2<<12) dataCid := res.Root price := "1000000attofil" duration := fmt.Sprintf("%d", build.MinDealDuration) - out = clientCLI.RunCmd("client", "deal", dataCid.String(), minerAddr.String(), price, duration) + out = clientCLI.RunCmd("client", "deal", startEpoch, dataCid.String(), minerAddr.String(), price, duration) fmt.Println("client deal", out) // Create a deal (interactive) @@ -57,9 +58,9 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) // // (in days) // - // "no" (verified client) + // "no" (verified Client) // "yes" (confirm deal) - res, _, err = test.CreateClientFile(ctx, clientNode, 2) + res, _, _, err = CreateImportFile(ctx, clientNode, 2, 0) require.NoError(t, err) dataCid2 := res.Root duration = fmt.Sprintf("%d", build.MinDealDuration/builtin.EpochsInDay) @@ -82,7 +83,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) fmt.Println("list-deals:\n", out) lines := strings.Split(out, "\n") - require.Len(t, lines, 2) + require.GreaterOrEqual(t, len(lines), 2) re := regexp.MustCompile(`\s+`) parts := re.Split(lines[1], -1) if len(parts) < 4 { @@ -90,16 +91,19 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) } dealStatus = parts[3] fmt.Println(" Deal status:", dealStatus) - if dealComplete(t, dealStatus) { + + st := CategorizeDealState(dealStatus) + require.NotEqual(t, TestDealStateFailed, st) + if st == TestDealStateComplete { break } time.Sleep(time.Second) } - // Retrieve the first file from the miner + // Retrieve the first file from the Miner // client retrieve - tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-client") + tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-Client") require.NoError(t, err) path := filepath.Join(tmpdir, "outfile.dat") out = 
clientCLI.RunCmd("client", "retrieve", dataCid.String(), path) @@ -107,13 +111,36 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) require.Regexp(t, regexp.MustCompile("Success"), out) } -func dealComplete(t *testing.T, dealStatus string) bool { - switch dealStatus { - case "StorageDealFailing", "StorageDealError": - t.Fatal(xerrors.Errorf("Storage deal failed with status: " + dealStatus)) - case "StorageDealStaged", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed": - return true +func CreateImportFile(ctx context.Context, client api.FullNode, rseed int, size int) (res *api.ImportRes, path string, data []byte, err error) { + data, path, err = createRandomFile(rseed, size) + if err != nil { + return nil, "", nil, err + } + + res, err = client.ClientImport(ctx, api.FileRef{Path: path}) + if err != nil { + return nil, "", nil, err + } + return res, path, data, nil +} + +func createRandomFile(rseed, size int) ([]byte, string, error) { + if size == 0 { + size = 1600 + } + data := make([]byte, size) + rand.New(rand.NewSource(int64(rseed))).Read(data) + + dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-") + if err != nil { + return nil, "", err + } + + path := filepath.Join(dir, "sourcefile.dat") + err = ioutil.WriteFile(path, data, 0644) + if err != nil { + return nil, "", err } - return false + return data, path, nil } diff --git a/itests/kit/control.go b/itests/kit/control.go new file mode 100644 index 00000000000..73ac39b7a14 --- /dev/null +++ b/itests/kit/control.go @@ -0,0 +1,42 @@ +package kit + +import ( + "context" + + "github.com/stretchr/testify/require" + + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + 
"github.com/filecoin-project/lotus/chain/types" +) + +func (tm *TestMiner) SetControlAddresses(addrs ...addr.Address) { + ctx := context.TODO() + + mi, err := tm.FullNode.StateMinerInfo(ctx, tm.ActorAddr, types.EmptyTSK) + require.NoError(tm.t, err) + + cwp := &miner2.ChangeWorkerAddressParams{ + NewWorker: mi.Worker, + NewControlAddrs: addrs, + } + + sp, err := actors.SerializeParams(cwp) + require.NoError(tm.t, err) + + smsg, err := tm.FullNode.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: tm.ActorAddr, + Method: miner.Methods.ChangeWorkerAddress, + + Value: big.Zero(), + Params: sp, + }, nil) + require.NoError(tm.t, err) + + tm.FullNode.WaitMsg(ctx, smsg.Cid()) +} diff --git a/itests/kit/deals.go b/itests/kit/deals.go new file mode 100644 index 00000000000..4cee139253a --- /dev/null +++ b/itests/kit/deals.go @@ -0,0 +1,313 @@ +package kit + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/ipfs/go-cid" + files "github.com/ipfs/go-ipfs-files" + ipld "github.com/ipfs/go-ipld-format" + dag "github.com/ipfs/go-merkledag" + dstest "github.com/ipfs/go-merkledag/test" + unixfile "github.com/ipfs/go-unixfs/file" + "github.com/ipld/go-car" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +type DealHarness struct { + t *testing.T + client *TestFullNode + main *TestMiner + market *TestMiner +} + +type MakeFullDealParams struct { + Rseed int + FastRet bool + StartEpoch abi.ChainEpoch + + // SuspendUntilCryptoeconStable suspends deal-making, until cryptoecon + // parameters are stabilised. 
This affects projected collateral, and tests + // will fail in network version 13 and higher if deals are started too soon + // after network birth. + // + // The reason is that the formula for collateral calculation takes + // circulating supply into account: + // + // [portion of power this deal will be] * [~1% of tokens]. + // + // In the first epochs after genesis, the total circulating supply is + // changing dramatically in percentual terms. Therefore, if the deal is + // proposed too soon, by the time it gets published on chain, the quoted + // provider collateral will no longer be valid. + // + // The observation is that deals fail with: + // + // GasEstimateMessageGas error: estimating gas used: message execution + // failed: exit 16, reason: Provider collateral out of bounds. (RetCode=16) + // + // Enabling this will suspend deal-making until the network has reached a + // height of 300. + SuspendUntilCryptoeconStable bool +} + +// NewDealHarness creates a test harness that contains testing utilities for deals. +func NewDealHarness(t *testing.T, client *TestFullNode, main *TestMiner, market *TestMiner) *DealHarness { + return &DealHarness{ + t: t, + client: client, + main: main, + market: market, + } +} + +// MakeOnlineDeal makes an online deal, generating a random file with the +// supplied seed, and setting the specified fast retrieval flag and start epoch +// on the storage deal. It returns when the deal is sealed. +// +// TODO: convert input parameters to struct, and add size as an input param. 
+func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) { + res, path = dh.client.CreateImportFile(ctx, params.Rseed, 0) + + dh.t.Logf("FILE CID: %s", res.Root) + + if params.SuspendUntilCryptoeconStable { + dh.t.Logf("deal-making suspending until cryptecon parameters have stabilised") + ts := dh.client.WaitTillChain(ctx, HeightAtLeast(300)) + dh.t.Logf("deal-making continuing; current height is %d", ts.Height()) + } + + deal = dh.StartDeal(ctx, res.Root, params.FastRet, params.StartEpoch) + + // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this + time.Sleep(time.Second) + dh.WaitDealSealed(ctx, deal, false, false, nil) + + return deal, res, path +} + +// StartDeal starts a storage deal between the client and the miner. +func (dh *DealHarness) StartDeal(ctx context.Context, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid { + maddr, err := dh.main.ActorAddress(ctx) + require.NoError(dh.t, err) + + addr, err := dh.client.WalletDefaultAddress(ctx) + require.NoError(dh.t, err) + + deal, err := dh.client.ClientStartDeal(ctx, &api.StartDealParams{ + Data: &storagemarket.DataRef{ + TransferType: storagemarket.TTGraphsync, + Root: fcid, + }, + Wallet: addr, + Miner: maddr, + EpochPrice: types.NewInt(1000000), + DealStartEpoch: startEpoch, + MinBlocksDuration: uint64(build.MinDealDuration), + FastRetrieval: fastRet, + }) + require.NoError(dh.t, err) + + return deal +} + +// WaitDealSealed waits until the deal is sealed. 
+func (dh *DealHarness) WaitDealSealed(ctx context.Context, deal *cid.Cid, noseal, noSealStart bool, cb func()) { +loop: + for { + di, err := dh.client.ClientGetDealInfo(ctx, *deal) + require.NoError(dh.t, err) + + switch di.State { + case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing: + if noseal { + return + } + if !noSealStart { + dh.StartSealingWaiting(ctx) + } + case storagemarket.StorageDealProposalRejected: + dh.t.Fatal("deal rejected") + case storagemarket.StorageDealFailing: + dh.t.Fatal("deal failed") + case storagemarket.StorageDealError: + dh.t.Fatal("deal errored", di.Message) + case storagemarket.StorageDealActive: + dh.t.Log("COMPLETE", di) + break loop + } + + mds, err := dh.market.MarketListIncompleteDeals(ctx) + require.NoError(dh.t, err) + + var minerState storagemarket.StorageDealStatus + for _, md := range mds { + if md.DealID == di.DealID { + minerState = md.State + break + } + } + + dh.t.Logf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState]) + time.Sleep(time.Second / 2) + if cb != nil { + cb() + } + } +} + +// WaitDealPublished waits until the deal is published. 
+func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) { + subCtx, cancel := context.WithCancel(ctx) + defer cancel() + + updates, err := dh.market.MarketGetDealUpdates(subCtx) + require.NoError(dh.t, err) + + for { + select { + case <-ctx.Done(): + dh.t.Fatal("context timeout") + case di := <-updates: + if deal.Equals(di.ProposalCid) { + switch di.State { + case storagemarket.StorageDealProposalRejected: + dh.t.Fatal("deal rejected") + case storagemarket.StorageDealFailing: + dh.t.Fatal("deal failed") + case storagemarket.StorageDealError: + dh.t.Fatal("deal errored", di.Message) + case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive: + dh.t.Log("COMPLETE", di) + return + } + dh.t.Log("Deal state: ", storagemarket.DealStates[di.State]) + } + } + } +} + +func (dh *DealHarness) StartSealingWaiting(ctx context.Context) { + snums, err := dh.main.SectorsList(ctx) + require.NoError(dh.t, err) + + for _, snum := range snums { + si, err := dh.main.SectorsStatus(ctx, snum, false) + require.NoError(dh.t, err) + + dh.t.Logf("Sector state: %s", si.State) + if si.State == api.SectorState(sealing.WaitDeals) { + require.NoError(dh.t, dh.main.SectorStartSealing(ctx, snum)) + } + + dh.main.FlushSealingBatches(ctx) + } +} + +func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root cid.Cid, carExport bool) (path string) { + // perform retrieval. 
+ info, err := dh.client.ClientGetDealInfo(ctx, *deal) + require.NoError(dh.t, err) + + offers, err := dh.client.ClientFindData(ctx, root, &info.PieceCID) + require.NoError(dh.t, err) + require.NotEmpty(dh.t, offers, "no offers") + + carFile, err := ioutil.TempFile(dh.t.TempDir(), "ret-car") + require.NoError(dh.t, err) + + defer carFile.Close() //nolint:errcheck + + caddr, err := dh.client.WalletDefaultAddress(ctx) + require.NoError(dh.t, err) + + ref := &api.FileRef{ + Path: carFile.Name(), + IsCAR: carExport, + } + + updates, err := dh.client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref) + require.NoError(dh.t, err) + + for update := range updates { + require.Emptyf(dh.t, update.Err, "retrieval failed: %s", update.Err) + } + + ret := carFile.Name() + if carExport { + actualFile := dh.ExtractFileFromCAR(ctx, carFile) + ret = actualFile.Name() + _ = actualFile.Close() //nolint:errcheck + } + + return ret +} + +func (dh *DealHarness) ExtractFileFromCAR(ctx context.Context, file *os.File) (out *os.File) { + bserv := dstest.Bserv() + ch, err := car.LoadCar(bserv.Blockstore(), file) + require.NoError(dh.t, err) + + b, err := bserv.GetBlock(ctx, ch.Roots[0]) + require.NoError(dh.t, err) + + nd, err := ipld.Decode(b) + require.NoError(dh.t, err) + + dserv := dag.NewDAGService(bserv) + fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd) + require.NoError(dh.t, err) + + tmpfile, err := ioutil.TempFile(dh.t.TempDir(), "file-in-car") + require.NoError(dh.t, err) + + defer tmpfile.Close() //nolint:errcheck + + err = files.WriteTo(fil, tmpfile.Name()) + require.NoError(dh.t, err) + + return tmpfile +} + +type RunConcurrentDealsOpts struct { + N int + FastRetrieval bool + CarExport bool + StartEpoch abi.ChainEpoch +} + +func (dh *DealHarness) RunConcurrentDeals(opts RunConcurrentDealsOpts) { + errgrp, _ := errgroup.WithContext(context.Background()) + for i := 0; i < opts.N; i++ { + i := i + errgrp.Go(func() (err error) { + defer func() { + // This is necessary 
because golang can't deal with test + // failures being reported from children goroutines ¯\_(ツ)_/¯ + if r := recover(); r != nil { + err = fmt.Errorf("deal failed: %s", r) + } + }() + deal, res, inPath := dh.MakeOnlineDeal(context.Background(), MakeFullDealParams{ + Rseed: 5 + i, + FastRet: opts.FastRetrieval, + StartEpoch: opts.StartEpoch, + }) + outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, opts.CarExport) + AssertFilesEqual(dh.t, inPath, outPath) + return nil + }) + } + require.NoError(dh.t, errgrp.Wait()) +} diff --git a/itests/kit/deals_state.go b/itests/kit/deals_state.go new file mode 100644 index 00000000000..617a6d28e8d --- /dev/null +++ b/itests/kit/deals_state.go @@ -0,0 +1,21 @@ +package kit + +type TestDealState int + +const ( + TestDealStateFailed = TestDealState(-1) + TestDealStateInProgress = TestDealState(0) + TestDealStateComplete = TestDealState(1) +) + +// CategorizeDealState categorizes deal states into one of three states: +// Complete, InProgress, Failed. 
+func CategorizeDealState(dealStatus string) TestDealState { + switch dealStatus { + case "StorageDealFailing", "StorageDealError": + return TestDealStateFailed + case "StorageDealStaged", "StorageDealAwaitingPreCommit", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed": + return TestDealStateComplete + } + return TestDealStateInProgress +} diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go new file mode 100644 index 00000000000..77a743d0cea --- /dev/null +++ b/itests/kit/ensemble.go @@ -0,0 +1,706 @@ +package kit + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "io/ioutil" + "net" + "sync" + "testing" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/go-storedcounter" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/gen" + genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis" + "github.com/filecoin-project/lotus/chain/messagepool" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" + sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/extern/sector-storage/mock" + "github.com/filecoin-project/lotus/genesis" + lotusminer "github.com/filecoin-project/lotus/miner" + 
"github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/modules/dtypes" + testing2 "github.com/filecoin-project/lotus/node/modules/testing" + "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/lotus/storage/mockstorage" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power" + "github.com/ipfs/go-datastore" + libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto" + "github.com/libp2p/go-libp2p-core/peer" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/require" +) + +func init() { + chain.BootstrapPeerThreshold = 1 + messagepool.HeadChangeCoalesceMinDelay = time.Microsecond + messagepool.HeadChangeCoalesceMaxDelay = 2 * time.Microsecond + messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond +} + +// Ensemble is a collection of nodes instantiated within a test. +// +// Create a new ensemble with: +// +// ens := kit.NewEnsemble() +// +// Create full nodes and miners: +// +// var full TestFullNode +// var miner TestMiner +// ens.FullNode(&full, opts...) // populates a full node +// ens.Miner(&miner, &full, opts...) // populates a miner, using the full node as its chain daemon +// +// It is possible to pass functional options to set initial balances, +// presealed sectors, owner keys, etc. +// +// After the initial nodes are added, call `ens.Start()` to forge genesis +// and start the network. Mining will NOT be started automatically. It needs +// to be started explicitly by calling `BeginMining`. +// +// Nodes also need to be connected with one another, either via `ens.Connect()` +// or `ens.InterconnectAll()`. 
A common inchantation for simple tests is to do: +// +// ens.InterconnectAll().BeginMining(blocktime) +// +// You can continue to add more nodes, but you must always follow with +// `ens.Start()` to activate the new nodes. +// +// The API is chainable, so it's possible to do a lot in a very succinct way: +// +// kit.NewEnsemble().FullNode(&full).Miner(&miner, &full).Start().InterconnectAll().BeginMining() +// +// You can also find convenient fullnode:miner presets, such as 1:1, 1:2, +// and 2:1, e.g.: +// +// kit.EnsembleMinimal() +// kit.EnsembleOneTwo() +// kit.EnsembleTwoOne() +// +type Ensemble struct { + t *testing.T + bootstrapped bool + genesisBlock bytes.Buffer + mn mocknet.Mocknet + options *ensembleOpts + + inactive struct { + fullnodes []*TestFullNode + miners []*TestMiner + } + active struct { + fullnodes []*TestFullNode + miners []*TestMiner + bms map[*TestMiner]*BlockMiner + } + genesis struct { + miners []genesis.Miner + accounts []genesis.Actor + } +} + +// NewEnsemble instantiates a new blank Ensemble. +func NewEnsemble(t *testing.T, opts ...EnsembleOpt) *Ensemble { + options := DefaultEnsembleOpts + for _, o := range opts { + err := o(&options) + require.NoError(t, err) + } + + n := &Ensemble{t: t, options: &options} + n.active.bms = make(map[*TestMiner]*BlockMiner) + + // add accounts from ensemble options to genesis. + for _, acc := range options.accounts { + n.genesis.accounts = append(n.genesis.accounts, genesis.Actor{ + Type: genesis.TAccount, + Balance: acc.initialBalance, + Meta: (&genesis.AccountMeta{Owner: acc.key.Address}).ActorMeta(), + }) + } + + return n +} + +// FullNode enrolls a new full node. 
+func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble { + options := DefaultNodeOpts + for _, o := range opts { + err := o(&options) + require.NoError(n.t, err) + } + + key, err := wallet.GenerateKey(types.KTBLS) + require.NoError(n.t, err) + + if !n.bootstrapped && !options.balance.IsZero() { + // if we still haven't forged genesis, create a key+address, and assign + // it some FIL; this will be set as the default wallet when the node is + // started. + genacc := genesis.Actor{ + Type: genesis.TAccount, + Balance: options.balance, + Meta: (&genesis.AccountMeta{Owner: key.Address}).ActorMeta(), + } + + n.genesis.accounts = append(n.genesis.accounts, genacc) + } + + *full = TestFullNode{t: n.t, options: options, DefaultKey: key} + n.inactive.fullnodes = append(n.inactive.fullnodes, full) + return n +} + +// Miner enrolls a new miner, using the provided full node for chain +// interactions. +func (n *Ensemble) Miner(miner *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble { + require.NotNil(n.t, full, "full node required when instantiating miner") + + options := DefaultNodeOpts + for _, o := range opts { + err := o(&options) + require.NoError(n.t, err) + } + + privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader) + require.NoError(n.t, err) + + peerId, err := peer.IDFromPrivateKey(privkey) + require.NoError(n.t, err) + + tdir, err := ioutil.TempDir("", "preseal-memgen") + require.NoError(n.t, err) + + minerCnt := len(n.inactive.miners) + len(n.active.miners) + + actorAddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(minerCnt)) + require.NoError(n.t, err) + + if options.mainMiner != nil { + actorAddr = options.mainMiner.ActorAddr + } + + ownerKey := options.ownerKey + if !n.bootstrapped { + var ( + sectors = options.sectors + k *types.KeyInfo + genm *genesis.Miner + ) + + // create the preseal commitment. 
+ if n.options.mockProofs { + genm, k, err = mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, actorAddr, sectors) + } else { + genm, k, err = seed.PreSeal(actorAddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, sectors, tdir, []byte("make genesis mem random"), nil, true) + } + require.NoError(n.t, err) + + genm.PeerId = peerId + + // create an owner key, and assign it some FIL. + ownerKey, err = wallet.NewKey(*k) + require.NoError(n.t, err) + + genacc := genesis.Actor{ + Type: genesis.TAccount, + Balance: options.balance, + Meta: (&genesis.AccountMeta{Owner: ownerKey.Address}).ActorMeta(), + } + + n.genesis.miners = append(n.genesis.miners, *genm) + n.genesis.accounts = append(n.genesis.accounts, genacc) + } else { + require.NotNil(n.t, ownerKey, "worker key can't be null if initializing a miner after genesis") + } + + rl, err := net.Listen("tcp", "127.0.0.1:") + require.NoError(n.t, err) + + *miner = TestMiner{ + t: n.t, + ActorAddr: actorAddr, + OwnerKey: ownerKey, + FullNode: full, + PresealDir: tdir, + options: options, + RemoteListener: rl, + } + + miner.Libp2p.PeerID = peerId + miner.Libp2p.PrivKey = privkey + + n.inactive.miners = append(n.inactive.miners, miner) + + return n +} + +// Start starts all enrolled nodes. +func (n *Ensemble) Start() *Ensemble { + ctx := context.Background() + + var gtempl *genesis.Template + if !n.bootstrapped { + // We haven't been bootstrapped yet, we need to generate genesis and + // create the networking backbone. + gtempl = n.generateGenesis() + n.mn = mocknet.New(ctx) + } + + // --------------------- + // FULL NODES + // --------------------- + + // Create all inactive full nodes. 
+ for i, full := range n.inactive.fullnodes { + r := repo.NewMemory(nil) + opts := []node.Option{ + node.FullAPI(&full.FullNode, node.Lite(full.options.lite)), + node.Base(), + node.Repo(r), + node.MockHost(n.mn), + node.Test(), + + // so that we subscribe to pubsub topics immediately + node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)), + } + + // append any node builder options. + opts = append(opts, full.options.extraNodeOpts...) + + // Either generate the genesis or inject it. + if i == 0 && !n.bootstrapped { + opts = append(opts, node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&n.genesisBlock, *gtempl))) + } else { + opts = append(opts, node.Override(new(modules.Genesis), modules.LoadGenesis(n.genesisBlock.Bytes()))) + } + + // Are we mocking proofs? + if n.options.mockProofs { + opts = append(opts, + node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), + node.Override(new(ffiwrapper.Prover), mock.MockProver), + ) + } + + // Call option builders, passing active nodes as the parameter + for _, bopt := range full.options.optBuilders { + opts = append(opts, bopt(n.active.fullnodes)) + } + + // Construct the full node. + stop, err := node.New(ctx, opts...) + + require.NoError(n.t, err) + + addr, err := full.WalletImport(context.Background(), &full.DefaultKey.KeyInfo) + require.NoError(n.t, err) + + err = full.WalletSetDefault(context.Background(), addr) + require.NoError(n.t, err) + + // Are we hitting this node through its RPC? + if full.options.rpc { + withRPC := fullRpc(n.t, full) + n.inactive.fullnodes[i] = withRPC + } + + n.t.Cleanup(func() { _ = stop(context.Background()) }) + + n.active.fullnodes = append(n.active.fullnodes, full) + } + + // If we are here, we have processed all inactive fullnodes and moved them + // to active, so clear the slice. + n.inactive.fullnodes = n.inactive.fullnodes[:0] + + // Link all the nodes. 
+ err := n.mn.LinkAll() + require.NoError(n.t, err) + + // --------------------- + // MINERS + // --------------------- + + // Create all inactive miners. + for i, m := range n.inactive.miners { + if n.bootstrapped { + if m.options.mainMiner == nil { + // this is a miner created after genesis, so it won't have a preseal. + // we need to create it on chain. + params, aerr := actors.SerializeParams(&power2.CreateMinerParams{ + Owner: m.OwnerKey.Address, + Worker: m.OwnerKey.Address, + SealProofType: m.options.proofType, + Peer: abi.PeerID(m.Libp2p.PeerID), + }) + require.NoError(n.t, aerr) + + createStorageMinerMsg := &types.Message{ + From: m.OwnerKey.Address, + To: power.Address, + Value: big.Zero(), + + Method: power.Methods.CreateMiner, + Params: params, + } + signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil) + require.NoError(n.t, err) + + mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) + require.NoError(n.t, err) + require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode) + + var retval power2.CreateMinerReturn + err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)) + require.NoError(n.t, err, "failed to create miner") + + m.ActorAddr = retval.IDAddress + } else { + params, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)}) + require.NoError(n.t, err) + + msg := &types.Message{ + To: m.options.mainMiner.ActorAddr, + From: m.options.mainMiner.OwnerKey.Address, + Method: miner.Methods.ChangePeerID, + Params: params, + Value: types.NewInt(0), + } + + signed, err2 := m.FullNode.FullNode.MpoolPushMessage(ctx, msg, nil) + require.NoError(n.t, err2) + + mw, err2 := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) + require.NoError(n.t, err2) + require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode) + } + } + + has, err := m.FullNode.WalletHas(ctx, m.OwnerKey.Address) + 
require.NoError(n.t, err) + + // Only import the owner's full key into our companion full node, if we + // don't have it still. + if !has { + _, err = m.FullNode.WalletImport(ctx, &m.OwnerKey.KeyInfo) + require.NoError(n.t, err) + } + + // // Set it as the default address. + // err = m.FullNode.WalletSetDefault(ctx, m.OwnerAddr.Address) + // require.NoError(n.t, err) + + r := repo.NewMemory(nil) + + lr, err := r.Lock(repo.StorageMiner) + require.NoError(n.t, err) + + c, err := lr.Config() + require.NoError(n.t, err) + + cfg, ok := c.(*config.StorageMiner) + if !ok { + n.t.Fatalf("invalid config from repo, got: %T", c) + } + cfg.Common.API.RemoteListenAddress = m.RemoteListener.Addr().String() + cfg.Subsystems.EnableMarkets = m.options.subsystems.Has(SMarkets) + cfg.Subsystems.EnableMining = m.options.subsystems.Has(SMining) + cfg.Subsystems.EnableSealing = m.options.subsystems.Has(SSealing) + cfg.Subsystems.EnableSectorStorage = m.options.subsystems.Has(SSectorStorage) + + if m.options.mainMiner != nil { + token, err := m.options.mainMiner.FullNode.AuthNew(ctx, api.AllPermissions) + require.NoError(n.t, err) + + cfg.Subsystems.SectorIndexApiInfo = fmt.Sprintf("%s:%s", token, m.options.mainMiner.ListenAddr) + cfg.Subsystems.SealerApiInfo = fmt.Sprintf("%s:%s", token, m.options.mainMiner.ListenAddr) + + fmt.Println("config for market node, setting SectorIndexApiInfo to: ", cfg.Subsystems.SectorIndexApiInfo) + fmt.Println("config for market node, setting SealerApiInfo to: ", cfg.Subsystems.SealerApiInfo) + } + + err = lr.SetConfig(func(raw interface{}) { + rcfg := raw.(*config.StorageMiner) + *rcfg = *cfg + }) + require.NoError(n.t, err) + + ks, err := lr.KeyStore() + require.NoError(n.t, err) + + pk, err := m.Libp2p.PrivKey.Bytes() + require.NoError(n.t, err) + + err = ks.Put("libp2p-host", types.KeyInfo{ + Type: "libp2p-host", + PrivateKey: pk, + }) + require.NoError(n.t, err) + + ds, err := lr.Datastore(context.TODO(), "/metadata") + require.NoError(n.t, err) + + 
err = ds.Put(datastore.NewKey("miner-address"), m.ActorAddr.Bytes()) + require.NoError(n.t, err) + + nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix)) + for i := 0; i < m.options.sectors; i++ { + _, err := nic.Next() + require.NoError(n.t, err) + } + _, err = nic.Next() + require.NoError(n.t, err) + + err = lr.Close() + require.NoError(n.t, err) + + if m.options.mainMiner == nil { + enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)}) + require.NoError(n.t, err) + + msg := &types.Message{ + From: m.OwnerKey.Address, + To: m.ActorAddr, + Method: miner.Methods.ChangePeerID, + Params: enc, + Value: types.NewInt(0), + } + + _, err2 := m.FullNode.MpoolPushMessage(ctx, msg, nil) + require.NoError(n.t, err2) + } + + var mineBlock = make(chan lotusminer.MineReq) + opts := []node.Option{ + node.StorageMiner(&m.StorageMiner, cfg.Subsystems), + node.Base(), + node.Repo(r), + node.Test(), + + node.If(!m.options.disableLibp2p, node.MockHost(n.mn)), + + node.Override(new(v1api.FullNode), m.FullNode.FullNode), + node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, m.ActorAddr)), + + // disable resource filtering so that local worker gets assigned tasks + // regardless of system pressure. + node.Override(new(sectorstorage.SealerConfig), func() sectorstorage.SealerConfig { + scfg := config.DefaultStorageMiner() + scfg.Storage.ResourceFiltering = sectorstorage.ResourceFilteringDisabled + return scfg.Storage + }), + } + + // append any node builder options. + opts = append(opts, m.options.extraNodeOpts...) + + idAddr, err := address.IDFromAddress(m.ActorAddr) + require.NoError(n.t, err) + + // preload preseals if the network still hasn't bootstrapped. 
+ var presealSectors []abi.SectorID + if !n.bootstrapped { + sectors := n.genesis.miners[i].Sectors + for _, sector := range sectors { + presealSectors = append(presealSectors, abi.SectorID{ + Miner: abi.ActorID(idAddr), + Number: sector.SectorID, + }) + } + } + + if n.options.mockProofs { + opts = append(opts, + node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) { + return mock.NewMockSectorMgr(presealSectors), nil + }), + node.Override(new(sectorstorage.SectorManager), node.From(new(*mock.SectorMgr))), + node.Override(new(sectorstorage.Unsealer), node.From(new(*mock.SectorMgr))), + node.Override(new(sectorstorage.PieceProvider), node.From(new(*mock.SectorMgr))), + + node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), + node.Override(new(ffiwrapper.Prover), mock.MockProver), + node.Unset(new(*sectorstorage.Manager)), + ) + } + + // start node + stop, err := node.New(ctx, opts...) + require.NoError(n.t, err) + + // using real proofs, therefore need real sectors. + if !n.bootstrapped && !n.options.mockProofs { + err := m.StorageAddLocal(ctx, m.PresealDir) + require.NoError(n.t, err) + } + + n.t.Cleanup(func() { _ = stop(context.Background()) }) + + // Are we hitting this node through its RPC? + if m.options.rpc { + withRPC := minerRpc(n.t, m) + n.inactive.miners[i] = withRPC + } + + mineOne := func(ctx context.Context, req lotusminer.MineReq) error { + select { + case mineBlock <- req: + return nil + case <-ctx.Done(): + return ctx.Err() + } + } + + m.MineOne = mineOne + m.Stop = stop + + n.active.miners = append(n.active.miners, m) + } + + // If we are here, we have processed all inactive miners and moved them + // to active, so clear the slice. + n.inactive.miners = n.inactive.miners[:0] + + // Link all the nodes. 
+ err = n.mn.LinkAll() + require.NoError(n.t, err) + + if !n.bootstrapped && len(n.active.miners) > 0 { + // We have *just* bootstrapped, so mine 2 blocks to setup some CE stuff in some actors + var wait sync.Mutex + wait.Lock() + + observer := n.active.fullnodes[0] + + bm := NewBlockMiner(n.t, n.active.miners[0]) + n.t.Cleanup(bm.Stop) + + bm.MineUntilBlock(ctx, observer, func(epoch abi.ChainEpoch) { + wait.Unlock() + }) + wait.Lock() + bm.MineUntilBlock(ctx, observer, func(epoch abi.ChainEpoch) { + wait.Unlock() + }) + wait.Lock() + } + + n.bootstrapped = true + return n +} + +// InterconnectAll connects all miners and full nodes to one another. +func (n *Ensemble) InterconnectAll() *Ensemble { + // connect full nodes to miners. + for _, from := range n.active.fullnodes { + for _, to := range n.active.miners { + // []*TestMiner to []api.CommonAPI type coercion not possible + // so cannot use variadic form. + n.Connect(from, to) + } + } + + // connect full nodes between each other, skipping ourselves. + last := len(n.active.fullnodes) - 1 + for i, from := range n.active.fullnodes { + if i == last { + continue + } + for _, to := range n.active.fullnodes[i+1:] { + n.Connect(from, to) + } + } + return n +} + +// Connect connects one full node to the provided full nodes. +func (n *Ensemble) Connect(from api.Net, to ...api.Net) *Ensemble { + addr, err := from.NetAddrsListen(context.Background()) + require.NoError(n.t, err) + + for _, other := range to { + err = other.NetConnect(context.Background(), addr) + require.NoError(n.t, err) + } + return n +} + +// BeginMining kicks off mining for the specified miners. If nil or 0-length, +// it will kick off mining for all enrolled and active miners. It also adds a +// cleanup function to stop all mining operations on test teardown. 
+func (n *Ensemble) BeginMining(blocktime time.Duration, miners ...*TestMiner) []*BlockMiner { + ctx := context.Background() + + // wait one second to make sure that nodes are connected and have handshaken. + // TODO make this deterministic by listening to identify events on the + // libp2p eventbus instead (or something else). + time.Sleep(1 * time.Second) + + var bms []*BlockMiner + if len(miners) == 0 { + // no miners have been provided explicitly, instantiate block miners + // for all active miners that aren't still mining. + for _, m := range n.active.miners { + if _, ok := n.active.bms[m]; ok { + continue // skip, already have a block miner + } + miners = append(miners, m) + } + } + + for _, m := range miners { + bm := NewBlockMiner(n.t, m) + bm.MineBlocks(ctx, blocktime) + n.t.Cleanup(bm.Stop) + + bms = append(bms, bm) + + n.active.bms[m] = bm + } + + return bms +} + +func (n *Ensemble) generateGenesis() *genesis.Template { + var verifRoot = gen.DefaultVerifregRootkeyActor + if k := n.options.verifiedRoot.key; k != nil { + verifRoot = genesis.Actor{ + Type: genesis.TAccount, + Balance: n.options.verifiedRoot.initialBalance, + Meta: (&genesis.AccountMeta{Owner: k.Address}).ActorMeta(), + } + } + + templ := &genesis.Template{ + NetworkVersion: network.Version0, + Accounts: n.genesis.accounts, + Miners: n.genesis.miners, + NetworkName: "test", + Timestamp: uint64(time.Now().Unix() - int64(n.options.pastOffset.Seconds())), + VerifregRootKey: verifRoot, + RemainderAccount: gen.DefaultRemainderAccountActor, + } + + return templ +} diff --git a/itests/kit/ensemble_opts.go b/itests/kit/ensemble_opts.go new file mode 100644 index 00000000000..440362ed142 --- /dev/null +++ b/itests/kit/ensemble_opts.go @@ -0,0 +1,55 @@ +package kit + +import ( + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/wallet" +) + +type EnsembleOpt func(opts *ensembleOpts) error + +type genesisAccount struct { + key *wallet.Key + 
initialBalance abi.TokenAmount +} + +type ensembleOpts struct { + pastOffset time.Duration + verifiedRoot genesisAccount + accounts []genesisAccount + mockProofs bool +} + +var DefaultEnsembleOpts = ensembleOpts{ + pastOffset: 10000000 * time.Second, // time sufficiently in the past to trigger catch-up mining. +} + +// MockProofs activates mock proofs for the entire ensemble. +func MockProofs() EnsembleOpt { + return func(opts *ensembleOpts) error { + opts.mockProofs = true + return nil + } +} + +// RootVerifier specifies the key to be enlisted as the verified registry root, +// as well as the initial balance to be attributed during genesis. +func RootVerifier(key *wallet.Key, balance abi.TokenAmount) EnsembleOpt { + return func(opts *ensembleOpts) error { + opts.verifiedRoot.key = key + opts.verifiedRoot.initialBalance = balance + return nil + } +} + +// Account sets up an account at genesis with the specified key and balance. +func Account(key *wallet.Key, balance abi.TokenAmount) EnsembleOpt { + return func(opts *ensembleOpts) error { + opts.accounts = append(opts.accounts, genesisAccount{ + key: key, + initialBalance: balance, + }) + return nil + } +} diff --git a/itests/kit/ensemble_presets.go b/itests/kit/ensemble_presets.go new file mode 100644 index 00000000000..b7ff80aa122 --- /dev/null +++ b/itests/kit/ensemble_presets.go @@ -0,0 +1,102 @@ +package kit + +import ( + "testing" + "time" +) + +// EnsembleMinimal creates and starts an Ensemble with a single full node and a single miner. +// It does not interconnect nodes nor does it begin mining. +// +// This function supports passing both ensemble and node functional options. +// Functional options are applied to all nodes. 
+func EnsembleMinimal(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *Ensemble) { + opts = append(opts, WithAllSubsystems()) + + eopts, nopts := siftOptions(t, opts) + + var ( + full TestFullNode + miner TestMiner + ) + ens := NewEnsemble(t, eopts...).FullNode(&full, nopts...).Miner(&miner, &full, nopts...).Start() + return &full, &miner, ens +} + +func EnsembleWithMinerAndMarketNodes(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) { + eopts, nopts := siftOptions(t, opts) + + var ( + fullnode TestFullNode + main, market TestMiner + ) + + mainNodeOpts := []NodeOpt{WithSubsystems(SSealing, SSectorStorage, SMining), DisableLibp2p()} + mainNodeOpts = append(mainNodeOpts, nopts...) + + blockTime := 100 * time.Millisecond + ens := NewEnsemble(t, eopts...).FullNode(&fullnode, nopts...).Miner(&main, &fullnode, mainNodeOpts...).Start() + ens.BeginMining(blockTime) + + marketNodeOpts := []NodeOpt{OwnerAddr(fullnode.DefaultKey), MainMiner(&main), WithSubsystems(SMarkets)} + marketNodeOpts = append(marketNodeOpts, nopts...) + + ens.Miner(&market, &fullnode, marketNodeOpts...).Start().Connect(market, fullnode) + + return &fullnode, &main, &market, ens +} + +// EnsembleTwoOne creates and starts an Ensemble with two full nodes and one miner. +// It does not interconnect nodes nor does it begin mining. +// +// This function supports passing both ensemble and node functional options. +// Functional options are applied to all nodes. +func EnsembleTwoOne(t *testing.T, opts ...interface{}) (*TestFullNode, *TestFullNode, *TestMiner, *Ensemble) { + opts = append(opts, WithAllSubsystems()) + + eopts, nopts := siftOptions(t, opts) + + var ( + one, two TestFullNode + miner TestMiner + ) + ens := NewEnsemble(t, eopts...).FullNode(&one, nopts...).FullNode(&two, nopts...).Miner(&miner, &one, nopts...).Start() + return &one, &two, &miner, ens +} + +// EnsembleOneTwo creates and starts an Ensemble with one full node and two miners. 
+// It does not interconnect nodes nor does it begin mining. +// +// This function supports passing both ensemble and node functional options. +// Functional options are applied to all nodes. +func EnsembleOneTwo(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) { + opts = append(opts, WithAllSubsystems()) + + eopts, nopts := siftOptions(t, opts) + + var ( + full TestFullNode + one, two TestMiner + ) + ens := NewEnsemble(t, eopts...). + FullNode(&full, nopts...). + Miner(&one, &full, nopts...). + Miner(&two, &full, nopts...). + Start() + + return &full, &one, &two, ens +} + +func siftOptions(t *testing.T, opts []interface{}) (eopts []EnsembleOpt, nopts []NodeOpt) { + for _, v := range opts { + switch o := v.(type) { + case EnsembleOpt: + eopts = append(eopts, o) + case NodeOpt: + nopts = append(nopts, o) + default: + t.Fatalf("invalid option type: %T", o) + } + } + return eopts, nopts +} diff --git a/itests/kit/files.go b/itests/kit/files.go new file mode 100644 index 00000000000..48592b51835 --- /dev/null +++ b/itests/kit/files.go @@ -0,0 +1,58 @@ +package kit + +import ( + "bytes" + "io" + "math/rand" + "os" + "testing" + + "github.com/minio/blake2b-simd" + + "github.com/stretchr/testify/require" +) + +// CreateRandomFile creates a random file with the provided seed and the +// provided size. +func CreateRandomFile(t *testing.T, rseed, size int) (path string) { + if size == 0 { + size = 1600 + } + + source := io.LimitReader(rand.New(rand.NewSource(int64(rseed))), int64(size)) + + file, err := os.CreateTemp(t.TempDir(), "sourcefile.dat") + require.NoError(t, err) + + n, err := io.Copy(file, source) + require.NoError(t, err) + require.EqualValues(t, n, size) + + return file.Name() +} + +// AssertFilesEqual compares two files by blake2b hash equality and +// fails the test if unequal. +func AssertFilesEqual(t *testing.T, left, right string) { + // initialize hashes. 
+ leftH, rightH := blake2b.New256(), blake2b.New256() + + // open files. + leftF, err := os.Open(left) + require.NoError(t, err) + + rightF, err := os.Open(right) + require.NoError(t, err) + + // feed hash functions. + _, err = io.Copy(leftH, leftF) + require.NoError(t, err) + + _, err = io.Copy(rightH, rightF) + require.NoError(t, err) + + // compute digests. + leftD, rightD := leftH.Sum(nil), rightH.Sum(nil) + + require.True(t, bytes.Equal(leftD, rightD)) +} diff --git a/itests/kit/funds.go b/itests/kit/funds.go new file mode 100644 index 00000000000..e49c708ea9b --- /dev/null +++ b/itests/kit/funds.go @@ -0,0 +1,40 @@ +package kit + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" +) + +// SendFunds sends funds from the default wallet of the specified sender node +// to the recipient address. 
+func SendFunds(ctx context.Context, t *testing.T, sender *TestFullNode, recipient address.Address, amount abi.TokenAmount) { + senderAddr, err := sender.WalletDefaultAddress(ctx) + require.NoError(t, err) + + msg := &types.Message{ + From: senderAddr, + To: recipient, + Value: amount, + } + + sm, err := sender.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + sender.WaitMsg(ctx, sm.Cid()) +} + +func (f *TestFullNode) WaitMsg(ctx context.Context, msg cid.Cid) { + res, err := f.StateWaitMsg(ctx, msg, 3, api.LookbackNoLimit, true) + require.NoError(f.t, err) + + require.EqualValues(f.t, 0, res.Receipt.ExitCode, "message did not successfully execute") +} diff --git a/itests/kit/init.go b/itests/kit/init.go new file mode 100644 index 00000000000..dc8463cb4e4 --- /dev/null +++ b/itests/kit/init.go @@ -0,0 +1,32 @@ +package kit + +import ( + "fmt" + "os" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/policy" + logging "github.com/ipfs/go-log/v2" +) + +func init() { + _ = logging.SetLogLevel("*", "INFO") + + policy.SetProviderCollateralSupplyTarget(big.Zero(), big.NewInt(1)) + + policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) + + build.InsecurePoStValidation = true + + if err := os.Setenv("BELLMAN_NO_GPU", "1"); err != nil { + panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err)) + } + + if err := os.Setenv("LOTUS_DISABLE_WATCHDOG", "1"); err != nil { + panic(fmt.Sprintf("failed to set LOTUS_DISABLE_WATCHDOG env variable: %s", err)) + } +} diff --git a/itests/kit/log.go b/itests/kit/log.go new file mode 100644 index 00000000000..3dce3af9d0c --- /dev/null +++ b/itests/kit/log.go @@ -0,0 +1,19 @@ +package kit + +import ( + 
"github.com/filecoin-project/lotus/lib/lotuslog" + logging "github.com/ipfs/go-log/v2" +) + +func QuietMiningLogs() { + lotuslog.SetupLogLevels() + + _ = logging.SetLogLevel("miner", "ERROR") // set this to INFO to watch mining happen. + _ = logging.SetLogLevel("chainstore", "ERROR") + _ = logging.SetLogLevel("chain", "ERROR") + _ = logging.SetLogLevel("sub", "ERROR") + _ = logging.SetLogLevel("storageminer", "ERROR") + _ = logging.SetLogLevel("pubsub", "ERROR") + _ = logging.SetLogLevel("gen", "ERROR") + _ = logging.SetLogLevel("dht/RtRefreshManager", "ERROR") +} diff --git a/cli/test/mockcli.go b/itests/kit/mockcli.go similarity index 98% rename from cli/test/mockcli.go rename to itests/kit/mockcli.go index e8eb78f1b4c..c0f21892033 100644 --- a/cli/test/mockcli.go +++ b/itests/kit/mockcli.go @@ -1,4 +1,4 @@ -package test +package kit import ( "bytes" @@ -56,7 +56,7 @@ type MockCLIClient struct { func (c *MockCLIClient) RunCmd(input ...string) string { out, err := c.RunCmdRaw(input...) - require.NoError(c.t, err) + require.NoError(c.t, err, "output:\n%s", out) return out } diff --git a/itests/kit/node_full.go b/itests/kit/node_full.go new file mode 100644 index 00000000000..83586e1881e --- /dev/null +++ b/itests/kit/node_full.go @@ -0,0 +1,85 @@ +package kit + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" +) + +// TestFullNode represents a full node enrolled in an Ensemble. +type TestFullNode struct { + v1api.FullNode + + t *testing.T + + // ListenAddr is the address on which an API server is listening, if an + // API server is created for this Node. 
+ ListenAddr multiaddr.Multiaddr + DefaultKey *wallet.Key + + options nodeOpts +} + +// CreateImportFile creates a random file with the specified seed and size, and +// imports it into the full node. +func (f *TestFullNode) CreateImportFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, path string) { + path = CreateRandomFile(f.t, rseed, size) + res, err := f.ClientImport(ctx, api.FileRef{Path: path}) + require.NoError(f.t, err) + return res, path +} + +// WaitTillChain waits until a specified chain condition is met. It returns +// the first tipset where the condition is met. +func (f *TestFullNode) WaitTillChain(ctx context.Context, pred ChainPredicate) *types.TipSet { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + heads, err := f.ChainNotify(ctx) + require.NoError(f.t, err) + + for chg := range heads { + for _, c := range chg { + if c.Type != "apply" { + continue + } + if ts := c.Val; pred(ts) { + return ts + } + } + } + require.Fail(f.t, "chain condition not met") + return nil +} + +// ChainPredicate encapsulates a chain condition. +type ChainPredicate func(set *types.TipSet) bool + +// HeightAtLeast returns a ChainPredicate that is satisfied when the chain +// height is equal or higher to the target. +func HeightAtLeast(target abi.ChainEpoch) ChainPredicate { + return func(ts *types.TipSet) bool { + return ts.Height() >= target + } +} + +// BlockMinedBy returns a ChainPredicate that is satisfied when we observe the +// first block mined by the specified miner. 
+func BlockMinedBy(miner address.Address) ChainPredicate { + return func(ts *types.TipSet) bool { + for _, b := range ts.Blocks() { + if b.Miner == miner { + return true + } + } + return false + } +} diff --git a/itests/kit/node_miner.go b/itests/kit/node_miner.go new file mode 100644 index 00000000000..ff406629ca6 --- /dev/null +++ b/itests/kit/node_miner.go @@ -0,0 +1,198 @@ +package kit + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/miner" + libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/multiformats/go-multiaddr" +) + +type MinerSubsystem int + +const ( + SMarkets MinerSubsystem = 1 << iota + SMining + SSealing + SSectorStorage + + MinerSubsystems = iota +) + +func (ms MinerSubsystem) Add(single MinerSubsystem) MinerSubsystem { + return ms | single +} + +func (ms MinerSubsystem) Has(single MinerSubsystem) bool { + return ms&single == single +} + +func (ms MinerSubsystem) All() [MinerSubsystems]bool { + var out [MinerSubsystems]bool + + for i := range out { + out[i] = ms&(1<<i) > 0 + } + + return out +} + +// TestMiner represents a miner enrolled in an Ensemble. 
+type TestMiner struct { + api.StorageMiner + + t *testing.T + + // ListenAddr is the address on which an API server is listening, if an + // API server is created for this Node + ListenAddr multiaddr.Multiaddr + + ActorAddr address.Address + OwnerKey *wallet.Key + MineOne func(context.Context, miner.MineReq) error + Stop func(context.Context) error + + FullNode *TestFullNode + PresealDir string + + Libp2p struct { + PeerID peer.ID + PrivKey libp2pcrypto.PrivKey + } + + RemoteListener net.Listener + + options nodeOpts +} + +func (tm *TestMiner) PledgeSectors(ctx context.Context, n, existing int, blockNotif <-chan struct{}) { + toCheck := tm.StartPledge(ctx, n, existing, blockNotif) + + for len(toCheck) > 0 { + tm.FlushSealingBatches(ctx) + + states := map[api.SectorState]int{} + for n := range toCheck { + st, err := tm.StorageMiner.SectorsStatus(ctx, n, false) + require.NoError(tm.t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + tm.t.Fatal("sector in a failed state", st.State) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) + } + +} + +func (tm *TestMiner) StartPledge(ctx context.Context, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} { + for i := 0; i < n; i++ { + if i%3 == 0 && blockNotif != nil { + <-blockNotif + tm.t.Log("WAIT") + } + tm.t.Logf("PLEDGING %d", i) + _, err := tm.StorageMiner.PledgeSector(ctx) + require.NoError(tm.t, err) + } + + for { + s, err := tm.StorageMiner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM + require.NoError(tm.t, err) + fmt.Printf("Sectors: %d\n", len(s)) + if len(s) >= n+existing { + break + } + + build.Clock.Sleep(100 * time.Millisecond) + } + + fmt.Printf("All sectors is fsm\n") + + s, err := tm.StorageMiner.SectorsList(ctx) + require.NoError(tm.t, err) + + toCheck := 
map[abi.SectorNumber]struct{}{} + for _, number := range s { + toCheck[number] = struct{}{} + } + + return toCheck +} + +func (tm *TestMiner) FlushSealingBatches(ctx context.Context) { + pcb, err := tm.StorageMiner.SectorPreCommitFlush(ctx) + require.NoError(tm.t, err) + if pcb != nil { + fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb) + } + + cb, err := tm.StorageMiner.SectorCommitFlush(ctx) + require.NoError(tm.t, err) + if cb != nil { + fmt.Printf("COMMIT BATCH: %+v\n", cb) + } +} + +const metaFile = "sectorstore.json" + +func (tm *TestMiner) AddStorage(ctx context.Context, t *testing.T, weight uint64, seal, store bool) { + p, err := ioutil.TempDir("", "lotus-testsectors-") + require.NoError(t, err) + + if err := os.MkdirAll(p, 0755); err != nil { + if !os.IsExist(err) { + require.NoError(t, err) + } + } + + _, err = os.Stat(filepath.Join(p, metaFile)) + if !os.IsNotExist(err) { + require.NoError(t, err) + } + + cfg := &stores.LocalStorageMeta{ + ID: stores.ID(uuid.New().String()), + Weight: weight, + CanSeal: seal, + CanStore: store, + } + + if !(cfg.CanStore || cfg.CanSeal) { + t.Fatal("must specify at least one of CanStore or cfg.CanSeal") + } + + b, err := json.MarshalIndent(cfg, "", " ") + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(p, metaFile), b, 0644) + require.NoError(t, err) + + err = tm.StorageAddLocal(ctx, p) + require.NoError(t, err) +} diff --git a/itests/kit/node_opts.go b/itests/kit/node_opts.go new file mode 100644 index 00000000000..87707aa16c8 --- /dev/null +++ b/itests/kit/node_opts.go @@ -0,0 +1,145 @@ +package kit + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/node" +) + +// DefaultPresealsPerBootstrapMiner is the number of preseals that every +// bootstrap miner has by default. 
It can be overridden through the +// PresealSectors option. +const DefaultPresealsPerBootstrapMiner = 2 + +const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1 + +// nodeOpts is an options accumulating struct, where functional options are +// merged into. +type nodeOpts struct { + balance abi.TokenAmount + lite bool + sectors int + rpc bool + ownerKey *wallet.Key + extraNodeOpts []node.Option + + subsystems MinerSubsystem + mainMiner *TestMiner + disableLibp2p bool + optBuilders []OptBuilder + proofType abi.RegisteredSealProof +} + +// DefaultNodeOpts are the default options that will be applied to test nodes. +var DefaultNodeOpts = nodeOpts{ + balance: big.Mul(big.NewInt(100000000), types.NewInt(build.FilecoinPrecision)), + sectors: DefaultPresealsPerBootstrapMiner, + proofType: abi.RegisteredSealProof_StackedDrg2KiBV1_1, // default _concrete_ proof type for non-genesis miners (notice the _1) for new actors versions. +} + +// OptBuilder is used to create an option after some other node is already +// active. Takes all active nodes as a parameter. +type OptBuilder func(activeNodes []*TestFullNode) node.Option + +// NodeOpt is a functional option for test nodes. 
+type NodeOpt func(opts *nodeOpts) error + +func WithAllSubsystems() NodeOpt { + return func(opts *nodeOpts) error { + opts.subsystems = opts.subsystems.Add(SMarkets) + opts.subsystems = opts.subsystems.Add(SMining) + opts.subsystems = opts.subsystems.Add(SSealing) + opts.subsystems = opts.subsystems.Add(SSectorStorage) + + return nil + } +} + +func WithSubsystems(systems ...MinerSubsystem) NodeOpt { + return func(opts *nodeOpts) error { + for _, s := range systems { + opts.subsystems = opts.subsystems.Add(s) + } + return nil + } +} + +func DisableLibp2p() NodeOpt { + return func(opts *nodeOpts) error { + opts.disableLibp2p = true + return nil + } +} + +func MainMiner(m *TestMiner) NodeOpt { + return func(opts *nodeOpts) error { + opts.mainMiner = m + return nil + } +} + +// OwnerBalance specifies the balance to be attributed to a miner's owner +// account. Only relevant when creating a miner. +func OwnerBalance(balance abi.TokenAmount) NodeOpt { + return func(opts *nodeOpts) error { + opts.balance = balance + return nil + } +} + +// LiteNode specifies that this node will be a lite node. Only relevant when +// creating a fullnode. +func LiteNode() NodeOpt { + return func(opts *nodeOpts) error { + opts.lite = true + return nil + } +} + +// PresealSectors specifies the amount of preseal sectors to give to a miner +// at genesis. Only relevant when creating a miner. +func PresealSectors(sectors int) NodeOpt { + return func(opts *nodeOpts) error { + opts.sectors = sectors + return nil + } +} + +// ThroughRPC makes interactions with this node throughout the test flow through +// the JSON-RPC API. +func ThroughRPC() NodeOpt { + return func(opts *nodeOpts) error { + opts.rpc = true + return nil + } +} + +// OwnerAddr sets the owner address of a miner. Only relevant when creating +// a miner. 
+func OwnerAddr(wk *wallet.Key) NodeOpt { + return func(opts *nodeOpts) error { + opts.ownerKey = wk + return nil + } +} + +// ConstructorOpts are Lotus node constructor options that are passed as-is to +// the node. +func ConstructorOpts(extra ...node.Option) NodeOpt { + return func(opts *nodeOpts) error { + opts.extraNodeOpts = extra + return nil + } +} + +// ProofType sets the proof type for this node. If you're using new actor +// versions, this should be a _1 proof type. +func ProofType(proofType abi.RegisteredSealProof) NodeOpt { + return func(opts *nodeOpts) error { + opts.proofType = proofType + return nil + } +} diff --git a/itests/kit/node_opts_nv.go b/itests/kit/node_opts_nv.go new file mode 100644 index 00000000000..d4c84b4f157 --- /dev/null +++ b/itests/kit/node_opts_nv.go @@ -0,0 +1,90 @@ +package kit + +import ( + "context" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node" + "github.com/ipfs/go-cid" +) + +// DefaultTestUpgradeSchedule +var DefaultTestUpgradeSchedule = stmgr.UpgradeSchedule{{ + Network: network.Version9, + Height: 1, + Migration: stmgr.UpgradeActorsV2, +}, { + Network: network.Version10, + Height: 2, + Migration: stmgr.UpgradeActorsV3, +}, { + Network: network.Version12, + Height: 3, + Migration: stmgr.UpgradeActorsV4, +}, { + Network: network.Version13, + Height: 4, + Migration: stmgr.UpgradeActorsV5, +}} + +func LatestActorsAt(upgradeHeight abi.ChainEpoch) node.Option { + // Attention: Update this when introducing new actor versions or your tests will be sad + return NetworkUpgradeAt(network.Version13, upgradeHeight) +} + +// InstantaneousNetworkVersion starts the network instantaneously at the +// specified version in height 1. 
+func InstantaneousNetworkVersion(version network.Version) node.Option { + // composes all migration functions + var mf stmgr.MigrationFunc = func(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, oldState cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (newState cid.Cid, err error) { + var state = oldState + for _, u := range DefaultTestUpgradeSchedule { + if u.Network > version { + break + } + state, err = u.Migration(ctx, sm, cache, cb, state, height, ts) + if err != nil { + return cid.Undef, err + } + } + return state, nil + } + return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{ + {Network: version, Height: 1, Migration: mf}, + }) +} + +func NetworkUpgradeAt(version network.Version, upgradeHeight abi.ChainEpoch) node.Option { + schedule := stmgr.UpgradeSchedule{} + for _, upgrade := range DefaultTestUpgradeSchedule { + if upgrade.Network > version { + break + } + + schedule = append(schedule, upgrade) + } + + if upgradeHeight > 0 { + schedule[len(schedule)-1].Height = upgradeHeight + } + + return node.Override(new(stmgr.UpgradeSchedule), schedule) +} + +func SDRUpgradeAt(calico, persian abi.ChainEpoch) node.Option { + return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ + Network: network.Version6, + Height: 1, + Migration: stmgr.UpgradeActorsV2, + }, { + Network: network.Version7, + Height: calico, + Migration: stmgr.UpgradeCalico, + }, { + Network: network.Version8, + Height: persian, + }}) +} diff --git a/itests/kit/rpc.go b/itests/kit/rpc.go new file mode 100644 index 00000000000..35153eb644b --- /dev/null +++ b/itests/kit/rpc.go @@ -0,0 +1,65 @@ +package kit + +import ( + "context" + "fmt" + "net" + "net/http" + "net/http/httptest" + "testing" + + "github.com/filecoin-project/lotus/api/client" + "github.com/filecoin-project/lotus/node" + "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "github.com/stretchr/testify/require" +) + 
+func CreateRPCServer(t *testing.T, handler http.Handler, listener net.Listener) (*httptest.Server, multiaddr.Multiaddr) { + testServ := &httptest.Server{ + Listener: listener, + Config: &http.Server{Handler: handler}, + } + testServ.Start() + + t.Cleanup(testServ.Close) + t.Cleanup(testServ.CloseClientConnections) + + addr := testServ.Listener.Addr() + maddr, err := manet.FromNetAddr(addr) + require.NoError(t, err) + return testServ, maddr +} + +func fullRpc(t *testing.T, f *TestFullNode) *TestFullNode { + handler, err := node.FullNodeHandler(f.FullNode, false) + require.NoError(t, err) + + l, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + srv, maddr := CreateRPCServer(t, handler, l) + + cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil) + require.NoError(t, err) + t.Cleanup(stop) + f.ListenAddr, f.FullNode = maddr, cl + + return f +} + +func minerRpc(t *testing.T, m *TestMiner) *TestMiner { + handler, err := node.MinerHandler(m.StorageMiner, false) + require.NoError(t, err) + + srv, maddr := CreateRPCServer(t, handler, m.RemoteListener) + + fmt.Println("creating RPC server for", m.ActorAddr, "at: ", srv.Listener.Addr().String()) + url := "ws://" + srv.Listener.Addr().String() + "/rpc/v0" + cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), url, nil) + require.NoError(t, err) + t.Cleanup(stop) + + m.ListenAddr, m.StorageMiner = maddr, cl + return m +} diff --git a/itests/kit/run.go b/itests/kit/run.go new file mode 100644 index 00000000000..713efa3b831 --- /dev/null +++ b/itests/kit/run.go @@ -0,0 +1,20 @@ +package kit + +import ( + "os" + "testing" +) + +// EnvRunExpensiveTests is the environment variable that needs to be present +// and set to value "1" to enable running expensive tests outside of CI. 
+const EnvRunExpensiveTests = "LOTUS_RUN_EXPENSIVE_TESTS" + +// Expensive marks a test as expensive, skipping it immediately if not running an +func Expensive(t *testing.T) { + switch { + case os.Getenv("CI") == "true": + return + case os.Getenv(EnvRunExpensiveTests) != "1": + t.Skipf("skipping expensive test outside of CI; enable by setting env var %s=1", EnvRunExpensiveTests) + } +} diff --git a/cli/test/multisig.go b/itests/multisig/suite.go similarity index 86% rename from cli/test/multisig.go rename to itests/multisig/suite.go index 5a60894e650..86a8ab7383d 100644 --- a/cli/test/multisig.go +++ b/itests/multisig/suite.go @@ -1,4 +1,4 @@ -package test +package multisig import ( "context" @@ -8,28 +8,27 @@ import ( "testing" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/api/test" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/itests/kit" "github.com/stretchr/testify/require" - lcli "github.com/urfave/cli/v2" ) -func RunMultisigTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) { - ctx := context.Background() - +func RunMultisigTests(t *testing.T, client *kit.TestFullNode) { // Create mock CLI - mockCLI := NewMockCLI(ctx, t, cmds) - clientCLI := mockCLI.Client(clientNode.ListenAddr) + ctx := context.Background() + mockCLI := kit.NewMockCLI(ctx, t, cli.Commands) + clientCLI := mockCLI.Client(client.ListenAddr) // Create some wallets on the node to use for testing multisig var walletAddrs []address.Address for i := 0; i < 4; i++ { - addr, err := clientNode.WalletNew(ctx, types.KTSecp256k1) + addr, err := client.WalletNew(ctx, types.KTSecp256k1) require.NoError(t, err) walletAddrs = append(walletAddrs, addr) - test.SendFunds(ctx, t, clientNode, addr, types.NewInt(1e15)) + kit.SendFunds(ctx, t, client, addr, types.NewInt(1e15)) } // Create an msig with three of the addresses and threshold of two sigs diff --git a/itests/multisig_test.go 
b/itests/multisig_test.go new file mode 100644 index 00000000000..9a15e8c0ef0 --- /dev/null +++ b/itests/multisig_test.go @@ -0,0 +1,20 @@ +package itests + +import ( + "testing" + "time" + + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/itests/multisig" +) + +// TestMultisig does a basic test to exercise the multisig CLI commands +func TestMultisig(t *testing.T) { + kit.QuietMiningLogs() + + blockTime := 5 * time.Millisecond + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC()) + ens.InterconnectAll().BeginMining(blockTime) + + multisig.RunMultisigTests(t, client) +} diff --git a/itests/nonce_test.go b/itests/nonce_test.go new file mode 100644 index 00000000000..b50fcbe2660 --- /dev/null +++ b/itests/nonce_test.go @@ -0,0 +1,57 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +func TestNonceIncremental(t *testing.T) { + ctx := context.Background() + + kit.QuietMiningLogs() + + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + // create a new address where to send funds. + addr, err := client.WalletNew(ctx, types.KTBLS) + require.NoError(t, err) + + // get the existing balance from the default wallet to then split it. + bal, err := client.WalletBalance(ctx, client.DefaultKey.Address) + require.NoError(t, err) + + const iterations = 100 + + // we'll send half our balance (saving the other half for gas), + // in `iterations` increments. 
+ toSend := big.Div(bal, big.NewInt(2)) + each := big.Div(toSend, big.NewInt(iterations)) + + var sms []*types.SignedMessage + for i := 0; i < iterations; i++ { + msg := &types.Message{ + From: client.DefaultKey.Address, + To: addr, + Value: each, + } + + sm, err := client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + require.EqualValues(t, i, sm.Message.Nonce) + + sms = append(sms, sm) + } + + for _, sm := range sms { + _, err := client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + } +} diff --git a/api/test/paych.go b/itests/paych_api_test.go similarity index 60% rename from api/test/paych.go rename to itests/paych_api_test.go index 2bcea436966..647db21e00f 100644 --- a/api/test/paych.go +++ b/itests/paych_api_test.go @@ -1,21 +1,21 @@ -package test +package itests import ( "context" - "fmt" - "sync/atomic" "testing" "time" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/itests/kit" "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" "github.com/filecoin-project/go-address" cbor "github.com/ipfs/go-ipld-cbor" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -26,64 +26,49 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) { - ctx := context.Background() - n, sn := b(t, TwoFull, OneMiner) - - paymentCreator := n[0] - paymentReceiver := n[1] - miner := sn[0] - - // get everyone connected - addrs, err := paymentCreator.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := paymentReceiver.NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } - - if err := 
miner.NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } +func TestPaymentChannelsAPI(t *testing.T) { + kit.QuietMiningLogs() - // start mining blocks - bm := NewBlockMiner(ctx, t, miner, blocktime) - bm.MineBlocks() + ctx := context.Background() + blockTime := 5 * time.Millisecond + + var ( + paymentCreator kit.TestFullNode + paymentReceiver kit.TestFullNode + miner kit.TestMiner + ) + + ens := kit.NewEnsemble(t, kit.MockProofs()). + FullNode(&paymentCreator). + FullNode(&paymentReceiver). + Miner(&miner, &paymentCreator, kit.WithAllSubsystems()). + Start(). + InterconnectAll() + bms := ens.BeginMining(blockTime) + bm := bms[0] // send some funds to register the receiver receiverAddr, err := paymentReceiver.WalletNew(ctx, types.KTSecp256k1) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18)) + kit.SendFunds(ctx, t, &paymentCreator, receiverAddr, abi.NewTokenAmount(1e18)) // setup the payment channel createrAddr, err := paymentCreator.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) channelAmt := int64(7000) channelInfo, err := paymentCreator.PaychGet(ctx, createrAddr, receiverAddr, abi.NewTokenAmount(channelAmt)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) channel, err := paymentCreator.PaychGetWaitReady(ctx, channelInfo.WaitSentinel) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // allocate three lanes var lanes []uint64 for i := 0; i < 3; i++ { lane, err := paymentCreator.PaychAllocateLane(ctx, channel) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) lanes = append(lanes, lane) } @@ -92,47 +77,30 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) { // supersedes the voucher with a value of 1000 for _, lane := range lanes { vouch1, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), lane) - if err != nil { - t.Fatal(err) - } 
- if vouch1.Voucher == nil { - t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch1.Shortfall)) - } + require.NoError(t, err) + require.NotNil(t, vouch1.Voucher, "Not enough funds to create voucher: missing %d", vouch1.Shortfall) + vouch2, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(2000), lane) - if err != nil { - t.Fatal(err) - } - if vouch2.Voucher == nil { - t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch2.Shortfall)) - } + require.NoError(t, err) + require.NotNil(t, vouch2.Voucher, "Not enough funds to create voucher: missing %d", vouch2.Shortfall) + delta1, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch1.Voucher, nil, abi.NewTokenAmount(1000)) - if err != nil { - t.Fatal(err) - } - if !delta1.Equals(abi.NewTokenAmount(1000)) { - t.Fatal("voucher didn't have the right amount") - } + require.NoError(t, err) + require.EqualValues(t, abi.NewTokenAmount(1000), delta1, "voucher didn't have the right amount") + delta2, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch2.Voucher, nil, abi.NewTokenAmount(1000)) - if err != nil { - t.Fatal(err) - } - if !delta2.Equals(abi.NewTokenAmount(1000)) { - t.Fatal("voucher didn't have the right amount") - } + require.NoError(t, err) + require.EqualValues(t, abi.NewTokenAmount(1000), delta2, "voucher didn't have the right amount") } // settle the payment channel settleMsgCid, err := paymentCreator.PaychSettle(ctx, channel) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) res := waitForMessage(ctx, t, paymentCreator, settleMsgCid, time.Second*10, "settle") - if res.Receipt.ExitCode != 0 { - t.Fatal("Unable to settle payment channel") - } + require.EqualValues(t, 0, res.Receipt.ExitCode, "Unable to settle payment channel") - creatorStore := adt.WrapStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(paymentCreator))) + creatorStore := adt.WrapStore(ctx, 
cbor.NewCborStore(blockstore.NewAPIBlockstore(paymentCreator))) // wait for the receiver to submit their vouchers ev := events.NewEvents(ctx, paymentCreator) @@ -167,87 +135,59 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) { }, int(build.MessageConfidence)+1, build.Finality, func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { return preds.OnPaymentChannelActorChanged(channel, preds.OnToSendAmountChanges())(ctx, oldTs.Key(), newTs.Key()) }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) select { case <-finished: - case <-time.After(time.Second): + case <-time.After(10 * time.Second): t.Fatal("Timed out waiting for receiver to submit vouchers") } // Create a new voucher now that some vouchers have already been submitted vouchRes, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), 3) - if err != nil { - t.Fatal(err) - } - if vouchRes.Voucher == nil { - t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouchRes.Shortfall)) - } + require.NoError(t, err) + require.NotNil(t, vouchRes.Voucher, "Not enough funds to create voucher: missing %d", vouchRes.Shortfall) + vdelta, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouchRes.Voucher, nil, abi.NewTokenAmount(1000)) - if err != nil { - t.Fatal(err) - } - if !vdelta.Equals(abi.NewTokenAmount(1000)) { - t.Fatal("voucher didn't have the right amount") - } + require.NoError(t, err) + require.EqualValues(t, abi.NewTokenAmount(1000), vdelta, "voucher didn't have the right amount") // Create a new voucher whose value would exceed the channel balance excessAmt := abi.NewTokenAmount(1000) vouchRes, err = paymentCreator.PaychVoucherCreate(ctx, channel, excessAmt, 4) - if err != nil { - t.Fatal(err) - } - if vouchRes.Voucher != nil { - t.Fatal("Expected not to be able to create voucher whose value would exceed channel balance") - } - if !vouchRes.Shortfall.Equals(excessAmt) { - t.Fatal(fmt.Errorf("Expected 
voucher shortfall of %d, got %d", excessAmt, vouchRes.Shortfall)) - } + require.NoError(t, err) + require.Nil(t, vouchRes.Voucher, "Expected not to be able to create voucher whose value would exceed channel balance") + require.EqualValues(t, excessAmt, vouchRes.Shortfall, "Expected voucher shortfall of %d, got %d", excessAmt, vouchRes.Shortfall) // Add a voucher whose value would exceed the channel balance vouch := &paych.SignedVoucher{ChannelAddr: channel, Amount: excessAmt, Lane: 4, Nonce: 1} vb, err := vouch.SigningBytes() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + sig, err := paymentCreator.WalletSign(ctx, createrAddr, vb) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + vouch.Signature = sig _, err = paymentReceiver.PaychVoucherAdd(ctx, channel, vouch, nil, abi.NewTokenAmount(1000)) - if err == nil { - t.Fatal(fmt.Errorf("Expected shortfall error of %d", excessAmt)) - } + require.Errorf(t, err, "Expected shortfall error of %d", excessAmt) // wait for the settlement period to pass before collecting waitForBlocks(ctx, t, bm, paymentReceiver, receiverAddr, policy.PaychSettleDelay) creatorPreCollectBalance, err := paymentCreator.WalletBalance(ctx, createrAddr) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // collect funds (from receiver, though either party can do it) collectMsg, err := paymentReceiver.PaychCollect(ctx, channel) - if err != nil { - t.Fatal(err) - } - res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3) - if err != nil { - t.Fatal(err) - } - if res.Receipt.ExitCode != 0 { - t.Fatal("unable to collect on payment channel") - } + require.NoError(t, err) + + res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode, "unable to collect on payment channel") // Finally, check the balance for the creator currentCreatorBalance, err := paymentCreator.WalletBalance(ctx, createrAddr) - if err != 
nil { - t.Fatal(err) - } + require.NoError(t, err) // The highest nonce voucher that the creator sent on each lane is 2000 totalVouchers := int64(len(lanes) * 2000) @@ -257,15 +197,10 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) { // channel amount - total voucher value expectedRefund := channelAmt - totalVouchers delta := big.Sub(currentCreatorBalance, creatorPreCollectBalance) - if !delta.Equals(abi.NewTokenAmount(expectedRefund)) { - t.Fatalf("did not send correct funds from creator: expected %d, got %d", expectedRefund, delta) - } - - // shut down mining - bm.Stop() + require.EqualValues(t, abi.NewTokenAmount(expectedRefund), delta, "did not send correct funds from creator: expected %d, got %d", expectedRefund, delta) } -func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentReceiver TestNode, receiverAddr address.Address, count int) { +func waitForBlocks(ctx context.Context, t *testing.T, bm *kit.BlockMiner, paymentReceiver kit.TestFullNode, receiverAddr address.Address, count int) { // We need to add null blocks in batches, if we add too many the chain can't sync batchSize := 60 for i := 0; i < count; i += batchSize { @@ -274,8 +209,8 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentRec size = count - i } - // Add a batch of null blocks - atomic.StoreInt64(&bm.nulls, int64(size-1)) + // Add a batch of null blocks to advance the chain quicker through finalities. 
+ bm.InjectNulls(abi.ChainEpoch(size - 1)) // Add a real block m, err := paymentReceiver.MpoolPushMessage(ctx, &types.Message{ @@ -283,30 +218,23 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentRec From: receiverAddr, Value: types.NewInt(0), }, nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - _, err = paymentReceiver.StateWaitMsg(ctx, m.Cid(), 1) - if err != nil { - t.Fatal(err) - } + _, err = paymentReceiver.StateWaitMsg(ctx, m.Cid(), 1, api.LookbackNoLimit, true) + require.NoError(t, err) } } -func waitForMessage(ctx context.Context, t *testing.T, paymentCreator TestNode, msgCid cid.Cid, duration time.Duration, desc string) *api.MsgLookup { +func waitForMessage(ctx context.Context, t *testing.T, paymentCreator kit.TestFullNode, msgCid cid.Cid, duration time.Duration, desc string) *api.MsgLookup { ctx, cancel := context.WithTimeout(ctx, duration) defer cancel() - fmt.Println("Waiting for", desc) - res, err := paymentCreator.StateWaitMsg(ctx, msgCid, 1) - if err != nil { - fmt.Println("Error waiting for", desc, err) - t.Fatal(err) - } - if res.Receipt.ExitCode != 0 { - t.Fatalf("did not successfully send %s", desc) - } - fmt.Println("Confirmed", desc) + t.Log("Waiting for", desc) + + res, err := paymentCreator.StateWaitMsg(ctx, msgCid, 1, api.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode, "did not successfully send %s", desc) + + t.Log("Confirmed", desc) return res } diff --git a/cli/paych_test.go b/itests/paych_cli_test.go similarity index 82% rename from cli/paych_test.go rename to itests/paych_cli_test.go index dac8411c5df..82955e6c1e8 100644 --- a/cli/paych_test.go +++ b/itests/paych_cli_test.go @@ -1,4 +1,4 @@ -package cli +package itests import ( "context" @@ -10,45 +10,39 @@ import ( "testing" "time" - clitest "github.com/filecoin-project/lotus/cli/test" + "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/itests/kit" 
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" - "github.com/filecoin-project/lotus/chain/actors/policy" cbor "github.com/ipfs/go-ipld-cbor" "github.com/stretchr/testify/require" - "github.com/filecoin-project/lotus/api/apibstore" - "github.com/filecoin-project/lotus/api/test" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/types" ) -func init() { - policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) - policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) - policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) -} - -// TestPaymentChannels does a basic test to exercise the payment channel CLI +// TestPaymentChannelsBasic does a basic test to exercise the payment channel CLI // commands -func TestPaymentChannels(t *testing.T) { +func TestPaymentChannelsBasic(t *testing.T) { _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() - nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime) - paymentCreator := nodes[0] - paymentReceiver := nodes[1] - creatorAddr := addrs[0] - receiverAddr := addrs[1] + + var ( + paymentCreator kit.TestFullNode + paymentReceiver kit.TestFullNode + ) + creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime) // Create mock CLI - mockCLI := clitest.NewMockCLI(ctx, t, Commands) + mockCLI := kit.NewMockCLI(ctx, t, cli.Commands) creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr) @@ -70,12 +64,16 @@ func TestPaymentChannels(t *testing.T) { // creator: paych settle creatorCLI.RunCmd("paych", "settle", 
chAddr.String()) + t.Log("wait for chain to reach settle height") + // Wait for the chain to reach the settle height chState := getPaychState(ctx, t, paymentReceiver, chAddr) sa, err := chState.SettlingAt() require.NoError(t, err) waitForHeight(ctx, t, paymentReceiver, sa) + t.Log("settle height reached") + // receiver: paych collect receiverCLI.RunCmd("paych", "collect", chAddr.String()) } @@ -89,17 +87,18 @@ type voucherSpec struct { // TestPaymentChannelStatus tests the payment channel status CLI command func TestPaymentChannelStatus(t *testing.T) { _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() - nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime) - paymentCreator := nodes[0] - creatorAddr := addrs[0] - receiverAddr := addrs[1] + var ( + paymentCreator kit.TestFullNode + paymentReceiver kit.TestFullNode + ) + creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime) // Create mock CLI - mockCLI := clitest.NewMockCLI(ctx, t, Commands) + mockCLI := kit.NewMockCLI(ctx, t, cli.Commands) creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) // creator: paych status-by-from-to @@ -168,18 +167,18 @@ func TestPaymentChannelStatus(t *testing.T) { // channel voucher commands func TestPaymentChannelVouchers(t *testing.T) { _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() - nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime) - paymentCreator := nodes[0] - paymentReceiver := nodes[1] - creatorAddr := addrs[0] - receiverAddr := addrs[1] + var ( + paymentCreator kit.TestFullNode + paymentReceiver kit.TestFullNode + ) + creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime) // Create mock CLI - mockCLI := clitest.NewMockCLI(ctx, t, 
Commands) + mockCLI := kit.NewMockCLI(ctx, t, cli.Commands) creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr) @@ -300,17 +299,18 @@ func TestPaymentChannelVouchers(t *testing.T) { // is greater than what's left in the channel, voucher create fails func TestPaymentChannelVoucherCreateShortfall(t *testing.T) { _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() - nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime) - paymentCreator := nodes[0] - creatorAddr := addrs[0] - receiverAddr := addrs[1] + var ( + paymentCreator kit.TestFullNode + paymentReceiver kit.TestFullNode + ) + creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime) // Create mock CLI - mockCLI := clitest.NewMockCLI(ctx, t, Commands) + mockCLI := kit.NewMockCLI(ctx, t, cli.Commands) creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) // creator: paych add-funds @@ -378,7 +378,7 @@ func checkVoucherOutput(t *testing.T, list string, vouchers []voucherSpec) { } // waitForHeight waits for the node to reach the given chain epoch -func waitForHeight(ctx context.Context, t *testing.T, node test.TestNode, height abi.ChainEpoch) { +func waitForHeight(ctx context.Context, t *testing.T, node kit.TestFullNode, height abi.ChainEpoch) { atHeight := make(chan struct{}) chainEvents := events.NewEvents(ctx, node) err := chainEvents.ChainAt(func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { @@ -396,13 +396,35 @@ func waitForHeight(ctx context.Context, t *testing.T, node test.TestNode, height } // getPaychState gets the state of the payment channel with the given address -func getPaychState(ctx context.Context, t *testing.T, node test.TestNode, chAddr address.Address) paych.State { +func getPaychState(ctx context.Context, t *testing.T, node kit.TestFullNode, chAddr 
address.Address) paych.State { act, err := node.StateGetActor(ctx, chAddr, types.EmptyTSK) require.NoError(t, err) - store := cbor.NewCborStore(apibstore.NewAPIBlockstore(node)) + store := cbor.NewCborStore(blockstore.NewAPIBlockstore(node)) chState, err := paych.Load(adt.WrapStore(ctx, store), act) require.NoError(t, err) return chState } + +func startPaychCreatorReceiverMiner(ctx context.Context, t *testing.T, paymentCreator *kit.TestFullNode, paymentReceiver *kit.TestFullNode, blocktime time.Duration) (address.Address, address.Address) { + var miner kit.TestMiner + opts := kit.ThroughRPC() + kit.NewEnsemble(t, kit.MockProofs()). + FullNode(paymentCreator, opts). + FullNode(paymentReceiver, opts). + Miner(&miner, paymentCreator, kit.WithAllSubsystems()). + Start(). + InterconnectAll(). + BeginMining(blocktime) + + // Send some funds to the second node + receiverAddr, err := paymentReceiver.WalletDefaultAddress(ctx) + require.NoError(t, err) + kit.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18)) + + // Get the first node's address + creatorAddr, err := paymentCreator.WalletDefaultAddress(ctx) + require.NoError(t, err) + return creatorAddr, receiverAddr +} diff --git a/itests/sdr_upgrade_test.go b/itests/sdr_upgrade_test.go new file mode 100644 index 00000000000..3aa685b0933 --- /dev/null +++ b/itests/sdr_upgrade_test.go @@ -0,0 +1,103 @@ +package itests + +import ( + "context" + "sort" + "sync/atomic" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/itests/kit" + bminer "github.com/filecoin-project/lotus/miner" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSDRUpgrade(t *testing.T) { + kit.QuietMiningLogs() + + // oldDelay := policy.GetPreCommitChallengeDelay() + // policy.SetPreCommitChallengeDelay(5) + // t.Cleanup(func() { + // 
policy.SetPreCommitChallengeDelay(oldDelay) + // }) + + blocktime := 50 * time.Millisecond + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opts := kit.ConstructorOpts(kit.SDRUpgradeAt(500, 1000)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll() + + build.Clock.Sleep(time.Second) + + pledge := make(chan struct{}) + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + round := 0 + for atomic.LoadInt64(&mine) != 0 { + build.Clock.Sleep(blocktime) + if err := miner.MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { + + }}); err != nil { + t.Error(err) + } + + // 3 sealing rounds: before, during after. + if round >= 3 { + continue + } + + head, err := client.ChainHead(ctx) + assert.NoError(t, err) + + // rounds happen every 100 blocks, with a 50 block offset. + if head.Height() >= abi.ChainEpoch(round*500+50) { + round++ + pledge <- struct{}{} + + ver, err := client.StateNetworkVersion(ctx, head.Key()) + assert.NoError(t, err) + switch round { + case 1: + assert.Equal(t, network.Version6, ver) + case 2: + assert.Equal(t, network.Version7, ver) + case 3: + assert.Equal(t, network.Version8, ver) + } + } + + } + }() + + // before. 
+ miner.PledgeSectors(ctx, 9, 0, pledge) + + s, err := miner.SectorsList(ctx) + require.NoError(t, err) + sort.Slice(s, func(i, j int) bool { + return s[i] < s[j] + }) + + for i, id := range s { + info, err := miner.SectorsStatus(ctx, id, true) + require.NoError(t, err) + expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1 + if i >= 3 { + // after + expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1 + } + assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id) + } + + atomic.StoreInt64(&mine, 0) + <-done +} diff --git a/itests/sector_finalize_early_test.go b/itests/sector_finalize_early_test.go new file mode 100644 index 00000000000..fa5cc9dd303 --- /dev/null +++ b/itests/sector_finalize_early_test.go @@ -0,0 +1,66 @@ +package itests + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" +) + +func TestDealsWithFinalizeEarly(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + kit.QuietMiningLogs() + + var blockTime = 50 * time.Millisecond + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.ConstructorOpts( + node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) { + return func() (sealiface.Config, error) { + cf := config.DefaultStorageMiner() + cf.Sealing.FinalizeEarly = true + return modules.ToSealingConfig(cf), nil + }, nil + })))) // no mock proofs. 
+ ens.InterconnectAll().BeginMining(blockTime) + dh := kit.NewDealHarness(t, client, miner, miner) + + ctx := context.Background() + + miner.AddStorage(ctx, t, 1000000000, true, false) + miner.AddStorage(ctx, t, 1000000000, false, true) + + sl, err := miner.StorageList(ctx) + require.NoError(t, err) + for si, d := range sl { + i, err := miner.StorageInfo(ctx, si) + require.NoError(t, err) + + fmt.Printf("stor d:%d %+v\n", len(d), i) + } + + t.Run("single", func(t *testing.T) { + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1}) + }) + + sl, err = miner.StorageList(ctx) + require.NoError(t, err) + for si, d := range sl { + i, err := miner.StorageInfo(ctx, si) + require.NoError(t, err) + + fmt.Printf("stor d:%d %+v\n", len(d), i) + } +} diff --git a/itests/sector_miner_collateral_test.go b/itests/sector_miner_collateral_test.go new file mode 100644 index 00000000000..8e7525dba1d --- /dev/null +++ b/itests/sector_miner_collateral_test.go @@ -0,0 +1,132 @@ +package itests + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" +) + +func TestMinerBalanceCollateral(t *testing.T) { + kit.QuietMiningLogs() + + blockTime := 5 * time.Millisecond + + runTest := func(t *testing.T, enabled bool, nSectors int, batching bool) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opts := kit.ConstructorOpts( + kit.LatestActorsAt(-1), + node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.GetSealingConfigFunc), func() 
(dtypes.GetSealingConfigFunc, error) { + return func() (sealiface.Config, error) { + return sealiface.Config{ + MaxWaitDealsSectors: 4, + MaxSealingSectors: 4, + MaxSealingSectorsForDeals: 4, + AlwaysKeepUnsealedCopy: true, + WaitDealsDelay: time.Hour, + + BatchPreCommits: batching, + AggregateCommits: batching, + + PreCommitBatchWait: time.Hour, + CommitBatchWait: time.Hour, + + MinCommitBatch: nSectors, + MaxPreCommitBatch: nSectors, + MaxCommitBatch: nSectors, + + CollateralFromMinerBalance: enabled, + AvailableBalanceBuffer: big.Zero(), + DisableCollateralFallback: false, + AggregateAboveBaseFee: big.Zero(), + }, nil + }, nil + })), + ) + full, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blockTime) + full.WaitTillChain(ctx, kit.HeightAtLeast(10)) + + toCheck := miner.StartPledge(ctx, nSectors, 0, nil) + + for len(toCheck) > 0 { + states := map[api.SectorState]int{} + for n := range toCheck { + st, err := miner.StorageMiner.SectorsStatus(ctx, n, false) + require.NoError(t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + t.Fatal("sector in a failed state", st.State) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + } + + // check that sector messages had zero value set + sl, err := miner.SectorsList(ctx) + require.NoError(t, err) + + for _, number := range sl { + si, err := miner.SectorsStatus(ctx, number, false) + require.NoError(t, err) + + require.NotNil(t, si.PreCommitMsg) + pc, err := full.ChainGetMessage(ctx, *si.PreCommitMsg) + require.NoError(t, err) + if enabled { + require.Equal(t, big.Zero(), pc.Value) + } else { + require.NotEqual(t, big.Zero(), pc.Value) + } + + require.NotNil(t, si.CommitMsg) + c, err := full.ChainGetMessage(ctx, *si.CommitMsg) + require.NoError(t, err) + if enabled { + require.Equal(t, big.Zero(), c.Value) + } + // commit value might be zero even with !enabled because 
in test devnets + // precommit deposit tends to be greater than collateral required at + // commit time. + } + } + + t.Run("nobatch", func(t *testing.T) { + runTest(t, true, 1, false) + }) + t.Run("batch-1", func(t *testing.T) { + runTest(t, true, 1, true) // individual commit instead of aggregate + }) + t.Run("batch-4", func(t *testing.T) { + runTest(t, true, 4, true) + }) + + t.Run("nobatch-frombalance-disabled", func(t *testing.T) { + runTest(t, false, 1, false) + }) + t.Run("batch-1-frombalance-disabled", func(t *testing.T) { + runTest(t, false, 1, true) // individual commit instead of aggregate + }) + t.Run("batch-4-frombalance-disabled", func(t *testing.T) { + runTest(t, false, 4, true) + }) +} diff --git a/itests/sector_pledge_test.go b/itests/sector_pledge_test.go new file mode 100644 index 00000000000..d911dcb68c4 --- /dev/null +++ b/itests/sector_pledge_test.go @@ -0,0 +1,145 @@ +package itests + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" +) + +func TestPledgeSectors(t *testing.T) { + kit.QuietMiningLogs() + + blockTime := 50 * time.Millisecond + + runTest := func(t *testing.T, nSectors int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(blockTime) + + miner.PledgeSectors(ctx, nSectors, 0, nil) + } + + t.Run("1", func(t *testing.T) { + runTest(t, 1) + }) + + t.Run("100", func(t *testing.T) { + runTest(t, 100) + }) + + t.Run("1000", func(t *testing.T) { + if testing.Short() { // takes ~16s + t.Skip("skipping test in short mode") + } + + runTest(t, 1000) + }) +} + +func TestPledgeBatching(t *testing.T) { + blockTime := 50 * time.Millisecond + + runTest := func(t 
*testing.T, nSectors int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opts := kit.ConstructorOpts(kit.LatestActorsAt(-1)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blockTime) + + client.WaitTillChain(ctx, kit.HeightAtLeast(10)) + + toCheck := miner.StartPledge(ctx, nSectors, 0, nil) + + for len(toCheck) > 0 { + states := map[api.SectorState]int{} + + for n := range toCheck { + st, err := miner.SectorsStatus(ctx, n, false) + require.NoError(t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + t.Fatal("sector in a failed state", st.State) + } + } + if states[api.SectorState(sealing.SubmitPreCommitBatch)] == nSectors || + (states[api.SectorState(sealing.SubmitPreCommitBatch)] > 0 && states[api.SectorState(sealing.PreCommit1)] == 0 && states[api.SectorState(sealing.PreCommit2)] == 0) { + pcb, err := miner.SectorPreCommitFlush(ctx) + require.NoError(t, err) + if pcb != nil { + fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb) + } + } + + if states[api.SectorState(sealing.SubmitCommitAggregate)] == nSectors || + (states[api.SectorState(sealing.SubmitCommitAggregate)] > 0 && states[api.SectorState(sealing.WaitSeed)] == 0 && states[api.SectorState(sealing.Committing)] == 0) { + cb, err := miner.SectorCommitFlush(ctx) + require.NoError(t, err) + if cb != nil { + fmt.Printf("COMMIT BATCH: %+v\n", cb) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) + } + } + + t.Run("100", func(t *testing.T) { + runTest(t, 100) + }) +} + +func TestPledgeBeforeNv13(t *testing.T) { + blocktime := 50 * time.Millisecond + + runTest := func(t *testing.T, nSectors int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opts := kit.ConstructorOpts(kit.LatestActorsAt(1000000000)) + client, miner, ens := 
kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blocktime) + + client.WaitTillChain(ctx, kit.HeightAtLeast(10)) + + toCheck := miner.StartPledge(ctx, nSectors, 0, nil) + + for len(toCheck) > 0 { + states := map[api.SectorState]int{} + + for n := range toCheck { + st, err := miner.SectorsStatus(ctx, n, false) + require.NoError(t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + t.Fatal("sector in a failed state", st.State) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) + } + } + + t.Run("100-before-nv13", func(t *testing.T) { + runTest(t, 100) + }) +} diff --git a/itests/sector_terminate_test.go b/itests/sector_terminate_test.go new file mode 100644 index 00000000000..2fb4ef0f50a --- /dev/null +++ b/itests/sector_terminate_test.go @@ -0,0 +1,150 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +func TestTerminate(t *testing.T) { + kit.Expensive(t) + + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + nSectors = 2 + ctx = context.Background() + ) + + opts := kit.ConstructorOpts(kit.LatestActorsAt(-1)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.PresealSectors(nSectors), opts) + ens.InterconnectAll().BeginMining(blocktime) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + ssz, err := miner.ActorSectorSize(ctx, maddr) + require.NoError(t, err) + + p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, p.MinerPower, p.TotalPower) + 
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors))) + + t.Log("Seal a sector") + + miner.PledgeSectors(ctx, 1, 0, nil) + + t.Log("wait for power") + + { + // Wait until proven. + di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 20 // 20 is some slack for the proof to be submitted + applied + t.Logf("End for head.Height > %d", waitUntil) + + ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + } + + nSectors++ + + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, p.MinerPower, p.TotalPower) + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors))) + + t.Log("Terminate a sector") + + toTerminate := abi.SectorNumber(3) + + err = miner.SectorTerminate(ctx, toTerminate) + require.NoError(t, err) + + msgTriggerred := false +loop: + for { + si, err := miner.SectorsStatus(ctx, toTerminate, false) + require.NoError(t, err) + + t.Log("state: ", si.State, msgTriggerred) + + switch sealing.SectorState(si.State) { + case sealing.Terminating: + if !msgTriggerred { + { + p, err := miner.SectorTerminatePending(ctx) + require.NoError(t, err) + require.Len(t, p, 1) + require.Equal(t, abi.SectorNumber(3), p[0].Number) + } + + c, err := miner.SectorTerminateFlush(ctx) + require.NoError(t, err) + if c != nil { + msgTriggerred = true + t.Log("terminate message:", c) + + { + p, err := miner.SectorTerminatePending(ctx) + require.NoError(t, err) + require.Len(t, p, 0) + } + } + } + case sealing.TerminateWait, sealing.TerminateFinality, sealing.Removed: + break loop + } + + time.Sleep(100 * time.Millisecond) + } + + // need to wait for message to be mined and applied. 
+ time.Sleep(5 * time.Second) + + // check power decreased + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, p.MinerPower, p.TotalPower) + require.Equal(t, types.NewInt(uint64(ssz)*uint64(nSectors-1)), p.MinerPower.RawBytePower) + + // check in terminated set + { + parts, err := client.StateMinerPartitions(ctx, maddr, 1, types.EmptyTSK) + require.NoError(t, err) + require.Greater(t, len(parts), 0) + + bflen := func(b bitfield.BitField) uint64 { + l, err := b.Count() + require.NoError(t, err) + return l + } + + require.Equal(t, uint64(1), bflen(parts[0].AllSectors)) + require.Equal(t, uint64(0), bflen(parts[0].LiveSectors)) + } + + di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 20 // slack like above + t.Logf("End for head.Height > %d", waitUntil) + ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + require.Equal(t, types.NewInt(uint64(ssz)*uint64(nSectors-1)), p.MinerPower.RawBytePower) +} diff --git a/itests/tape_test.go b/itests/tape_test.go new file mode 100644 index 00000000000..08970152fce --- /dev/null +++ b/itests/tape_test.go @@ -0,0 +1,72 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/stmgr" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/node" + "github.com/stretchr/testify/require" +) + +func TestTapeFix(t *testing.T) { + kit.QuietMiningLogs() + + var blocktime = 2 * time.Millisecond + + // 
The "before" case is disabled, because we need the builder to mock 32 GiB sectors to accurately repro this case + // TODO: Make the mock sector size configurable and reenable this + // t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) }) + t.Run("after", func(t *testing.T) { testTapeFix(t, blocktime, true) }) +} + +func testTapeFix(t *testing.T, blocktime time.Duration, after bool) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + upgradeSchedule := stmgr.UpgradeSchedule{{ + Network: build.ActorUpgradeNetworkVersion, + Height: 1, + Migration: stmgr.UpgradeActorsV2, + }} + if after { + upgradeSchedule = append(upgradeSchedule, stmgr.Upgrade{ + Network: network.Version5, + Height: 2, + }) + } + + nopts := kit.ConstructorOpts(node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule)) + _, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), nopts) + ens.InterconnectAll().BeginMining(blocktime) + + sid, err := miner.PledgeSector(ctx) + require.NoError(t, err) + + t.Log("All sectors is fsm") + + // If before, we expect the precommit to fail + successState := api.SectorState(sealing.CommitFailed) + failureState := api.SectorState(sealing.Proving) + if after { + // otherwise, it should succeed. 
+ successState, failureState = failureState, successState + } + + for { + st, err := miner.SectorsStatus(ctx, sid.Number, false) + require.NoError(t, err) + if st.State == successState { + break + } + require.NotEqual(t, failureState, st.State) + build.Clock.Sleep(100 * time.Millisecond) + t.Log("WaitSeal") + } +} diff --git a/itests/verifreg_test.go b/itests/verifreg_test.go new file mode 100644 index 00000000000..28a72263e57 --- /dev/null +++ b/itests/verifreg_test.go @@ -0,0 +1,144 @@ +package itests + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/itests/kit" + verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg" + "github.com/stretchr/testify/require" + + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/impl" +) + +func TestVerifiedClientTopUp(t *testing.T) { + blockTime := 100 * time.Millisecond + + test := func(nv network.Version, shouldWork bool) func(*testing.T) { + return func(t *testing.T) { + rootKey, err := wallet.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + verifierKey, err := wallet.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + verifiedClientKey, err := wallet.GenerateKey(types.KTBLS) + require.NoError(t, err) + + bal, err := types.ParseFIL("100fil") + require.NoError(t, err) + + node, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), + kit.RootVerifier(rootKey, abi.NewTokenAmount(bal.Int64())), + kit.Account(verifierKey, abi.NewTokenAmount(bal.Int64())), // assign some balance to the verifier so they can send an AddClient message. 
+ kit.ConstructorOpts(kit.InstantaneousNetworkVersion(nv))) + + ens.InterconnectAll().BeginMining(blockTime) + + api := node.FullNode.(*impl.FullNodeAPI) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // get VRH + vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{}) + fmt.Println(vrh.String()) + require.NoError(t, err) + + // import the root key. + rootAddr, err := api.WalletImport(ctx, &rootKey.KeyInfo) + require.NoError(t, err) + + // import the verifier's key. + verifierAddr, err := api.WalletImport(ctx, &verifierKey.KeyInfo) + require.NoError(t, err) + + // import the verified client's key. + verifiedClientAddr, err := api.WalletImport(ctx, &verifiedClientKey.KeyInfo) + require.NoError(t, err) + + params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifierAddr, Allowance: big.NewInt(100000000000)}) + require.NoError(t, err) + + msg := &types.Message{ + From: rootAddr, + To: verifreg.Address, + Method: verifreg.Methods.AddVerifier, + Params: params, + Value: big.Zero(), + } + + sm, err := api.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err, "AddVerifier failed") + + res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) + + // assign datacap to a client + datacap := big.NewInt(10000) + + params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: datacap}) + require.NoError(t, err) + + msg = &types.Message{ + From: verifierAddr, + To: verifreg.Address, + Method: verifreg.Methods.AddVerifiedClient, + Params: params, + Value: big.Zero(), + } + + sm, err = api.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) + + // check datacap balance + dcap, err := 
api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK) + require.NoError(t, err) + + if !dcap.Equals(datacap) { + t.Fatal("") + } + + // try to assign datacap to the same client should fail for actor v4 and below + params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: datacap}) + if err != nil { + t.Fatal(err) + } + + msg = &types.Message{ + From: verifierAddr, + To: verifreg.Address, + Method: verifreg.Methods.AddVerifiedClient, + Params: params, + Value: big.Zero(), + } + + _, err = api.MpoolPushMessage(ctx, msg, nil) + if shouldWork && err != nil { + t.Fatal("expected nil err", err) + } + + if !shouldWork && (err == nil || !strings.Contains(err.Error(), "verified client already exists")) { + t.Fatal("Add datacap to an existing verified client should fail") + } + } + } + + t.Run("nv12", test(network.Version12, false)) + t.Run("nv13", test(network.Version13, true)) +} diff --git a/itests/wdpost_dispute_test.go b/itests/wdpost_dispute_test.go new file mode 100644 index 00000000000..f7388203273 --- /dev/null +++ b/itests/wdpost_dispute_test.go @@ -0,0 +1,368 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof" + "github.com/stretchr/testify/require" +) + +func TestWindowPostDispute(t *testing.T) { + kit.Expensive(t) + + kit.QuietMiningLogs() + + blocktime := 2 * time.Millisecond + + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + var ( + client kit.TestFullNode + chainMiner kit.TestMiner + evilMiner kit.TestMiner + ) + + // First, we configure two miners. After sealing, we're going to turn off the first miner so + // it doesn't submit proofs. + // + // Then we're going to manually submit bad proofs. + opts := []kit.NodeOpt{kit.ConstructorOpts(kit.LatestActorsAt(-1))} + opts = append(opts, kit.WithAllSubsystems()) + ens := kit.NewEnsemble(t, kit.MockProofs()). + FullNode(&client, opts...). + Miner(&chainMiner, &client, opts...). + Miner(&evilMiner, &client, append(opts, kit.PresealSectors(0))...). + Start() + + defaultFrom, err := client.WalletDefaultAddress(ctx) + require.NoError(t, err) + + // Mine with the _second_ node (the good one). + ens.InterconnectAll().BeginMining(blocktime, &chainMiner) + + // Give the chain miner enough sectors to win every block. + chainMiner.PledgeSectors(ctx, 10, 0, nil) + // And the evil one 1 sector. No cookie for you. + evilMiner.PledgeSectors(ctx, 1, 0, nil) + + // Let the evil miner's sectors gain power. + evilMinerAddr, err := evilMiner.ActorAddress(ctx) + require.NoError(t, err) + + di, err := client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + + t.Logf("Running one proving period\n") + + waitUntil := di.PeriodStart + di.WPoStProvingPeriod*2 + 1 + t.Logf("End for head.Height > %d", waitUntil) + + ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + + p, err := client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + + ssz, err := evilMiner.ActorSectorSize(ctx, evilMinerAddr) + require.NoError(t, err) + + // make sure it has gained power. + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz))) + + evilSectors, err := evilMiner.SectorsList(ctx) + require.NoError(t, err) + evilSectorNo := evilSectors[0] // only one. 
+ evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK) + require.NoError(t, err) + + t.Log("evil miner stopping") + + // Now stop the evil miner, and start manually submitting bad proofs. + require.NoError(t, evilMiner.Stop(ctx)) + + t.Log("evil miner stopped") + + // Wait until we need to prove our sector. + for { + di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.PeriodStart > 1 { + break + } + build.Clock.Sleep(blocktime) + } + + err = submitBadProof(ctx, client, evilMiner.OwnerKey.Address, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition) + require.NoError(t, err, "evil proof not accepted") + + // Wait until after the proving period. + for { + di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + if di.Index != evilSectorLoc.Deadline { + break + } + build.Clock.Sleep(blocktime) + } + + t.Log("accepted evil proof") + + // Make sure the evil node didn't lose any power. + p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz))) + + // OBJECTION! The good miner files a DISPUTE!!!! 
+ { + params := &minerActor.DisputeWindowedPoStParams{ + Deadline: evilSectorLoc.Deadline, + PoStIndex: 0, + } + + enc, aerr := actors.SerializeParams(params) + require.NoError(t, aerr) + + msg := &types.Message{ + To: evilMinerAddr, + Method: minerActor.Methods.DisputeWindowedPoSt, + Params: enc, + Value: types.NewInt(0), + From: defaultFrom, + } + sm, err := client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + t.Log("waiting dispute") + rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error()) + } + + // Objection SUSTAINED! + // Make sure the evil node lost power. + p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + require.True(t, p.MinerPower.RawBytePower.IsZero()) + + // Now we begin the redemption arc. + require.True(t, p.MinerPower.RawBytePower.IsZero()) + + // First, recover the sector. + + { + minerInfo, err := client.StateMinerInfo(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + + params := &minerActor.DeclareFaultsRecoveredParams{ + Recoveries: []minerActor.RecoveryDeclaration{{ + Deadline: evilSectorLoc.Deadline, + Partition: evilSectorLoc.Partition, + Sectors: bitfield.NewFromSet([]uint64{uint64(evilSectorNo)}), + }}, + } + + enc, aerr := actors.SerializeParams(params) + require.NoError(t, aerr) + + msg := &types.Message{ + To: evilMinerAddr, + Method: minerActor.Methods.DeclareFaultsRecovered, + Params: enc, + Value: types.FromFil(30), // repay debt. 
+ From: minerInfo.Owner, + } + sm, err := client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error()) + } + + // Then wait for the deadline. + for { + di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + if di.Index == evilSectorLoc.Deadline { + break + } + build.Clock.Sleep(blocktime) + } + + // Now try to be evil again + err = submitBadProof(ctx, client, evilMiner.OwnerKey.Address, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition) + require.Error(t, err) + require.Contains(t, err.Error(), "message execution failed: exit 16, reason: window post failed: invalid PoSt") + + // It didn't work because we're recovering. +} + +func TestWindowPostDisputeFails(t *testing.T) { + kit.Expensive(t) + + kit.QuietMiningLogs() + + blocktime := 2 * time.Millisecond + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opts := kit.ConstructorOpts(kit.LatestActorsAt(-1)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blocktime) + + defaultFrom, err := client.WalletDefaultAddress(ctx) + require.NoError(t, err) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + build.Clock.Sleep(time.Second) + + miner.PledgeSectors(ctx, 10, 0, nil) + + di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + t.Log("Running one proving period") + waitUntil := di.PeriodStart + di.WPoStProvingPeriod*2 + 1 + t.Logf("End for head.Height > %d", waitUntil) + + ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + + ssz, err := miner.ActorSectorSize(ctx, maddr) + require.NoError(t, err) + expectedPower := 
types.NewInt(uint64(ssz) * (kit.DefaultPresealsPerBootstrapMiner + 10)) + + p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + // make sure it has gained power. + require.Equal(t, p.MinerPower.RawBytePower, expectedPower) + + // Wait until a proof has been submitted. + var targetDeadline uint64 +waitForProof: + for { + deadlines, err := client.StateMinerDeadlines(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + for dlIdx, dl := range deadlines { + nonEmpty, err := dl.PostSubmissions.IsEmpty() + require.NoError(t, err) + if nonEmpty { + targetDeadline = uint64(dlIdx) + break waitForProof + } + } + + build.Clock.Sleep(blocktime) + } + + for { + di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + // wait until the deadline finishes. + if di.Index == ((targetDeadline + 1) % di.WPoStPeriodDeadlines) { + break + } + + build.Clock.Sleep(blocktime) + } + + // Try to object to the proof. This should fail. 
+ { + params := &minerActor.DisputeWindowedPoStParams{ + Deadline: targetDeadline, + PoStIndex: 0, + } + + enc, aerr := actors.SerializeParams(params) + require.NoError(t, aerr) + + msg := &types.Message{ + To: maddr, + Method: minerActor.Methods.DisputeWindowedPoSt, + Params: enc, + Value: types.NewInt(0), + From: defaultFrom, + } + _, err := client.MpoolPushMessage(ctx, msg, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to dispute valid post (RetCode=16)") + } +} + +func submitBadProof( + ctx context.Context, + client api.FullNode, owner address.Address, maddr address.Address, + di *dline.Info, dlIdx, partIdx uint64, +) error { + head, err := client.ChainHead(ctx) + if err != nil { + return err + } + + minerInfo, err := client.StateMinerInfo(ctx, maddr, head.Key()) + if err != nil { + return err + } + + commEpoch := di.Open + commRand, err := client.ChainGetRandomnessFromTickets( + ctx, head.Key(), crypto.DomainSeparationTag_PoStChainCommit, + commEpoch, nil, + ) + if err != nil { + return err + } + params := &minerActor.SubmitWindowedPoStParams{ + ChainCommitEpoch: commEpoch, + ChainCommitRand: commRand, + Deadline: dlIdx, + Partitions: []minerActor.PoStPartition{{Index: partIdx}}, + Proofs: []proof3.PoStProof{{ + PoStProof: minerInfo.WindowPoStProofType, + ProofBytes: []byte("I'm soooo very evil."), + }}, + } + + enc, aerr := actors.SerializeParams(params) + if aerr != nil { + return aerr + } + + msg := &types.Message{ + To: maddr, + Method: minerActor.Methods.SubmitWindowedPoSt, + Params: enc, + Value: types.NewInt(0), + From: owner, + } + sm, err := client.MpoolPushMessage(ctx, msg, nil) + if err != nil { + return err + } + + rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) + if err != nil { + return err + } + if rec.Receipt.ExitCode.IsError() { + return rec.Receipt.ExitCode + } + return nil +} diff --git a/itests/wdpost_test.go b/itests/wdpost_test.go new file mode 100644 index 
00000000000..6764350ccb0 --- /dev/null +++ b/itests/wdpost_test.go @@ -0,0 +1,314 @@ +package itests + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/extern/sector-storage/mock" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/node/impl" +) + +func TestWindowedPost(t *testing.T) { + kit.Expensive(t) + + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + nSectors = 10 + ) + + for _, height := range []abi.ChainEpoch{ + -1, // before + 162, // while sealing + 5000, // while proving + } { + height := height // copy to satisfy lints + t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) { + testWindowPostUpgrade(t, blocktime, nSectors, height) + }) + } +} + +func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, upgradeHeight abi.ChainEpoch) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opts := kit.ConstructorOpts(kit.LatestActorsAt(upgradeHeight)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blocktime) + + miner.PledgeSectors(ctx, nSectors, 0, nil) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + mid, err := address.IDFromAddress(maddr) + require.NoError(t, err) + + t.Log("Running one proving period") + waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2 + t.Logf("End for head.Height > %d", waitUntil) + + ts := client.WaitTillChain(ctx, 
kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + + p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + ssz, err := miner.ActorSectorSize(ctx, maddr) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+kit.DefaultPresealsPerBootstrapMiner))) + + t.Log("Drop some sectors") + + // Drop 2 sectors from deadline 2 partition 0 (full partition / deadline) + { + parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK) + require.NoError(t, err) + require.Greater(t, len(parts), 0) + + secs := parts[0].AllSectors + n, err := secs.Count() + require.NoError(t, err) + require.Equal(t, uint64(2), n) + + // Drop the partition + err = secs.ForEach(func(sid uint64) error { + return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(storage.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: abi.SectorNumber(sid), + }, + }, true) + }) + require.NoError(t, err) + } + + var s storage.SectorRef + + // Drop 1 sectors from deadline 3 partition 0 + { + parts, err := client.StateMinerPartitions(ctx, maddr, 3, types.EmptyTSK) + require.NoError(t, err) + require.Greater(t, len(parts), 0) + + secs := parts[0].AllSectors + n, err := secs.Count() + require.NoError(t, err) + require.Equal(t, uint64(2), n) + + // Drop the sector + sn, err := secs.First() + require.NoError(t, err) + + all, err := secs.All(2) + require.NoError(t, err) + t.Log("the sectors", all) + + s = storage.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: abi.SectorNumber(sn), + }, + } + + err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true) + require.NoError(t, err) + } + + di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + t.Log("Go through another PP, wait for sectors to 
become faulty") + waitUntil = di.PeriodStart + di.WPoStProvingPeriod + 2 + t.Logf("End for head.Height > %d", waitUntil) + + ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + + sectors := p.MinerPower.RawBytePower.Uint64() / uint64(ssz) + require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-3, int(sectors)) // -3 just removed sectors + + t.Log("Recover one sector") + + err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false) + require.NoError(t, err) + + di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + waitUntil = di.PeriodStart + di.WPoStProvingPeriod + 2 + t.Logf("End for head.Height > %d", waitUntil) + + ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + + sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz) + require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-2, int(sectors)) // -2 not recovered sectors + + // pledge a sector after recovery + + miner.PledgeSectors(ctx, 1, nSectors, nil) + + { + // Wait until proven. 
+ di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2 + t.Logf("End for head.Height > %d\n", waitUntil) + + ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + } + + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + + sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz) + require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged +} + +func TestWindowPostBaseFeeNoBurn(t *testing.T) { + kit.Expensive(t) + + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + nSectors = 10 + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sched := kit.DefaultTestUpgradeSchedule + lastUpgradeHeight := sched[len(sched)-1].Height + + och := build.UpgradeClausHeight + build.UpgradeClausHeight = lastUpgradeHeight + 1 + + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(blocktime) + + // Wait till all upgrades are done and we've passed the clause epoch. 
+ client.WaitTillChain(ctx, kit.HeightAtLeast(build.UpgradeClausHeight+1)) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + miner.PledgeSectors(ctx, nSectors, 0, nil) + wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) + require.NoError(t, err) + en := wact.Nonce + + // wait for a new message to be sent from worker address, it will be a PoSt + +waitForProof: + for { + wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) + require.NoError(t, err) + if wact.Nonce > en { + break waitForProof + } + + build.Clock.Sleep(blocktime) + } + + slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0) + require.NoError(t, err) + + pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0]) + require.NoError(t, err) + + require.Equal(t, pmr.GasCost.BaseFeeBurn, big.Zero()) + + build.UpgradeClausHeight = och +} + +func TestWindowPostBaseFeeBurn(t *testing.T) { + kit.Expensive(t) + + kit.QuietMiningLogs() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blocktime := 2 * time.Millisecond + + opts := kit.ConstructorOpts(kit.LatestActorsAt(-1)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blocktime) + + // Ideally we'd be a bit more precise here, but getting the information we need from the + // test framework is more work than it's worth. + // + // We just need to wait till all upgrades are done. 
+ client.WaitTillChain(ctx, kit.HeightAtLeast(20)) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + miner.PledgeSectors(ctx, 10, 0, nil) + wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) + require.NoError(t, err) + en := wact.Nonce + + // wait for a new message to be sent from worker address, it will be a PoSt + +waitForProof: + for { + wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) + require.NoError(t, err) + if wact.Nonce > en { + break waitForProof + } + + build.Clock.Sleep(blocktime) + } + + slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0) + require.NoError(t, err) + + pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0]) + require.NoError(t, err) + + require.NotEqual(t, pmr.GasCost.BaseFeeBurn, big.Zero()) +} diff --git a/journal/types.go b/journal/types.go index 5b51ed4c8c3..3e240a9f1de 100644 --- a/journal/types.go +++ b/journal/types.go @@ -5,7 +5,7 @@ import ( "strings" "time" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("journal") diff --git a/lib/backupds/backupds_test.go b/lib/backupds/backupds_test.go new file mode 100644 index 00000000000..f7bc36e2292 --- /dev/null +++ b/lib/backupds/backupds_test.go @@ -0,0 +1,85 @@ +package backupds + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/ipfs/go-datastore" + "github.com/stretchr/testify/require" +) + +const valSize = 512 << 10 + +func putVals(t *testing.T, ds datastore.Datastore, start, end int) { + for i := start; i < end; i++ { + err := ds.Put(datastore.NewKey(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%d-%s", i, strings.Repeat("~", valSize)))) + require.NoError(t, err) + } +} + +func checkVals(t *testing.T, ds datastore.Datastore, start, end int, exist bool) { + for i := start; i < end; i++ { + 
v, err := ds.Get(datastore.NewKey(fmt.Sprintf("%d", i))) + if exist { + require.NoError(t, err) + expect := []byte(fmt.Sprintf("%d-%s", i, strings.Repeat("~", valSize))) + require.EqualValues(t, expect, v) + } else { + require.ErrorIs(t, err, datastore.ErrNotFound) + } + } +} + +func TestNoLogRestore(t *testing.T) { + ds1 := datastore.NewMapDatastore() + + putVals(t, ds1, 0, 10) + + bds, err := Wrap(ds1, NoLogdir) + require.NoError(t, err) + + var bup bytes.Buffer + require.NoError(t, bds.Backup(&bup)) + + putVals(t, ds1, 10, 20) + + ds2 := datastore.NewMapDatastore() + require.NoError(t, RestoreInto(&bup, ds2)) + + checkVals(t, ds2, 0, 10, true) + checkVals(t, ds2, 10, 20, false) +} + +func TestLogRestore(t *testing.T) { + logdir, err := ioutil.TempDir("", "backupds-test-") + require.NoError(t, err) + defer os.RemoveAll(logdir) // nolint + + ds1 := datastore.NewMapDatastore() + + putVals(t, ds1, 0, 10) + + bds, err := Wrap(ds1, logdir) + require.NoError(t, err) + + putVals(t, bds, 10, 20) + + require.NoError(t, bds.Close()) + + fls, err := ioutil.ReadDir(logdir) + require.NoError(t, err) + require.Equal(t, 1, len(fls)) + + bf, err := ioutil.ReadFile(filepath.Join(logdir, fls[0].Name())) + require.NoError(t, err) + + ds2 := datastore.NewMapDatastore() + require.NoError(t, RestoreInto(bytes.NewReader(bf), ds2)) + + checkVals(t, ds2, 0, 20, true) +} diff --git a/lib/backupds/cbor.go b/lib/backupds/cbor.go new file mode 100644 index 00000000000..6951d8f9206 --- /dev/null +++ b/lib/backupds/cbor.go @@ -0,0 +1,132 @@ +package backupds + +import ( + "fmt" + "io" + + cbg "github.com/whyrusleeping/cbor-gen" +) + +var lengthBufEntry = []byte{131} + +func (t *Entry) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufEntry); err != nil { + return err + } + + scratch := make([]byte, 9) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Key))); err != nil { + 
return err + } + + if _, err := w.Write(t.Key[:]); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Value))); err != nil { + return err + } + + if _, err := w.Write(t.Value[:]); err != nil { + return err + } + + // t.Timestamp (int64) (int64) + if t.Timestamp >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Timestamp)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Timestamp-1)); err != nil { + return err + } + } + return nil +} + +func (t *Entry) UnmarshalCBOR(r io.Reader) error { + *t = Entry{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Key ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Key = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Key[:]); err != nil { + return err + } + // t.Value ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Value = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Value[:]); err != nil { + return err + } + // t.Timestamp (int64) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = 
int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Timestamp = extraI + } + return nil +} diff --git a/lib/backupds/datastore.go b/lib/backupds/datastore.go index 1555577f346..350988a501f 100644 --- a/lib/backupds/datastore.go +++ b/lib/backupds/datastore.go @@ -4,27 +4,50 @@ import ( "crypto/sha256" "io" "sync" + "time" - logging "github.com/ipfs/go-log/v2" - cbg "github.com/whyrusleeping/cbor-gen" + "go.uber.org/multierr" "golang.org/x/xerrors" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" + logging "github.com/ipfs/go-log/v2" + cbg "github.com/whyrusleeping/cbor-gen" ) var log = logging.Logger("backupds") +const NoLogdir = "" + type Datastore struct { child datastore.Batching backupLk sync.RWMutex + + log chan Entry + closing, closed chan struct{} } -func Wrap(child datastore.Batching) *Datastore { - return &Datastore{ +type Entry struct { + Key, Value []byte + Timestamp int64 +} + +func Wrap(child datastore.Batching, logdir string) (*Datastore, error) { + ds := &Datastore{ child: child, } + + if logdir != NoLogdir { + ds.closing, ds.closed = make(chan struct{}), make(chan struct{}) + ds.log = make(chan Entry) + + if err := ds.startLog(logdir); err != nil { + return nil, err + } + } + + return ds, nil } // Writes a datastore dump into the provided writer as @@ -129,6 +152,14 @@ func (d *Datastore) Put(key datastore.Key, value []byte) error { d.backupLk.RLock() defer d.backupLk.RUnlock() + if d.log != nil { + d.log <- Entry{ + Key: []byte(key.String()), + Value: value, + Timestamp: time.Now().Unix(), + } + } + return d.child.Put(key, value) } @@ -146,11 +177,23 @@ func (d *Datastore) Sync(prefix datastore.Key) error { return d.child.Sync(prefix) } -func (d *Datastore) Close() error { +func (d *Datastore) CloseLog() error { d.backupLk.RLock() defer d.backupLk.RUnlock() - return d.child.Close() + if d.closing != 
nil { + close(d.closing) + <-d.closed + } + + return nil +} + +func (d *Datastore) Close() error { + return multierr.Combine( + d.child.Close(), + d.CloseLog(), + ) } func (d *Datastore) Batch() (datastore.Batch, error) { @@ -160,17 +203,27 @@ func (d *Datastore) Batch() (datastore.Batch, error) { } return &bbatch{ + d: d, b: b, rlk: d.backupLk.RLocker(), }, nil } type bbatch struct { + d *Datastore b datastore.Batch rlk sync.Locker } func (b *bbatch) Put(key datastore.Key, value []byte) error { + if b.d.log != nil { + b.d.log <- Entry{ + Key: []byte(key.String()), + Value: value, + Timestamp: time.Now().Unix(), + } + } + return b.b.Put(key, value) } diff --git a/lib/backupds/log.go b/lib/backupds/log.go new file mode 100644 index 00000000000..b76dfbfe653 --- /dev/null +++ b/lib/backupds/log.go @@ -0,0 +1,254 @@ +package backupds + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/google/uuid" + "golang.org/x/xerrors" + + "github.com/ipfs/go-datastore" +) + +var loghead = datastore.NewKey("/backupds/log/head") // string([logfile base name];[uuid];[unix ts]) + +func (d *Datastore) startLog(logdir string) error { + if err := os.MkdirAll(logdir, 0755); err != nil && !os.IsExist(err) { + return xerrors.Errorf("mkdir logdir ('%s'): %w", logdir, err) + } + + files, err := ioutil.ReadDir(logdir) + if err != nil { + return xerrors.Errorf("read logdir ('%s'): %w", logdir, err) + } + + var latest string + var latestTs int64 + + for _, file := range files { + fn := file.Name() + if !strings.HasSuffix(fn, ".log.cbor") { + log.Warn("logfile with wrong file extension", fn) + continue + } + sec, err := strconv.ParseInt(fn[:len(".log.cbor")], 10, 64) + if err != nil { + return xerrors.Errorf("parsing logfile as a number: %w", err) + } + + if sec > latestTs { + latestTs = sec + latest = file.Name() + } + } + + var l *logfile + if latest == "" { + l, latest, err = d.createLog(logdir) + if err != nil { + return 
xerrors.Errorf("creating log: %w", err) + } + } else { + l, latest, err = d.openLog(filepath.Join(logdir, latest)) + if err != nil { + return xerrors.Errorf("opening log: %w", err) + } + } + + if err := l.writeLogHead(latest, d.child); err != nil { + return xerrors.Errorf("writing new log head: %w", err) + } + + go d.runLog(l) + + return nil +} + +func (d *Datastore) runLog(l *logfile) { + defer close(d.closed) + for { + select { + case ent := <-d.log: + if err := l.writeEntry(&ent); err != nil { + log.Errorw("failed to write log entry", "error", err) + // todo try to do something, maybe start a new log file (but not when we're out of disk space) + } + + // todo: batch writes when multiple are pending; flush on a timer + if err := l.file.Sync(); err != nil { + log.Errorw("failed to sync log", "error", err) + } + case <-d.closing: + if err := l.Close(); err != nil { + log.Errorw("failed to close log", "error", err) + } + return + } + } +} + +type logfile struct { + file *os.File +} + +var compactThresh = 2 + +func (d *Datastore) createLog(logdir string) (*logfile, string, error) { + p := filepath.Join(logdir, strconv.FormatInt(time.Now().Unix(), 10)+".log.cbor") + log.Infow("creating log", "file", p) + + f, err := os.OpenFile(p, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0644) + if err != nil { + return nil, "", err + } + + if err := d.Backup(f); err != nil { + return nil, "", xerrors.Errorf("writing log base: %w", err) + } + if err := f.Sync(); err != nil { + return nil, "", xerrors.Errorf("sync log base: %w", err) + } + log.Infow("log opened", "file", p) + + return &logfile{ + file: f, + }, filepath.Base(p), nil +} + +func (d *Datastore) openLog(p string) (*logfile, string, error) { + log.Infow("opening log", "file", p) + lh, err := d.child.Get(loghead) + if err != nil { + return nil, "", xerrors.Errorf("checking log head (logfile '%s'): %w", p, err) + } + + lhp := strings.Split(string(lh), ";") + if len(lhp) != 3 { + return nil, "", xerrors.Errorf("expected loghead to have 
3 parts") + } + + if lhp[0] != filepath.Base(p) { + return nil, "", xerrors.Errorf("loghead log file doesn't match, opening %s, expected %s", p, lhp[0]) + } + + f, err := os.OpenFile(p, os.O_RDWR, 0644) + if err != nil { + return nil, "", err + } + + var lastLogHead string + var openCount, vals, logvals int64 + // check file integrity + clean, err := ReadBackup(f, func(k datastore.Key, v []byte, log bool) error { + if log { + logvals++ + } else { + vals++ + } + if k == loghead { + lastLogHead = string(v) + openCount++ + } + return nil + }) + if err != nil { + return nil, "", xerrors.Errorf("reading backup part of the logfile: %w", err) + } + if string(lh) != lastLogHead && clean { // if not clean, user has opted in to ignore truncated logs, this will almost certainly happen + return nil, "", xerrors.Errorf("loghead didn't match, expected '%s', last in logfile '%s'", string(lh), lastLogHead) + } + + // make sure we're at the end of the file + at, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return nil, "", xerrors.Errorf("get current logfile offset: %w", err) + } + end, err := f.Seek(0, io.SeekEnd) + if err != nil { + return nil, "", xerrors.Errorf("get current logfile offset: %w", err) + } + if at != end { + return nil, "", xerrors.Errorf("logfile %s validated %d bytes, but the file has %d bytes (%d more)", p, at, end, end-at) + } + + compact := logvals > vals*int64(compactThresh) + if compact || !clean { + log.Infow("compacting log", "current", p, "openCount", openCount, "baseValues", vals, "logValues", logvals, "truncated", !clean) + if err := f.Close(); err != nil { + return nil, "", xerrors.Errorf("closing current log: %w", err) + } + + l, latest, err := d.createLog(filepath.Dir(p)) + if err != nil { + return nil, "", xerrors.Errorf("creating compacted log: %w", err) + } + + if clean { + log.Infow("compacted log created, cleaning up old", "old", p, "new", latest) + if err := os.Remove(p); err != nil { + l.Close() // nolint + return nil, "", 
xerrors.Errorf("cleaning up old logfile: %w", err) + } + } else { + log.Errorw("LOG FILE WAS TRUNCATED, KEEPING THE FILE", "old", p, "new", latest) + } + + return l, latest, nil + } + + log.Infow("log opened", "file", p, "openCount", openCount, "baseValues", vals, "logValues", logvals) + + // todo: maybe write a magic 'opened at' entry; pad the log to filesystem page to prevent more exotic types of corruption + + return &logfile{ + file: f, + }, filepath.Base(p), nil +} + +func (l *logfile) writeLogHead(logname string, ds datastore.Batching) error { + lval := []byte(fmt.Sprintf("%s;%s;%d", logname, uuid.New(), time.Now().Unix())) + + err := l.writeEntry(&Entry{ + Key: loghead.Bytes(), + Value: lval, + Timestamp: time.Now().Unix(), + }) + if err != nil { + return xerrors.Errorf("writing loghead to the log: %w", err) + } + + if err := ds.Put(loghead, lval); err != nil { + return xerrors.Errorf("writing loghead to the datastore: %w", err) + } + + log.Infow("new log head", "loghead", string(lval)) + + return nil +} + +func (l *logfile) writeEntry(e *Entry) error { + // todo: maybe marshal to some temp buffer, then put into the file? 
+ if err := e.MarshalCBOR(l.file); err != nil { + return xerrors.Errorf("writing log entry: %w", err) + } + + return nil +} + +func (l *logfile) Close() error { + // todo: maybe write a magic 'close at' entry; pad the log to filesystem page to prevent more exotic types of corruption + + if err := l.file.Close(); err != nil { + return err + } + + l.file = nil + + return nil +} diff --git a/lib/backupds/read.go b/lib/backupds/read.go index f9a4336374c..a44442af167 100644 --- a/lib/backupds/read.go +++ b/lib/backupds/read.go @@ -4,75 +4,116 @@ import ( "bytes" "crypto/sha256" "io" + "os" "github.com/ipfs/go-datastore" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" ) -func ReadBackup(r io.Reader, cb func(key datastore.Key, value []byte) error) error { +func ReadBackup(r io.Reader, cb func(key datastore.Key, value []byte, log bool) error) (bool, error) { scratch := make([]byte, 9) + // read array[2]( if _, err := r.Read(scratch[:1]); err != nil { - return xerrors.Errorf("reading array header: %w", err) + return false, xerrors.Errorf("reading array header: %w", err) } if scratch[0] != 0x82 { - return xerrors.Errorf("expected array(2) header byte 0x82, got %x", scratch[0]) + return false, xerrors.Errorf("expected array(2) header byte 0x82, got %x", scratch[0]) } hasher := sha256.New() hr := io.TeeReader(r, hasher) + // read array[*]( if _, err := hr.Read(scratch[:1]); err != nil { - return xerrors.Errorf("reading array header: %w", err) + return false, xerrors.Errorf("reading array header: %w", err) } if scratch[0] != 0x9f { - return xerrors.Errorf("expected indefinite length array header byte 0x9f, got %x", scratch[0]) + return false, xerrors.Errorf("expected indefinite length array header byte 0x9f, got %x", scratch[0]) } for { if _, err := hr.Read(scratch[:1]); err != nil { - return xerrors.Errorf("reading tuple header: %w", err) + return false, xerrors.Errorf("reading tuple header: %w", err) } + // close array[*] if scratch[0] == 0xff { break } + // 
read array[2](key:[]byte, value:[]byte) if scratch[0] != 0x82 { - return xerrors.Errorf("expected array(2) header 0x82, got %x", scratch[0]) + return false, xerrors.Errorf("expected array(2) header 0x82, got %x", scratch[0]) } keyb, err := cbg.ReadByteArray(hr, 1<<40) if err != nil { - return xerrors.Errorf("reading key: %w", err) + return false, xerrors.Errorf("reading key: %w", err) } key := datastore.NewKey(string(keyb)) value, err := cbg.ReadByteArray(hr, 1<<40) if err != nil { - return xerrors.Errorf("reading value: %w", err) + return false, xerrors.Errorf("reading value: %w", err) } - if err := cb(key, value); err != nil { - return err + if err := cb(key, value, false); err != nil { + return false, err } } sum := hasher.Sum(nil) + // read the [32]byte checksum expSum, err := cbg.ReadByteArray(r, 32) if err != nil { - return xerrors.Errorf("reading expected checksum: %w", err) + return false, xerrors.Errorf("reading expected checksum: %w", err) } if !bytes.Equal(sum, expSum) { - return xerrors.Errorf("checksum didn't match; expected %x, got %x", expSum, sum) + return false, xerrors.Errorf("checksum didn't match; expected %x, got %x", expSum, sum) } - return nil + // read the log, set of Entry-ies + + var ent Entry + bp := cbg.GetPeeker(r) + for { + _, err := bp.ReadByte() + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return true, nil + case nil: + default: + return false, xerrors.Errorf("peek log: %w", err) + } + if err := bp.UnreadByte(); err != nil { + return false, xerrors.Errorf("unread log byte: %w", err) + } + + if err := ent.UnmarshalCBOR(bp); err != nil { + switch err { + case io.EOF, io.ErrUnexpectedEOF: + if os.Getenv("LOTUS_ALLOW_TRUNCATED_LOG") == "1" { + log.Errorw("log entry potentially truncated") + return false, nil + } + return false, xerrors.Errorf("log entry potentially truncated, set LOTUS_ALLOW_TRUNCATED_LOG=1 to proceed: %w", err) + default: + return false, xerrors.Errorf("unmarshaling log entry: %w", err) + } + } + + key := 
datastore.NewKey(string(ent.Key)) + + if err := cb(key, ent.Value, true); err != nil { + return false, err + } + } } func RestoreInto(r io.Reader, dest datastore.Batching) error { @@ -81,7 +122,7 @@ func RestoreInto(r io.Reader, dest datastore.Batching) error { return xerrors.Errorf("creating batch: %w", err) } - err = ReadBackup(r, func(key datastore.Key, value []byte) error { + _, err = ReadBackup(r, func(key datastore.Key, value []byte, _ bool) error { if err := batch.Put(key, value); err != nil { return xerrors.Errorf("put key: %w", err) } diff --git a/lib/blockstore/blockstore.go b/lib/blockstore/blockstore.go deleted file mode 100644 index 99d8491887f..00000000000 --- a/lib/blockstore/blockstore.go +++ /dev/null @@ -1,69 +0,0 @@ -// blockstore contains all the basic blockstore constructors used by lotus. Any -// blockstores not ultimately constructed out of the building blocks in this -// package may not work properly. -// -// * This package correctly wraps blockstores with the IdBlockstore. This blockstore: -// * Filters out all puts for blocks with CIDs using the "identity" hash function. -// * Extracts inlined blocks from CIDs using the identity hash function and -// returns them on get/has, ignoring the contents of the blockstore. -// * In the future, this package may enforce additional restrictions on block -// sizes, CID validity, etc. -// -// To make auditing for misuse of blockstores tractable, this package re-exports -// parts of the go-ipfs-blockstore package such that no other package needs to -// import it directly. -package blockstore - -import ( - "context" - - ds "github.com/ipfs/go-datastore" - - blockstore "github.com/ipfs/go-ipfs-blockstore" -) - -// NewTemporary returns a temporary blockstore. -func NewTemporary() MemStore { - return make(MemStore) -} - -// NewTemporarySync returns a thread-safe temporary blockstore. 
-func NewTemporarySync() *SyncStore { - return &SyncStore{bs: make(MemStore)} -} - -// WrapIDStore wraps the underlying blockstore in an "identity" blockstore. -func WrapIDStore(bstore blockstore.Blockstore) blockstore.Blockstore { - return blockstore.NewIdStore(bstore) -} - -// NewBlockstore creates a new blockstore wrapped by the given datastore. -func NewBlockstore(dstore ds.Batching) blockstore.Blockstore { - return WrapIDStore(blockstore.NewBlockstore(dstore)) -} - -// Alias so other packages don't have to import go-ipfs-blockstore -type Blockstore = blockstore.Blockstore -type GCBlockstore = blockstore.GCBlockstore -type CacheOpts = blockstore.CacheOpts -type GCLocker = blockstore.GCLocker - -var NewGCLocker = blockstore.NewGCLocker -var NewGCBlockstore = blockstore.NewGCBlockstore -var ErrNotFound = blockstore.ErrNotFound - -func DefaultCacheOpts() CacheOpts { - return CacheOpts{ - HasBloomFilterSize: 0, - HasBloomFilterHashes: 0, - HasARCCacheSize: 512 << 10, - } -} - -func CachedBlockstore(ctx context.Context, bs Blockstore, opts CacheOpts) (Blockstore, error) { - bs, err := blockstore.CachedBlockstore(ctx, bs, opts) - if err != nil { - return nil, err - } - return WrapIDStore(bs), nil -} diff --git a/lib/blockstore/syncstore.go b/lib/blockstore/syncstore.go deleted file mode 100644 index be9f6b5c40c..00000000000 --- a/lib/blockstore/syncstore.go +++ /dev/null @@ -1,68 +0,0 @@ -package blockstore - -import ( - "context" - "sync" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" -) - -type SyncStore struct { - mu sync.RWMutex - bs MemStore // specifically use a memStore to save indirection overhead. 
-} - -func (m *SyncStore) DeleteBlock(k cid.Cid) error { - m.mu.Lock() - defer m.mu.Unlock() - return m.bs.DeleteBlock(k) -} -func (m *SyncStore) Has(k cid.Cid) (bool, error) { - m.mu.RLock() - defer m.mu.RUnlock() - return m.bs.Has(k) -} -func (m *SyncStore) Get(k cid.Cid) (blocks.Block, error) { - m.mu.RLock() - defer m.mu.RUnlock() - return m.bs.Get(k) -} - -// GetSize returns the CIDs mapped BlockSize -func (m *SyncStore) GetSize(k cid.Cid) (int, error) { - m.mu.RLock() - defer m.mu.RUnlock() - return m.bs.GetSize(k) -} - -// Put puts a given block to the underlying datastore -func (m *SyncStore) Put(b blocks.Block) error { - m.mu.Lock() - defer m.mu.Unlock() - return m.bs.Put(b) -} - -// PutMany puts a slice of blocks at the same time using batching -// capabilities of the underlying datastore whenever possible. -func (m *SyncStore) PutMany(bs []blocks.Block) error { - m.mu.Lock() - defer m.mu.Unlock() - return m.bs.PutMany(bs) -} - -// AllKeysChan returns a channel from which -// the CIDs in the Blockstore can be read. It should respect -// the given context, closing the channel if it becomes Done. -func (m *SyncStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { - m.mu.RLock() - defer m.mu.RUnlock() - // this blockstore implementation doesn't do any async work. - return m.bs.AllKeysChan(ctx) -} - -// HashOnRead specifies if every read block should be -// rehashed to make sure it matches its CID. 
-func (m *SyncStore) HashOnRead(enabled bool) { - // noop -} diff --git a/lib/bufbstore/buf_bstore.go b/lib/bufbstore/buf_bstore.go deleted file mode 100644 index 4ea746444fd..00000000000 --- a/lib/bufbstore/buf_bstore.go +++ /dev/null @@ -1,153 +0,0 @@ -package bufbstore - -import ( - "context" - "os" - - block "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - - bstore "github.com/filecoin-project/lotus/lib/blockstore" -) - -var log = logging.Logger("bufbs") - -type BufferedBS struct { - read bstore.Blockstore - write bstore.Blockstore -} - -func NewBufferedBstore(base bstore.Blockstore) *BufferedBS { - var buf bstore.Blockstore - if os.Getenv("LOTUS_DISABLE_VM_BUF") == "iknowitsabadidea" { - log.Warn("VM BLOCKSTORE BUFFERING IS DISABLED") - buf = base - } else { - buf = bstore.NewTemporary() - } - - return &BufferedBS{ - read: base, - write: buf, - } -} - -func NewTieredBstore(r bstore.Blockstore, w bstore.Blockstore) *BufferedBS { - return &BufferedBS{ - read: r, - write: w, - } -} - -var _ (bstore.Blockstore) = &BufferedBS{} - -func (bs *BufferedBS) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { - a, err := bs.read.AllKeysChan(ctx) - if err != nil { - return nil, err - } - - b, err := bs.write.AllKeysChan(ctx) - if err != nil { - return nil, err - } - - out := make(chan cid.Cid) - go func() { - defer close(out) - for a != nil || b != nil { - select { - case val, ok := <-a: - if !ok { - a = nil - } else { - select { - case out <- val: - case <-ctx.Done(): - return - } - } - case val, ok := <-b: - if !ok { - b = nil - } else { - select { - case out <- val: - case <-ctx.Done(): - return - } - } - } - } - }() - - return out, nil -} - -func (bs *BufferedBS) DeleteBlock(c cid.Cid) error { - if err := bs.read.DeleteBlock(c); err != nil { - return err - } - - return bs.write.DeleteBlock(c) -} - -func (bs *BufferedBS) Get(c cid.Cid) (block.Block, error) { - if out, err := bs.read.Get(c); err != nil { - if 
err != bstore.ErrNotFound { - return nil, err - } - } else { - return out, nil - } - - return bs.write.Get(c) -} - -func (bs *BufferedBS) GetSize(c cid.Cid) (int, error) { - s, err := bs.read.GetSize(c) - if err == bstore.ErrNotFound || s == 0 { - return bs.write.GetSize(c) - } - - return s, err -} - -func (bs *BufferedBS) Put(blk block.Block) error { - has, err := bs.read.Has(blk.Cid()) - if err != nil { - return err - } - - if has { - return nil - } - - return bs.write.Put(blk) -} - -func (bs *BufferedBS) Has(c cid.Cid) (bool, error) { - has, err := bs.read.Has(c) - if err != nil { - return false, err - } - if has { - return true, nil - } - - return bs.write.Has(c) -} - -func (bs *BufferedBS) HashOnRead(hor bool) { - bs.read.HashOnRead(hor) - bs.write.HashOnRead(hor) -} - -func (bs *BufferedBS) PutMany(blks []block.Block) error { - return bs.write.PutMany(blks) -} - -func (bs *BufferedBS) Read() bstore.Blockstore { - return bs.read -} diff --git a/lib/cachebs/cachebs.go b/lib/cachebs/cachebs.go deleted file mode 100644 index 046f100c011..00000000000 --- a/lib/cachebs/cachebs.go +++ /dev/null @@ -1,89 +0,0 @@ -package cachebs - -import ( - "context" - - lru "github.com/hashicorp/golang-lru" - block "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - - bstore "github.com/filecoin-project/lotus/lib/blockstore" -) - -//nolint:deadcode,varcheck -var log = logging.Logger("cachebs") - -type CacheBS struct { - cache *lru.ARCCache - bs bstore.Blockstore -} - -func NewBufferedBstore(base bstore.Blockstore, size int) bstore.Blockstore { - c, err := lru.NewARC(size) - if err != nil { - panic(err) - } - // Wrap this in an ID blockstore to avoid caching blocks inlined into - // CIDs. 
- return bstore.WrapIDStore(&CacheBS{ - cache: c, - bs: base, - }) -} - -var _ (bstore.Blockstore) = &CacheBS{} - -func (bs *CacheBS) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { - return bs.bs.AllKeysChan(ctx) -} - -func (bs *CacheBS) DeleteBlock(c cid.Cid) error { - bs.cache.Remove(c) - - return bs.bs.DeleteBlock(c) -} - -func (bs *CacheBS) Get(c cid.Cid) (block.Block, error) { - v, ok := bs.cache.Get(c) - if ok { - return v.(block.Block), nil - } - - out, err := bs.bs.Get(c) - if err != nil { - return nil, err - } - - bs.cache.Add(c, out) - return out, nil -} - -func (bs *CacheBS) GetSize(c cid.Cid) (int, error) { - return bs.bs.GetSize(c) -} - -func (bs *CacheBS) Put(blk block.Block) error { - bs.cache.Add(blk.Cid(), blk) - - return bs.bs.Put(blk) -} - -func (bs *CacheBS) Has(c cid.Cid) (bool, error) { - if bs.cache.Contains(c) { - return true, nil - } - - return bs.bs.Has(c) -} - -func (bs *CacheBS) HashOnRead(hor bool) { - bs.bs.HashOnRead(hor) -} - -func (bs *CacheBS) PutMany(blks []block.Block) error { - for _, blk := range blks { - bs.cache.Add(blk.Cid(), blk) - } - return bs.bs.PutMany(blks) -} diff --git a/lib/commp/writer.go b/lib/commp/writer.go deleted file mode 100644 index 4c5e3350c84..00000000000 --- a/lib/commp/writer.go +++ /dev/null @@ -1,113 +0,0 @@ -package commp - -import ( - "bytes" - "math/bits" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - ffi "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-padreader" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" -) - -const commPBufPad = abi.PaddedPieceSize(8 << 20) -const CommPBuf = abi.UnpaddedPieceSize(commPBufPad - (commPBufPad / 128)) // can't use .Unpadded() for const - -type Writer struct { - len int64 - buf [CommPBuf]byte - leaves []cid.Cid -} - -func (w 
*Writer) Write(p []byte) (int, error) { - n := len(p) - for len(p) > 0 { - buffered := int(w.len % int64(len(w.buf))) - toBuffer := len(w.buf) - buffered - if toBuffer > len(p) { - toBuffer = len(p) - } - - copied := copy(w.buf[buffered:], p[:toBuffer]) - p = p[copied:] - w.len += int64(copied) - - if copied > 0 && w.len%int64(len(w.buf)) == 0 { - leaf, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg32GiBV1, bytes.NewReader(w.buf[:]), CommPBuf) - if err != nil { - return 0, err - } - w.leaves = append(w.leaves, leaf) - } - } - return n, nil -} - -func (w *Writer) Sum() (api.DataCIDSize, error) { - // process last non-zero leaf if exists - lastLen := w.len % int64(len(w.buf)) - rawLen := w.len - - // process remaining bit of data - if lastLen != 0 { - if len(w.leaves) != 0 { - copy(w.buf[lastLen:], make([]byte, int(int64(CommPBuf)-lastLen))) - lastLen = int64(CommPBuf) - } - - r, sz := padreader.New(bytes.NewReader(w.buf[:lastLen]), uint64(lastLen)) - p, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg32GiBV1, r, sz) - if err != nil { - return api.DataCIDSize{}, err - } - - if sz < CommPBuf { // special case for pieces smaller than 16MiB - return api.DataCIDSize{ - PayloadSize: w.len, - PieceSize: sz.Padded(), - PieceCID: p, - }, nil - } - - w.leaves = append(w.leaves, p) - } - - // pad with zero pieces to power-of-two size - fillerLeaves := (1 << (bits.Len(uint(len(w.leaves) - 1)))) - len(w.leaves) - for i := 0; i < fillerLeaves; i++ { - w.leaves = append(w.leaves, zerocomm.ZeroPieceCommitment(CommPBuf)) - } - - if len(w.leaves) == 1 { - return api.DataCIDSize{ - PayloadSize: rawLen, - PieceSize: abi.PaddedPieceSize(len(w.leaves)) * commPBufPad, - PieceCID: w.leaves[0], - }, nil - } - - pieces := make([]abi.PieceInfo, len(w.leaves)) - for i, leaf := range w.leaves { - pieces[i] = abi.PieceInfo{ - Size: commPBufPad, - PieceCID: leaf, - } - } - - p, err := 
ffi.GenerateUnsealedCID(abi.RegisteredSealProof_StackedDrg32GiBV1, pieces) - if err != nil { - return api.DataCIDSize{}, xerrors.Errorf("generating unsealed CID: %w", err) - } - - return api.DataCIDSize{ - PayloadSize: rawLen, - PieceSize: abi.PaddedPieceSize(len(w.leaves)) * commPBufPad, - PieceCID: p, - }, nil -} diff --git a/lib/commp/writer_test.go b/lib/commp/writer_test.go deleted file mode 100644 index 284648e4e05..00000000000 --- a/lib/commp/writer_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package commp - -import ( - "bytes" - "crypto/rand" - "fmt" - "io" - "io/ioutil" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-padreader" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" -) - -func TestWriterZero(t *testing.T) { - for i, s := range []struct { - writes []int - expect abi.PaddedPieceSize - }{ - {writes: []int{200}, expect: 256}, - {writes: []int{200, 200}, expect: 512}, - - {writes: []int{int(CommPBuf)}, expect: commPBufPad}, - {writes: []int{int(CommPBuf) * 2}, expect: 2 * commPBufPad}, - {writes: []int{int(CommPBuf), int(CommPBuf), int(CommPBuf)}, expect: 4 * commPBufPad}, - {writes: []int{int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf), int(CommPBuf)}, expect: 16 * commPBufPad}, - - {writes: []int{200, int(CommPBuf)}, expect: 2 * commPBufPad}, - } { - s := s - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - w := &Writer{} - var rawSum int64 - for _, write := range s.writes { - rawSum += int64(write) - _, err := w.Write(make([]byte, write)) - require.NoError(t, err) - } - - p, err := w.Sum() - require.NoError(t, err) - require.Equal(t, rawSum, p.PayloadSize) - require.Equal(t, s.expect, p.PieceSize) - require.Equal(t, zerocomm.ZeroPieceCommitment(s.expect.Unpadded()).String(), p.PieceCID.String()) - 
}) - } -} - -func TestWriterData(t *testing.T) { - dataLen := float64(CommPBuf) * 6.78 - data, _ := ioutil.ReadAll(io.LimitReader(rand.Reader, int64(dataLen))) - - pr, sz := padreader.New(bytes.NewReader(data), uint64(dataLen)) - exp, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg32GiBV1, pr, sz) - require.NoError(t, err) - - w := &Writer{} - _, err = io.Copy(w, bytes.NewReader(data)) - require.NoError(t, err) - - res, err := w.Sum() - require.NoError(t, err) - - require.Equal(t, exp.String(), res.PieceCID.String()) -} - -func BenchmarkWriterZero(b *testing.B) { - buf := make([]byte, int(CommPBuf)*b.N) - b.SetBytes(int64(CommPBuf)) - b.ResetTimer() - - w := &Writer{} - - _, err := w.Write(buf) - require.NoError(b, err) - o, err := w.Sum() - - b.StopTimer() - - require.NoError(b, err) - require.Equal(b, zerocomm.ZeroPieceCommitment(o.PieceSize.Unpadded()).String(), o.PieceCID.String()) - require.Equal(b, int64(CommPBuf)*int64(b.N), o.PayloadSize) -} diff --git a/lib/peermgr/peermgr.go b/lib/peermgr/peermgr.go index 2f9d3467499..ee158cc040a 100644 --- a/lib/peermgr/peermgr.go +++ b/lib/peermgr/peermgr.go @@ -53,16 +53,24 @@ type PeerMgr struct { h host.Host dht *dht.IpfsDHT - notifee *net.NotifyBundle - filPeerEmitter event.Emitter + notifee *net.NotifyBundle + emitter event.Emitter done chan struct{} } -type NewFilPeer struct { - Id peer.ID +type FilPeerEvt struct { + Type FilPeerEvtType + ID peer.ID } +type FilPeerEvtType int + +const ( + AddFilPeerEvt FilPeerEvtType = iota + RemoveFilPeerEvt +) + func NewPeerMgr(lc fx.Lifecycle, h host.Host, dht *dht.IpfsDHT, bootstrap dtypes.BootstrapPeers) (*PeerMgr, error) { pm := &PeerMgr{ h: h, @@ -77,16 +85,16 @@ func NewPeerMgr(lc fx.Lifecycle, h host.Host, dht *dht.IpfsDHT, bootstrap dtypes done: make(chan struct{}), } - emitter, err := h.EventBus().Emitter(new(NewFilPeer)) + emitter, err := h.EventBus().Emitter(new(FilPeerEvt)) if err != nil { - return nil, xerrors.Errorf("creating 
NewFilPeer emitter: %w", err) + return nil, xerrors.Errorf("creating FilPeerEvt emitter: %w", err) } - pm.filPeerEmitter = emitter + pm.emitter = emitter lc.Append(fx.Hook{ OnStop: func(ctx context.Context) error { return multierr.Combine( - pm.filPeerEmitter.Close(), + pm.emitter.Close(), pm.Stop(ctx), ) }, @@ -104,7 +112,7 @@ func NewPeerMgr(lc fx.Lifecycle, h host.Host, dht *dht.IpfsDHT, bootstrap dtypes } func (pmgr *PeerMgr) AddFilecoinPeer(p peer.ID) { - _ = pmgr.filPeerEmitter.Emit(NewFilPeer{Id: p}) //nolint:errcheck + _ = pmgr.emitter.Emit(FilPeerEvt{Type: AddFilPeerEvt, ID: p}) //nolint:errcheck pmgr.peersLk.Lock() defer pmgr.peersLk.Unlock() pmgr.peers[p] = time.Duration(0) @@ -127,10 +135,19 @@ func (pmgr *PeerMgr) SetPeerLatency(p peer.ID, latency time.Duration) { } func (pmgr *PeerMgr) Disconnect(p peer.ID) { + disconnected := false + if pmgr.h.Network().Connectedness(p) == net.NotConnected { pmgr.peersLk.Lock() - defer pmgr.peersLk.Unlock() - delete(pmgr.peers, p) + _, disconnected = pmgr.peers[p] + if disconnected { + delete(pmgr.peers, p) + } + pmgr.peersLk.Unlock() + } + + if disconnected { + _ = pmgr.emitter.Emit(FilPeerEvt{Type: RemoveFilPeerEvt, ID: p}) //nolint:errcheck } } @@ -191,11 +208,17 @@ func (pmgr *PeerMgr) doExpand(ctx context.Context) { } log.Info("connecting to bootstrap peers") + wg := sync.WaitGroup{} for _, bsp := range pmgr.bootstrappers { - if err := pmgr.h.Connect(ctx, bsp); err != nil { - log.Warnf("failed to connect to bootstrap peer: %s", err) - } + wg.Add(1) + go func(bsp peer.AddrInfo) { + defer wg.Done() + if err := pmgr.h.Connect(ctx, bsp); err != nil { + log.Warnf("failed to connect to bootstrap peer: %s", err) + } + }(bsp) } + wg.Wait() return } diff --git a/lib/rpcenc/reader.go b/lib/rpcenc/reader.go index 617c6495eec..23944af6cd7 100644 --- a/lib/rpcenc/reader.go +++ b/lib/rpcenc/reader.go @@ -78,27 +78,38 @@ func ReaderParamEncoder(addr string) jsonrpc.Option { }) } -type waitReadCloser struct { +// 
watchReadCloser watches the ReadCloser and closes the watch channel when +// either: (1) the ReaderCloser fails on Read (including with a benign error +// like EOF), or (2) when Close is called. +// +// Use it be notified of terminal states, in situations where a Read failure (or +// EOF) is considered a terminal state too (besides Close). +type watchReadCloser struct { io.ReadCloser - wait chan struct{} + watch chan struct{} + closeOnce sync.Once } -func (w *waitReadCloser) Read(p []byte) (int, error) { +func (w *watchReadCloser) Read(p []byte) (int, error) { n, err := w.ReadCloser.Read(p) if err != nil { - close(w.wait) + w.closeOnce.Do(func() { + close(w.watch) + }) } return n, err } -func (w *waitReadCloser) Close() error { - close(w.wait) +func (w *watchReadCloser) Close() error { + w.closeOnce.Do(func() { + close(w.watch) + }) return w.ReadCloser.Close() } func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) { var readersLk sync.Mutex - readers := map[uuid.UUID]chan *waitReadCloser{} + readers := map[uuid.UUID]chan *watchReadCloser{} hnd := func(resp http.ResponseWriter, req *http.Request) { strId := path.Base(req.URL.Path) @@ -111,14 +122,14 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) { readersLk.Lock() ch, found := readers[u] if !found { - ch = make(chan *waitReadCloser) + ch = make(chan *watchReadCloser) readers[u] = ch } readersLk.Unlock() - wr := &waitReadCloser{ + wr := &watchReadCloser{ ReadCloser: req.Body, - wait: make(chan struct{}), + watch: make(chan struct{}), } tctx, cancel := context.WithTimeout(req.Context(), Timeout) @@ -128,15 +139,17 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) { case ch <- wr: case <-tctx.Done(): close(ch) - log.Error("context error in reader stream handler (1): %v", tctx.Err()) + log.Errorf("context error in reader stream handler (1): %v", tctx.Err()) resp.WriteHeader(500) return } select { - case <-wr.wait: + case <-wr.watch: + // TODO should we check if we 
failed the Read, and if so + // return an HTTP 500? i.e. turn watch into a chan error? case <-req.Context().Done(): - log.Error("context error in reader stream handler (2): %v", req.Context().Err()) + log.Errorf("context error in reader stream handler (2): %v", req.Context().Err()) resp.WriteHeader(500) return } @@ -167,7 +180,7 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) { readersLk.Lock() ch, found := readers[u] if !found { - ch = make(chan *waitReadCloser) + ch = make(chan *watchReadCloser) readers[u] = ch } readersLk.Unlock() diff --git a/lib/sigs/bls/bls_test.go b/lib/sigs/bls/bls_test.go new file mode 100644 index 00000000000..4508d0eb9b2 --- /dev/null +++ b/lib/sigs/bls/bls_test.go @@ -0,0 +1,77 @@ +package bls_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/lib/sigs" + _ "github.com/filecoin-project/lotus/lib/sigs/bls" +) + +func TestRoundtrip(t *testing.T) { + pk, err := sigs.Generate(crypto.SigTypeBLS) + require.NoError(t, err) + + ki := types.KeyInfo{ + Type: types.KTBLS, + PrivateKey: pk, + } + k, err := wallet.NewKey(ki) + require.NoError(t, err) + + p := []byte("potato") + + si, err := sigs.Sign(crypto.SigTypeBLS, pk, p) + require.NoError(t, err) + + err = sigs.Verify(si, k.Address, p) + require.NoError(t, err) +} + +func TestUncompressedFails(t *testing.T) { + // compressed + err := sigs.Verify(&crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: []byte{0x99, 0x27, 0x44, 0x4b, 0xfc, 0xff, 0xdc, 0xa3, 0x4a, 0xf5, 0x7b, 0x78, 0x75, 0x7b, 0x9b, 0x90, 0xf1, 0xcd, 0x28, 0xd2, 0xa3, 0xae, 0xed, 0x2a, 0xa6, 0xbd, 0xe2, 0x99, 0xf8, 0xbb, 0xb9, 0x18, 0x47, 0x56, 0xf2, 0x28, 0x7b, 0x5, 0x88, 0xe6, 0xd3, 0xf2, 0x86, 0xd, 0x2b, 0xb2, 0x6, 0x6e, 0xc, 0x59, 0x77, 0x8c, 0x1e, 0x64, 
0x4f, 0xb2, 0xcf, 0xb3, 0x5f, 0xba, 0x8f, 0x9, 0xfa, 0x82, 0x4a, 0x9e, 0xd8, 0x25, 0x10, 0x8c, 0x82, 0xff, 0x4b, 0xf6, 0x34, 0xc1, 0x3, 0x7e, 0xea, 0xf1, 0x85, 0xf4, 0x56, 0x73, 0xd4, 0xa1, 0xc1, 0xc6, 0xee, 0xb7, 0x12, 0xb7, 0xd7, 0x2a, 0x54, 0x98}, + }, mustAddr("f3tcgq5scpfhdwh4dbalwktzf6mbv3ng2nw7tyzni5cyrsgvineid6jybnweecpa6misa6lk4tvwtxj2gkwpzq"), []byte{0x70, 0x6f, 0x74, 0x61, 0x74, 0x6f}) + require.NoError(t, err) + + // compressed byte changed + err = sigs.Verify(&crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: []byte{0x99, 0x27, 0x44, 0x4b, 0xfc, 0xff, 0xdc, 0xa3, 0x4a, 0xf5, 0x7b, 0x78, 0x75, 0x7b, 0x9b, 0x90, 0xf1, 0xcd, 0x28, 0xd2, 0xa3, 0xae, 0xed, 0x2a, 0xa6, 0xbd, 0xe2, 0x99, 0xf8, 0xbb, 0xb9, 0x18, 0x47, 0x56, 0xf2, 0x28, 0x7b, 0x5, 0x88, 0xf6, 0xd3, 0xf2, 0x86, 0xd, 0x2b, 0xb2, 0x6, 0x6e, 0xc, 0x59, 0x77, 0x8c, 0x1e, 0x64, 0x4f, 0xb2, 0xcf, 0xb3, 0x5f, 0xba, 0x8f, 0x9, 0xfa, 0x82, 0x4a, 0x9e, 0xd8, 0x25, 0x10, 0x8c, 0x82, 0xff, 0x4b, 0xf6, 0x34, 0xc1, 0x3, 0x7e, 0xea, 0xf1, 0x85, 0xf4, 0x56, 0x73, 0xd4, 0xa1, 0xc1, 0xc6, 0xee, 0xb7, 0x12, 0xb7, 0xd7, 0x2a, 0x54, 0x98}, + }, mustAddr("f3tcgq5scpfhdwh4dbalwktzf6mbv3ng2nw7tyzni5cyrsgvineid6jybnweecpa6misa6lk4tvwtxj2gkwpzq"), []byte{0x70, 0x6f, 0x74, 0x61, 0x74, 0x6f}) + require.Error(t, err) + + // compressed prefix + err = sigs.Verify(&crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: []byte{0x99, 0x27, 0x44, 0x4b, 0xfc, 0xff, 0xdc, 0xa3, 0x4a, 0xf5, 0x7b, 0x78, 0x75, 0x7b, 0x9b, 0x90, 0xf1, 0xcd, 0x28, 0xd2, 0xa3, 0xae, 0xed, 0x2a, 0xa6, 0xbd, 0xe2, 0x99, 0xf8, 0xbb, 0xb9, 0x18, 0x47, 0x56, 0xf2, 0x28, 0x7b, 0x5, 0x88, 0xe6, 0xd3, 0xf2, 0x86, 0xd, 0x2b, 0xb2, 0x6, 0x6e, 0xc, 0x59, 0x77, 0x8c, 0x1e, 0x64, 0x4f, 0xb2, 0xcf, 0xb3, 0x5f, 0xba, 0x8f, 0x9, 0xfa, 0x82, 0x4a, 0x9e, 0xd8, 0x25, 0x10, 0x8c, 0x82, 0xff, 0x4b, 0xf6, 0x34, 0xc1, 0x3, 0x7e, 0xea, 0xf1, 0x85, 0xf4, 0x56, 0x73, 0xd4, 0xa1, 0xc1, 0xc6, 0xee, 0xb7, 0x12, 0xb7, 0xd7, 0x2a, 0x54, 0x98, 0x55}, + }, 
mustAddr("f3tcgq5scpfhdwh4dbalwktzf6mbv3ng2nw7tyzni5cyrsgvineid6jybnweecpa6misa6lk4tvwtxj2gkwpzq"), []byte{0x70, 0x6f, 0x74, 0x61, 0x74, 0x6f}) + require.Error(t, err) + + // uncompressed + err = sigs.Verify(&crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: []byte{0x19, 0x27, 0x44, 0x4b, 0xfc, 0xff, 0xdc, 0xa3, 0x4a, 0xf5, 0x7b, 0x78, 0x75, 0x7b, 0x9b, 0x90, 0xf1, 0xcd, 0x28, 0xd2, 0xa3, 0xae, 0xed, 0x2a, 0xa6, 0xbd, 0xe2, 0x99, 0xf8, 0xbb, 0xb9, 0x18, 0x47, 0x56, 0xf2, 0x28, 0x7b, 0x5, 0x88, 0xe6, 0xd3, 0xf2, 0x86, 0xd, 0x2b, 0xb2, 0x6, 0x6e, 0xc, 0x59, 0x77, 0x8c, 0x1e, 0x64, 0x4f, 0xb2, 0xcf, 0xb3, 0x5f, 0xba, 0x8f, 0x9, 0xfa, 0x82, 0x4a, 0x9e, 0xd8, 0x25, 0x10, 0x8c, 0x82, 0xff, 0x4b, 0xf6, 0x34, 0xc1, 0x3, 0x7e, 0xea, 0xf1, 0x85, 0xf4, 0x56, 0x73, 0xd4, 0xa1, 0xc1, 0xc6, 0xee, 0xb7, 0x12, 0xb7, 0xd7, 0x2a, 0x54, 0x98, 0x8, 0x94, 0x23, 0x78, 0xdb, 0xce, 0x2a, 0xd7, 0x2e, 0x87, 0xdf, 0x8, 0x3b, 0x66, 0xc6, 0x31, 0xc1, 0x8c, 0x58, 0x2f, 0x9f, 0x9e, 0x10, 0x4d, 0x2a, 0x7e, 0x13, 0xe7, 0x9c, 0xbb, 0x22, 0xde, 0xcc, 0xf6, 0x77, 0x77, 0xb0, 0x9c, 0x25, 0x5d, 0x5d, 0xe6, 0x88, 0x9, 0x8c, 0x63, 0x35, 0xd4, 0xa, 0x85, 0x76, 0x8d, 0xb7, 0x66, 0xa6, 0xc6, 0xec, 0xe6, 0xde, 0x2a, 0x9f, 0x34, 0x87, 0x28, 0x1a, 0x48, 0xfe, 0xca, 0xb1, 0x47, 0x2, 0xf6, 0x51, 0x26, 0x52, 0x70, 0x9d, 0x7e, 0xdb, 0x7e, 0x8b, 0xc9, 0xf6, 0x41, 0xaa, 0xa8, 0x3b, 0x7e, 0x8a, 0xfd, 0x7a, 0xe4, 0x79, 0xe6, 0x59, 0xe4}, + }, mustAddr("f3tcgq5scpfhdwh4dbalwktzf6mbv3ng2nw7tyzni5cyrsgvineid6jybnweecpa6misa6lk4tvwtxj2gkwpzq"), []byte{0x70, 0x6f, 0x74, 0x61, 0x74, 0x6f}) + require.Error(t, err) + + // uncompressed one byte change + err = sigs.Verify(&crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: []byte{0x19, 0x27, 0x44, 0x4b, 0xfc, 0xff, 0xdc, 0xa3, 0x4a, 0xf5, 0x7b, 0x78, 0x75, 0x7b, 0x9b, 0x90, 0xf1, 0xcd, 0x28, 0xd2, 0xa3, 0xae, 0xed, 0x2a, 0xa6, 0xbd, 0xe2, 0x99, 0xf8, 0xbb, 0xb9, 0x18, 0x47, 0x56, 0xf2, 0x28, 0x7b, 0x5, 0x88, 0xe6, 0xd3, 0xf2, 0x86, 0xd, 0x2b, 0xb2, 0x6, 0x6e, 0xc, 0x59, 
0x77, 0x8c, 0x1e, 0x64, 0x4f, 0xb2, 0xcf, 0xb3, 0x5f, 0xba, 0x8f, 0x9, 0xfa, 0x82, 0x4a, 0x9e, 0xd8, 0x25, 0x10, 0x8c, 0x82, 0xff, 0x4b, 0xf6, 0x34, 0xc1, 0x3, 0x7e, 0xea, 0xf1, 0x85, 0xf4, 0x56, 0x73, 0xd4, 0xa1, 0xc1, 0xc6, 0xee, 0xb7, 0x12, 0xb7, 0xd7, 0x2a, 0x54, 0x98, 0x8, 0x94, 0x23, 0x78, 0xdb, 0xce, 0x2a, 0xd7, 0x2e, 0x87, 0xdf, 0x8, 0x3b, 0x66, 0xc6, 0x31, 0xc1, 0x8c, 0x58, 0x2f, 0x9f, 0x9e, 0x10, 0x4d, 0x2a, 0x7e, 0x13, 0xe7, 0x9c, 0xbb, 0x22, 0xde, 0xcc, 0xf6, 0x77, 0x77, 0xb0, 0x9c, 0x25, 0x5d, 0x5d, 0xe6, 0x88, 0x9, 0x8c, 0x63, 0x35, 0xd4, 0xa, 0x85, 0x66, 0x8d, 0xb7, 0x66, 0xa6, 0xc6, 0xec, 0xe6, 0xde, 0x2a, 0x9f, 0x34, 0x87, 0x28, 0x1a, 0x48, 0xfe, 0xca, 0xb1, 0x47, 0x2, 0xf6, 0x51, 0x26, 0x52, 0x70, 0x9d, 0x7e, 0xdb, 0x7e, 0x8b, 0xc9, 0xf6, 0x41, 0xaa, 0xa8, 0x3b, 0x7e, 0x8a, 0xfd, 0x7a, 0xe4, 0x79, 0xe6, 0x59, 0xe4}, + }, mustAddr("f3tcgq5scpfhdwh4dbalwktzf6mbv3ng2nw7tyzni5cyrsgvineid6jybnweecpa6misa6lk4tvwtxj2gkwpzq"), []byte{0x70, 0x6f, 0x74, 0x61, 0x74, 0x6f}) + require.Error(t, err) +} + +func mustAddr(a string) address.Address { + ad, _ := address.NewFromString(a) + return ad +} diff --git a/lib/sigs/bls/init.go b/lib/sigs/bls/init.go index 42633eee88a..9bc69c3a460 100644 --- a/lib/sigs/bls/init.go +++ b/lib/sigs/bls/init.go @@ -7,17 +7,17 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/crypto" - blst "github.com/supranational/blst/bindings/go" + ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/lotus/lib/sigs" ) const DST = string("BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_") -type SecretKey = blst.SecretKey -type PublicKey = blst.P1Affine -type Signature = blst.P2Affine -type AggregateSignature = blst.P2Aggregate +type SecretKey = ffi.PrivateKey +type PublicKey = ffi.PublicKey +type Signature = ffi.Signature +type AggregateSignature = ffi.Signature type blsSigner struct{} @@ -29,30 +29,55 @@ func (blsSigner) GenPrivate() ([]byte, error) { return nil, fmt.Errorf("bls 
signature error generating random data") } // Note private keys seem to be serialized little-endian! - pk := blst.KeyGen(ikm[:]).ToLEndian() - return pk, nil + sk := ffi.PrivateKeyGenerateWithSeed(ikm) + return sk[:], nil } func (blsSigner) ToPublic(priv []byte) ([]byte, error) { - pk := new(SecretKey).FromLEndian(priv) - if pk == nil || !pk.Valid() { + if priv == nil || len(priv) != ffi.PrivateKeyBytes { return nil, fmt.Errorf("bls signature invalid private key") } - return new(PublicKey).From(pk).Compress(), nil + + sk := new(SecretKey) + copy(sk[:], priv[:ffi.PrivateKeyBytes]) + + pubkey := ffi.PrivateKeyPublicKey(*sk) + + return pubkey[:], nil } func (blsSigner) Sign(p []byte, msg []byte) ([]byte, error) { - pk := new(SecretKey).FromLEndian(p) - if pk == nil || !pk.Valid() { + if p == nil || len(p) != ffi.PrivateKeyBytes { return nil, fmt.Errorf("bls signature invalid private key") } - return new(Signature).Sign(pk, msg, []byte(DST)).Compress(), nil + + sk := new(SecretKey) + copy(sk[:], p[:ffi.PrivateKeyBytes]) + + sig := ffi.PrivateKeySign(*sk, msg) + + return sig[:], nil } func (blsSigner) Verify(sig []byte, a address.Address, msg []byte) error { - if !new(Signature).VerifyCompressed(sig, a.Payload()[:], msg, []byte(DST)) { + payload := a.Payload() + if sig == nil || len(sig) != ffi.SignatureBytes || len(payload) != ffi.PublicKeyBytes { return fmt.Errorf("bls signature failed to verify") } + + pk := new(PublicKey) + copy(pk[:], payload[:ffi.PublicKeyBytes]) + + sigS := new(Signature) + copy(sigS[:], sig[:ffi.SignatureBytes]) + + msgs := [1]ffi.Message{msg} + pks := [1]PublicKey{*pk} + + if !ffi.HashVerify(sigS, msgs[:], pks[:]) { + return fmt.Errorf("bls signature failed to verify") + } + return nil } diff --git a/lib/stati/covar.go b/lib/stati/covar.go new file mode 100644 index 00000000000..c92fd8b7484 --- /dev/null +++ b/lib/stati/covar.go @@ -0,0 +1,104 @@ +package stati + +import "math" + +type Covar struct { + meanX float64 + meanY float64 + c float64 
+ n float64 + m2x float64 + m2y float64 +} + +func (cov1 *Covar) MeanX() float64 { + return cov1.meanX +} + +func (cov1 *Covar) MeanY() float64 { + return cov1.meanY +} + +func (cov1 *Covar) N() float64 { + return cov1.n +} + +func (cov1 *Covar) Covariance() float64 { + return cov1.c / (cov1.n - 1) +} + +func (cov1 *Covar) VarianceX() float64 { + return cov1.m2x / (cov1.n - 1) +} + +func (cov1 *Covar) StddevX() float64 { + return math.Sqrt(cov1.VarianceX()) +} + +func (cov1 *Covar) VarianceY() float64 { + return cov1.m2y / (cov1.n - 1) +} + +func (cov1 *Covar) StddevY() float64 { + return math.Sqrt(cov1.VarianceY()) +} + +func (cov1 *Covar) AddPoint(x, y float64) { + cov1.n++ + + dx := x - cov1.meanX + cov1.meanX += dx / cov1.n + dx2 := x - cov1.meanX + cov1.m2x += dx * dx2 + + dy := y - cov1.meanY + cov1.meanY += dy / cov1.n + dy2 := y - cov1.meanY + cov1.m2y += dy * dy2 + + cov1.c += dx * dy +} + +func (cov1 *Covar) Combine(cov2 *Covar) { + if cov1.n == 0 { + *cov1 = *cov2 + return + } + if cov2.n == 0 { + return + } + + if cov1.n == 1 { + cpy := *cov2 + cpy.AddPoint(cov2.meanX, cov2.meanY) + *cov1 = cpy + return + } + if cov2.n == 1 { + cov1.AddPoint(cov2.meanX, cov2.meanY) + } + + out := Covar{} + out.n = cov1.n + cov2.n + + dx := cov1.meanX - cov2.meanX + out.meanX = cov1.meanX - dx*cov2.n/out.n + out.m2x = cov1.m2x + cov2.m2x + dx*dx*cov1.n*cov2.n/out.n + + dy := cov1.meanY - cov2.meanY + out.meanY = cov1.meanY - dy*cov2.n/out.n + out.m2y = cov1.m2y + cov2.m2y + dy*dy*cov1.n*cov2.n/out.n + + out.c = cov1.c + cov2.c + dx*dy*cov1.n*cov2.n/out.n + *cov1 = out +} + +func (cov1 *Covar) A() float64 { + return cov1.Covariance() / cov1.VarianceX() +} +func (cov1 *Covar) B() float64 { + return cov1.meanY - cov1.meanX*cov1.A() +} +func (cov1 *Covar) Correl() float64 { + return cov1.Covariance() / cov1.StddevX() / cov1.StddevY() +} diff --git a/lib/stati/histo.go b/lib/stati/histo.go new file mode 100644 index 00000000000..3c410c0d026 --- /dev/null +++ 
b/lib/stati/histo.go @@ -0,0 +1,56 @@ +package stati + +import ( + "math" + + "golang.org/x/xerrors" +) + +type Histogram struct { + Buckets []float64 + Counts []uint64 +} + +// NewHistogram creates a histograme with buckets defined as: +// {x > -Inf, x >= buckets[0], x >= buckets[1], ..., x >= buckets[i]} +func NewHistogram(buckets []float64) (*Histogram, error) { + if len(buckets) == 0 { + return nil, xerrors.Errorf("empty buckets") + } + prev := buckets[0] + for i, v := range buckets[1:] { + if v < prev { + return nil, xerrors.Errorf("bucket at index %d is smaller than previous %f < %f", i+1, v, prev) + } + prev = v + } + h := &Histogram{ + Buckets: append([]float64{math.Inf(-1)}, buckets...), + Counts: make([]uint64, len(buckets)+1), + } + return h, nil +} + +func (h *Histogram) Observe(x float64) { + for i, b := range h.Buckets { + if x >= b { + h.Counts[i]++ + } else { + break + } + } +} + +func (h *Histogram) Total() uint64 { + return h.Counts[0] +} + +func (h *Histogram) Get(i int) uint64 { + if i >= len(h.Counts)-2 { + return h.Counts[i] + } + return h.Counts[i+1] - h.Counts[i+2] +} +func (h *Histogram) GetRatio(i int) float64 { + return float64(h.Get(i)) / float64(h.Total()) +} diff --git a/lib/stati/meanvar.go b/lib/stati/meanvar.go new file mode 100644 index 00000000000..b77aaa63867 --- /dev/null +++ b/lib/stati/meanvar.go @@ -0,0 +1,66 @@ +package stati + +import ( + "fmt" + "math" +) + +type MeanVar struct { + n float64 + mean float64 + m2 float64 +} + +func (v1 *MeanVar) AddPoint(value float64) { + // based on https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm + v1.n++ + delta := value - v1.mean + v1.mean += delta / v1.n + delta2 := value - v1.mean + v1.m2 += delta * delta2 +} + +func (v1 *MeanVar) Mean() float64 { + return v1.mean +} +func (v1 *MeanVar) N() float64 { + return v1.n +} +func (v1 *MeanVar) Variance() float64 { + return v1.m2 / (v1.n - 1) +} +func (v1 *MeanVar) Stddev() float64 { + return 
math.Sqrt(v1.Variance()) +} + +func (v1 MeanVar) String() string { + return fmt.Sprintf("%f stddev: %f (%.0f)", v1.Mean(), v1.Stddev(), v1.N()) +} + +func (v1 *MeanVar) Combine(v2 *MeanVar) { + if v1.n == 0 { + *v1 = *v2 + return + } + if v2.n == 0 { + return + } + if v1.n == 1 { + cpy := *v2 + cpy.AddPoint(v1.mean) + *v1 = cpy + return + } + if v2.n == 1 { + v1.AddPoint(v2.mean) + return + } + + newCount := v1.n + v2.n + delta := v2.mean - v1.mean + meanDelta := delta * v2.n / newCount + m2 := v1.m2 + v2.m2 + delta*meanDelta*v1.n + v1.n = newCount + v1.mean += meanDelta + v1.m2 = m2 +} diff --git a/lib/stati/stats_test.go b/lib/stati/stats_test.go new file mode 100644 index 00000000000..fa92913b669 --- /dev/null +++ b/lib/stati/stats_test.go @@ -0,0 +1,47 @@ +package stati + +import ( + "math/rand" + "testing" +) + +func TestMeanVar(t *testing.T) { + N := 16 + ss := make([]*MeanVar, N) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < N; i++ { + ss[i] = &MeanVar{} + maxJ := rng.Intn(1000) + for j := 0; j < maxJ; j++ { + ss[i].AddPoint(rng.NormFloat64()*5 + 500) + } + t.Logf("mean: %f, stddev: %f, count %f", ss[i].mean, ss[i].Stddev(), ss[i].n) + } + out := &MeanVar{} + for i := 0; i < N; i++ { + out.Combine(ss[i]) + t.Logf("combine: mean: %f, stddev: %f", out.mean, out.Stddev()) + } +} + +func TestCovar(t *testing.T) { + N := 16 + ss := make([]*Covar, N) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < N; i++ { + ss[i] = &Covar{} + maxJ := rng.Intn(1000) + 500 + for j := 0; j < maxJ; j++ { + x := rng.NormFloat64()*5 + 500 + ss[i].AddPoint(x, x*2-1000) + } + t.Logf("corell: %f, y = %f*x+%f @%.0f", ss[i].Correl(), ss[i].A(), ss[i].B(), ss[i].n) + t.Logf("\txVar: %f yVar: %f covar: %f", ss[i].StddevX(), ss[i].StddevY(), ss[i].Covariance()) + } + out := &Covar{} + for i := 0; i < N; i++ { + out.Combine(ss[i]) + t.Logf("combine: corell: %f, y = %f*x+%f", out.Correl(), out.A(), out.B()) + t.Logf("\txVar: %f yVar: %f covar: %f", out.StddevX(), out.StddevY(), 
out.Covariance()) + } +} diff --git a/lib/tracing/setup.go b/lib/tracing/setup.go index 141683b393a..b8c0399ad9d 100644 --- a/lib/tracing/setup.go +++ b/lib/tracing/setup.go @@ -2,6 +2,7 @@ package tracing import ( "os" + "strings" "contrib.go.opencensus.io/exporter/jaeger" logging "github.com/ipfs/go-log/v2" @@ -10,19 +11,63 @@ import ( var log = logging.Logger("tracing") -func SetupJaegerTracing(serviceName string) *jaeger.Exporter { +const ( + // environment variable names + envCollectorEndpoint = "LOTUS_JAEGER_COLLECTOR_ENDPOINT" + envAgentEndpoint = "LOTUS_JAEGER_AGENT_ENDPOINT" + envAgentHost = "LOTUS_JAEGER_AGENT_HOST" + envAgentPort = "LOTUS_JAEGER_AGENT_PORT" + envJaegerUser = "LOTUS_JAEGER_USERNAME" + envJaegerCred = "LOTUS_JAEGER_PASSWORD" +) - if _, ok := os.LookupEnv("LOTUS_JAEGER"); !ok { - return nil +// jaegerOptsFromEnv fills opts from the environment variables above and +// reports whether any endpoint was configured. +// When sending directly to the collector, agent options are ignored. +// The collector endpoint is an HTTP or HTTPS URL. +// The agent endpoint is a thrift/udp protocol and should be given +// as a string like "hostname:port". The agent can also be configured +// with separate host and port variables. +func jaegerOptsFromEnv(opts *jaeger.Options) bool { + var e string + var ok bool + if e, ok = os.LookupEnv(envJaegerUser); ok { + if p, ok := os.LookupEnv(envJaegerCred); ok { + opts.Username = e + opts.Password = p + } else { + log.Warn("jaeger username supplied with no password. 
authentication will not be used.") + } + } + if e, ok = os.LookupEnv(envCollectorEndpoint); ok { + opts.CollectorEndpoint = e + log.Infof("jaeger tracess will send to collector %s", e) + return true + } + if e, ok = os.LookupEnv(envAgentEndpoint); ok { + log.Infof("jaeger traces will be sent to agent %s", e) + opts.AgentEndpoint = e + return true + } + if e, ok = os.LookupEnv(envAgentHost); ok { + if p, ok := os.LookupEnv(envAgentPort); ok { + opts.AgentEndpoint = strings.Join([]string{e, p}, ":") + } else { + opts.AgentEndpoint = strings.Join([]string{e, "6831"}, ":") + } + log.Infof("jaeger traces will be sent to agent %s", opts.AgentEndpoint) + return true } - agentEndpointURI := os.Getenv("LOTUS_JAEGER") + return false +} - je, err := jaeger.NewExporter(jaeger.Options{ - AgentEndpoint: agentEndpointURI, - ServiceName: serviceName, - }) +func SetupJaegerTracing(serviceName string) *jaeger.Exporter { + opts := jaeger.Options{} + if !jaegerOptsFromEnv(&opts) { + return nil + } + opts.ServiceName = serviceName + je, err := jaeger.NewExporter(opts) if err != nil { - log.Errorw("Failed to create the Jaeger exporter", "error", err) + log.Errorw("failed to create the jaeger exporter", "error", err) return nil } diff --git a/lib/ulimit/ulimit.go b/lib/ulimit/ulimit.go index f9999cf6893..16bd4c9c148 100644 --- a/lib/ulimit/ulimit.go +++ b/lib/ulimit/ulimit.go @@ -8,7 +8,7 @@ import ( "strconv" "syscall" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("ulimit") diff --git a/lotuspond/front/src/chain/methods.json b/lotuspond/front/src/chain/methods.json index b271bfae545..5aced814a98 100644 --- a/lotuspond/front/src/chain/methods.json +++ b/lotuspond/front/src/chain/methods.json @@ -202,5 +202,319 @@ "AddVerifiedClient", "UseBytes", "RestoreBytes" + ], + "fil/3/account": [ + "Send", + "Constructor", + "PubkeyAddress" + ], + "fil/3/cron": [ + "Send", + "Constructor", + "EpochTick" + ], + "fil/3/init": [ + "Send", + 
"Constructor", + "Exec" + ], + "fil/3/multisig": [ + "Send", + "Constructor", + "Propose", + "Approve", + "Cancel", + "AddSigner", + "RemoveSigner", + "SwapSigner", + "ChangeNumApprovalsThreshold", + "LockBalance" + ], + "fil/3/paymentchannel": [ + "Send", + "Constructor", + "UpdateChannelState", + "Settle", + "Collect" + ], + "fil/3/reward": [ + "Send", + "Constructor", + "AwardBlockReward", + "ThisEpochReward", + "UpdateNetworkKPI" + ], + "fil/3/storagemarket": [ + "Send", + "Constructor", + "AddBalance", + "WithdrawBalance", + "PublishStorageDeals", + "VerifyDealsForActivation", + "ActivateDeals", + "OnMinerSectorsTerminate", + "ComputeDataCommitment", + "CronTick" + ], + "fil/3/storageminer": [ + "Send", + "Constructor", + "ControlAddresses", + "ChangeWorkerAddress", + "ChangePeerID", + "SubmitWindowedPoSt", + "PreCommitSector", + "ProveCommitSector", + "ExtendSectorExpiration", + "TerminateSectors", + "DeclareFaults", + "DeclareFaultsRecovered", + "OnDeferredCronEvent", + "CheckSectorProven", + "ApplyRewards", + "ReportConsensusFault", + "WithdrawBalance", + "ConfirmSectorProofsValid", + "ChangeMultiaddrs", + "CompactPartitions", + "CompactSectorNumbers", + "ConfirmUpdateWorkerKey", + "RepayDebt", + "ChangeOwnerAddress", + "DisputeWindowedPoSt" + ], + "fil/3/storagepower": [ + "Send", + "Constructor", + "CreateMiner", + "UpdateClaimedPower", + "EnrollCronEvent", + "OnEpochTickEnd", + "UpdatePledgeTotal", + "SubmitPoRepForBulkVerify", + "CurrentTotalPower" + ], + "fil/3/system": [ + "Send", + "Constructor" + ], + "fil/3/verifiedregistry": [ + "Send", + "Constructor", + "AddVerifier", + "RemoveVerifier", + "AddVerifiedClient", + "UseBytes", + "RestoreBytes" + ], + "fil/4/account": [ + "Send", + "Constructor", + "PubkeyAddress" + ], + "fil/4/cron": [ + "Send", + "Constructor", + "EpochTick" + ], + "fil/4/init": [ + "Send", + "Constructor", + "Exec" + ], + "fil/4/multisig": [ + "Send", + "Constructor", + "Propose", + "Approve", + "Cancel", + "AddSigner", + 
"RemoveSigner", + "SwapSigner", + "ChangeNumApprovalsThreshold", + "LockBalance" + ], + "fil/4/paymentchannel": [ + "Send", + "Constructor", + "UpdateChannelState", + "Settle", + "Collect" + ], + "fil/4/reward": [ + "Send", + "Constructor", + "AwardBlockReward", + "ThisEpochReward", + "UpdateNetworkKPI" + ], + "fil/4/storagemarket": [ + "Send", + "Constructor", + "AddBalance", + "WithdrawBalance", + "PublishStorageDeals", + "VerifyDealsForActivation", + "ActivateDeals", + "OnMinerSectorsTerminate", + "ComputeDataCommitment", + "CronTick" + ], + "fil/4/storageminer": [ + "Send", + "Constructor", + "ControlAddresses", + "ChangeWorkerAddress", + "ChangePeerID", + "SubmitWindowedPoSt", + "PreCommitSector", + "ProveCommitSector", + "ExtendSectorExpiration", + "TerminateSectors", + "DeclareFaults", + "DeclareFaultsRecovered", + "OnDeferredCronEvent", + "CheckSectorProven", + "ApplyRewards", + "ReportConsensusFault", + "WithdrawBalance", + "ConfirmSectorProofsValid", + "ChangeMultiaddrs", + "CompactPartitions", + "CompactSectorNumbers", + "ConfirmUpdateWorkerKey", + "RepayDebt", + "ChangeOwnerAddress", + "DisputeWindowedPoSt" + ], + "fil/4/storagepower": [ + "Send", + "Constructor", + "CreateMiner", + "UpdateClaimedPower", + "EnrollCronEvent", + "OnEpochTickEnd", + "UpdatePledgeTotal", + "SubmitPoRepForBulkVerify", + "CurrentTotalPower" + ], + "fil/4/system": [ + "Send", + "Constructor" + ], + "fil/4/verifiedregistry": [ + "Send", + "Constructor", + "AddVerifier", + "RemoveVerifier", + "AddVerifiedClient", + "UseBytes", + "RestoreBytes" + ], + "fil/5/account": [ + "Send", + "Constructor", + "PubkeyAddress" + ], + "fil/5/cron": [ + "Send", + "Constructor", + "EpochTick" + ], + "fil/5/init": [ + "Send", + "Constructor", + "Exec" + ], + "fil/5/multisig": [ + "Send", + "Constructor", + "Propose", + "Approve", + "Cancel", + "AddSigner", + "RemoveSigner", + "SwapSigner", + "ChangeNumApprovalsThreshold", + "LockBalance" + ], + "fil/5/paymentchannel": [ + "Send", + "Constructor", 
+ "UpdateChannelState", + "Settle", + "Collect" + ], + "fil/5/reward": [ + "Send", + "Constructor", + "AwardBlockReward", + "ThisEpochReward", + "UpdateNetworkKPI" + ], + "fil/5/storagemarket": [ + "Send", + "Constructor", + "AddBalance", + "WithdrawBalance", + "PublishStorageDeals", + "VerifyDealsForActivation", + "ActivateDeals", + "OnMinerSectorsTerminate", + "ComputeDataCommitment", + "CronTick" + ], + "fil/5/storageminer": [ + "Send", + "Constructor", + "ControlAddresses", + "ChangeWorkerAddress", + "ChangePeerID", + "SubmitWindowedPoSt", + "PreCommitSector", + "ProveCommitSector", + "ExtendSectorExpiration", + "TerminateSectors", + "DeclareFaults", + "DeclareFaultsRecovered", + "OnDeferredCronEvent", + "CheckSectorProven", + "ApplyRewards", + "ReportConsensusFault", + "WithdrawBalance", + "ConfirmSectorProofsValid", + "ChangeMultiaddrs", + "CompactPartitions", + "CompactSectorNumbers", + "ConfirmUpdateWorkerKey", + "RepayDebt", + "ChangeOwnerAddress", + "DisputeWindowedPoSt", + "PreCommitSectorBatch", + "ProveCommitAggregate" + ], + "fil/5/storagepower": [ + "Send", + "Constructor", + "CreateMiner", + "UpdateClaimedPower", + "EnrollCronEvent", + "OnEpochTickEnd", + "UpdatePledgeTotal", + "SubmitPoRepForBulkVerify", + "CurrentTotalPower" + ], + "fil/5/system": [ + "Send", + "Constructor" + ], + "fil/5/verifiedregistry": [ + "Send", + "Constructor", + "AddVerifier", + "RemoveVerifier", + "AddVerifiedClient", + "UseBytes", + "RestoreBytes" ] } \ No newline at end of file diff --git a/lotuspond/front/src/chain/send.js b/lotuspond/front/src/chain/send.js index a0fc89437d9..c0d36b0a33c 100644 --- a/lotuspond/front/src/chain/send.js +++ b/lotuspond/front/src/chain/send.js @@ -30,7 +30,7 @@ async function pushMessage(client, from, inmsg) { console.log(inmsg) - await client.call('Filecoin.MpoolPushMessage', [inmsg]) + await client.call('Filecoin.MpoolPushMessage', [inmsg, null]) } export default pushMessage diff --git a/lotuspond/spawn.go b/lotuspond/spawn.go index 
ce01b115e50..900c372b1ac 100644 --- a/lotuspond/spawn.go +++ b/lotuspond/spawn.go @@ -11,6 +11,9 @@ import ( "sync/atomic" "time" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/google/uuid" "golang.org/x/xerrors" @@ -48,7 +51,12 @@ func (api *api) Spawn() (nodeInfo, error) { } sbroot := filepath.Join(dir, "preseal") - genm, ki, err := seed.PreSeal(genMiner, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, 2, sbroot, []byte("8"), nil, false) + spt, err := miner.SealProofTypeFromSectorSize(2<<10, build.NewestNetworkVersion) + if err != nil { + return nodeInfo{}, err + } + + genm, ki, err := seed.PreSeal(genMiner, spt, 0, 2, sbroot, []byte("8"), nil, false) if err != nil { return nodeInfo{}, xerrors.Errorf("preseal failed: %w", err) } @@ -71,6 +79,7 @@ func (api *api) Spawn() (nodeInfo, error) { template.VerifregRootKey = gen.DefaultVerifregRootkeyActor template.RemainderAccount = gen.DefaultRemainderAccountActor template.NetworkName = "pond-" + uuid.New().String() + template.NetworkVersion = build.NewestNetworkVersion tb, err := json.Marshal(&template) if err != nil { @@ -142,11 +151,6 @@ func (api *api) Spawn() (nodeInfo, error) { api.runningLk.Lock() api.running[id].meta.State = NodeStopped api.runningLk.Unlock() - - //logfile.Close() - //errlogfile.Close() - - //close(mux.stop) }, } api.runningLk.Unlock() @@ -221,11 +225,6 @@ func (api *api) SpawnStorage(fullNodeRepo string) (nodeInfo, error) { api.runningLk.Lock() api.running[id].meta.State = NodeStopped api.runningLk.Unlock() - - //logfile.Close() - //errlogfile.Close() - - //close(mux.stop) }, } api.runningLk.Unlock() @@ -272,11 +271,6 @@ func (api *api) RestartNode(id int32) (nodeInfo, error) { api.runningLk.Lock() api.running[id].meta.State = NodeStopped api.runningLk.Unlock() - - //logfile.Close() - //errlogfile.Close() - - //close(mux.stop) } nd.meta.State = NodeRunning diff --git a/markets/loggers/loggers.go 
b/markets/loggers/loggers.go index a8e1c20aa69..e5f669f2f5c 100644 --- a/markets/loggers/loggers.go +++ b/markets/loggers/loggers.go @@ -1,6 +1,7 @@ package marketevents import ( + datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" @@ -11,22 +12,39 @@ var log = logging.Logger("markets") // StorageClientLogger logs events from the storage client func StorageClientLogger(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - log.Infow("storage event", "name", storagemarket.ClientEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message) + log.Infow("storage client event", "name", storagemarket.ClientEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message) } // StorageProviderLogger logs events from the storage provider func StorageProviderLogger(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { - log.Infow("storage event", "name", storagemarket.ProviderEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message) + log.Infow("storage provider event", "name", storagemarket.ProviderEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message) } // RetrievalClientLogger logs events from the retrieval client func RetrievalClientLogger(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) { - log.Infow("retrieval event", "name", retrievalmarket.ClientEvents[event], "deal ID", deal.ID, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message) + log.Infow("retrieval client event", "name", retrievalmarket.ClientEvents[event], "deal ID", deal.ID, "state", 
retrievalmarket.DealStatuses[deal.Status], "message", deal.Message) } // RetrievalProviderLogger logs events from the retrieval provider func RetrievalProviderLogger(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) { - log.Infow("retrieval event", "name", retrievalmarket.ProviderEvents[event], "deal ID", deal.ID, "receiver", deal.Receiver, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message) + log.Infow("retrieval provider event", "name", retrievalmarket.ProviderEvents[event], "deal ID", deal.ID, "receiver", deal.Receiver, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message) +} + +// DataTransferLogger logs events from the data transfer module +func DataTransferLogger(event datatransfer.Event, state datatransfer.ChannelState) { + log.Debugw("data transfer event", + "name", datatransfer.Events[event.Code], + "status", datatransfer.Statuses[state.Status()], + "transfer ID", state.TransferID(), + "channel ID", state.ChannelID(), + "sent", state.Sent(), + "received", state.Received(), + "queued", state.Queued(), + "received count", len(state.ReceivedCids()), + "total size", state.TotalSize(), + "remote peer", state.OtherPeer(), + "event message", event.Message, + "channel message", state.Message()) } // ReadyLogger returns a function to log the results of module initialization diff --git a/markets/pricing/cli.go b/markets/pricing/cli.go new file mode 100644 index 00000000000..3c2a5f2489c --- /dev/null +++ b/markets/pricing/cli.go @@ -0,0 +1,48 @@ +package pricing + +import ( + "bytes" + "context" + "encoding/json" + "os/exec" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "golang.org/x/xerrors" +) + +func ExternalRetrievalPricingFunc(cmd string) dtypes.RetrievalPricingFunc { + return func(ctx context.Context, pricingInput retrievalmarket.PricingInput) (retrievalmarket.Ask, error) { + return runPricingFunc(ctx, cmd, 
pricingInput) + } +} + +func runPricingFunc(_ context.Context, cmd string, params interface{}) (retrievalmarket.Ask, error) { + j, err := json.Marshal(params) + if err != nil { + return retrievalmarket.Ask{}, err + } + + var out bytes.Buffer + var errb bytes.Buffer + + c := exec.Command("sh", "-c", cmd) + c.Stdin = bytes.NewReader(j) + c.Stdout = &out + c.Stderr = &errb + + switch err := c.Run().(type) { + case nil: + bz := out.Bytes() + resp := retrievalmarket.Ask{} + + if err := json.Unmarshal(bz, &resp); err != nil { + return resp, xerrors.Errorf("failed to parse pricing output %s, err=%w", string(bz), err) + } + return resp, nil + case *exec.ExitError: + return retrievalmarket.Ask{}, xerrors.Errorf("pricing func exited with error: %s", errb.String()) + default: + return retrievalmarket.Ask{}, xerrors.Errorf("pricing func cmd run error: %w", err) + } +} diff --git a/markets/retrievaladapter/provider.go b/markets/retrievaladapter/provider.go index 674ec479385..2f630580569 100644 --- a/markets/retrievaladapter/provider.go +++ b/markets/retrievaladapter/provider.go @@ -5,30 +5,41 @@ import ( "io" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/storage/sectorblocks" + "github.com/hashicorp/go-multierror" + "golang.org/x/xerrors" + + "github.com/ipfs/go-cid" + "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" - "github.com/filecoin-project/lotus/storage" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/shared" "github.com/filecoin-project/go-state-types/abi" + specstorage "github.com/filecoin-project/specs-storage/storage" - 
"github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" ) +var log = logging.Logger("retrievaladapter") + type retrievalProviderNode struct { - miner *storage.Miner - sealer sectorstorage.SectorManager - full api.FullNode + maddr address.Address + secb sectorblocks.SectorBuilder + pp sectorstorage.PieceProvider + full v1api.FullNode } // NewRetrievalProviderNode returns a new node adapter for a retrieval provider that talks to the // Lotus Node -func NewRetrievalProviderNode(miner *storage.Miner, sealer sectorstorage.SectorManager, full api.FullNode) retrievalmarket.RetrievalProviderNode { - return &retrievalProviderNode{miner, sealer, full} +func NewRetrievalProviderNode(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sectorstorage.PieceProvider, full v1api.FullNode) retrievalmarket.RetrievalProviderNode { + return &retrievalProviderNode{address.Address(maddr), secb, pp, full} } func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) { @@ -42,30 +53,37 @@ func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, min } func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { - si, err := rpn.miner.GetSectorInfo(sectorID) + log.Debugf("get sector %d, offset %d, length %d", sectorID, offset, length) + si, err := rpn.sectorsStatus(ctx, sectorID, false) if err != nil { return nil, err } - mid, err := address.IDFromAddress(rpn.miner.Address()) + mid, err := address.IDFromAddress(rpn.maddr) if err != nil { return nil, err } - sid := abi.SectorID{ - Miner: abi.ActorID(mid), - Number: sectorID, + ref := specstorage.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: sectorID, + }, + ProofType: si.SealProof, } - r, w := io.Pipe() - go func() { - var commD cid.Cid - if si.CommD != nil { - commD = *si.CommD 
- } - err := rpn.sealer.ReadPiece(ctx, w, sid, storiface.UnpaddedByteIndex(offset), length, si.TicketValue, commD) - _ = w.CloseWithError(err) - }() + var commD cid.Cid + if si.CommD != nil { + commD = *si.CommD + } + + // Get a reader for the piece, unsealing the piece if necessary + log.Debugf("read piece in sector %d, offset %d, length %d from miner %d", sectorID, offset, length, mid) + r, unsealed, err := rpn.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(offset), length, si.Ticket.Value, commD) + if err != nil { + return nil, xerrors.Errorf("failed to unseal piece from sector %d: %w", sectorID, err) + } + _ = unsealed // todo: use return r, nil } @@ -85,3 +103,109 @@ func (rpn *retrievalProviderNode) GetChainHead(ctx context.Context) (shared.TipS return head.Key().Bytes(), head.Height(), nil } + +func (rpn *retrievalProviderNode) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { + si, err := rpn.sectorsStatus(ctx, sectorID, true) + if err != nil { + return false, xerrors.Errorf("failed to get sector info: %w", err) + } + + mid, err := address.IDFromAddress(rpn.maddr) + if err != nil { + return false, err + } + + ref := specstorage.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: sectorID, + }, + ProofType: si.SealProof, + } + + log.Debugf("will call IsUnsealed now sector=%+v, offset=%d, size=%d", sectorID, offset, length) + return rpn.pp.IsUnsealed(ctx, ref, storiface.UnpaddedByteIndex(offset), length) +} + +// GetRetrievalPricingInput takes a set of candidate storage deals that can serve a retrieval request, +// and returns an minimally populated PricingInput. This PricingInput should be enhanced +// with more data, and passed to the pricing function to determine the final quoted price. 
+func (rpn *retrievalProviderNode) GetRetrievalPricingInput(ctx context.Context, pieceCID cid.Cid, storageDeals []abi.DealID) (retrievalmarket.PricingInput, error) { + resp := retrievalmarket.PricingInput{} + + head, err := rpn.full.ChainHead(ctx) + if err != nil { + return resp, xerrors.Errorf("failed to get chain head: %w", err) + } + tsk := head.Key() + + var mErr error + + for _, dealID := range storageDeals { + ds, err := rpn.full.StateMarketStorageDeal(ctx, dealID, tsk) + if err != nil { + log.Warnf("failed to look up deal %d on chain: err=%w", dealID, err) + mErr = multierror.Append(mErr, err) + continue + } + if ds.Proposal.VerifiedDeal { + resp.VerifiedDeal = true + } + + if ds.Proposal.PieceCID.Equals(pieceCID) { + resp.PieceSize = ds.Proposal.PieceSize.Unpadded() + } + + // If we've discovered a verified deal with the required PieceCID, we don't need + // to lookup more deals and we're done. + if resp.VerifiedDeal && resp.PieceSize != 0 { + break + } + } + + // Note: The piece size can never actually be zero. We only use it to here + // to assert that we didn't find a matching piece. 
+ if resp.PieceSize == 0 { + if mErr == nil { + return resp, xerrors.New("failed to find matching piece") + } + + return resp, xerrors.Errorf("failed to fetch storage deal state: %w", mErr) + } + + return resp, nil +} + +func (rpn *retrievalProviderNode) sectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { + sInfo, err := rpn.secb.SectorsStatus(ctx, sid, false) + if err != nil { + return api.SectorInfo{}, err + } + + if !showOnChainInfo { + return sInfo, nil + } + + onChainInfo, err := rpn.full.StateSectorGetInfo(ctx, rpn.maddr, sid, types.EmptyTSK) + if err != nil { + return sInfo, err + } + if onChainInfo == nil { + return sInfo, nil + } + sInfo.SealProof = onChainInfo.SealProof + sInfo.Activation = onChainInfo.Activation + sInfo.Expiration = onChainInfo.Expiration + sInfo.DealWeight = onChainInfo.DealWeight + sInfo.VerifiedDealWeight = onChainInfo.VerifiedDealWeight + sInfo.InitialPledge = onChainInfo.InitialPledge + + ex, err := rpn.full.StateSectorExpiration(ctx, rpn.maddr, sid, types.EmptyTSK) + if err != nil { + return sInfo, nil + } + sInfo.OnTime = ex.OnTime + sInfo.Early = ex.Early + + return sInfo, nil +} diff --git a/markets/retrievaladapter/provider_test.go b/markets/retrievaladapter/provider_test.go new file mode 100644 index 00000000000..eca3b11527e --- /dev/null +++ b/markets/retrievaladapter/provider_test.go @@ -0,0 +1,202 @@ +package retrievaladapter + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + testnet "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/mocks" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/types" + "github.com/golang/mock/gomock" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" 
+) + +func TestGetPricingInput(t *testing.T) { + ctx := context.Background() + tsk := &types.TipSet{} + key := tsk.Key() + + pcid := testnet.GenerateCids(1)[0] + deals := []abi.DealID{1, 2} + paddedSize := abi.PaddedPieceSize(128) + unpaddedSize := paddedSize.Unpadded() + + tcs := map[string]struct { + pieceCid cid.Cid + deals []abi.DealID + fFnc func(node *mocks.MockFullNode) + + expectedErrorStr string + expectedVerified bool + expectedPieceSize abi.UnpaddedPieceSize + }{ + "error when fails to fetch chain head": { + fFnc: func(n *mocks.MockFullNode) { + n.EXPECT().ChainHead(gomock.Any()).Return(tsk, xerrors.New("chain head error")).Times(1) + }, + expectedErrorStr: "chain head error", + }, + + "error when no piece matches": { + fFnc: func(n *mocks.MockFullNode) { + out1 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: testnet.GenerateCids(1)[0], + }, + } + out2 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: testnet.GenerateCids(1)[0], + }, + } + + n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) + gomock.InOrder( + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, nil), + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), + ) + + }, + expectedErrorStr: "failed to find matching piece", + }, + + "error when fails to fetch deal state": { + fFnc: func(n *mocks.MockFullNode) { + out1 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: pcid, + PieceSize: paddedSize, + }, + } + out2 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: testnet.GenerateCids(1)[0], + VerifiedDeal: true, + }, + } + + n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) + gomock.InOrder( + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, xerrors.New("error 1")), + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, xerrors.New("error 2")), + ) + + }, + expectedErrorStr: "failed to fetch storage deal state", 
+ }, + + "verified is true even if one deal is verified and we get the correct piecesize": { + fFnc: func(n *mocks.MockFullNode) { + out1 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: pcid, + PieceSize: paddedSize, + }, + } + out2 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: testnet.GenerateCids(1)[0], + VerifiedDeal: true, + }, + } + + n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) + gomock.InOrder( + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, nil), + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), + ) + + }, + expectedPieceSize: unpaddedSize, + expectedVerified: true, + }, + + "success even if one deal state fetch errors out but the other deal is verified and has the required piececid": { + fFnc: func(n *mocks.MockFullNode) { + out1 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: testnet.GenerateCids(1)[0], + }, + } + out2 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: pcid, + PieceSize: paddedSize, + VerifiedDeal: true, + }, + } + + n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) + gomock.InOrder( + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, xerrors.New("some error")), + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), + ) + + }, + expectedPieceSize: unpaddedSize, + expectedVerified: true, + }, + + "verified is false if both deals are unverified and we get the correct piece size": { + fFnc: func(n *mocks.MockFullNode) { + out1 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: pcid, + PieceSize: paddedSize, + VerifiedDeal: false, + }, + } + out2 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: testnet.GenerateCids(1)[0], + VerifiedDeal: false, + }, + } + + n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) + gomock.InOrder( + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], 
key).Return(out1, nil), + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), + ) + + }, + expectedPieceSize: unpaddedSize, + expectedVerified: false, + }, + } + + for name, tc := range tcs { + tc := tc + t.Run(name, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. + defer mockCtrl.Finish() + + mockFull := mocks.NewMockFullNode(mockCtrl) + rpn := &retrievalProviderNode{ + full: mockFull, + } + if tc.fFnc != nil { + tc.fFnc(mockFull) + } + + resp, err := rpn.GetRetrievalPricingInput(ctx, pcid, deals) + + if tc.expectedErrorStr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expectedErrorStr) + require.Equal(t, retrievalmarket.PricingInput{}, resp) + } else { + require.NoError(t, err) + require.Equal(t, tc.expectedPieceSize, resp.PieceSize) + require.Equal(t, tc.expectedVerified, resp.VerifiedDeal) + } + }) + } +} diff --git a/markets/storageadapter/api.go b/markets/storageadapter/api.go new file mode 100644 index 00000000000..c49a96f885b --- /dev/null +++ b/markets/storageadapter/api.go @@ -0,0 +1,53 @@ +package storageadapter + +import ( + "context" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/actors/adt" + + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" +) + +type apiWrapper struct { + api interface { + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) + ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainHasObj(context.Context, cid.Cid) (bool, error) + } +} + +func (ca *apiWrapper) diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) { + store := adt.WrapStore(ctx, 
cbor.NewCborStore(blockstore.NewAPIBlockstore(ca.api))) + + preAct, err := ca.api.StateGetActor(ctx, actor, pre) + if err != nil { + return nil, xerrors.Errorf("getting pre actor: %w", err) + } + curAct, err := ca.api.StateGetActor(ctx, actor, cur) + if err != nil { + return nil, xerrors.Errorf("getting cur actor: %w", err) + } + + preSt, err := miner.Load(store, preAct) + if err != nil { + return nil, xerrors.Errorf("loading miner actor: %w", err) + } + curSt, err := miner.Load(store, curAct) + if err != nil { + return nil, xerrors.Errorf("loading miner actor: %w", err) + } + + diff, err := miner.DiffPreCommits(preSt, curSt) + if err != nil { + return nil, xerrors.Errorf("diff precommits: %w", err) + } + + return diff, err +} diff --git a/markets/storageadapter/client.go b/markets/storageadapter/client.go index f299dd4d510..80ead2be3b4 100644 --- a/markets/storageadapter/client.go +++ b/markets/storageadapter/client.go @@ -6,11 +6,12 @@ import ( "bytes" "context" - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" "github.com/ipfs/go-cid" + "go.uber.org/fx" "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/go-fil-markets/shared" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" @@ -23,7 +24,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + marketactor "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/events/state" "github.com/filecoin-project/lotus/chain/market" @@ -31,31 +32,38 @@ import ( "github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/markets/utils" "github.com/filecoin-project/lotus/node/impl/full" + 
"github.com/filecoin-project/lotus/node/modules/helpers" ) type ClientNodeAdapter struct { - full.StateAPI - full.ChainAPI - full.MpoolAPI + *clientApi - fm *market.FundMgr - ev *events.Events + fundmgr *market.FundManager + ev *events.Events + dsMatcher *dealStateMatcher + scMgr *SectorCommittedManager } type clientApi struct { full.ChainAPI full.StateAPI + full.MpoolAPI } -func NewClientNodeAdapter(state full.StateAPI, chain full.ChainAPI, mpool full.MpoolAPI, fm *market.FundMgr) storagemarket.StorageClientNode { - return &ClientNodeAdapter{ - StateAPI: state, - ChainAPI: chain, - MpoolAPI: mpool, +func NewClientNodeAdapter(mctx helpers.MetricsCtx, lc fx.Lifecycle, stateapi full.StateAPI, chain full.ChainAPI, mpool full.MpoolAPI, fundmgr *market.FundManager) storagemarket.StorageClientNode { + capi := &clientApi{chain, stateapi, mpool} + ctx := helpers.LifecycleCtx(mctx, lc) - fm: fm, - ev: events.NewEvents(context.TODO(), &clientApi{chain, state}), + ev := events.NewEvents(ctx, capi) + a := &ClientNodeAdapter{ + clientApi: capi, + + fundmgr: fundmgr, + ev: ev, + dsMatcher: newDealStateMatcher(state.NewStatePredicates(state.WrapFastAPI(capi))), } + a.scMgr = NewSectorCommittedManager(ev, a, &apiWrapper{api: capi}) + return a } func (c *ClientNodeAdapter) ListStorageProviders(ctx context.Context, encodedTs shared.TipSetToken) ([]*storagemarket.StorageProviderInfo, error) { @@ -109,8 +117,12 @@ func (c *ClientNodeAdapter) AddFunds(ctx context.Context, addr address.Address, return smsg.Cid(), nil } -func (c *ClientNodeAdapter) EnsureFunds(ctx context.Context, addr, wallet address.Address, amount abi.TokenAmount, ts shared.TipSetToken) (cid.Cid, error) { - return c.fm.EnsureAvailable(ctx, addr, wallet, amount) +func (c *ClientNodeAdapter) ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return c.fundmgr.Reserve(ctx, wallet, addr, amt) +} + +func (c *ClientNodeAdapter) ReleaseFunds(ctx context.Context, addr 
address.Address, amt abi.TokenAmount) error { + return c.fundmgr.Release(addr, amt) } func (c *ClientNodeAdapter) GetBalance(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (storagemarket.Balance, error) { @@ -129,6 +141,7 @@ func (c *ClientNodeAdapter) GetBalance(ctx context.Context, addr address.Address // ValidatePublishedDeal validates that the provided deal has appeared on chain and references the same ClientDeal // returns the Deal id if there is no error +// TODO: Don't return deal ID func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal storagemarket.ClientDeal) (abi.DealID, error) { log.Infow("DEAL ACCEPTED!") @@ -147,8 +160,16 @@ func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal stor return 0, xerrors.Errorf("failed to resolve from msg ID addr: %w", err) } - if fromid != mi.Worker { - return 0, xerrors.Errorf("deal wasn't published by storage provider: from=%s, provider=%s", pubmsg.From, deal.Proposal.Provider) + var pubOk bool + pubAddrs := append([]address.Address{mi.Worker, mi.Owner}, mi.ControlAddresses...) 
+ for _, a := range pubAddrs { + if fromid == a { + pubOk = true + break + } + } + if !pubOk { + return 0, xerrors.Errorf("deal wasn't published by storage provider: from=%s, provider=%s,%+v", pubmsg.From, deal.Proposal.Provider, pubAddrs) } if pubmsg.To != miner2.StorageMarketActorAddr { @@ -183,7 +204,7 @@ func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal stor } // TODO: timeout - ret, err := c.StateWaitMsg(ctx, *deal.PublishMessage, build.MessageConfidence) + ret, err := c.StateWaitMsg(ctx, *deal.PublishMessage, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { return 0, xerrors.Errorf("waiting for deal publish message: %w", err) } @@ -199,7 +220,13 @@ func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal stor return res.IDs[dealIdx], nil } -const clientOverestimation = 2 +var clientOverestimation = struct { + numerator int64 + denominator int64 +}{ + numerator: 12, + denominator: 10, +} func (c *ClientNodeAdapter) DealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, isVerified bool) (abi.TokenAmount, abi.TokenAmount, error) { bounds, err := c.StateDealProviderCollateralBounds(ctx, size, isVerified, types.EmptyTSK) @@ -207,110 +234,22 @@ func (c *ClientNodeAdapter) DealProviderCollateralBounds(ctx context.Context, si return abi.TokenAmount{}, abi.TokenAmount{}, err } - return big.Mul(bounds.Min, big.NewInt(clientOverestimation)), bounds.Max, nil + min := big.Mul(bounds.Min, big.NewInt(clientOverestimation.numerator)) + min = big.Div(min, big.NewInt(clientOverestimation.denominator)) + return min, bounds.Max, nil } -func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealId abi.DealID, cb storagemarket.DealSectorCommittedCallback) error { - checkFunc := func(ts *types.TipSet) (done bool, more bool, err error) { - sd, err := c.StateMarketStorageDeal(ctx, dealId, ts.Key()) - - if err != nil { - // TODO: This may be fine for some 
errors - return false, false, xerrors.Errorf("client: failed to look up deal on chain: %w", err) - } - - if sd.State.SectorStartEpoch > 0 { - cb(nil) - return true, false, nil - } - - return false, true, nil - } - - called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { - defer func() { - if err != nil { - cb(xerrors.Errorf("handling applied event: %w", err)) - } - }() - - if msg == nil { - log.Error("timed out waiting for deal activation... what now?") - return false, nil - } - - sd, err := c.StateMarketStorageDeal(ctx, dealId, ts.Key()) - if err != nil { - return false, xerrors.Errorf("failed to look up deal on chain: %w", err) - } - - if sd.State.SectorStartEpoch < 1 { - return false, xerrors.Errorf("deal wasn't active: deal=%d, parentState=%s, h=%d", dealId, ts.ParentState(), ts.Height()) - } - - log.Infof("Storage deal %d activated at epoch %d", dealId, sd.State.SectorStartEpoch) - - cb(nil) - - return false, nil - } - - revert := func(ctx context.Context, ts *types.TipSet) error { - log.Warn("deal activation reverted; TODO: actually handle this!") - // TODO: Just go back to DealSealing? 
- return nil - } - - var sectorNumber abi.SectorNumber - var sectorFound bool - matchEvent := func(msg *types.Message) (matchOnce bool, matched bool, err error) { - if msg.To != provider { - return true, false, nil - } - - switch msg.Method { - case miner2.MethodsMiner.PreCommitSector: - var params miner.SectorPreCommitInfo - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return true, false, xerrors.Errorf("unmarshal pre commit: %w", err) - } - - for _, did := range params.DealIDs { - if did == dealId { - sectorNumber = params.SectorNumber - sectorFound = true - return true, false, nil - } - } - - return true, false, nil - case miner2.MethodsMiner.ProveCommitSector: - var params miner.ProveCommitSectorParams - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return true, false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) - } - - if !sectorFound { - return true, false, nil - } - - if params.SectorNumber != sectorNumber { - return true, false, nil - } - - return false, true, nil - default: - return true, false, nil - } - } - - if err := c.ev.Called(checkFunc, called, revert, int(build.MessageConfidence+1), events.NoTimeout, matchEvent); err != nil { - return xerrors.Errorf("failed to set up called handler: %w", err) - } +// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer) +func (c *ClientNodeAdapter) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal market2.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorPreCommittedCallback) error { + return c.scMgr.OnDealSectorPreCommitted(ctx, provider, marketactor.DealProposal(proposal), *publishCid, cb) +} - return nil +// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer) +func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber 
abi.SectorNumber, proposal market2.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorCommittedCallback) error { + return c.scMgr.OnDealSectorCommitted(ctx, provider, sectorNumber, marketactor.DealProposal(proposal), *publishCid, cb) } +// TODO: Replace dealID parameter with DealProposal func (c *ClientNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired storagemarket.DealExpiredCallback, onDealSlashed storagemarket.DealSlashedCallback) error { head, err := c.ChainHead(ctx) if err != nil { @@ -350,7 +289,7 @@ func (c *ClientNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID a // and the chain has advanced to the confidence height stateChanged := func(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) { // Check if the deal has already expired - if sd.Proposal.EndEpoch <= ts2.Height() { + if ts2 == nil || sd.Proposal.EndEpoch <= ts2.Height() { onDealExpired(nil) return false, nil } @@ -389,13 +328,7 @@ func (c *ClientNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID a } // Watch for state changes to the deal - preds := state.NewStatePredicates(c) - dealDiff := preds.OnStorageMarketActorChanged( - preds.OnDealStateChanged( - preds.DealStateChangedForIDs([]abi.DealID{dealID}))) - match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { - return dealDiff(ctx, oldTs.Key(), newTs.Key()) - } + match := c.dsMatcher.matcher(ctx, dealID) // Wait until after the end epoch for the deal and then timeout timeout := (sd.Proposal.EndEpoch - head.Height()) + 1 @@ -446,7 +379,7 @@ func (c *ClientNodeAdapter) GetChainHead(ctx context.Context) (shared.TipSetToke } func (c *ClientNodeAdapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error) error { - receipt, err := c.StateWaitMsg(ctx, mcid, build.MessageConfidence) + receipt, err := c.StateWaitMsg(ctx, 
mcid, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { return cb(0, nil, cid.Undef, err) } diff --git a/markets/storageadapter/dealpublisher.go b/markets/storageadapter/dealpublisher.go new file mode 100644 index 00000000000..9f7ba162953 --- /dev/null +++ b/markets/storageadapter/dealpublisher.go @@ -0,0 +1,394 @@ +package storageadapter + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/ipfs/go-cid" + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/storage" +) + +type dealPublisherAPI interface { + ChainHead(context.Context) (*types.TipSet, error) + MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) + StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) + + WalletBalance(context.Context, address.Address) (types.BigInt, error) + WalletHas(context.Context, address.Address) (bool, error) + StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) + StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) +} + +// DealPublisher batches deal publishing so that many deals can be included in +// a single publish message. This saves gas for miners that publish deals +// frequently. 
+// When a deal is submitted, the DealPublisher waits a configurable amount of +// time for other deals to be submitted before sending the publish message. +// There is a configurable maximum number of deals that can be included in one +// message. When the limit is reached the DealPublisher immediately submits a +// publish message with all deals in the queue. +type DealPublisher struct { + api dealPublisherAPI + as *storage.AddressSelector + + ctx context.Context + Shutdown context.CancelFunc + + maxDealsPerPublishMsg uint64 + publishPeriod time.Duration + publishSpec *api.MessageSendSpec + + lk sync.Mutex + pending []*pendingDeal + cancelWaitForMoreDeals context.CancelFunc + publishPeriodStart time.Time +} + +// A deal that is queued to be published +type pendingDeal struct { + ctx context.Context + deal market2.ClientDealProposal + Result chan publishResult +} + +// The result of publishing a deal +type publishResult struct { + msgCid cid.Cid + err error +} + +func newPendingDeal(ctx context.Context, deal market2.ClientDealProposal) *pendingDeal { + return &pendingDeal{ + ctx: ctx, + deal: deal, + Result: make(chan publishResult), + } +} + +type PublishMsgConfig struct { + // The amount of time to wait for more deals to arrive before + // publishing + Period time.Duration + // The maximum number of deals to include in a single PublishStorageDeals + // message + MaxDealsPerMsg uint64 +} + +func NewDealPublisher( + feeConfig *config.MinerFeeConfig, + publishMsgCfg PublishMsgConfig, +) func(lc fx.Lifecycle, full api.FullNode, as *storage.AddressSelector) *DealPublisher { + return func(lc fx.Lifecycle, full api.FullNode, as *storage.AddressSelector) *DealPublisher { + maxFee := abi.NewTokenAmount(0) + if feeConfig != nil { + maxFee = abi.TokenAmount(feeConfig.MaxPublishDealsFee) + } + publishSpec := &api.MessageSendSpec{MaxFee: maxFee} + dp := newDealPublisher(full, as, publishMsgCfg, publishSpec) + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + 
dp.Shutdown() + return nil + }, + }) + return dp + } +} + +func newDealPublisher( + dpapi dealPublisherAPI, + as *storage.AddressSelector, + publishMsgCfg PublishMsgConfig, + publishSpec *api.MessageSendSpec, +) *DealPublisher { + ctx, cancel := context.WithCancel(context.Background()) + return &DealPublisher{ + api: dpapi, + as: as, + ctx: ctx, + Shutdown: cancel, + maxDealsPerPublishMsg: publishMsgCfg.MaxDealsPerMsg, + publishPeriod: publishMsgCfg.Period, + publishSpec: publishSpec, + } +} + +// PendingDeals returns the list of deals that are queued up to be published +func (p *DealPublisher) PendingDeals() api.PendingDealInfo { + p.lk.Lock() + defer p.lk.Unlock() + + // Filter out deals whose context has been cancelled + deals := make([]*pendingDeal, 0, len(p.pending)) + for _, dl := range p.pending { + if dl.ctx.Err() == nil { + deals = append(deals, dl) + } + } + + pending := make([]market2.ClientDealProposal, len(deals)) + for i, deal := range deals { + pending[i] = deal.deal + } + + return api.PendingDealInfo{ + Deals: pending, + PublishPeriodStart: p.publishPeriodStart, + PublishPeriod: p.publishPeriod, + } +} + +// ForcePublishPendingDeals publishes all pending deals without waiting for +// the publish period to elapse +func (p *DealPublisher) ForcePublishPendingDeals() { + p.lk.Lock() + defer p.lk.Unlock() + + log.Infof("force publishing deals") + p.publishAllDeals() +} + +func (p *DealPublisher) Publish(ctx context.Context, deal market2.ClientDealProposal) (cid.Cid, error) { + pdeal := newPendingDeal(ctx, deal) + + // Add the deal to the queue + p.processNewDeal(pdeal) + + // Wait for the deal to be submitted + select { + case <-ctx.Done(): + return cid.Undef, ctx.Err() + case res := <-pdeal.Result: + return res.msgCid, res.err + } +} + +func (p *DealPublisher) processNewDeal(pdeal *pendingDeal) { + p.lk.Lock() + defer p.lk.Unlock() + + // Filter out any cancelled deals + p.filterCancelledDeals() + + // If all deals have been cancelled, clear the 
wait-for-deals timer + if len(p.pending) == 0 && p.cancelWaitForMoreDeals != nil { + p.cancelWaitForMoreDeals() + p.cancelWaitForMoreDeals = nil + } + + // Make sure the new deal hasn't been cancelled + if pdeal.ctx.Err() != nil { + return + } + + // Add the new deal to the queue + p.pending = append(p.pending, pdeal) + log.Infof("add deal with piece CID %s to publish deals queue - %d deals in queue (max queue size %d)", + pdeal.deal.Proposal.PieceCID, len(p.pending), p.maxDealsPerPublishMsg) + + // If the maximum number of deals per message has been reached, + // send a publish message + if uint64(len(p.pending)) >= p.maxDealsPerPublishMsg { + log.Infof("publish deals queue has reached max size of %d, publishing deals", p.maxDealsPerPublishMsg) + p.publishAllDeals() + return + } + + // Otherwise wait for more deals to arrive or the timeout to be reached + p.waitForMoreDeals() +} + +func (p *DealPublisher) waitForMoreDeals() { + // Check if we're already waiting for deals + if !p.publishPeriodStart.IsZero() { + elapsed := time.Since(p.publishPeriodStart) + log.Infof("%s elapsed of / %s until publish deals queue is published", + elapsed, p.publishPeriod) + return + } + + // Set a timeout to wait for more deals to arrive + log.Infof("waiting publish deals queue period of %s before publishing", p.publishPeriod) + ctx, cancel := context.WithCancel(p.ctx) + p.publishPeriodStart = time.Now() + p.cancelWaitForMoreDeals = cancel + + go func() { + timer := time.NewTimer(p.publishPeriod) + select { + case <-ctx.Done(): + timer.Stop() + case <-timer.C: + p.lk.Lock() + defer p.lk.Unlock() + + // The timeout has expired so publish all pending deals + log.Infof("publish deals queue period of %s has expired, publishing deals", p.publishPeriod) + p.publishAllDeals() + } + }() +} + +func (p *DealPublisher) publishAllDeals() { + // If the timeout hasn't yet been cancelled, cancel it + if p.cancelWaitForMoreDeals != nil { + p.cancelWaitForMoreDeals() + p.cancelWaitForMoreDeals = nil 
+ p.publishPeriodStart = time.Time{} + } + + // Filter out any deals that have been cancelled + p.filterCancelledDeals() + deals := p.pending[:] + p.pending = nil + + // Send the publish message + go p.publishReady(deals) +} + +func (p *DealPublisher) publishReady(ready []*pendingDeal) { + if len(ready) == 0 { + return + } + + // onComplete is called when the publish message has been sent or there + // was an error + onComplete := func(pd *pendingDeal, msgCid cid.Cid, err error) { + // Send the publish result on the pending deal's Result channel + res := publishResult{ + msgCid: msgCid, + err: err, + } + select { + case <-p.ctx.Done(): + case <-pd.ctx.Done(): + case pd.Result <- res: + } + } + + // Validate each deal to make sure it can be published + validated := make([]*pendingDeal, 0, len(ready)) + deals := make([]market2.ClientDealProposal, 0, len(ready)) + for _, pd := range ready { + // Validate the deal + if err := p.validateDeal(pd.deal); err != nil { + // Validation failed, complete immediately with an error + go onComplete(pd, cid.Undef, err) + continue + } + + validated = append(validated, pd) + deals = append(deals, pd.deal) + } + + // Send the publish message + msgCid, err := p.publishDealProposals(deals) + + // Signal that each deal has been published + for _, pd := range validated { + go onComplete(pd, msgCid, err) + } +} + +// validateDeal checks that the deal proposal start epoch hasn't already +// elapsed +func (p *DealPublisher) validateDeal(deal market2.ClientDealProposal) error { + head, err := p.api.ChainHead(p.ctx) + if err != nil { + return err + } + if head.Height() > deal.Proposal.StartEpoch { + return xerrors.Errorf( + "cannot publish deal with piece CID %s: current epoch %d has passed deal proposal start epoch %d", + deal.Proposal.PieceCID, head.Height(), deal.Proposal.StartEpoch) + } + return nil +} + +// Sends the publish message +func (p *DealPublisher) publishDealProposals(deals []market2.ClientDealProposal) (cid.Cid, error) { + if 
len(deals) == 0 { + return cid.Undef, nil + } + + log.Infof("publishing %d deals in publish deals queue with piece CIDs: %s", len(deals), pieceCids(deals)) + + provider := deals[0].Proposal.Provider + for _, dl := range deals { + if dl.Proposal.Provider != provider { + msg := fmt.Sprintf("publishing %d deals failed: ", len(deals)) + + "not all deals are for same provider: " + + fmt.Sprintf("deal with piece CID %s is for provider %s ", deals[0].Proposal.PieceCID, deals[0].Proposal.Provider) + + fmt.Sprintf("but deal with piece CID %s is for provider %s", dl.Proposal.PieceCID, dl.Proposal.Provider) + return cid.Undef, xerrors.Errorf(msg) + } + } + + mi, err := p.api.StateMinerInfo(p.ctx, provider, types.EmptyTSK) + if err != nil { + return cid.Undef, err + } + + params, err := actors.SerializeParams(&market2.PublishStorageDealsParams{ + Deals: deals, + }) + + if err != nil { + return cid.Undef, xerrors.Errorf("serializing PublishStorageDeals params failed: %w", err) + } + + addr, _, err := p.as.AddressFor(p.ctx, p.api, mi, api.DealPublishAddr, big.Zero(), big.Zero()) + if err != nil { + return cid.Undef, xerrors.Errorf("selecting address for publishing deals: %w", err) + } + + smsg, err := p.api.MpoolPushMessage(p.ctx, &types.Message{ + To: market.Address, + From: addr, + Value: types.NewInt(0), + Method: market.Methods.PublishStorageDeals, + Params: params, + }, p.publishSpec) + + if err != nil { + return cid.Undef, err + } + return smsg.Cid(), nil +} + +func pieceCids(deals []market2.ClientDealProposal) string { + cids := make([]string, 0, len(deals)) + for _, dl := range deals { + cids = append(cids, dl.Proposal.PieceCID.String()) + } + return strings.Join(cids, ", ") +} + +// filter out deals that have been cancelled +func (p *DealPublisher) filterCancelledDeals() { + i := 0 + for _, pd := range p.pending { + if pd.ctx.Err() == nil { + p.pending[i] = pd + i++ + } + } + p.pending = p.pending[:i] +} diff --git a/markets/storageadapter/dealpublisher_test.go 
b/markets/storageadapter/dealpublisher_test.go new file mode 100644 index 00000000000..b2f107bf4e9 --- /dev/null +++ b/markets/storageadapter/dealpublisher_test.go @@ -0,0 +1,350 @@ +package storageadapter + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/crypto" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + "github.com/ipfs/go-cid" + + "github.com/stretchr/testify/require" + + tutils "github.com/filecoin-project/specs-actors/v2/support/testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" +) + +func TestDealPublisher(t *testing.T) { + t.Skip("this test randomly fails in various subtests; see issue #6799") + testCases := []struct { + name string + publishPeriod time.Duration + maxDealsPerMsg uint64 + dealCountWithinPublishPeriod int + ctxCancelledWithinPublishPeriod int + expiredDeals int + dealCountAfterPublishPeriod int + expectedDealsPerMsg []int + }{{ + name: "publish one deal within publish period", + publishPeriod: 10 * time.Millisecond, + maxDealsPerMsg: 5, + dealCountWithinPublishPeriod: 1, + dealCountAfterPublishPeriod: 0, + expectedDealsPerMsg: []int{1}, + }, { + name: "publish two deals within publish period", + publishPeriod: 10 * time.Millisecond, + maxDealsPerMsg: 5, + dealCountWithinPublishPeriod: 2, + dealCountAfterPublishPeriod: 0, + expectedDealsPerMsg: []int{2}, + }, { + name: "publish one deal within publish period, and one after", + publishPeriod: 10 * time.Millisecond, + maxDealsPerMsg: 5, + dealCountWithinPublishPeriod: 1, + dealCountAfterPublishPeriod: 1, + expectedDealsPerMsg: []int{1, 1}, + }, { + name: 
"publish deals that exceed max deals per message within publish period, and one after", + publishPeriod: 10 * time.Millisecond, + maxDealsPerMsg: 2, + dealCountWithinPublishPeriod: 3, + dealCountAfterPublishPeriod: 1, + expectedDealsPerMsg: []int{2, 1, 1}, + }, { + name: "ignore deals with cancelled context", + publishPeriod: 10 * time.Millisecond, + maxDealsPerMsg: 5, + dealCountWithinPublishPeriod: 2, + ctxCancelledWithinPublishPeriod: 2, + dealCountAfterPublishPeriod: 1, + expectedDealsPerMsg: []int{2, 1}, + }, { + name: "ignore expired deals", + publishPeriod: 10 * time.Millisecond, + maxDealsPerMsg: 5, + dealCountWithinPublishPeriod: 2, + expiredDeals: 2, + dealCountAfterPublishPeriod: 1, + expectedDealsPerMsg: []int{2, 1}, + }, { + name: "zero config", + publishPeriod: 0, + maxDealsPerMsg: 0, + dealCountWithinPublishPeriod: 2, + ctxCancelledWithinPublishPeriod: 0, + dealCountAfterPublishPeriod: 2, + expectedDealsPerMsg: []int{1, 1, 1, 1}, + }} + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + dpapi := newDPAPI(t) + + // Create a deal publisher + dp := newDealPublisher(dpapi, nil, PublishMsgConfig{ + Period: tc.publishPeriod, + MaxDealsPerMsg: tc.maxDealsPerMsg, + }, &api.MessageSendSpec{MaxFee: abi.NewTokenAmount(1)}) + + // Keep a record of the deals that were submitted to be published + var dealsToPublish []market.ClientDealProposal + + // Publish deals within publish period + for i := 0; i < tc.dealCountWithinPublishPeriod; i++ { + deal := publishDeal(t, dp, false, false) + dealsToPublish = append(dealsToPublish, deal) + } + for i := 0; i < tc.ctxCancelledWithinPublishPeriod; i++ { + publishDeal(t, dp, true, false) + } + for i := 0; i < tc.expiredDeals; i++ { + publishDeal(t, dp, false, true) + } + + // Wait until publish period has elapsed + time.Sleep(2 * tc.publishPeriod) + + // Publish deals after publish period + for i := 0; i < tc.dealCountAfterPublishPeriod; i++ { + deal := publishDeal(t, dp, false, false) + 
dealsToPublish = append(dealsToPublish, deal) + } + + checkPublishedDeals(t, dpapi, dealsToPublish, tc.expectedDealsPerMsg) + }) + } +} + +func TestForcePublish(t *testing.T) { + dpapi := newDPAPI(t) + + // Create a deal publisher + start := time.Now() + publishPeriod := time.Hour + dp := newDealPublisher(dpapi, nil, PublishMsgConfig{ + Period: publishPeriod, + MaxDealsPerMsg: 10, + }, &api.MessageSendSpec{MaxFee: abi.NewTokenAmount(1)}) + + // Queue three deals for publishing, one with a cancelled context + var dealsToPublish []market.ClientDealProposal + // 1. Regular deal + deal := publishDeal(t, dp, false, false) + dealsToPublish = append(dealsToPublish, deal) + // 2. Deal with cancelled context + publishDeal(t, dp, true, false) + // 3. Regular deal + deal = publishDeal(t, dp, false, false) + dealsToPublish = append(dealsToPublish, deal) + + // Allow a moment for them to be queued + time.Sleep(10 * time.Millisecond) + + // Should be two deals in the pending deals list + // (deal with cancelled context is ignored) + pendingInfo := dp.PendingDeals() + require.Len(t, pendingInfo.Deals, 2) + require.Equal(t, publishPeriod, pendingInfo.PublishPeriod) + require.True(t, pendingInfo.PublishPeriodStart.After(start)) + require.True(t, pendingInfo.PublishPeriodStart.Before(time.Now())) + + // Force publish all pending deals + dp.ForcePublishPendingDeals() + + // Should be no pending deals + pendingInfo = dp.PendingDeals() + require.Len(t, pendingInfo.Deals, 0) + + // Make sure the expected deals were published + checkPublishedDeals(t, dpapi, dealsToPublish, []int{2}) +} + +func publishDeal(t *testing.T, dp *DealPublisher, ctxCancelled bool, expired bool) market.ClientDealProposal { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + pctx := ctx + if ctxCancelled { + pctx, cancel = context.WithCancel(ctx) + cancel() + } + + startEpoch := abi.ChainEpoch(20) + if expired { + startEpoch = abi.ChainEpoch(5) + } + deal := market.ClientDealProposal{ 
+ Proposal: market0.DealProposal{ + PieceCID: generateCids(1)[0], + Client: getClientActor(t), + Provider: getProviderActor(t), + StartEpoch: startEpoch, + EndEpoch: abi.ChainEpoch(120), + }, + ClientSignature: crypto.Signature{ + Type: crypto.SigTypeSecp256k1, + Data: []byte("signature data"), + }, + } + + go func() { + _, err := dp.Publish(pctx, deal) + + // If the test has completed just bail out without checking for errors + if ctx.Err() != nil { + return + } + + if ctxCancelled || expired { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }() + + return deal +} + +func checkPublishedDeals(t *testing.T, dpapi *dpAPI, dealsToPublish []market.ClientDealProposal, expectedDealsPerMsg []int) { + // For each message that was expected to be sent + var publishedDeals []market.ClientDealProposal + for _, expectedDealsInMsg := range expectedDealsPerMsg { + // Should have called StateMinerInfo with the provider address + stateMinerInfoAddr := <-dpapi.stateMinerInfoCalls + require.Equal(t, getProviderActor(t), stateMinerInfoAddr) + + // Check the fields of the message that was sent + msg := <-dpapi.pushedMsgs + require.Equal(t, getWorkerActor(t), msg.From) + require.Equal(t, market.Address, msg.To) + require.Equal(t, market.Methods.PublishStorageDeals, msg.Method) + + // Check that the expected number of deals was included in the message + var params market2.PublishStorageDealsParams + err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)) + require.NoError(t, err) + require.Len(t, params.Deals, expectedDealsInMsg) + + // Keep track of the deals that were sent + for _, d := range params.Deals { + publishedDeals = append(publishedDeals, d) + } + } + + // Verify that all deals that were submitted to be published were + // sent out (we do this by ensuring all the piece CIDs are present) + require.True(t, matchPieceCids(publishedDeals, dealsToPublish)) +} + +func matchPieceCids(sent []market.ClientDealProposal, exp []market.ClientDealProposal) bool { + 
cidsA := dealPieceCids(sent) + cidsB := dealPieceCids(exp) + + if len(cidsA) != len(cidsB) { + return false + } + + s1 := cid.NewSet() + for _, c := range cidsA { + s1.Add(c) + } + + for _, c := range cidsB { + if !s1.Has(c) { + return false + } + } + + return true +} + +func dealPieceCids(deals []market2.ClientDealProposal) []cid.Cid { + cids := make([]cid.Cid, 0, len(deals)) + for _, dl := range deals { + cids = append(cids, dl.Proposal.PieceCID) + } + return cids +} + +type dpAPI struct { + t *testing.T + worker address.Address + + stateMinerInfoCalls chan address.Address + pushedMsgs chan *types.Message +} + +func newDPAPI(t *testing.T) *dpAPI { + return &dpAPI{ + t: t, + worker: getWorkerActor(t), + stateMinerInfoCalls: make(chan address.Address, 128), + pushedMsgs: make(chan *types.Message, 128), + } +} + +func (d *dpAPI) ChainHead(ctx context.Context) (*types.TipSet, error) { + dummyCid, err := cid.Parse("bafkqaaa") + require.NoError(d.t, err) + return types.NewTipSet([]*types.BlockHeader{{ + Miner: tutils.NewActorAddr(d.t, "miner"), + Height: abi.ChainEpoch(10), + ParentStateRoot: dummyCid, + Messages: dummyCid, + ParentMessageReceipts: dummyCid, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + }}) +} + +func (d *dpAPI) StateMinerInfo(ctx context.Context, address address.Address, key types.TipSetKey) (miner.MinerInfo, error) { + d.stateMinerInfoCalls <- address + return miner.MinerInfo{Worker: d.worker}, nil +} + +func (d *dpAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) { + d.pushedMsgs <- msg + return &types.SignedMessage{Message: *msg}, nil +} + +func (d *dpAPI) WalletBalance(ctx context.Context, a address.Address) (types.BigInt, error) { + panic("don't call me") +} + +func (d *dpAPI) WalletHas(ctx context.Context, a address.Address) (bool, error) { + panic("don't call me") +} + +func (d *dpAPI) StateAccountKey(ctx 
context.Context, a address.Address, key types.TipSetKey) (address.Address, error) { + panic("don't call me") +} + +func (d *dpAPI) StateLookupID(ctx context.Context, a address.Address, key types.TipSetKey) (address.Address, error) { + panic("don't call me") +} + +func getClientActor(t *testing.T) address.Address { + return tutils.NewActorAddr(t, "client") +} + +func getWorkerActor(t *testing.T) address.Address { + return tutils.NewActorAddr(t, "worker") +} + +func getProviderActor(t *testing.T) address.Address { + return tutils.NewActorAddr(t, "provider") +} diff --git a/markets/storageadapter/dealstatematcher.go b/markets/storageadapter/dealstatematcher.go new file mode 100644 index 00000000000..b8b47ef8e5a --- /dev/null +++ b/markets/storageadapter/dealstatematcher.go @@ -0,0 +1,84 @@ +package storageadapter + +import ( + "context" + "sync" + + "github.com/filecoin-project/go-state-types/abi" + actorsmarket "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/events/state" + "github.com/filecoin-project/lotus/chain/types" +) + +// dealStateMatcher caches the DealStates for the most recent +// old/new tipset combination +type dealStateMatcher struct { + preds *state.StatePredicates + + lk sync.Mutex + oldTsk types.TipSetKey + newTsk types.TipSetKey + oldDealStateRoot actorsmarket.DealStates + newDealStateRoot actorsmarket.DealStates +} + +func newDealStateMatcher(preds *state.StatePredicates) *dealStateMatcher { + return &dealStateMatcher{preds: preds} +} + +// matcher returns a function that checks if the state of the given dealID +// has changed. +// It caches the DealStates for the most recent old/new tipset combination. 
+func (mc *dealStateMatcher) matcher(ctx context.Context, dealID abi.DealID) events.StateMatchFunc { + // The function that is called to check if the deal state has changed for + // the target deal ID + dealStateChangedForID := mc.preds.DealStateChangedForIDs([]abi.DealID{dealID}) + + // The match function is called by the events API to check if there's + // been a state change for the deal with the target deal ID + match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { + mc.lk.Lock() + defer mc.lk.Unlock() + + // Check if we've already fetched the DealStates for the given tipsets + if mc.oldTsk == oldTs.Key() && mc.newTsk == newTs.Key() { + // If we fetch the DealStates and there is no difference between + // them, they are stored as nil. So we can just bail out. + if mc.oldDealStateRoot == nil || mc.newDealStateRoot == nil { + return false, nil, nil + } + + // Check if the deal state has changed for the target ID + return dealStateChangedForID(ctx, mc.oldDealStateRoot, mc.newDealStateRoot) + } + + // We haven't already fetched the DealStates for the given tipsets, so + // do so now + + // Replace dealStateChangedForID with a function that records the + // DealStates so that we can cache them + var oldDealStateRootSaved, newDealStateRootSaved actorsmarket.DealStates + recorder := func(ctx context.Context, oldDealStateRoot, newDealStateRoot actorsmarket.DealStates) (changed bool, user state.UserData, err error) { + // Record DealStates + oldDealStateRootSaved = oldDealStateRoot + newDealStateRootSaved = newDealStateRoot + + return dealStateChangedForID(ctx, oldDealStateRoot, newDealStateRoot) + } + + // Call the match function + dealDiff := mc.preds.OnStorageMarketActorChanged( + mc.preds.OnDealStateChanged(recorder)) + matched, data, err := dealDiff(ctx, oldTs.Key(), newTs.Key()) + + // Save the recorded DealStates for the tipsets + mc.oldTsk = oldTs.Key() + mc.newTsk = newTs.Key() + mc.oldDealStateRoot = oldDealStateRootSaved + 
mc.newDealStateRoot = newDealStateRootSaved + + return matched, data, err + } + return match +} diff --git a/markets/storageadapter/dealstatematcher_test.go b/markets/storageadapter/dealstatematcher_test.go new file mode 100644 index 00000000000..cb036077842 --- /dev/null +++ b/markets/storageadapter/dealstatematcher_test.go @@ -0,0 +1,157 @@ +package storageadapter + +import ( + "context" + "testing" + + "github.com/filecoin-project/lotus/chain/events" + "golang.org/x/sync/errgroup" + + cbornode "github.com/ipfs/go-ipld-cbor" + + adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + bstore "github.com/filecoin-project/lotus/blockstore" + test "github.com/filecoin-project/lotus/chain/events/state/mock" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/chain/events/state" + "github.com/filecoin-project/lotus/chain/types" +) + +func TestDealStateMatcher(t *testing.T) { + ctx := context.Background() + bs := bstore.NewMemorySync() + store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) + + deal1 := &market2.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + } + deal2 := &market2.DealState{ + SectorStartEpoch: 4, + LastUpdatedEpoch: 5, + } + deal3 := &market2.DealState{ + SectorStartEpoch: 7, + LastUpdatedEpoch: 8, + } + deals1 := map[abi.DealID]*market2.DealState{ + abi.DealID(1): deal1, + } + deals2 := map[abi.DealID]*market2.DealState{ + abi.DealID(1): deal2, + } + deals3 := map[abi.DealID]*market2.DealState{ + abi.DealID(1): deal3, + } + + deal1StateC := createMarketState(ctx, t, store, deals1) + deal2StateC := createMarketState(ctx, t, store, deals2) + deal3StateC := createMarketState(ctx, t, store, deals3) + + minerAddr, err := 
address.NewFromString("t00") + require.NoError(t, err) + ts1, err := test.MockTipset(minerAddr, 1) + require.NoError(t, err) + ts2, err := test.MockTipset(minerAddr, 2) + require.NoError(t, err) + ts3, err := test.MockTipset(minerAddr, 3) + require.NoError(t, err) + + api := test.NewMockAPI(bs) + api.SetActor(ts1.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal1StateC}) + api.SetActor(ts2.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal2StateC}) + api.SetActor(ts3.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal3StateC}) + + t.Run("caching", func(t *testing.T) { + dsm := newDealStateMatcher(state.NewStatePredicates(api)) + matcher := dsm.matcher(ctx, abi.DealID(1)) + + // Call matcher with tipsets that have the same state + ok, stateChange, err := matcher(ts1, ts1) + require.NoError(t, err) + require.False(t, ok) + require.Nil(t, stateChange) + // Should call StateGetActor once for each tipset + require.Equal(t, 2, api.StateGetActorCallCount()) + + // Call matcher with tipsets that have different state + api.ResetCallCounts() + ok, stateChange, err = matcher(ts1, ts2) + require.NoError(t, err) + require.True(t, ok) + require.NotNil(t, stateChange) + // Should call StateGetActor once for each tipset + require.Equal(t, 2, api.StateGetActorCallCount()) + + // Call matcher again with the same tipsets as above, should be cached + api.ResetCallCounts() + ok, stateChange, err = matcher(ts1, ts2) + require.NoError(t, err) + require.True(t, ok) + require.NotNil(t, stateChange) + // Should not call StateGetActor (because it should hit the cache) + require.Equal(t, 0, api.StateGetActorCallCount()) + + // Call matcher with different tipsets, should not be cached + api.ResetCallCounts() + ok, stateChange, err = matcher(ts2, ts3) + require.NoError(t, err) + require.True(t, ok) + require.NotNil(t, stateChange) + // Should call StateGetActor once for each tipset + require.Equal(t, 2, api.StateGetActorCallCount()) 
+ }) + + t.Run("parallel", func(t *testing.T) { + api.ResetCallCounts() + dsm := newDealStateMatcher(state.NewStatePredicates(api)) + matcher := dsm.matcher(ctx, abi.DealID(1)) + + // Call matcher with lots of go-routines in parallel + var eg errgroup.Group + res := make([]struct { + ok bool + stateChange events.StateChange + }, 20) + for i := 0; i < len(res); i++ { + i := i + eg.Go(func() error { + ok, stateChange, err := matcher(ts1, ts2) + res[i].ok = ok + res[i].stateChange = stateChange + return err + }) + } + err := eg.Wait() + require.NoError(t, err) + + // All go-routines should have got the same (cached) result + for i := 1; i < len(res); i++ { + require.Equal(t, res[i].ok, res[i-1].ok) + require.Equal(t, res[i].stateChange, res[i-1].stateChange) + } + + // Only one go-routine should have called StateGetActor + // (once for each tipset) + require.Equal(t, 2, api.StateGetActorCallCount()) + }) +} + +func createMarketState(ctx context.Context, t *testing.T, store adt2.Store, deals map[abi.DealID]*market2.DealState) cid.Cid { + dealRootCid := test.CreateDealAMT(ctx, t, store, deals) + state := test.CreateEmptyMarketState(t, store) + state.States = dealRootCid + + stateC, err := store.Put(ctx, state) + require.NoError(t, err) + return stateC +} diff --git a/markets/storageadapter/ondealsectorcommitted.go b/markets/storageadapter/ondealsectorcommitted.go new file mode 100644 index 00000000000..31bc0b8bf9c --- /dev/null +++ b/markets/storageadapter/ondealsectorcommitted.go @@ -0,0 +1,352 @@ +package storageadapter + +import ( + "bytes" + "context" + "sync" + + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + + "github.com/filecoin-project/lotus/build" + 
"github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/types" +) + +type eventsCalledAPI interface { + Called(check events.CheckFunc, msgHnd events.MsgHandler, rev events.RevertHandler, confidence int, timeout abi.ChainEpoch, mf events.MsgMatchFunc) error +} + +type dealInfoAPI interface { + GetCurrentDealInfo(ctx context.Context, tok sealing.TipSetToken, proposal *market.DealProposal, publishCid cid.Cid) (sealing.CurrentDealInfo, error) +} + +type diffPreCommitsAPI interface { + diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) +} + +type SectorCommittedManager struct { + ev eventsCalledAPI + dealInfo dealInfoAPI + dpc diffPreCommitsAPI +} + +func NewSectorCommittedManager(ev eventsCalledAPI, tskAPI sealing.CurrentDealInfoTskAPI, dpcAPI diffPreCommitsAPI) *SectorCommittedManager { + dim := &sealing.CurrentDealInfoManager{ + CDAPI: &sealing.CurrentDealInfoAPIAdapter{CurrentDealInfoTskAPI: tskAPI}, + } + return newSectorCommittedManager(ev, dim, dpcAPI) +} + +func newSectorCommittedManager(ev eventsCalledAPI, dealInfo dealInfoAPI, dpcAPI diffPreCommitsAPI) *SectorCommittedManager { + return &SectorCommittedManager{ + ev: ev, + dealInfo: dealInfo, + dpc: dpcAPI, + } +} + +func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, proposal market.DealProposal, publishCid cid.Cid, callback storagemarket.DealSectorPreCommittedCallback) error { + // Ensure callback is only called once + var once sync.Once + cb := func(sectorNumber abi.SectorNumber, isActive bool, err error) { + once.Do(func() { + callback(sectorNumber, isActive, err) + }) + } + + // First check if the deal is already active, and if so, bail out + checkFunc := func(ts *types.TipSet) (done bool, more bool, err error) { + 
dealInfo, isActive, err := mgr.checkIfDealAlreadyActive(ctx, ts, &proposal, publishCid) + if err != nil { + // Note: the error returned from here will end up being returned + // from OnDealSectorPreCommitted so no need to call the callback + // with the error + return false, false, err + } + + if isActive { + // Deal is already active, bail out + cb(0, true, nil) + return true, false, nil + } + + // Check that precommits which landed between when the deal was published + // and now don't already contain the deal we care about. + // (this can happen when the precommit lands very quickly (in tests), or + // when the client node was down after the deal was published, and when + // the precommit containing it landed on chain) + + publishTs, err := types.TipSetKeyFromBytes(dealInfo.PublishMsgTipSet) + if err != nil { + return false, false, err + } + + diff, err := mgr.dpc.diffPreCommits(ctx, provider, publishTs, ts.Key()) + if err != nil { + return false, false, err + } + + for _, info := range diff.Added { + for _, d := range info.Info.DealIDs { + if d == dealInfo.DealID { + cb(info.Info.SectorNumber, false, nil) + return true, false, nil + } + } + } + + // Not yet active, start matching against incoming messages + return false, true, nil + } + + // Watch for a pre-commit message to the provider. + matchEvent := func(msg *types.Message) (bool, error) { + matched := msg.To == provider && (msg.Method == miner.Methods.PreCommitSector || msg.Method == miner.Methods.PreCommitSectorBatch) + return matched, nil + } + + // The deal must be accepted by the deal proposal start epoch, so timeout + // if the chain reaches that epoch + timeoutEpoch := proposal.StartEpoch + 1 + + // Check if the message params included the deal ID we're looking for.
+ called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { + defer func() { + if err != nil { + cb(0, false, xerrors.Errorf("handling applied event: %w", err)) + } + }() + + // If the deal hasn't been activated by the proposed start epoch, the + // deal will timeout (when msg == nil it means the timeout epoch was reached) + if msg == nil { + err = xerrors.Errorf("deal with piece CID %s was not activated by proposed deal start epoch %d", proposal.PieceCID, proposal.StartEpoch) + return false, err + } + + // Ignore the pre-commit message if it was not executed successfully + if rec.ExitCode != 0 { + return true, nil + } + + // When there is a reorg, the deal ID may change, so get the + // current deal ID from the publish message CID + res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key().Bytes(), &proposal, publishCid) + if err != nil { + return false, err + } + + // Extract the message parameters + sn, err := dealSectorInPreCommitMsg(msg, res) + if err != nil { + return false, err + } + + if sn != nil { + cb(*sn, false, nil) + } + + // Didn't find the deal ID in this message, so keep looking + return true, nil + } + + revert := func(ctx context.Context, ts *types.TipSet) error { + log.Warn("deal pre-commit reverted; TODO: actually handle this!") + // TODO: Just go back to DealSealing? 
+ return nil + } + + if err := mgr.ev.Called(checkFunc, called, revert, int(build.MessageConfidence+1), timeoutEpoch, matchEvent); err != nil { + return xerrors.Errorf("failed to set up called handler: %w", err) + } + + return nil +} + +func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, provider address.Address, sectorNumber abi.SectorNumber, proposal market.DealProposal, publishCid cid.Cid, callback storagemarket.DealSectorCommittedCallback) error { + // Ensure callback is only called once + var once sync.Once + cb := func(err error) { + once.Do(func() { + callback(err) + }) + } + + // First check if the deal is already active, and if so, bail out + checkFunc := func(ts *types.TipSet) (done bool, more bool, err error) { + _, isActive, err := mgr.checkIfDealAlreadyActive(ctx, ts, &proposal, publishCid) + if err != nil { + // Note: the error returned from here will end up being returned + // from OnDealSectorCommitted so no need to call the callback + // with the error + return false, false, err + } + + if isActive { + // Deal is already active, bail out + cb(nil) + return true, false, nil + } + + // Not yet active, start matching against incoming messages + return false, true, nil + } + + // Match a prove-commit sent to the provider with the given sector number + matchEvent := func(msg *types.Message) (matched bool, err error) { + if msg.To != provider { + return false, nil + } + + return sectorInCommitMsg(msg, sectorNumber) + } + + // The deal must be accepted by the deal proposal start epoch, so timeout + // if the chain reaches that epoch + timeoutEpoch := proposal.StartEpoch + 1 + + called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { + defer func() { + if err != nil { + cb(xerrors.Errorf("handling applied event: %w", err)) + } + }() + + // If the deal hasn't been activated by the proposed start epoch, the + // deal will timeout (when msg == nil it means the 
timeout epoch was reached) + if msg == nil { + err := xerrors.Errorf("deal with piece CID %s was not activated by proposed deal start epoch %d", proposal.PieceCID, proposal.StartEpoch) + return false, err + } + + // Ignore the prove-commit message if it was not executed successfully + if rec.ExitCode != 0 { + return true, nil + } + + // Get the deal info + res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key().Bytes(), &proposal, publishCid) + if err != nil { + return false, xerrors.Errorf("failed to look up deal on chain: %w", err) + } + + // Make sure the deal is active + if res.MarketDeal.State.SectorStartEpoch < 1 { + return false, xerrors.Errorf("deal wasn't active: deal=%d, parentState=%s, h=%d", res.DealID, ts.ParentState(), ts.Height()) + } + + log.Infof("Storage deal %d activated at epoch %d", res.DealID, res.MarketDeal.State.SectorStartEpoch) + + cb(nil) + + return false, nil + } + + revert := func(ctx context.Context, ts *types.TipSet) error { + log.Warn("deal activation reverted; TODO: actually handle this!") + // TODO: Just go back to DealSealing? + return nil + } + + if err := mgr.ev.Called(checkFunc, called, revert, int(build.MessageConfidence+1), timeoutEpoch, matchEvent); err != nil { + return xerrors.Errorf("failed to set up called handler: %w", err) + } + + return nil +} + +// dealSectorInPreCommitMsg tries to find a sector containing the specified deal +func dealSectorInPreCommitMsg(msg *types.Message, res sealing.CurrentDealInfo) (*abi.SectorNumber, error) { + switch msg.Method { + case miner.Methods.PreCommitSector: + var params miner.SectorPreCommitInfo + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return nil, xerrors.Errorf("unmarshal pre commit: %w", err) + } + + // Check through the deal IDs associated with this message + for _, did := range params.DealIDs { + if did == res.DealID { + // Found the deal ID in this message. Callback with the sector ID. 
+ return &params.SectorNumber, nil + } + case miner.Methods.PreCommitSectorBatch: + var params miner5.PreCommitSectorBatchParams + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return nil, xerrors.Errorf("unmarshal pre commit: %w", err) + } + + for _, precommit := range params.Sectors { + // Check through the deal IDs associated with this message + for _, did := range precommit.DealIDs { + if did == res.DealID { + // Found the deal ID in this message. Callback with the sector ID. + return &precommit.SectorNumber, nil + } + } + } + default: + return nil, xerrors.Errorf("unexpected method %d", msg.Method) + } + + return nil, nil +} + +// sectorInCommitMsg checks if the provided message commits specified sector +func sectorInCommitMsg(msg *types.Message, sectorNumber abi.SectorNumber) (bool, error) { + switch msg.Method { + case miner.Methods.ProveCommitSector: + var params miner.ProveCommitSectorParams + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) + } + + return params.SectorNumber == sectorNumber, nil + + case miner.Methods.ProveCommitAggregate: + var params miner5.ProveCommitAggregateParams + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) + } + + set, err := params.SectorNumbers.IsSet(uint64(sectorNumber)) + if err != nil { + return false, xerrors.Errorf("checking if sectorNumber is set in commit aggregate message: %w", err) + } + + return set, nil + + default: + return false, nil + } +} + +func (mgr *SectorCommittedManager) checkIfDealAlreadyActive(ctx context.Context, ts *types.TipSet, proposal *market.DealProposal, publishCid cid.Cid) (sealing.CurrentDealInfo, bool, error) { + res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key().Bytes(), proposal, publishCid) + if err != nil { + // TODO: This
may be fine for some errors + return res, false, xerrors.Errorf("failed to look up deal on chain: %w", err) + } + + // Sector was slashed + if res.MarketDeal.State.SlashEpoch > 0 { + return res, false, xerrors.Errorf("deal %d was slashed at epoch %d", res.DealID, res.MarketDeal.State.SlashEpoch) + } + + // Sector with deal is already active + if res.MarketDeal.State.SectorStartEpoch > 0 { + return res, true, nil + } + + return res, false, nil +} diff --git a/markets/storageadapter/ondealsectorcommitted_test.go b/markets/storageadapter/ondealsectorcommitted_test.go new file mode 100644 index 00000000000..db56ee65196 --- /dev/null +++ b/markets/storageadapter/ondealsectorcommitted_test.go @@ -0,0 +1,572 @@ +package storageadapter + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/rand" + "testing" + "time" + + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/events" + test "github.com/filecoin-project/lotus/chain/events/state/mock" + "github.com/filecoin-project/lotus/chain/types" + tutils "github.com/filecoin-project/specs-actors/v2/support/testing" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" +) + +func TestOnDealSectorPreCommitted(t *testing.T) { + provider := address.TestAddress + ctx := context.Background() + publishCid := generateCids(1)[0] + sealedCid := generateCids(1)[0] + pieceCid := generateCids(1)[0] + dealID := abi.DealID(rand.Uint64()) + sectorNumber := abi.SectorNumber(rand.Uint64()) + proposal := market.DealProposal{ + PieceCID: pieceCid, + PieceSize: 
abi.PaddedPieceSize(rand.Uint64()), + Client: tutils.NewActorAddr(t, "client"), + Provider: tutils.NewActorAddr(t, "provider"), + StoragePricePerEpoch: abi.NewTokenAmount(1), + ProviderCollateral: abi.NewTokenAmount(1), + ClientCollateral: abi.NewTokenAmount(1), + Label: "success", + } + unfinishedDeal := &api.MarketDeal{ + Proposal: proposal, + State: market.DealState{ + SectorStartEpoch: -1, + LastUpdatedEpoch: 2, + }, + } + activeDeal := &api.MarketDeal{ + Proposal: proposal, + State: market.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + }, + } + slashedDeal := &api.MarketDeal{ + Proposal: proposal, + State: market.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + SlashEpoch: 2, + }, + } + type testCase struct { + currentDealInfo sealing.CurrentDealInfo + currentDealInfoErr error + currentDealInfoErr2 error + preCommitDiff *miner.PreCommitChanges + matchStates []matchState + dealStartEpochTimeout bool + expectedCBCallCount uint64 + expectedCBSectorNumber abi.SectorNumber + expectedCBIsActive bool + expectedCBError error + expectedError error + } + testCases := map[string]testCase{ + "normal sequence": { + currentDealInfo: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: unfinishedDeal, + }, + matchStates: []matchState{ + { + msg: makeMessage(t, provider, miner.Methods.PreCommitSector, &miner.SectorPreCommitInfo{ + SectorNumber: sectorNumber, + SealedCID: sealedCid, + DealIDs: []abi.DealID{dealID}, + }), + }, + }, + expectedCBCallCount: 1, + expectedCBIsActive: false, + expectedCBSectorNumber: sectorNumber, + }, + "ignores unsuccessful pre-commit message": { + currentDealInfo: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: unfinishedDeal, + }, + matchStates: []matchState{ + { + msg: makeMessage(t, provider, miner.Methods.PreCommitSector, &miner.SectorPreCommitInfo{ + SectorNumber: sectorNumber, + SealedCID: sealedCid, + DealIDs: []abi.DealID{dealID}, + }), + // non-zero exit code indicates unsuccessful pre-commit message + 
receipt: &types.MessageReceipt{ExitCode: 1}, + }, + }, + expectedCBCallCount: 0, + }, + "deal already pre-committed": { + currentDealInfo: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: unfinishedDeal, + }, + preCommitDiff: &miner.PreCommitChanges{ + Added: []miner.SectorPreCommitOnChainInfo{{ + Info: miner.SectorPreCommitInfo{ + SectorNumber: sectorNumber, + DealIDs: []abi.DealID{dealID}, + }, + }}, + }, + expectedCBCallCount: 1, + expectedCBIsActive: false, + expectedCBSectorNumber: sectorNumber, + }, + "error getting current deal info in check func": { + currentDealInfoErr: errors.New("something went wrong"), + expectedCBCallCount: 0, + expectedError: xerrors.Errorf("failed to set up called handler: failed to look up deal on chain: something went wrong"), + }, + "sector already active": { + currentDealInfo: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: activeDeal, + }, + expectedCBCallCount: 1, + expectedCBIsActive: true, + }, + "sector was slashed": { + currentDealInfo: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: slashedDeal, + PublishMsgTipSet: nil, + }, + expectedCBCallCount: 0, + expectedError: xerrors.Errorf("failed to set up called handler: deal %d was slashed at epoch %d", dealID, slashedDeal.State.SlashEpoch), + }, + "error getting current deal info in called func": { + currentDealInfo: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: unfinishedDeal, + }, + currentDealInfoErr2: errors.New("something went wrong"), + matchStates: []matchState{ + { + msg: makeMessage(t, provider, miner.Methods.PreCommitSector, &miner.SectorPreCommitInfo{ + SectorNumber: sectorNumber, + SealedCID: sealedCid, + DealIDs: []abi.DealID{dealID}, + }), + }, + }, + expectedCBCallCount: 1, + expectedCBError: errors.New("handling applied event: something went wrong"), + }, + "proposed deal epoch timeout": { + currentDealInfo: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: activeDeal, + }, + dealStartEpochTimeout: true, + 
expectedCBCallCount: 1, + expectedCBError: xerrors.Errorf("handling applied event: deal with piece CID %s was not activated by proposed deal start epoch 0", unfinishedDeal.Proposal.PieceCID), + }, + } + runTestCase := func(testCase string, data testCase) { + t.Run(testCase, func(t *testing.T) { + checkTs, err := test.MockTipset(provider, rand.Uint64()) + require.NoError(t, err) + matchMessages := make([]matchMessage, len(data.matchStates)) + for i, ms := range data.matchStates { + matchTs, err := test.MockTipset(provider, rand.Uint64()) + require.NoError(t, err) + matchMessages[i] = matchMessage{ + curH: 5, + msg: ms.msg, + msgReceipt: ms.receipt, + ts: matchTs, + } + } + eventsAPI := &fakeEvents{ + Ctx: ctx, + CheckTs: checkTs, + MatchMessages: matchMessages, + DealStartEpochTimeout: data.dealStartEpochTimeout, + } + cbCallCount := uint64(0) + var cbSectorNumber abi.SectorNumber + var cbIsActive bool + var cbError error + cb := func(secNum abi.SectorNumber, isActive bool, err error) { + cbCallCount++ + cbSectorNumber = secNum + cbIsActive = isActive + cbError = err + } + + mockPCAPI := &mockPreCommitsAPI{ + PCChanges: data.preCommitDiff, + } + mockDIAPI := &mockDealInfoAPI{ + CurrentDealInfo: data.currentDealInfo, + CurrentDealInfo2: data.currentDealInfo, + Err: data.currentDealInfoErr, + Err2: data.currentDealInfoErr2, + } + scm := newSectorCommittedManager(eventsAPI, mockDIAPI, mockPCAPI) + err = scm.OnDealSectorPreCommitted(ctx, provider, proposal, publishCid, cb) + if data.expectedError == nil { + require.NoError(t, err) + } else { + require.EqualError(t, err, data.expectedError.Error()) + } + require.Equal(t, data.expectedCBSectorNumber, cbSectorNumber) + require.Equal(t, data.expectedCBIsActive, cbIsActive) + require.Equal(t, data.expectedCBCallCount, cbCallCount) + if data.expectedCBError == nil { + require.NoError(t, cbError) + } else { + require.EqualError(t, cbError, data.expectedCBError.Error()) + } + }) + } + for testCase, data := range testCases { + 
runTestCase(testCase, data) + } +} + +func TestOnDealSectorCommitted(t *testing.T) { + provider := address.TestAddress + publishCid := generateCids(1)[0] + pieceCid := generateCids(1)[0] + dealID := abi.DealID(rand.Uint64()) + sectorNumber := abi.SectorNumber(rand.Uint64()) + proposal := market.DealProposal{ + PieceCID: pieceCid, + PieceSize: abi.PaddedPieceSize(rand.Uint64()), + Client: tutils.NewActorAddr(t, "client"), + Provider: tutils.NewActorAddr(t, "provider"), + StoragePricePerEpoch: abi.NewTokenAmount(1), + ProviderCollateral: abi.NewTokenAmount(1), + ClientCollateral: abi.NewTokenAmount(1), + Label: "success", + } + unfinishedDeal := &api.MarketDeal{ + Proposal: proposal, + State: market.DealState{ + SectorStartEpoch: -1, + LastUpdatedEpoch: 2, + }, + } + activeDeal := &api.MarketDeal{ + Proposal: proposal, + State: market.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + }, + } + slashedDeal := &api.MarketDeal{ + Proposal: proposal, + State: market.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + SlashEpoch: 2, + }, + } + type testCase struct { + currentDealInfo sealing.CurrentDealInfo + currentDealInfoErr error + currentDealInfo2 sealing.CurrentDealInfo + currentDealInfoErr2 error + matchStates []matchState + dealStartEpochTimeout bool + expectedCBCallCount uint64 + expectedCBError error + expectedError error + } + testCases := map[string]testCase{ + "normal sequence": { + currentDealInfo: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: unfinishedDeal, + }, + currentDealInfo2: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: activeDeal, + }, + matchStates: []matchState{ + { + msg: makeMessage(t, provider, miner.Methods.ProveCommitSector, &miner.ProveCommitSectorParams{ + SectorNumber: sectorNumber, + }), + }, + }, + expectedCBCallCount: 1, + }, + "ignores unsuccessful prove-commit message": { + currentDealInfo: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: unfinishedDeal, + }, + currentDealInfo2: 
sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: activeDeal, + }, + matchStates: []matchState{ + { + msg: makeMessage(t, provider, miner.Methods.ProveCommitSector, &miner.ProveCommitSectorParams{ + SectorNumber: sectorNumber, + }), + // Exit-code 1 means the prove-commit was unsuccessful + receipt: &types.MessageReceipt{ExitCode: 1}, + }, + }, + expectedCBCallCount: 0, + }, + "error getting current deal info in check func": { + currentDealInfoErr: errors.New("something went wrong"), + expectedCBCallCount: 0, + expectedError: xerrors.Errorf("failed to set up called handler: failed to look up deal on chain: something went wrong"), + }, + "sector already active": { + currentDealInfo: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: activeDeal, + }, + expectedCBCallCount: 1, + }, + "sector was slashed": { + currentDealInfo: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: slashedDeal, + }, + expectedCBCallCount: 0, + expectedError: xerrors.Errorf("failed to set up called handler: deal %d was slashed at epoch %d", dealID, slashedDeal.State.SlashEpoch), + }, + "error getting current deal info in called func": { + currentDealInfo: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: unfinishedDeal, + }, + currentDealInfoErr2: errors.New("something went wrong"), + matchStates: []matchState{ + { + msg: makeMessage(t, provider, miner.Methods.ProveCommitSector, &miner.ProveCommitSectorParams{ + SectorNumber: sectorNumber, + }), + }, + }, + expectedCBCallCount: 1, + expectedCBError: xerrors.Errorf("handling applied event: failed to look up deal on chain: something went wrong"), + }, + "proposed deal epoch timeout": { + currentDealInfo: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: unfinishedDeal, + }, + dealStartEpochTimeout: true, + expectedCBCallCount: 1, + expectedCBError: xerrors.Errorf("handling applied event: deal with piece CID %s was not activated by proposed deal start epoch 0", unfinishedDeal.Proposal.PieceCID), + }, + "got 
prove-commit but deal not active": { + currentDealInfo: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: unfinishedDeal, + }, + currentDealInfo2: sealing.CurrentDealInfo{ + DealID: dealID, + MarketDeal: unfinishedDeal, + }, + matchStates: []matchState{ + { + msg: makeMessage(t, provider, miner.Methods.ProveCommitSector, &miner.ProveCommitSectorParams{ + SectorNumber: sectorNumber, + }), + }, + }, + expectedCBCallCount: 1, + expectedCBError: xerrors.Errorf("handling applied event: deal wasn't active: deal=%d, parentState=bafkqaaa, h=5", dealID), + }, + } + runTestCase := func(testCase string, data testCase) { + t.Run(testCase, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + checkTs, err := test.MockTipset(provider, rand.Uint64()) + require.NoError(t, err) + matchMessages := make([]matchMessage, len(data.matchStates)) + for i, ms := range data.matchStates { + matchTs, err := test.MockTipset(provider, rand.Uint64()) + require.NoError(t, err) + matchMessages[i] = matchMessage{ + curH: 5, + msg: ms.msg, + msgReceipt: ms.receipt, + ts: matchTs, + } + } + eventsAPI := &fakeEvents{ + Ctx: ctx, + CheckTs: checkTs, + MatchMessages: matchMessages, + DealStartEpochTimeout: data.dealStartEpochTimeout, + } + cbCallCount := uint64(0) + var cbError error + cb := func(err error) { + cbCallCount++ + cbError = err + } + mockPCAPI := &mockPreCommitsAPI{} + mockDIAPI := &mockDealInfoAPI{ + CurrentDealInfo: data.currentDealInfo, + CurrentDealInfo2: data.currentDealInfo2, + Err: data.currentDealInfoErr, + Err2: data.currentDealInfoErr2, + } + scm := newSectorCommittedManager(eventsAPI, mockDIAPI, mockPCAPI) + err = scm.OnDealSectorCommitted(ctx, provider, sectorNumber, proposal, publishCid, cb) + if data.expectedError == nil { + require.NoError(t, err) + } else { + require.EqualError(t, err, data.expectedError.Error()) + } + require.Equal(t, data.expectedCBCallCount, cbCallCount) + if data.expectedCBError == nil { 
+ require.NoError(t, cbError) + } else { + require.EqualError(t, cbError, data.expectedCBError.Error()) + } + }) + } + for testCase, data := range testCases { + runTestCase(testCase, data) + } +} + +type matchState struct { + msg *types.Message + receipt *types.MessageReceipt +} + +type matchMessage struct { + curH abi.ChainEpoch + msg *types.Message + msgReceipt *types.MessageReceipt + ts *types.TipSet + doesRevert bool +} +type fakeEvents struct { + Ctx context.Context + CheckTs *types.TipSet + MatchMessages []matchMessage + DealStartEpochTimeout bool +} + +func (fe *fakeEvents) Called(check events.CheckFunc, msgHnd events.MsgHandler, rev events.RevertHandler, confidence int, timeout abi.ChainEpoch, mf events.MsgMatchFunc) error { + if fe.DealStartEpochTimeout { + msgHnd(nil, nil, nil, 100) // nolint:errcheck + return nil + } + + _, more, err := check(fe.CheckTs) + if err != nil { + return err + } + if !more { + return nil + } + for _, matchMessage := range fe.MatchMessages { + matched, err := mf(matchMessage.msg) + if err != nil { + return err + } + if matched { + receipt := matchMessage.msgReceipt + if receipt == nil { + receipt = &types.MessageReceipt{ExitCode: 0} + } + more, err := msgHnd(matchMessage.msg, receipt, matchMessage.ts, matchMessage.curH) + if err != nil { + // error is handled through a callback rather than being returned + return nil + } + if matchMessage.doesRevert { + err := rev(fe.Ctx, matchMessage.ts) + if err != nil { + return err + } + } + if !more { + return nil + } + } + } + return nil +} + +func makeMessage(t *testing.T, to address.Address, method abi.MethodNum, params cbor.Marshaler) *types.Message { + buf := new(bytes.Buffer) + err := params.MarshalCBOR(buf) + require.NoError(t, err) + return &types.Message{ + To: to, + Method: method, + Params: buf.Bytes(), + } +} + +var seq int + +func generateCids(n int) []cid.Cid { + cids := make([]cid.Cid, 0, n) + for i := 0; i < n; i++ { + c := blocks.NewBlock([]byte(fmt.Sprint(seq))).Cid() + 
seq++ + cids = append(cids, c) + } + return cids +} + +type mockPreCommitsAPI struct { + PCChanges *miner.PreCommitChanges + Err error +} + +func (m *mockPreCommitsAPI) diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) { + pcc := &miner.PreCommitChanges{} + if m.PCChanges != nil { + pcc = m.PCChanges + } + return pcc, m.Err +} + +type mockDealInfoAPI struct { + count int + CurrentDealInfo sealing.CurrentDealInfo + Err error + CurrentDealInfo2 sealing.CurrentDealInfo + Err2 error +} + +func (m *mockDealInfoAPI) GetCurrentDealInfo(ctx context.Context, tok sealing.TipSetToken, proposal *market.DealProposal, publishCid cid.Cid) (sealing.CurrentDealInfo, error) { + m.count++ + if m.count == 2 { + return m.CurrentDealInfo2, m.Err2 + } + return m.CurrentDealInfo, m.Err +} diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go index ce7c8e91747..b899c081074 100644 --- a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ -3,27 +3,26 @@ package storageadapter // this file implements storagemarket.StorageProviderNode import ( - "bytes" "context" "io" "time" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" + "go.uber.org/fx" "golang.org/x/xerrors" - market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-fil-markets/shared" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors" 
"github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/events" @@ -34,15 +33,17 @@ import ( "github.com/filecoin-project/lotus/markets/utils" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/filecoin-project/lotus/storage/sectorblocks" ) var addPieceRetryWait = 5 * time.Minute var addPieceRetryTimeout = 6 * time.Hour +var defaultMaxProviderCollateralMultiplier = uint64(2) var log = logging.Logger("storageadapter") type ProviderNodeAdapter struct { - api.FullNode + v1api.FullNode // this goes away with the data transfer module dag dtypes.StagingDAG @@ -50,54 +51,43 @@ type ProviderNodeAdapter struct { secb *sectorblocks.SectorBlocks ev *events.Events - publishSpec, addBalanceSpec *api.MessageSendSpec + dealPublisher *DealPublisher + + addBalanceSpec *api.MessageSendSpec + maxDealCollateralMultiplier uint64 + dsMatcher *dealStateMatcher + scMgr *SectorCommittedManager } -func NewProviderNodeAdapter(fc *config.MinerFeeConfig) func(dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode) storagemarket.StorageProviderNode { - return func(dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode) storagemarket.StorageProviderNode { +func NewProviderNodeAdapter(fc *config.MinerFeeConfig, dc *config.DealmakingConfig) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full v1api.FullNode, dealPublisher *DealPublisher) storagemarket.StorageProviderNode { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full v1api.FullNode, dealPublisher *DealPublisher) storagemarket.StorageProviderNode { + ctx := helpers.LifecycleCtx(mctx, lc) + + ev := events.NewEvents(ctx, full) na := &ProviderNodeAdapter{ 
FullNode: full, - dag: dag, - secb: secb, - ev: events.NewEvents(context.TODO(), full), + dag: dag, + secb: secb, + ev: ev, + dealPublisher: dealPublisher, + dsMatcher: newDealStateMatcher(state.NewStatePredicates(state.WrapFastAPI(full))), } if fc != nil { - na.publishSpec = &api.MessageSendSpec{MaxFee: abi.TokenAmount(fc.MaxPublishDealsFee)} na.addBalanceSpec = &api.MessageSendSpec{MaxFee: abi.TokenAmount(fc.MaxMarketBalanceAddFee)} } + na.maxDealCollateralMultiplier = defaultMaxProviderCollateralMultiplier + if dc != nil { + na.maxDealCollateralMultiplier = dc.MaxProviderCollateralMultiplier + } + na.scMgr = NewSectorCommittedManager(ev, na, &apiWrapper{api: full}) + return na } } func (n *ProviderNodeAdapter) PublishDeals(ctx context.Context, deal storagemarket.MinerDeal) (cid.Cid, error) { - log.Info("publishing deal") - - mi, err := n.StateMinerInfo(ctx, deal.Proposal.Provider, types.EmptyTSK) - if err != nil { - return cid.Undef, err - } - - params, err := actors.SerializeParams(&market2.PublishStorageDealsParams{ - Deals: []market2.ClientDealProposal{deal.ClientDealProposal}, - }) - - if err != nil { - return cid.Undef, xerrors.Errorf("serializing PublishStorageDeals params failed: %w", err) - } - - // TODO: We may want this to happen after fetching data - smsg, err := n.MpoolPushMessage(ctx, &types.Message{ - To: market.Address, - From: mi.Worker, - Value: types.NewInt(0), - Method: market.Methods.PublishStorageDeals, - Params: params, - }, n.publishSpec) - if err != nil { - return cid.Undef, err - } - return smsg.Cid(), nil + return n.dealPublisher.Publish(ctx, deal.ClientDealProposal) } func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceData io.Reader) (*storagemarket.PackingResult, error) { @@ -105,10 +95,11 @@ func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagema return nil, xerrors.Errorf("deal.PublishCid can't be nil") } - sdInfo := 
sealing.DealInfo{ - DealID: deal.DealID, - PublishCid: deal.PublishCid, - DealSchedule: sealing.DealSchedule{ + sdInfo := api.PieceDealInfo{ + DealID: deal.DealID, + DealProposal: &deal.Proposal, + PublishCid: deal.PublishCid, + DealSchedule: api.DealSchedule{ StartEpoch: deal.ClientDealProposal.Proposal.StartEpoch, EndEpoch: deal.ClientDealProposal.Proposal.EndEpoch, }, @@ -120,7 +111,7 @@ func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagema for time.Since(curTime) < addPieceRetryTimeout { if !xerrors.Is(err, sealing.ErrTooManySectorsSealing) { if err != nil { - log.Errorf("failed to addPiece for deal %d, err: %w", deal.DealID, err) + log.Errorf("failed to addPiece for deal %d, err: %v", deal.DealID, err) } break } @@ -154,19 +145,38 @@ func (n *ProviderNodeAdapter) VerifySignature(ctx context.Context, sig crypto.Si return err == nil, err } -func (n *ProviderNodeAdapter) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) { +func (n *ProviderNodeAdapter) GetMinerWorkerAddress(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (address.Address, error) { tsk, err := types.TipSetKeyFromBytes(tok) if err != nil { return address.Undef, err } - mi, err := n.StateMinerInfo(ctx, miner, tsk) + mi, err := n.StateMinerInfo(ctx, maddr, tsk) if err != nil { return address.Address{}, err } return mi.Worker, nil } +func (n *ProviderNodeAdapter) GetProofType(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return 0, err + } + + mi, err := n.StateMinerInfo(ctx, maddr, tsk) + if err != nil { + return 0, err + } + + nver, err := n.StateNetworkVersion(ctx, tsk) + if err != nil { + return 0, err + } + + return miner.PreferredSealProofTypeFromWindowPoStType(nver, mi.WindowPoStProofType) +} + func (n *ProviderNodeAdapter) SignBytes(ctx context.Context, signer 
address.Address, b []byte) (*crypto.Signature, error) { signer, err := n.StateAccountKey(ctx, signer, types.EmptyTSK) if err != nil { @@ -180,8 +190,12 @@ func (n *ProviderNodeAdapter) SignBytes(ctx context.Context, signer address.Addr return localSignature, nil } -func (n *ProviderNodeAdapter) EnsureFunds(ctx context.Context, addr, wallet address.Address, amt abi.TokenAmount, encodedTs shared.TipSetToken) (cid.Cid, error) { - return n.MarketEnsureAvailable(ctx, addr, wallet, amt) +func (n *ProviderNodeAdapter) ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) { + return n.MarketReserveFunds(ctx, wallet, addr, amt) +} + +func (n *ProviderNodeAdapter) ReleaseFunds(ctx context.Context, addr address.Address, amt abi.TokenAmount) error { + return n.MarketReleaseFunds(ctx, addr, amt) } // Adds funds with the StorageMinerActor for a storage participant. Used by both providers and clients. @@ -226,19 +240,19 @@ func (n *ProviderNodeAdapter) LocatePieceForDealWithinSector(ctx context.Context // TODO: better strategy (e.g. 
look for already unsealed) var best api.SealedRef - var bestSi sealing.SectorInfo + var bestSi api.SectorInfo for _, r := range refs { - si, err := n.secb.Miner.GetSectorInfo(r.SectorID) + si, err := n.secb.SectorBuilder.SectorsStatus(ctx, r.SectorID, false) if err != nil { return 0, 0, 0, xerrors.Errorf("getting sector info: %w", err) } - if si.State == sealing.Proving { + if si.State == api.SectorState(sealing.Proving) { best = r bestSi = si break } } - if bestSi.State == sealing.UndefinedSectorState { + if bestSi.State == api.SectorState(sealing.UndefinedSectorState) { return 0, 0, 0, xerrors.New("no sealed sector found") } return best.SectorID, best.Offset, best.Size.Padded(), nil @@ -250,110 +264,21 @@ func (n *ProviderNodeAdapter) DealProviderCollateralBounds(ctx context.Context, return abi.TokenAmount{}, abi.TokenAmount{}, err } - return bounds.Min, bounds.Max, nil -} - -func (n *ProviderNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, cb storagemarket.DealSectorCommittedCallback) error { - checkFunc := func(ts *types.TipSet) (done bool, more bool, err error) { - sd, err := n.StateMarketStorageDeal(ctx, dealID, ts.Key()) - - if err != nil { - // TODO: This may be fine for some errors - return false, false, xerrors.Errorf("failed to look up deal on chain: %w", err) - } - - if sd.State.SectorStartEpoch > 0 { - cb(nil) - return true, false, nil - } - - return false, true, nil - } - - called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { - defer func() { - if err != nil { - cb(xerrors.Errorf("handling applied event: %w", err)) - } - }() + // The maximum amount of collateral that the provider will put into escrow + // for a deal is calculated as a multiple of the minimum bounded amount + max := types.BigMul(bounds.Min, types.NewInt(n.maxDealCollateralMultiplier)) - if msg == nil { - log.Error("timed out waiting for deal activation... 
what now?") - return false, nil - } - - sd, err := n.StateMarketStorageDeal(ctx, dealID, ts.Key()) - if err != nil { - return false, xerrors.Errorf("failed to look up deal on chain: %w", err) - } - - if sd.State.SectorStartEpoch < 1 { - return false, xerrors.Errorf("deal wasn't active: deal=%d, parentState=%s, h=%d", dealID, ts.ParentState(), ts.Height()) - } - - log.Infof("Storage deal %d activated at epoch %d", dealID, sd.State.SectorStartEpoch) - - cb(nil) - - return false, nil - } - - revert := func(ctx context.Context, ts *types.TipSet) error { - log.Warn("deal activation reverted; TODO: actually handle this!") - // TODO: Just go back to DealSealing? - return nil - } - - var sectorNumber abi.SectorNumber - var sectorFound bool - - matchEvent := func(msg *types.Message) (matchOnce bool, matched bool, err error) { - if msg.To != provider { - return true, false, nil - } - - switch msg.Method { - case miner.Methods.PreCommitSector: - var params miner.SectorPreCommitInfo - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return true, false, xerrors.Errorf("unmarshal pre commit: %w", err) - } - - for _, did := range params.DealIDs { - if did == dealID { - sectorNumber = params.SectorNumber - sectorFound = true - return true, false, nil - } - } - - return true, false, nil - case miner.Methods.ProveCommitSector: - var params miner.ProveCommitSectorParams - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return true, false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err) - } - - if !sectorFound { - return true, false, nil - } - - if params.SectorNumber != sectorNumber { - return true, false, nil - } - - return false, true, nil - default: - return true, false, nil - } - - } + return bounds.Min, max, nil +} - if err := n.ev.Called(checkFunc, called, revert, int(build.MessageConfidence+1), events.NoTimeout, matchEvent); err != nil { - return xerrors.Errorf("failed to set up called handler: 
%w", err) - } +// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer) +func (n *ProviderNodeAdapter) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal market2.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorPreCommittedCallback) error { + return n.scMgr.OnDealSectorPreCommitted(ctx, provider, market.DealProposal(proposal), *publishCid, cb) +} - return nil +// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer) +func (n *ProviderNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber abi.SectorNumber, proposal market2.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorCommittedCallback) error { + return n.scMgr.OnDealSectorCommitted(ctx, provider, sectorNumber, market.DealProposal(proposal), *publishCid, cb) } func (n *ProviderNodeAdapter) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { @@ -366,13 +291,38 @@ func (n *ProviderNodeAdapter) GetChainHead(ctx context.Context) (shared.TipSetTo } func (n *ProviderNodeAdapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error) error { - receipt, err := n.StateWaitMsg(ctx, mcid, 2*build.MessageConfidence) + receipt, err := n.StateWaitMsg(ctx, mcid, 2*build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { return cb(0, nil, cid.Undef, err) } return cb(receipt.Receipt.ExitCode, receipt.Receipt.Return, receipt.Message, nil) } +func (n *ProviderNodeAdapter) WaitForPublishDeals(ctx context.Context, publishCid cid.Cid, proposal market2.DealProposal) (*storagemarket.PublishDealsWaitResult, error) { + // Wait for deal to be published (plus additional time for confidence) + receipt, err := n.StateWaitMsg(ctx, publishCid, 2*build.MessageConfidence, api.LookbackNoLimit, true) + if err != nil { + return nil, 
xerrors.Errorf("WaitForPublishDeals errored: %w", err) + } + if receipt.Receipt.ExitCode != exitcode.Ok { + return nil, xerrors.Errorf("WaitForPublishDeals exit code: %s", receipt.Receipt.ExitCode) + } + + // The deal ID may have changed since publish if there was a reorg, so + // get the current deal ID + head, err := n.ChainHead(ctx) + if err != nil { + return nil, xerrors.Errorf("WaitForPublishDeals failed to get chain head: %w", err) + } + + res, err := n.scMgr.dealInfo.GetCurrentDealInfo(ctx, head.Key().Bytes(), (*market.DealProposal)(&proposal), publishCid) + if err != nil { + return nil, xerrors.Errorf("WaitForPublishDeals getting deal info errored: %w", err) + } + + return &storagemarket.PublishDealsWaitResult{DealID: res.DealID, FinalCid: receipt.Message}, nil +} + func (n *ProviderNodeAdapter) GetDataCap(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (*abi.StoragePower, error) { tsk, err := types.TipSetKeyFromBytes(encodedTs) if err != nil { @@ -422,7 +372,7 @@ func (n *ProviderNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID // and the chain has advanced to the confidence height stateChanged := func(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) { // Check if the deal has already expired - if sd.Proposal.EndEpoch <= ts2.Height() { + if ts2 == nil || sd.Proposal.EndEpoch <= ts2.Height() { onDealExpired(nil) return false, nil } @@ -461,13 +411,7 @@ func (n *ProviderNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID } // Watch for state changes to the deal - preds := state.NewStatePredicates(n) - dealDiff := preds.OnStorageMarketActorChanged( - preds.OnDealStateChanged( - preds.DealStateChangedForIDs([]abi.DealID{dealID}))) - match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { - return dealDiff(ctx, oldTs.Key(), newTs.Key()) - } + match := n.dsMatcher.matcher(ctx, dealID) // Wait until after the end epoch for the deal and 
then timeout timeout := (sd.Proposal.EndEpoch - head.Height()) + 1 diff --git a/metrics/exporter.go b/metrics/exporter.go new file mode 100644 index 00000000000..92786c26b56 --- /dev/null +++ b/metrics/exporter.go @@ -0,0 +1,32 @@ +package metrics + +import ( + "net/http" + _ "net/http/pprof" + + "contrib.go.opencensus.io/exporter/prometheus" + logging "github.com/ipfs/go-log/v2" + promclient "github.com/prometheus/client_golang/prometheus" +) + +var log = logging.Logger("metrics") + +func Exporter() http.Handler { + // Prometheus globals are exposed as interfaces, but the prometheus + // OpenCensus exporter expects a concrete *Registry. The concrete type of + // the globals are actually *Registry, so we downcast them, staying + // defensive in case things change under the hood. + registry, ok := promclient.DefaultRegisterer.(*promclient.Registry) + if !ok { + log.Warnf("failed to export default prometheus registry; some metrics will be unavailable; unexpected type: %T", promclient.DefaultRegisterer) + } + exporter, err := prometheus.NewExporter(prometheus.Options{ + Registry: registry, + Namespace: "lotus", + }) + if err != nil { + log.Errorf("could not create the prometheus stats exporter: %v", err) + } + + return exporter +} diff --git a/metrics/metrics.go b/metrics/metrics.go index 33d9e91742a..33fecc606ff 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -9,29 +9,52 @@ import ( "go.opencensus.io/tag" rpcmetrics "github.com/filecoin-project/go-jsonrpc/metrics" + + "github.com/filecoin-project/lotus/blockstore" ) // Distribution -var defaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) +var defaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 
500, 650, 800, 1000, 2000, 3000, 4000, 5000, 7500, 10000, 20000, 50000, 100000) +var workMillisecondsDistribution = view.Distribution( + 250, 500, 1000, 2000, 5000, 10_000, 30_000, 60_000, 2*60_000, 5*60_000, 10*60_000, 15*60_000, 30*60_000, // short sealing tasks + 40*60_000, 45*60_000, 50*60_000, 55*60_000, 60*60_000, 65*60_000, 70*60_000, 75*60_000, 80*60_000, 85*60_000, 100*60_000, 120*60_000, // PC2 / C2 range + 130*60_000, 140*60_000, 150*60_000, 160*60_000, 180*60_000, 200*60_000, 220*60_000, 260*60_000, 300*60_000, // PC1 range + 350*60_000, 400*60_000, 600*60_000, 800*60_000, 1000*60_000, 1300*60_000, 1800*60_000, 4000*60_000, 10000*60_000, // intel PC1 range +) // Global Tags var ( - Version, _ = tag.NewKey("version") - Commit, _ = tag.NewKey("commit") - PeerID, _ = tag.NewKey("peer_id") - FailureType, _ = tag.NewKey("failure_type") + // common + Version, _ = tag.NewKey("version") + Commit, _ = tag.NewKey("commit") + NodeType, _ = tag.NewKey("node_type") + PeerID, _ = tag.NewKey("peer_id") + MinerID, _ = tag.NewKey("miner_id") + FailureType, _ = tag.NewKey("failure_type") + + // chain Local, _ = tag.NewKey("local") MessageFrom, _ = tag.NewKey("message_from") MessageTo, _ = tag.NewKey("message_to") MessageNonce, _ = tag.NewKey("message_nonce") ReceivedFrom, _ = tag.NewKey("received_from") + MsgValid, _ = tag.NewKey("message_valid") Endpoint, _ = tag.NewKey("endpoint") APIInterface, _ = tag.NewKey("api") // to distinguish between gateway api and full node api endpoint calls + + // miner + TaskType, _ = tag.NewKey("task_type") + WorkerHostname, _ = tag.NewKey("worker_hostname") ) // Measures var ( - LotusInfo = stats.Int64("info", "Arbitrary counter to tag lotus info to", stats.UnitDimensionless) + // common + LotusInfo = stats.Int64("info", "Arbitrary counter to tag lotus info to", stats.UnitDimensionless) + PeerCount = stats.Int64("peer/count", "Current number of FIL peers", stats.UnitDimensionless) + APIRequestDuration = 
stats.Float64("api/request_duration_ms", "Duration of API requests", stats.UnitMilliseconds) + + // chain ChainNodeHeight = stats.Int64("chain/node_height", "Current Height of the node", stats.UnitDimensionless) ChainNodeHeightExpected = stats.Int64("chain/node_height_expected", "Expected Height of the node", stats.UnitDimensionless) ChainNodeWorkerHeight = stats.Int64("chain/node_worker_height", "Current Height of workers on the node", stats.UnitDimensionless) @@ -39,12 +62,18 @@ var ( MessageReceived = stats.Int64("message/received", "Counter for total received messages", stats.UnitDimensionless) MessageValidationFailure = stats.Int64("message/failure", "Counter for message validation failures", stats.UnitDimensionless) MessageValidationSuccess = stats.Int64("message/success", "Counter for message validation successes", stats.UnitDimensionless) + MessageValidationDuration = stats.Float64("message/validation_ms", "Duration of message validation", stats.UnitMilliseconds) + MpoolGetNonceDuration = stats.Float64("mpool/getnonce_ms", "Duration of getStateNonce in mpool", stats.UnitMilliseconds) + MpoolGetBalanceDuration = stats.Float64("mpool/getbalance_ms", "Duration of getStateBalance in mpool", stats.UnitMilliseconds) + MpoolAddTsDuration = stats.Float64("mpool/addts_ms", "Duration of addTs in mpool", stats.UnitMilliseconds) + MpoolAddDuration = stats.Float64("mpool/add_ms", "Duration of Add in mpool", stats.UnitMilliseconds) + MpoolPushDuration = stats.Float64("mpool/push_ms", "Duration of Push in mpool", stats.UnitMilliseconds) BlockPublished = stats.Int64("block/published", "Counter for total locally published blocks", stats.UnitDimensionless) BlockReceived = stats.Int64("block/received", "Counter for total received blocks", stats.UnitDimensionless) BlockValidationFailure = stats.Int64("block/failure", "Counter for block validation failures", stats.UnitDimensionless) BlockValidationSuccess = stats.Int64("block/success", "Counter for block validation successes", 
stats.UnitDimensionless) BlockValidationDurationMilliseconds = stats.Float64("block/validation_ms", "Duration for Block Validation in ms", stats.UnitMilliseconds) - PeerCount = stats.Int64("peer/count", "Current number of FIL peers", stats.UnitDimensionless) + BlockDelay = stats.Int64("block/delay", "Delay of accepted blocks, where delay is >5s", stats.UnitMilliseconds) PubsubPublishMessage = stats.Int64("pubsub/published", "Counter for total published messages", stats.UnitDimensionless) PubsubDeliverMessage = stats.Int64("pubsub/delivered", "Counter for total delivered messages", stats.UnitDimensionless) PubsubRejectMessage = stats.Int64("pubsub/rejected", "Counter for total rejected messages", stats.UnitDimensionless) @@ -52,7 +81,28 @@ var ( PubsubRecvRPC = stats.Int64("pubsub/recv_rpc", "Counter for total received RPCs", stats.UnitDimensionless) PubsubSendRPC = stats.Int64("pubsub/send_rpc", "Counter for total sent RPCs", stats.UnitDimensionless) PubsubDropRPC = stats.Int64("pubsub/drop_rpc", "Counter for total dropped RPCs", stats.UnitDimensionless) - APIRequestDuration = stats.Float64("api/request_duration_ms", "Duration of API requests", stats.UnitMilliseconds) + VMFlushCopyDuration = stats.Float64("vm/flush_copy_ms", "Time spent in VM Flush Copy", stats.UnitMilliseconds) + VMFlushCopyCount = stats.Int64("vm/flush_copy_count", "Number of copied objects", stats.UnitDimensionless) + VMApplyBlocksTotal = stats.Float64("vm/applyblocks_total_ms", "Time spent applying block state", stats.UnitMilliseconds) + VMApplyMessages = stats.Float64("vm/applyblocks_messages", "Time spent applying block messages", stats.UnitMilliseconds) + VMApplyEarly = stats.Float64("vm/applyblocks_early", "Time spent in early apply-blocks (null cron, upgrades)", stats.UnitMilliseconds) + VMApplyCron = stats.Float64("vm/applyblocks_cron", "Time spent in cron", stats.UnitMilliseconds) + VMApplyFlush = stats.Float64("vm/applyblocks_flush", "Time spent flushing vm state", 
stats.UnitMilliseconds) + VMSends = stats.Int64("vm/sends", "Counter for sends processed by the VM", stats.UnitDimensionless) + VMApplied = stats.Int64("vm/applied", "Counter for messages (including internal messages) processed by the VM", stats.UnitDimensionless) + + // miner + WorkerCallsStarted = stats.Int64("sealing/worker_calls_started", "Counter of started worker tasks", stats.UnitDimensionless) + WorkerCallsReturnedCount = stats.Int64("sealing/worker_calls_returned_count", "Counter of returned worker tasks", stats.UnitDimensionless) + WorkerCallsReturnedDuration = stats.Float64("sealing/worker_calls_returned_ms", "Duration of returned worker tasks", stats.UnitMilliseconds) + WorkerUntrackedCallsReturned = stats.Int64("sealing/worker_untracked_calls_returned", "Counter of returned untracked worker tasks", stats.UnitDimensionless) + + // splitstore + SplitstoreMiss = stats.Int64("splitstore/miss", "Number of misses in hotstore access", stats.UnitDimensionless) + SplitstoreCompactionTimeSeconds = stats.Float64("splitstore/compaction_time", "Compaction time in seconds", stats.UnitSeconds) + SplitstoreCompactionHot = stats.Int64("splitstore/hot", "Number of hot blocks in last compaction", stats.UnitDimensionless) + SplitstoreCompactionCold = stats.Int64("splitstore/cold", "Number of cold blocks in last compaction", stats.UnitDimensionless) + SplitstoreCompactionDead = stats.Int64("splitstore/dead", "Number of dead blocks in last compaction", stats.UnitDimensionless) ) var ( @@ -92,6 +142,24 @@ var ( Measure: BlockValidationDurationMilliseconds, Aggregation: defaultMillisecondsDistribution, } + BlockDelayView = &view.View{ + Measure: BlockDelay, + TagKeys: []tag.Key{MinerID}, + Aggregation: func() *view.Aggregation { + var bounds []float64 + for i := 5; i < 29; i++ { // 5-29s, step 1s + bounds = append(bounds, float64(i*1000)) + } + for i := 30; i < 60; i += 2 { // 30-58s, step 2s + bounds = append(bounds, float64(i*1000)) + } + for i := 60; i <= 300; i += 10 { // 
60-300s, step 10s + bounds = append(bounds, float64(i*1000)) + } + bounds = append(bounds, 600*1000) // final cutoff at 10m + return view.Distribution(bounds...) + }(), + } MessagePublishedView = &view.View{ Measure: MessagePublished, Aggregation: view.Count(), @@ -109,6 +177,31 @@ var ( Measure: MessageValidationSuccess, Aggregation: view.Count(), } + MessageValidationDurationView = &view.View{ + Measure: MessageValidationDuration, + Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{MsgValid, Local}, + } + MpoolGetNonceDurationView = &view.View{ + Measure: MpoolGetNonceDuration, + Aggregation: defaultMillisecondsDistribution, + } + MpoolGetBalanceDurationView = &view.View{ + Measure: MpoolGetBalanceDuration, + Aggregation: defaultMillisecondsDistribution, + } + MpoolAddTsDurationView = &view.View{ + Measure: MpoolAddTsDuration, + Aggregation: defaultMillisecondsDistribution, + } + MpoolAddDurationView = &view.View{ + Measure: MpoolAddDuration, + Aggregation: defaultMillisecondsDistribution, + } + MpoolPushDurationView = &view.View{ + Measure: MpoolPushDuration, + Aggregation: defaultMillisecondsDistribution, + } PeerCountView = &view.View{ Measure: PeerCount, Aggregation: view.LastValue(), @@ -146,11 +239,100 @@ var ( Aggregation: defaultMillisecondsDistribution, TagKeys: []tag.Key{APIInterface, Endpoint}, } + VMFlushCopyDurationView = &view.View{ + Measure: VMFlushCopyDuration, + Aggregation: view.Sum(), + } + VMFlushCopyCountView = &view.View{ + Measure: VMFlushCopyCount, + Aggregation: view.Sum(), + } + VMApplyBlocksTotalView = &view.View{ + Measure: VMApplyBlocksTotal, + Aggregation: defaultMillisecondsDistribution, + } + VMApplyMessagesView = &view.View{ + Measure: VMApplyMessages, + Aggregation: defaultMillisecondsDistribution, + } + VMApplyEarlyView = &view.View{ + Measure: VMApplyEarly, + Aggregation: defaultMillisecondsDistribution, + } + VMApplyCronView = &view.View{ + Measure: VMApplyCron, + Aggregation: defaultMillisecondsDistribution, 
+ } + VMApplyFlushView = &view.View{ + Measure: VMApplyFlush, + Aggregation: defaultMillisecondsDistribution, + } + VMSendsView = &view.View{ + Measure: VMSends, + Aggregation: view.LastValue(), + } + VMAppliedView = &view.View{ + Measure: VMApplied, + Aggregation: view.LastValue(), + } + + // miner + WorkerCallsStartedView = &view.View{ + Measure: WorkerCallsStarted, + Aggregation: view.Count(), + TagKeys: []tag.Key{TaskType, WorkerHostname}, + } + WorkerCallsReturnedCountView = &view.View{ + Measure: WorkerCallsReturnedCount, + Aggregation: view.Count(), + TagKeys: []tag.Key{TaskType, WorkerHostname}, + } + WorkerUntrackedCallsReturnedView = &view.View{ + Measure: WorkerUntrackedCallsReturned, + Aggregation: view.Count(), + } + WorkerCallsReturnedDurationView = &view.View{ + Measure: WorkerCallsReturnedDuration, + Aggregation: workMillisecondsDistribution, + TagKeys: []tag.Key{TaskType, WorkerHostname}, + } + + // splitstore + SplitstoreMissView = &view.View{ + Measure: SplitstoreMiss, + Aggregation: view.Count(), + } + SplitstoreCompactionTimeSecondsView = &view.View{ + Measure: SplitstoreCompactionTimeSeconds, + Aggregation: view.LastValue(), + } + SplitstoreCompactionHotView = &view.View{ + Measure: SplitstoreCompactionHot, + Aggregation: view.LastValue(), + } + SplitstoreCompactionColdView = &view.View{ + Measure: SplitstoreCompactionCold, + Aggregation: view.Sum(), + } + SplitstoreCompactionDeadView = &view.View{ + Measure: SplitstoreCompactionDead, + Aggregation: view.Sum(), + } ) // DefaultViews is an array of OpenCensus views for metric gathering purposes -var DefaultViews = append([]*view.View{ - InfoView, +var DefaultViews = func() []*view.View { + views := []*view.View{ + InfoView, + PeerCountView, + APIRequestDurationView, + } + views = append(views, blockstore.DefaultViews...) + views = append(views, rpcmetrics.DefaultViews...) 
+ return views +}() + +var ChainNodeViews = append([]*view.View{ ChainNodeHeightView, ChainNodeHeightExpectedView, ChainNodeWorkerHeightView, @@ -158,11 +340,17 @@ var DefaultViews = append([]*view.View{ BlockValidationFailureView, BlockValidationSuccessView, BlockValidationDurationView, + BlockDelayView, MessagePublishedView, MessageReceivedView, MessageValidationFailureView, MessageValidationSuccessView, - PeerCountView, + MessageValidationDurationView, + MpoolGetNonceDurationView, + MpoolGetBalanceDurationView, + MpoolAddTsDurationView, + MpoolAddDurationView, + MpoolPushDurationView, PubsubPublishMessageView, PubsubDeliverMessageView, PubsubRejectMessageView, @@ -170,9 +358,28 @@ var DefaultViews = append([]*view.View{ PubsubRecvRPCView, PubsubSendRPCView, PubsubDropRPCView, - APIRequestDurationView, -}, - rpcmetrics.DefaultViews...) + VMFlushCopyCountView, + VMFlushCopyDurationView, + SplitstoreMissView, + SplitstoreCompactionTimeSecondsView, + SplitstoreCompactionHotView, + SplitstoreCompactionColdView, + SplitstoreCompactionDeadView, + VMApplyBlocksTotalView, + VMApplyMessagesView, + VMApplyEarlyView, + VMApplyCronView, + VMApplyFlushView, + VMSendsView, + VMAppliedView, +}, DefaultViews...) + +var MinerNodeViews = append([]*view.View{ + WorkerCallsStartedView, + WorkerCallsReturnedCountView, + WorkerUntrackedCallsReturnedView, + WorkerCallsReturnedDurationView, +}, DefaultViews...) // SinceInMilliseconds returns the duration of time since the provide time as a float64. 
func SinceInMilliseconds(startTime time.Time) float64 { diff --git a/metrics/proxy.go b/metrics/proxy.go index f3714ec2e7d..94798f5aa10 100644 --- a/metrics/proxy.go +++ b/metrics/proxy.go @@ -7,59 +7,58 @@ import ( "go.opencensus.io/tag" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apistruct" ) func MetricedStorMinerAPI(a api.StorageMiner) api.StorageMiner { - var out apistruct.StorageMinerStruct - proxy(a, &out.Internal) - proxy(a, &out.CommonStruct.Internal) + var out api.StorageMinerStruct + proxy(a, &out) return &out } func MetricedFullAPI(a api.FullNode) api.FullNode { - var out apistruct.FullNodeStruct - proxy(a, &out.Internal) - proxy(a, &out.CommonStruct.Internal) + var out api.FullNodeStruct + proxy(a, &out) return &out } -func MetricedWorkerAPI(a api.WorkerAPI) api.WorkerAPI { - var out apistruct.WorkerStruct - proxy(a, &out.Internal) +func MetricedWorkerAPI(a api.Worker) api.Worker { + var out api.WorkerStruct + proxy(a, &out) return &out } -func MetricedWalletAPI(a api.WalletAPI) api.WalletAPI { - var out apistruct.WalletStruct - proxy(a, &out.Internal) +func MetricedWalletAPI(a api.Wallet) api.Wallet { + var out api.WalletStruct + proxy(a, &out) return &out } -func MetricedGatewayAPI(a api.GatewayAPI) api.GatewayAPI { - var out apistruct.GatewayStruct - proxy(a, &out.Internal) +func MetricedGatewayAPI(a api.Gateway) api.Gateway { + var out api.GatewayStruct + proxy(a, &out) return &out } -func proxy(in interface{}, out interface{}) { - rint := reflect.ValueOf(out).Elem() - ra := reflect.ValueOf(in) +func proxy(in interface{}, outstr interface{}) { + outs := api.GetInternalStructs(outstr) + for _, out := range outs { + rint := reflect.ValueOf(out).Elem() + ra := reflect.ValueOf(in) - for f := 0; f < rint.NumField(); f++ { - field := rint.Type().Field(f) - fn := ra.MethodByName(field.Name) - - rint.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) { - ctx := 
args[0].Interface().(context.Context) - // upsert function name into context - ctx, _ = tag.New(ctx, tag.Upsert(Endpoint, field.Name)) - stop := Timer(ctx, APIRequestDuration) - defer stop() - // pass tagged ctx back into function call - args[0] = reflect.ValueOf(ctx) - return fn.Call(args) - })) + for f := 0; f < rint.NumField(); f++ { + field := rint.Type().Field(f) + fn := ra.MethodByName(field.Name) + rint.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) { + ctx := args[0].Interface().(context.Context) + // upsert function name into context + ctx, _ = tag.New(ctx, tag.Upsert(Endpoint, field.Name)) + stop := Timer(ctx, APIRequestDuration) + defer stop() + // pass tagged ctx back into function call + args[0] = reflect.ValueOf(ctx) + return fn.Call(args) + })) + } } } diff --git a/miner/miner.go b/miner/miner.go index f2468a91153..1727f69420b 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -6,22 +6,27 @@ import ( "crypto/rand" "encoding/binary" "fmt" + "os" "sync" "time" + "github.com/filecoin-project/lotus/api/v1api" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/gen/slashfilter" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" lru "github.com/hashicorp/golang-lru" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/messagepool/gasguess" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" @@ -38,7 +43,13 @@ const ( evtTypeBlockMined = iota ) -// returns a callback reporting whether we 
mined a blocks in this round +// waitFunc is expected to pace block mining at the configured network rate. +// +// baseTime is the timestamp of the mining base, i.e. the timestamp +// of the tipset we're planning to construct upon. +// +// Upon each mining loop iteration, the returned callback is called reporting +// whether we mined a block in this round or not. type waitFunc func(ctx context.Context, baseTime uint64) (func(bool, abi.ChainEpoch, error), abi.ChainEpoch, error) func randTimeOffset(width time.Duration) time.Duration { @@ -49,7 +60,9 @@ func randTimeOffset(width time.Duration) time.Duration { return val - (width / 2) } -func NewMiner(api api.FullNode, epp gen.WinningPoStProver, addr address.Address, sf *slashfilter.SlashFilter, j journal.Journal) *Miner { +// NewMiner instantiates a miner with a concrete WinningPoStProver and a miner +// address (which can be different from the worker's address). +func NewMiner(api v1api.FullNode, epp gen.WinningPoStProver, addr address.Address, sf *slashfilter.SlashFilter, j journal.Journal) *Miner { arc, err := lru.NewARC(10000) if err != nil { panic(err) @@ -60,7 +73,16 @@ func NewMiner(api api.FullNode, epp gen.WinningPoStProver, addr address.Address, epp: epp, address: addr, waitFunc: func(ctx context.Context, baseTime uint64) (func(bool, abi.ChainEpoch, error), abi.ChainEpoch, error) { - // Wait around for half the block time in case other parents come in + // wait around for half the block time in case other parents come in + // + // if we're mining a block in the past via catch-up/rush mining, + // such as when recovering from a network halt, this sleep will be + // for a negative duration, and therefore **will return + // immediately**. + // + // the result is that we WILL NOT wait, therefore fast-forwarding + // and thus healing the chain by backfilling it with null rounds + // rapidly. 
deadline := baseTime + build.PropagationDelaySecs baseT := time.Unix(int64(deadline), 0) @@ -80,8 +102,11 @@ func NewMiner(api api.FullNode, epp gen.WinningPoStProver, addr address.Address, } } +// Miner encapsulates the mining processes of the system. +// +// Refer to the godocs on mineOne and mine methods for more detail. type Miner struct { - api api.FullNode + api v1api.FullNode epp gen.WinningPoStProver @@ -92,15 +117,20 @@ type Miner struct { waitFunc waitFunc + // lastWork holds the last MiningBase we built upon. lastWork *MiningBase - sf *slashfilter.SlashFilter + sf *slashfilter.SlashFilter + // minedBlockHeights is a safeguard that caches the last heights we mined. + // It is consulted before publishing a newly mined block, for a sanity check + // intended to avoid slashings in case of a bug. minedBlockHeights *lru.ARCCache evtTypes [1]journal.EventType journal journal.Journal } +// Address returns the address of the miner. func (m *Miner) Address() address.Address { m.lk.Lock() defer m.lk.Unlock() @@ -108,7 +138,9 @@ func (m *Miner) Address() address.Address { return m.address } -func (m *Miner) Start(ctx context.Context) error { +// Start starts the mining operation. It spawns a goroutine and returns +// immediately. Start is not idempotent. +func (m *Miner) Start(_ context.Context) error { m.lk.Lock() defer m.lk.Unlock() if m.stop != nil { @@ -119,6 +151,8 @@ func (m *Miner) Start(ctx context.Context) error { return nil } +// Stop stops the mining operation. It is not idempotent, and multiple adjacent +// calls to Stop will fail. func (m *Miner) Stop(ctx context.Context) error { m.lk.Lock() @@ -146,10 +180,34 @@ func (m *Miner) niceSleep(d time.Duration) bool { } } +// mine runs the mining loop. It performs the following: +// +// 1. Queries our current best currently-known mining candidate (tipset to +// build upon). +// 2. Waits until the propagation delay of the network has elapsed (currently +// 6 seconds). 
The waiting is done relative to the timestamp of the best +// candidate, which means that if it's way in the past, we won't wait at +// all (e.g. in catch-up or rush mining). +// 3. After the wait, we query our best mining candidate. This will be the one +// we'll work with. +// 4. Sanity check that we _actually_ have a new mining base to mine on. If +// not, wait one epoch + propagation delay, and go back to the top. +// 5. We attempt to mine a block, by calling mineOne (refer to godocs). This +// method will either return a block if we were eligible to mine, or nil +// if we weren't. +// 6a. If we mined a block, we update our state and push it out to the network +// via gossipsub. +// 6b. If we didn't mine a block, we consider this to be a nil round on top of +// the mining base we selected. If other miner or miners on the network +// were eligible to mine, we will receive their blocks via gossipsub and +// we will select that tipset on the next iteration of the loop, thus +// discarding our null round. func (m *Miner) mine(ctx context.Context) { ctx, span := trace.StartSpan(ctx, "/mine") defer span.End() + go m.doWinPoStWarmup(ctx) + var lastBase MiningBase minerLoop: for { @@ -268,7 +326,9 @@ minerLoop: if err := m.sf.MinedBlock(b.Header, base.TipSet.Height()+base.NullRounds); err != nil { log.Errorf(" SLASH FILTER ERROR: %s", err) - continue + if os.Getenv("LOTUS_MINER_NO_SLASHFILTER") != "_yes_i_know_i_can_and_probably_will_lose_all_my_fil_and_power_" { + continue + } } blkKey := fmt.Sprintf("%d", b.Header.Height) @@ -304,11 +364,19 @@ minerLoop: } } +// MiningBase is the tipset on top of which we plan to construct our next block. +// Refer to godocs on GetBestMiningCandidate. type MiningBase struct { TipSet *types.TipSet NullRounds abi.ChainEpoch } +// GetBestMiningCandidate implements the fork choice rule from a miner's +// perspective. 
+// +// It obtains the current chain head (HEAD), and compares it to the last tipset +// we selected as our mining base (LAST). If HEAD's weight is larger than +// LAST's weight, it selects HEAD to build on. Else, it selects LAST. func (m *Miner) GetBestMiningCandidate(ctx context.Context) (*MiningBase, error) { m.lk.Lock() defer m.lk.Unlock() @@ -352,48 +420,97 @@ func (m *Miner) GetBestMiningCandidate(ctx context.Context) (*MiningBase, error) // This method does the following: // // 1. -func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (*types.BlockMsg, error) { +func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *types.BlockMsg, err error) { log.Debugw("attempting to mine a block", "tipset", types.LogCids(base.TipSet.Cids())) - start := build.Clock.Now() + tStart := build.Clock.Now() round := base.TipSet.Height() + base.NullRounds + 1 - mbi, err := m.api.MinerGetBaseInfo(ctx, m.address, round, base.TipSet.Key()) + // always write out a log + var winner *types.ElectionProof + var mbi *api.MiningBaseInfo + var rbase types.BeaconEntry + defer func() { + + var hasMinPower bool + + // mbi can be nil if we are deep in penalty and there are 0 eligible sectors + // in the current deadline. 
If this case - put together a dummy one for reporting + // https://github.com/filecoin-project/lotus/blob/v1.9.0/chain/stmgr/utils.go#L500-L502 + if mbi == nil { + mbi = &api.MiningBaseInfo{ + NetworkPower: big.NewInt(-1), // we do not know how big the network is at this point + EligibleForMining: false, + MinerPower: big.NewInt(0), // but we do know we do not have anything eligible + } + + // try to opportunistically pull actual power and plug it into the fake mbi + if pow, err := m.api.StateMinerPower(ctx, m.address, base.TipSet.Key()); err == nil && pow != nil { + hasMinPower = pow.HasMinPower + mbi.MinerPower = pow.MinerPower.QualityAdjPower + mbi.NetworkPower = pow.TotalPower.QualityAdjPower + } + } + + isLate := uint64(tStart.Unix()) > (base.TipSet.MinTimestamp() + uint64(base.NullRounds*builtin.EpochDurationSeconds) + build.PropagationDelaySecs) + + logStruct := []interface{}{ + "tookMilliseconds", (build.Clock.Now().UnixNano() - tStart.UnixNano()) / 1_000_000, + "forRound", int64(round), + "baseEpoch", int64(base.TipSet.Height()), + "baseDeltaSeconds", uint64(tStart.Unix()) - base.TipSet.MinTimestamp(), + "nullRounds", int64(base.NullRounds), + "lateStart", isLate, + "beaconEpoch", rbase.Round, + "lookbackEpochs", int64(policy.ChainFinality), // hardcoded as it is unlikely to change again: https://github.com/filecoin-project/lotus/blob/v1.8.0/chain/actors/policy/policy.go#L180-L186 + "networkPowerAtLookback", mbi.NetworkPower.String(), + "minerPowerAtLookback", mbi.MinerPower.String(), + "isEligible", mbi.EligibleForMining, + "isWinner", (winner != nil), + "error", err, + } + + if err != nil { + log.Errorw("completed mineOne", logStruct...) + } else if isLate || (hasMinPower && !mbi.EligibleForMining) { + log.Warnw("completed mineOne", logStruct...) + } else { + log.Infow("completed mineOne", logStruct...) 
+ } + }() + + mbi, err = m.api.MinerGetBaseInfo(ctx, m.address, round, base.TipSet.Key()) if err != nil { - return nil, xerrors.Errorf("failed to get mining base info: %w", err) + err = xerrors.Errorf("failed to get mining base info: %w", err) + return nil, err } if mbi == nil { return nil, nil } + if !mbi.EligibleForMining { // slashed or just have no power yet return nil, nil } - tMBI := build.Clock.Now() - - beaconPrev := mbi.PrevBeaconEntry - - tDrand := build.Clock.Now() - bvals := mbi.BeaconEntries - tPowercheck := build.Clock.Now() - log.Infof("Time delta between now and our mining base: %ds (nulls: %d)", uint64(build.Clock.Now().Unix())-base.TipSet.MinTimestamp(), base.NullRounds) - - rbase := beaconPrev + bvals := mbi.BeaconEntries + rbase = mbi.PrevBeaconEntry if len(bvals) > 0 { rbase = bvals[len(bvals)-1] } ticket, err := m.computeTicket(ctx, &rbase, base, mbi) if err != nil { - return nil, xerrors.Errorf("scratching ticket failed: %w", err) + err = xerrors.Errorf("scratching ticket failed: %w", err) + return nil, err } - winner, err := gen.IsRoundWinner(ctx, base.TipSet, round, m.address, rbase, mbi, m.api) + winner, err = gen.IsRoundWinner(ctx, base.TipSet, round, m.address, rbase, mbi, m.api) if err != nil { - return nil, xerrors.Errorf("failed to check if we win next round: %w", err) + err = xerrors.Errorf("failed to check if we win next round: %w", err) + return nil, err } if winner == nil { @@ -404,12 +521,14 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (*types.BlockMsg, buf := new(bytes.Buffer) if err := m.address.MarshalCBOR(buf); err != nil { - return nil, xerrors.Errorf("failed to marshal miner address: %w", err) + err = xerrors.Errorf("failed to marshal miner address: %w", err) + return nil, err } - rand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, base.TipSet.Height()+base.NullRounds+1, buf.Bytes()) + rand, err := store.DrawRandomness(rbase.Data, 
crypto.DomainSeparationTag_WinningPoStChallengeSeed, round, buf.Bytes()) if err != nil { - return nil, xerrors.Errorf("failed to get randomness for winning post: %w", err) + err = xerrors.Errorf("failed to get randomness for winning post: %w", err) + return nil, err } prand := abi.PoStRandomness(rand) @@ -418,42 +537,46 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (*types.BlockMsg, postProof, err := m.epp.ComputeProof(ctx, mbi.Sectors, prand) if err != nil { - return nil, xerrors.Errorf("failed to compute winning post proof: %w", err) + err = xerrors.Errorf("failed to compute winning post proof: %w", err) + return nil, err } + tProof := build.Clock.Now() + // get pending messages early, msgs, err := m.api.MpoolSelect(context.TODO(), base.TipSet.Key(), ticket.Quality()) if err != nil { - return nil, xerrors.Errorf("failed to select messages for block: %w", err) + err = xerrors.Errorf("failed to select messages for block: %w", err) + return nil, err } tPending := build.Clock.Now() // TODO: winning post proof - b, err := m.createBlock(base, m.address, ticket, winner, bvals, postProof, msgs) + minedBlock, err = m.createBlock(base, m.address, ticket, winner, bvals, postProof, msgs) if err != nil { - return nil, xerrors.Errorf("failed to create block: %w", err) + err = xerrors.Errorf("failed to create block: %w", err) + return nil, err } tCreateBlock := build.Clock.Now() - dur := tCreateBlock.Sub(start) + dur := tCreateBlock.Sub(tStart) parentMiners := make([]address.Address, len(base.TipSet.Blocks())) for i, header := range base.TipSet.Blocks() { parentMiners[i] = header.Miner } - log.Infow("mined new block", "cid", b.Cid(), "height", b.Header.Height, "miner", b.Header.Miner, "parents", parentMiners, "took", dur) + log.Infow("mined new block", "cid", minedBlock.Cid(), "height", int64(minedBlock.Header.Height), "miner", minedBlock.Header.Miner, "parents", parentMiners, "parentTipset", base.TipSet.Key().String(), "took", dur) if dur > 
time.Second*time.Duration(build.BlockDelaySecs) { log.Warnw("CAUTION: block production took longer than the block delay. Your computer may not be fast enough to keep up", - "tMinerBaseInfo ", tMBI.Sub(start), - "tDrand ", tDrand.Sub(tMBI), - "tPowercheck ", tPowercheck.Sub(tDrand), + "tPowercheck ", tPowercheck.Sub(tStart), "tTicket ", tTicket.Sub(tPowercheck), "tSeed ", tSeed.Sub(tTicket), - "tPending ", tPending.Sub(tSeed), + "tProof ", tProof.Sub(tSeed), + "tPending ", tPending.Sub(tProof), "tCreateBlock ", tCreateBlock.Sub(tPending)) } - return b, nil + return minedBlock, nil } func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, base *MiningBase, mbi *api.MiningBaseInfo) (*types.Ticket, error) { @@ -501,33 +624,3 @@ func (m *Miner) createBlock(base *MiningBase, addr address.Address, ticket *type WinningPoStProof: wpostProof, }) } - -type actCacheEntry struct { - act *types.Actor - err error -} - -type cachedActorLookup struct { - tsk types.TipSetKey - cache map[address.Address]actCacheEntry - fallback gasguess.ActorLookup -} - -func (c *cachedActorLookup) StateGetActor(ctx context.Context, a address.Address, tsk types.TipSetKey) (*types.Actor, error) { - if c.tsk == tsk { - e, has := c.cache[a] - if has { - return e.act, e.err - } - } - - e, err := c.fallback(ctx, a, tsk) - if c.tsk == tsk { - c.cache[a] = actCacheEntry{ - act: e, err: err, - } - } - return e, err -} - -type ActorLookup func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) diff --git a/miner/testminer.go b/miner/testminer.go index 5f461d88482..7f29a7ae028 100644 --- a/miner/testminer.go +++ b/miner/testminer.go @@ -8,7 +8,8 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api" + + "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/gen/slashfilter" 
"github.com/filecoin-project/lotus/journal" @@ -19,8 +20,8 @@ type MineReq struct { Done func(bool, abi.ChainEpoch, error) } -func NewTestMiner(nextCh <-chan MineReq, addr address.Address) func(api.FullNode, gen.WinningPoStProver) *Miner { - return func(api api.FullNode, epp gen.WinningPoStProver) *Miner { +func NewTestMiner(nextCh <-chan MineReq, addr address.Address) func(v1api.FullNode, gen.WinningPoStProver) *Miner { + return func(api v1api.FullNode, epp gen.WinningPoStProver) *Miner { arc, err := lru.NewARC(10000) if err != nil { panic(err) diff --git a/miner/warmup.go b/miner/warmup.go new file mode 100644 index 00000000000..991679c0936 --- /dev/null +++ b/miner/warmup.go @@ -0,0 +1,84 @@ +package miner + +import ( + "context" + "crypto/rand" + "math" + "time" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + + "github.com/filecoin-project/lotus/chain/types" +) + +func (m *Miner) winPoStWarmup(ctx context.Context) error { + deadlines, err := m.api.StateMinerDeadlines(ctx, m.address, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting deadlines: %w", err) + } + + var sector abi.SectorNumber = math.MaxUint64 + +out: + for dlIdx := range deadlines { + partitions, err := m.api.StateMinerPartitions(ctx, m.address, uint64(dlIdx), types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting partitions for deadline %d: %w", dlIdx, err) + } + + for _, partition := range partitions { + b, err := partition.ActiveSectors.First() + if err == bitfield.ErrNoBitsSet { + continue + } + if err != nil { + return err + } + + sector = abi.SectorNumber(b) + break out + } + } + + if sector == math.MaxUint64 { + log.Info("skipping winning PoSt warmup, no sectors") + return nil + } + + log.Infow("starting winning PoSt warmup", "sector", sector) + start := time.Now() + + var r abi.PoStRandomness = make([]byte, 
abi.RandomnessLength) + _, _ = rand.Read(r) + + si, err := m.api.StateSectorGetInfo(ctx, m.address, sector, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting sector info: %w", err) + } + + _, err = m.epp.ComputeProof(ctx, []proof2.SectorInfo{ + { + SealProof: si.SealProof, + SectorNumber: sector, + SealedCID: si.SealedCID, + }, + }, r) + if err != nil { + return xerrors.Errorf("failed to compute proof: %w", err) + } + + log.Infow("winning PoSt warmup successful", "took", time.Now().Sub(start)) + return nil +} + +func (m *Miner) doWinPoStWarmup(ctx context.Context) { + err := m.winPoStWarmup(ctx) + if err != nil { + log.Errorw("winning PoSt warmup failed", "error", err) + } +} diff --git a/node/builder.go b/node/builder.go index bb039cb7680..6963cf4a455 100644 --- a/node/builder.go +++ b/node/builder.go @@ -3,16 +3,16 @@ package node import ( "context" "errors" + "os" "time" - "github.com/filecoin-project/lotus/chain" - "github.com/filecoin-project/lotus/chain/exchange" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/lotus/node/hello" + "github.com/filecoin-project/lotus/node/impl/net" + metricsi "github.com/ipfs/go-metrics-interface" - logging "github.com/ipfs/go-log" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/system" + + logging "github.com/ipfs/go-log/v2" ci "github.com/libp2p/go-libp2p-core/crypto" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/peer" @@ -22,57 +22,27 @@ import ( "github.com/libp2p/go-libp2p-peerstore/pstoremem" pubsub "github.com/libp2p/go-libp2p-pubsub" record "github.com/libp2p/go-libp2p-record" + "github.com/libp2p/go-libp2p/p2p/net/conngater" "github.com/multiformats/go-multiaddr" "go.uber.org/fx" "golang.org/x/xerrors" - "github.com/filecoin-project/go-fil-markets/discovery" - discoveryimpl 
"github.com/filecoin-project/go-fil-markets/discovery/impl" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" - - storage2 "github.com/filecoin-project/specs-storage/storage" - - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/beacon" - "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/gen/slashfilter" - "github.com/filecoin-project/lotus/chain/market" - "github.com/filecoin-project/lotus/chain/messagepool" - "github.com/filecoin-project/lotus/chain/messagesigner" - "github.com/filecoin-project/lotus/chain/metrics" - "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" - ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" - "github.com/filecoin-project/lotus/chain/wallet/remotewallet" - sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/stores" - "github.com/filecoin-project/lotus/extern/sector-storage/storiface" - sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/peermgr" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" - "github.com/filecoin-project/lotus/markets/dealfilter" "github.com/filecoin-project/lotus/markets/storageadapter" - "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/impl" "github.com/filecoin-project/lotus/node/impl/common" - "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/modules" 
"github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/filecoin-project/lotus/node/modules/lp2p" "github.com/filecoin-project/lotus/node/modules/testing" "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/paychmgr" - "github.com/filecoin-project/lotus/paychmgr/settler" - "github.com/filecoin-project/lotus/storage" - "github.com/filecoin-project/lotus/storage/sectorblocks" ) //nolint:deadcode,varcheck @@ -95,6 +65,7 @@ var ( ConnectionManagerKey = special{9} // Libp2p option AutoNATSvcKey = special{10} // Libp2p option BandwidthReporterKey = special{11} // Libp2p option + ConnGaterKey = special{12} // libp2p option ) type invoke int @@ -106,8 +77,10 @@ const ( // the system starts, so that it's available for all other components. InitJournalKey = invoke(iota) - // libp2p + // System processes. + InitMemoryWatchdog + // libp2p PstoreAddSelfKeysKey StartListeningKey BootstrapKey @@ -122,11 +95,12 @@ const ( HandleIncomingBlocksKey HandleIncomingMessagesKey - + HandleMigrateClientFundsKey HandlePaymentChannelManagerKey // miner GetParamsKey + HandleMigrateProviderFundsKey HandleDealsKey HandleRetrievalKey RunSectorServiceKey @@ -136,6 +110,7 @@ const ( HeadMetricsKey SettlePaymentChannelsKey RunPeerTaggerKey + SetupFallbackBlockstoresKey SetApiEndpointKey @@ -156,259 +131,103 @@ type Settings struct { nodeType repo.RepoType - Online bool // Online option applied + Base bool // Base option applied Config bool // Config option applied Lite bool // Start node in "lite" mode + + enableLibp2pNode bool } +// Basic lotus-app services func defaults() []Option { return []Option{ // global system journal. 
Override(new(journal.DisabledEvents), journal.EnvDisabledEvents), Override(new(journal.Journal), modules.OpenFilesystemJournal), - Override(new(helpers.MetricsCtx), context.Background), - Override(new(record.Validator), modules.RecordValidator), - Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(false)), - Override(new(dtypes.ShutdownChan), make(chan struct{})), + Override(new(system.MemoryConstraints), modules.MemoryConstraints), + Override(InitMemoryWatchdog, modules.MemoryWatchdog), - // Filecoin modules + Override(new(helpers.MetricsCtx), func() context.Context { + return metricsi.CtxScope(context.Background(), "lotus") + }), + Override(new(dtypes.ShutdownChan), make(chan struct{})), } } -func libp2p() Option { - return Options( - Override(new(peerstore.Peerstore), pstoremem.NewPeerstore), - - Override(DefaultTransportsKey, lp2p.DefaultTransports), - - Override(new(lp2p.RawHost), lp2p.Host), - Override(new(host.Host), lp2p.RoutedHost), - Override(new(lp2p.BaseIpfsRouting), lp2p.DHTRouting(dht.ModeAuto)), - - Override(DiscoveryHandlerKey, lp2p.DiscoveryHandler), - Override(AddrsFactoryKey, lp2p.AddrsFactory(nil, nil)), - Override(SmuxTransportKey, lp2p.SmuxTransport(true)), - Override(RelayKey, lp2p.NoRelay()), - Override(SecurityKey, lp2p.Security(true, false)), - - Override(BaseRoutingKey, lp2p.BaseRouting), - Override(new(routing.Routing), lp2p.Routing), - - Override(NatPortMapKey, lp2p.NatPortMap), - Override(BandwidthReporterKey, lp2p.BandwidthCounter), - - Override(ConnectionManagerKey, lp2p.ConnectionManager(50, 200, 20*time.Second, nil)), - Override(AutoNATSvcKey, lp2p.AutoNATService), - - Override(new(*dtypes.ScoreKeeper), lp2p.ScoreKeeper), - Override(new(*pubsub.PubSub), lp2p.GossipSub), - Override(new(*config.Pubsub), func(bs dtypes.Bootstrapper) *config.Pubsub { - return &config.Pubsub{ - Bootstrapper: bool(bs), - } - }), +var LibP2P = Options( + // Host config + Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(false)), + + // Host 
dependencies + Override(new(peerstore.Peerstore), pstoremem.NewPeerstore), + Override(PstoreAddSelfKeysKey, lp2p.PstoreAddSelfKeys), + Override(StartListeningKey, lp2p.StartListening(config.DefaultFullNode().Libp2p.ListenAddresses)), + + // Host settings + Override(DefaultTransportsKey, lp2p.DefaultTransports), + Override(AddrsFactoryKey, lp2p.AddrsFactory(nil, nil)), + Override(SmuxTransportKey, lp2p.SmuxTransport(true)), + Override(RelayKey, lp2p.NoRelay()), + Override(SecurityKey, lp2p.Security(true, false)), + + // Host + Override(new(lp2p.RawHost), lp2p.Host), + Override(new(host.Host), lp2p.RoutedHost), + Override(new(lp2p.BaseIpfsRouting), lp2p.DHTRouting(dht.ModeAuto)), + + Override(DiscoveryHandlerKey, lp2p.DiscoveryHandler), + + // Routing + Override(new(record.Validator), modules.RecordValidator), + Override(BaseRoutingKey, lp2p.BaseRouting), + Override(new(routing.Routing), lp2p.Routing), + + // Services + Override(NatPortMapKey, lp2p.NatPortMap), + Override(BandwidthReporterKey, lp2p.BandwidthCounter), + Override(AutoNATSvcKey, lp2p.AutoNATService), + + // Services (pubsub) + Override(new(*dtypes.ScoreKeeper), lp2p.ScoreKeeper), + Override(new(*pubsub.PubSub), lp2p.GossipSub), + Override(new(*config.Pubsub), func(bs dtypes.Bootstrapper) *config.Pubsub { + return &config.Pubsub{ + Bootstrapper: bool(bs), + } + }), - Override(PstoreAddSelfKeysKey, lp2p.PstoreAddSelfKeys), - Override(StartListeningKey, lp2p.StartListening(config.DefaultFullNode().Libp2p.ListenAddresses)), - ) -} + // Services (connection management) + Override(ConnectionManagerKey, lp2p.ConnectionManager(50, 200, 20*time.Second, nil)), + Override(new(*conngater.BasicConnectionGater), lp2p.ConnGater), + Override(ConnGaterKey, lp2p.ConnGaterOption), +) -func isType(t repo.RepoType) func(s *Settings) bool { +func IsType(t repo.RepoType) func(s *Settings) bool { return func(s *Settings) bool { return s.nodeType == t } } -// Online sets up basic libp2p node -func Online() Option { - 
isFullOrLiteNode := func(s *Settings) bool { return s.nodeType == repo.FullNode } - isFullNode := func(s *Settings) bool { return s.nodeType == repo.FullNode && !s.Lite } - isLiteNode := func(s *Settings) bool { return s.nodeType == repo.FullNode && s.Lite } - - return Options( - // make sure that online is applied before Config. - // This is important because Config overrides some of Online units - func(s *Settings) error { s.Online = true; return nil }, - ApplyIf(func(s *Settings) bool { return s.Config }, - Error(errors.New("the Online option must be set before Config option")), - ), - - libp2p(), - - // common - Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter), - - // Full node or lite node - ApplyIf(isFullOrLiteNode, - // TODO: Fix offline mode - - Override(new(dtypes.BootstrapPeers), modules.BuiltinBootstrap), - Override(new(dtypes.DrandBootstrap), modules.DrandBootstrap), - Override(new(dtypes.DrandSchedule), modules.BuiltinDrandConfig), - - Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), - Override(new(vm.SyscallBuilder), vm.Syscalls), - Override(new(*store.ChainStore), modules.ChainStore), - Override(new(stmgr.UpgradeSchedule), stmgr.DefaultUpgradeSchedule()), - Override(new(*stmgr.StateManager), stmgr.NewStateManagerWithUpgradeSchedule), - Override(new(*wallet.LocalWallet), wallet.NewWallet), - Override(new(wallet.Default), From(new(*wallet.LocalWallet))), - Override(new(api.WalletAPI), From(new(wallet.MultiWallet))), - Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner), - - Override(new(dtypes.ChainGCLocker), blockstore.NewGCLocker), - Override(new(dtypes.ChainGCBlockstore), modules.ChainGCBlockstore), - Override(new(dtypes.ChainBitswap), modules.ChainBitswap), - Override(new(dtypes.ChainBlockService), modules.ChainBlockService), - - // Filecoin services - // We don't want the SyncManagerCtor to be used as an fx constructor, but rather as a value. 
- // It will be called implicitly by the Syncer constructor. - Override(new(chain.SyncManagerCtor), func() chain.SyncManagerCtor { return chain.NewSyncManager }), - Override(new(*chain.Syncer), modules.NewSyncer), - Override(new(exchange.Client), exchange.NewClient), - Override(new(*messagepool.MessagePool), modules.MessagePool), - - Override(new(modules.Genesis), modules.ErrorGenesis), - Override(new(dtypes.AfterGenesisSet), modules.SetGenesis), - Override(SetGenesisKey, modules.DoSetGenesis), - - Override(new(dtypes.NetworkName), modules.NetworkName), - Override(new(*hello.Service), hello.NewHelloService), - Override(new(exchange.Server), exchange.NewServer), - Override(new(*peermgr.PeerMgr), peermgr.NewPeerMgr), - - Override(new(dtypes.Graphsync), modules.Graphsync), - Override(new(*dtypes.MpoolLocker), new(dtypes.MpoolLocker)), - Override(new(*discoveryimpl.Local), modules.NewLocalDiscovery), - Override(new(discovery.PeerResolver), modules.RetrievalResolver), - - Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient), - Override(new(dtypes.ClientDatastore), modules.NewClientDatastore), - Override(new(dtypes.ClientDataTransfer), modules.NewClientGraphsyncDataTransfer), - Override(new(modules.ClientDealFunds), modules.NewClientDealFunds), - Override(new(storagemarket.StorageClient), modules.StorageClient), - Override(new(storagemarket.StorageClientNode), storageadapter.NewClientNodeAdapter), - Override(new(beacon.Schedule), modules.RandomSchedule), - - Override(new(*paychmgr.Store), paychmgr.NewStore), - Override(new(*paychmgr.Manager), paychmgr.NewManager), - Override(new(*market.FundMgr), market.StartFundManager), - Override(HandlePaymentChannelManagerKey, paychmgr.HandleManager), - Override(SettlePaymentChannelsKey, settler.SettlePaymentChannels), - ), - - // Lite node - ApplyIf(isLiteNode, - Override(new(messagesigner.MpoolNonceAPI), From(new(modules.MpoolNonceAPI))), - Override(new(full.ChainModuleAPI), From(new(api.GatewayAPI))), - 
Override(new(full.GasModuleAPI), From(new(api.GatewayAPI))), - Override(new(full.MpoolModuleAPI), From(new(api.GatewayAPI))), - Override(new(full.StateModuleAPI), From(new(api.GatewayAPI))), - Override(new(stmgr.StateManagerAPI), modules.NewRPCStateManager), - ), - - // Full node - ApplyIf(isFullNode, - Override(new(messagesigner.MpoolNonceAPI), From(new(*messagepool.MessagePool))), - Override(new(full.ChainModuleAPI), From(new(full.ChainModule))), - Override(new(full.GasModuleAPI), From(new(full.GasModule))), - Override(new(full.MpoolModuleAPI), From(new(full.MpoolModule))), - Override(new(full.StateModuleAPI), From(new(full.StateModule))), - Override(new(stmgr.StateManagerAPI), From(new(*stmgr.StateManager))), - - Override(RunHelloKey, modules.RunHello), - Override(RunChainExchangeKey, modules.RunChainExchange), - Override(RunPeerMgrKey, modules.RunPeerMgr), - Override(HandleIncomingMessagesKey, modules.HandleIncomingMessages), - Override(HandleIncomingBlocksKey, modules.HandleIncomingBlocks), - ), - - // miner - ApplyIf(isType(repo.StorageMiner), - Override(new(api.Common), From(new(common.CommonAPI))), - Override(new(sectorstorage.StorageAuth), modules.StorageAuth), - - Override(new(*stores.Index), stores.NewIndex), - Override(new(stores.SectorIndex), From(new(*stores.Index))), - Override(new(dtypes.MinerID), modules.MinerID), - Override(new(dtypes.MinerAddress), modules.MinerAddress), - Override(new(*ffiwrapper.Config), modules.ProofsConfig), - Override(new(stores.LocalStorage), From(new(repo.LockedRepo))), - Override(new(sealing.SectorIDCounter), modules.SectorIDCounter), - Override(new(*sectorstorage.Manager), modules.SectorStorage), - Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), - - Override(new(sectorstorage.SectorManager), From(new(*sectorstorage.Manager))), - Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))), - Override(new(storiface.WorkerReturn), From(new(sectorstorage.SectorManager))), - - 
Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks), - Override(new(*storage.Miner), modules.StorageMiner(config.DefaultStorageMiner().Fees)), - Override(new(dtypes.NetworkName), modules.StorageNetworkName), - - Override(new(dtypes.StagingMultiDstore), modules.StagingMultiDatastore), - Override(new(dtypes.StagingBlockstore), modules.StagingBlockstore), - Override(new(dtypes.StagingDAG), modules.StagingDAG), - Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync), - Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider), - Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDAGServiceDataTransfer), - Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore), - Override(new(*storedask.StoredAsk), modules.NewStorageAsk), - Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(nil)), - Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)), - Override(new(modules.ProviderDealFunds), modules.NewProviderDealFunds), - Override(new(storagemarket.StorageProvider), modules.StorageProvider), - Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(nil)), - Override(HandleRetrievalKey, modules.HandleRetrieval), - Override(GetParamsKey, modules.GetParams), - Override(HandleDealsKey, modules.HandleDeals), - Override(new(gen.WinningPoStProver), storage.NewWinningPoStProver), - Override(new(*miner.Miner), modules.SetupBlockProducer), - - Override(new(dtypes.ConsiderOnlineStorageDealsConfigFunc), modules.NewConsiderOnlineStorageDealsConfigFunc), - Override(new(dtypes.SetConsiderOnlineStorageDealsConfigFunc), modules.NewSetConsideringOnlineStorageDealsFunc), - Override(new(dtypes.ConsiderOnlineRetrievalDealsConfigFunc), modules.NewConsiderOnlineRetrievalDealsConfigFunc), - Override(new(dtypes.SetConsiderOnlineRetrievalDealsConfigFunc), modules.NewSetConsiderOnlineRetrievalDealsConfigFunc), - 
Override(new(dtypes.StorageDealPieceCidBlocklistConfigFunc), modules.NewStorageDealPieceCidBlocklistConfigFunc), - Override(new(dtypes.SetStorageDealPieceCidBlocklistConfigFunc), modules.NewSetStorageDealPieceCidBlocklistConfigFunc), - Override(new(dtypes.ConsiderOfflineStorageDealsConfigFunc), modules.NewConsiderOfflineStorageDealsConfigFunc), - Override(new(dtypes.SetConsiderOfflineStorageDealsConfigFunc), modules.NewSetConsideringOfflineStorageDealsFunc), - Override(new(dtypes.ConsiderOfflineRetrievalDealsConfigFunc), modules.NewConsiderOfflineRetrievalDealsConfigFunc), - Override(new(dtypes.SetConsiderOfflineRetrievalDealsConfigFunc), modules.NewSetConsiderOfflineRetrievalDealsConfigFunc), - Override(new(dtypes.SetSealingConfigFunc), modules.NewSetSealConfigFunc), - Override(new(dtypes.GetSealingConfigFunc), modules.NewGetSealConfigFunc), - Override(new(dtypes.SetExpectedSealDurationFunc), modules.NewSetExpectedSealDurationFunc), - Override(new(dtypes.GetExpectedSealDurationFunc), modules.NewGetExpectedSealDurationFunc), - ), - ) -} +func isFullOrLiteNode(s *Settings) bool { return s.nodeType == repo.FullNode } +func isFullNode(s *Settings) bool { return s.nodeType == repo.FullNode && !s.Lite } +func isLiteNode(s *Settings) bool { return s.nodeType == repo.FullNode && s.Lite } -func StorageMiner(out *api.StorageMiner) Option { +func Base() Option { return Options( + func(s *Settings) error { s.Base = true; return nil }, // mark Base as applied ApplyIf(func(s *Settings) bool { return s.Config }, - Error(errors.New("the StorageMiner option must be set before Config option")), + Error(errors.New("the Base() option must be set before Config option")), ), - ApplyIf(func(s *Settings) bool { return s.Online }, - Error(errors.New("the StorageMiner option must be set before Online option")), + ApplyIf(func(s *Settings) bool { return s.enableLibp2pNode }, + LibP2P, ), - - func(s *Settings) error { - s.nodeType = repo.StorageMiner - return nil - }, - - func(s *Settings) 
error { - resAPI := &impl.StorageMinerAPI{} - s.invokes[ExtractApiKey] = fx.Populate(resAPI) - *out = resAPI - return nil - }, + ApplyIf(isFullOrLiteNode, ChainNode), + ApplyIf(IsType(repo.StorageMiner), MinerNode), ) } // Config sets up constructors based on the provided Config -func ConfigCommon(cfg *config.Common) Option { +func ConfigCommon(cfg *config.Common, enableLibp2pNode bool) Option { return Options( func(s *Settings) error { s.Config = true; return nil }, Override(new(dtypes.APIEndpoint), func() (dtypes.APIEndpoint, error) { @@ -417,14 +236,21 @@ func ConfigCommon(cfg *config.Common) Option { Override(SetApiEndpointKey, func(lr repo.LockedRepo, e dtypes.APIEndpoint) error { return lr.SetAPIEndpoint(e) }), - Override(new(sectorstorage.URLs), func(e dtypes.APIEndpoint) (sectorstorage.URLs, error) { + Override(new(stores.URLs), func(e dtypes.APIEndpoint) (stores.URLs, error) { ip := cfg.API.RemoteListenAddress - var urls sectorstorage.URLs + var urls stores.URLs urls = append(urls, "http://"+ip+"/remote") // TODO: This makes no assumptions, and probably could... 
return urls, nil }), - ApplyIf(func(s *Settings) bool { return s.Online }, + ApplyIf(func(s *Settings) bool { return s.Base }), // apply only if Base has already been applied + If(!enableLibp2pNode, + Override(new(api.Net), new(api.NetStub)), + Override(new(api.Common), From(new(common.CommonAPI))), + ), + If(enableLibp2pNode, + Override(new(api.Net), From(new(net.NetAPI))), + Override(new(api.Common), From(new(common.CommonAPI))), Override(StartListeningKey, lp2p.StartListening(cfg.Libp2p.ListenAddresses)), Override(ConnectionManagerKey, lp2p.ConnectionManager( cfg.Libp2p.ConnMgrLow, @@ -437,66 +263,12 @@ func ConfigCommon(cfg *config.Common) Option { ApplyIf(func(s *Settings) bool { return len(cfg.Libp2p.BootstrapPeers) > 0 }, Override(new(dtypes.BootstrapPeers), modules.ConfigBootstrap(cfg.Libp2p.BootstrapPeers)), ), - ), - Override(AddrsFactoryKey, lp2p.AddrsFactory( - cfg.Libp2p.AnnounceAddresses, - cfg.Libp2p.NoAnnounceAddresses)), - ) -} - -func ConfigFullNode(c interface{}) Option { - cfg, ok := c.(*config.FullNode) - if !ok { - return Error(xerrors.Errorf("invalid config from repo, got: %T", c)) - } - - ipfsMaddr := cfg.Client.IpfsMAddr - return Options( - ConfigCommon(&cfg.Common), - If(cfg.Client.UseIpfs, - Override(new(dtypes.ClientBlockstore), modules.IpfsClientBlockstore(ipfsMaddr)), - If(cfg.Client.IpfsUseForRetrieval, - Override(new(dtypes.ClientRetrievalStoreManager), modules.ClientBlockstoreRetrievalStoreManager), - ), - ), - If(cfg.Metrics.HeadNotifs, - Override(HeadMetricsKey, metrics.SendHeadNotifs(cfg.Metrics.Nickname)), - ), - If(cfg.Wallet.RemoteBackend != "", - Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), - ), - If(cfg.Wallet.EnableLedger, - Override(new(*ledgerwallet.LedgerWallet), ledgerwallet.NewWallet), - ), - If(cfg.Wallet.DisableLocal, - Unset(new(*wallet.LocalWallet)), - Override(new(wallet.Default), wallet.NilDefault), + Override(AddrsFactoryKey, lp2p.AddrsFactory( + 
cfg.Libp2p.AnnounceAddresses, + cfg.Libp2p.NoAnnounceAddresses)), ), - ) -} - -func ConfigStorageMiner(c interface{}) Option { - cfg, ok := c.(*config.StorageMiner) - if !ok { - return Error(xerrors.Errorf("invalid config from repo, got: %T", c)) - } - - return Options( - ConfigCommon(&cfg.Common), - - If(cfg.Dealmaking.Filter != "", - Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(dealfilter.CliStorageDealFilter(cfg.Dealmaking.Filter))), - ), - - If(cfg.Dealmaking.RetrievalFilter != "", - Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Dealmaking.RetrievalFilter))), - ), - - Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees)), - - Override(new(sectorstorage.SealerConfig), cfg.Storage), - Override(new(*storage.Miner), modules.StorageMiner(cfg.Fees)), + Override(new(dtypes.MetadataDS), modules.Datastore(cfg.Backup.DisableMetadataLog)), ) } @@ -511,11 +283,53 @@ func Repo(r repo.Repo) Option { return err } + var cfg *config.Chainstore + switch settings.nodeType { + case repo.FullNode: + cfgp, ok := c.(*config.FullNode) + if !ok { + return xerrors.Errorf("invalid config from repo, got: %T", c) + } + cfg = &cfgp.Chainstore + default: + cfg = &config.Chainstore{} + } + return Options( Override(new(repo.LockedRepo), modules.LockedRepo(lr)), // module handles closing - Override(new(dtypes.MetadataDS), modules.Datastore), - Override(new(dtypes.ChainBlockstore), modules.ChainBlockstore), + Override(new(dtypes.UniversalBlockstore), modules.UniversalBlockstore), + + If(cfg.EnableSplitstore, + If(cfg.Splitstore.ColdStoreType == "universal", + Override(new(dtypes.ColdBlockstore), From(new(dtypes.UniversalBlockstore)))), + If(cfg.Splitstore.ColdStoreType == "discard", + Override(new(dtypes.ColdBlockstore), modules.DiscardColdBlockstore)), + If(cfg.Splitstore.HotStoreType == "badger", + Override(new(dtypes.HotBlockstore), modules.BadgerHotBlockstore)), + 
Override(new(dtypes.SplitBlockstore), modules.SplitBlockstore(cfg)), + Override(new(dtypes.BasicChainBlockstore), modules.ChainSplitBlockstore), + Override(new(dtypes.BasicStateBlockstore), modules.StateSplitBlockstore), + Override(new(dtypes.BaseBlockstore), From(new(dtypes.SplitBlockstore))), + Override(new(dtypes.ExposedBlockstore), modules.ExposedSplitBlockstore), + Override(new(dtypes.GCReferenceProtector), modules.SplitBlockstoreGCReferenceProtector), + ), + If(!cfg.EnableSplitstore, + Override(new(dtypes.BasicChainBlockstore), modules.ChainFlatBlockstore), + Override(new(dtypes.BasicStateBlockstore), modules.StateFlatBlockstore), + Override(new(dtypes.BaseBlockstore), From(new(dtypes.UniversalBlockstore))), + Override(new(dtypes.ExposedBlockstore), From(new(dtypes.UniversalBlockstore))), + Override(new(dtypes.GCReferenceProtector), modules.NoopGCReferenceProtector), + ), + + Override(new(dtypes.ChainBlockstore), From(new(dtypes.BasicChainBlockstore))), + Override(new(dtypes.StateBlockstore), From(new(dtypes.BasicStateBlockstore))), + + If(os.Getenv("LOTUS_ENABLE_CHAINSTORE_FALLBACK") == "1", + Override(new(dtypes.ChainBlockstore), modules.FallbackChainBlockstore), + Override(new(dtypes.StateBlockstore), modules.FallbackStateBlockstore), + Override(SetupFallbackBlockstoresKey, modules.InitFallbackBlockstores), + ), Override(new(dtypes.ClientImportMgr), modules.ClientImportMgr), Override(new(dtypes.ClientMultiDstore), modules.ClientMultiDatastore), @@ -530,37 +344,12 @@ func Repo(r repo.Repo) Option { Override(new(*dtypes.APIAlg), modules.APISecret), - ApplyIf(isType(repo.FullNode), ConfigFullNode(c)), - ApplyIf(isType(repo.StorageMiner), ConfigStorageMiner(c)), + ApplyIf(IsType(repo.FullNode), ConfigFullNode(c)), + ApplyIf(IsType(repo.StorageMiner), ConfigStorageMiner(c)), )(settings) } } -type FullOption = Option - -func Lite(enable bool) FullOption { - return func(s *Settings) error { - s.Lite = enable - return nil - } -} - -func FullAPI(out *api.FullNode, 
fopts ...FullOption) Option { - return Options( - func(s *Settings) error { - s.nodeType = repo.FullNode - return nil - }, - Options(fopts...), - func(s *Settings) error { - resAPI := &impl.FullNodeAPI{} - s.invokes[ExtractApiKey] = fx.Populate(resAPI) - *out = resAPI - return nil - }, - ) -} - type StopFunc func(context.Context) error // New builds and starts new Filecoin node @@ -613,5 +402,22 @@ func Test() Option { Unset(RunPeerMgrKey), Unset(new(*peermgr.PeerMgr)), Override(new(beacon.Schedule), testing.RandomBeacon), + Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{})), ) } + +// For 3rd party dep injection. + +func WithRepoType(repoType repo.RepoType) func(s *Settings) error { + return func(s *Settings) error { + s.nodeType = repoType + return nil + } +} + +func WithInvokesKey(i invoke, resApi interface{}) func(s *Settings) error { + return func(s *Settings) error { + s.invokes[i] = fx.Populate(resApi) + return nil + } +} diff --git a/node/builder_chain.go b/node/builder_chain.go new file mode 100644 index 00000000000..1447a4df781 --- /dev/null +++ b/node/builder_chain.go @@ -0,0 +1,218 @@ +package node + +import ( + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-fil-markets/discovery" + discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain" + "github.com/filecoin-project/lotus/chain/beacon" + "github.com/filecoin-project/lotus/chain/exchange" + "github.com/filecoin-project/lotus/chain/gen/slashfilter" + "github.com/filecoin-project/lotus/chain/market" + "github.com/filecoin-project/lotus/chain/messagepool" + "github.com/filecoin-project/lotus/chain/messagesigner" + "github.com/filecoin-project/lotus/chain/metrics" + 
"github.com/filecoin-project/lotus/chain/stmgr" + rpcstmgr "github.com/filecoin-project/lotus/chain/stmgr/rpc" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chain/wallet" + ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" + "github.com/filecoin-project/lotus/chain/wallet/remotewallet" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/lib/peermgr" + "github.com/filecoin-project/lotus/markets/storageadapter" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/hello" + "github.com/filecoin-project/lotus/node/impl" + "github.com/filecoin-project/lotus/node/impl/full" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/lotus/paychmgr" + "github.com/filecoin-project/lotus/paychmgr/settler" +) + +// Chain node provides access to the Filecoin blockchain, by setting up a full +// validator node, or by delegating some actions to other nodes (lite mode) +var ChainNode = Options( + // Full node or lite node + // TODO: Fix offline mode + + // Consensus settings + Override(new(dtypes.DrandSchedule), modules.BuiltinDrandConfig), + Override(new(stmgr.UpgradeSchedule), stmgr.DefaultUpgradeSchedule()), + Override(new(dtypes.NetworkName), modules.NetworkName), + Override(new(modules.Genesis), modules.ErrorGenesis), + Override(new(dtypes.AfterGenesisSet), modules.SetGenesis), + Override(SetGenesisKey, modules.DoSetGenesis), + Override(new(beacon.Schedule), modules.RandomSchedule), + + // Network bootstrap + Override(new(dtypes.BootstrapPeers), modules.BuiltinBootstrap), + Override(new(dtypes.DrandBootstrap), modules.DrandBootstrap), + + // Consensus: crypto dependencies + Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), + 
Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver), + + // Consensus: VM + Override(new(vm.SyscallBuilder), vm.Syscalls), + + // Consensus: Chain storage/access + Override(new(*store.ChainStore), modules.ChainStore), + Override(new(*stmgr.StateManager), modules.StateManager), + Override(new(dtypes.ChainBitswap), modules.ChainBitswap), + Override(new(dtypes.ChainBlockService), modules.ChainBlockService), // todo: unused + + // Consensus: Chain sync + + // We don't want the SyncManagerCtor to be used as an fx constructor, but rather as a value. + // It will be called implicitly by the Syncer constructor. + Override(new(chain.SyncManagerCtor), func() chain.SyncManagerCtor { return chain.NewSyncManager }), + Override(new(*chain.Syncer), modules.NewSyncer), + Override(new(exchange.Client), exchange.NewClient), + + // Chain networking + Override(new(*hello.Service), hello.NewHelloService), + Override(new(exchange.Server), exchange.NewServer), + Override(new(*peermgr.PeerMgr), peermgr.NewPeerMgr), + + // Chain mining API dependencies + Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter), + + // Service: Message Pool + Override(new(dtypes.DefaultMaxFeeFunc), modules.NewDefaultMaxFeeFunc), + Override(new(*messagepool.MessagePool), modules.MessagePool), + Override(new(*dtypes.MpoolLocker), new(dtypes.MpoolLocker)), + + // Shared graphsync (markets, serving chain) + Override(new(dtypes.Graphsync), modules.Graphsync(config.DefaultFullNode().Client.SimultaneousTransfers)), + + // Service: Wallet + Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner), + Override(new(*wallet.LocalWallet), wallet.NewWallet), + Override(new(wallet.Default), From(new(*wallet.LocalWallet))), + Override(new(api.Wallet), From(new(wallet.MultiWallet))), + + // Service: Payment channels + Override(new(paychmgr.PaychAPI), From(new(modules.PaychAPI))), + Override(new(*paychmgr.Store), modules.NewPaychStore), + Override(new(*paychmgr.Manager), modules.NewManager), + 
Override(HandlePaymentChannelManagerKey, modules.HandlePaychManager), + Override(SettlePaymentChannelsKey, settler.SettlePaymentChannels), + + // Markets (common) + Override(new(*discoveryimpl.Local), modules.NewLocalDiscovery), + + // Markets (retrieval) + Override(new(discovery.PeerResolver), modules.RetrievalResolver), + Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient), + Override(new(dtypes.ClientDataTransfer), modules.NewClientGraphsyncDataTransfer), + + // Markets (storage) + Override(new(*market.FundManager), market.NewFundManager), + Override(new(dtypes.ClientDatastore), modules.NewClientDatastore), + Override(new(storagemarket.StorageClient), modules.StorageClient), + Override(new(storagemarket.StorageClientNode), storageadapter.NewClientNodeAdapter), + Override(HandleMigrateClientFundsKey, modules.HandleMigrateClientFunds), + + Override(new(*full.GasPriceCache), full.NewGasPriceCache), + + // Lite node API + ApplyIf(isLiteNode, + Override(new(messagepool.Provider), messagepool.NewProviderLite), + Override(new(messagesigner.MpoolNonceAPI), From(new(modules.MpoolNonceAPI))), + Override(new(full.ChainModuleAPI), From(new(api.Gateway))), + Override(new(full.GasModuleAPI), From(new(api.Gateway))), + Override(new(full.MpoolModuleAPI), From(new(api.Gateway))), + Override(new(full.StateModuleAPI), From(new(api.Gateway))), + Override(new(stmgr.StateManagerAPI), rpcstmgr.NewRPCStateManager), + ), + + // Full node API / service startup + ApplyIf(isFullNode, + Override(new(messagepool.Provider), messagepool.NewProvider), + Override(new(messagesigner.MpoolNonceAPI), From(new(*messagepool.MessagePool))), + Override(new(full.ChainModuleAPI), From(new(full.ChainModule))), + Override(new(full.GasModuleAPI), From(new(full.GasModule))), + Override(new(full.MpoolModuleAPI), From(new(full.MpoolModule))), + Override(new(full.StateModuleAPI), From(new(full.StateModule))), + Override(new(stmgr.StateManagerAPI), From(new(*stmgr.StateManager))), + + 
Override(RunHelloKey, modules.RunHello), + Override(RunChainExchangeKey, modules.RunChainExchange), + Override(RunPeerMgrKey, modules.RunPeerMgr), + Override(HandleIncomingMessagesKey, modules.HandleIncomingMessages), + Override(HandleIncomingBlocksKey, modules.HandleIncomingBlocks), + ), +) + +func ConfigFullNode(c interface{}) Option { + cfg, ok := c.(*config.FullNode) + if !ok { + return Error(xerrors.Errorf("invalid config from repo, got: %T", c)) + } + + enableLibp2pNode := true // always enable libp2p for full nodes + + ipfsMaddr := cfg.Client.IpfsMAddr + return Options( + ConfigCommon(&cfg.Common, enableLibp2pNode), + + If(cfg.Client.UseIpfs, + Override(new(dtypes.ClientBlockstore), modules.IpfsClientBlockstore(ipfsMaddr, cfg.Client.IpfsOnlineMode)), + If(cfg.Client.IpfsUseForRetrieval, + Override(new(dtypes.ClientRetrievalStoreManager), modules.ClientBlockstoreRetrievalStoreManager), + ), + ), + Override(new(dtypes.Graphsync), modules.Graphsync(cfg.Client.SimultaneousTransfers)), + + If(cfg.Metrics.HeadNotifs, + Override(HeadMetricsKey, metrics.SendHeadNotifs(cfg.Metrics.Nickname)), + ), + + If(cfg.Wallet.RemoteBackend != "", + Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), + ), + If(cfg.Wallet.EnableLedger, + Override(new(*ledgerwallet.LedgerWallet), ledgerwallet.NewWallet), + ), + If(cfg.Wallet.DisableLocal, + Unset(new(*wallet.LocalWallet)), + Override(new(wallet.Default), wallet.NilDefault), + ), + ) +} + +type FullOption = Option + +func Lite(enable bool) FullOption { + return func(s *Settings) error { + s.Lite = enable + return nil + } +} + +func FullAPI(out *api.FullNode, fopts ...FullOption) Option { + return Options( + func(s *Settings) error { + s.nodeType = repo.FullNode + s.enableLibp2pNode = true + return nil + }, + Options(fopts...), + func(s *Settings) error { + resAPI := &impl.FullNodeAPI{} + s.invokes[ExtractApiKey] = fx.Populate(resAPI) + *out = resAPI + return nil + }, + ) +} diff 
--git a/node/builder_miner.go b/node/builder_miner.go new file mode 100644 index 00000000000..0c0f9d15af9 --- /dev/null +++ b/node/builder_miner.go @@ -0,0 +1,223 @@ +package node + +import ( + "errors" + "time" + + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/markets/retrievaladapter" + storage2 "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/gen" + "github.com/filecoin-project/lotus/chain/gen/slashfilter" + sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/markets/dealfilter" + "github.com/filecoin-project/lotus/markets/storageadapter" + "github.com/filecoin-project/lotus/miner" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/impl" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/lotus/storage" + "github.com/filecoin-project/lotus/storage/sectorblocks" +) + +var MinerNode = Options( + Override(new(sectorstorage.StorageAuth), modules.StorageAuth), + + // Actor config + Override(new(dtypes.MinerAddress), modules.MinerAddress), + Override(new(dtypes.MinerID), modules.MinerID), + 
Override(new(abi.RegisteredSealProof), modules.SealProofType), + Override(new(dtypes.NetworkName), modules.StorageNetworkName), + + // Mining / proving + Override(new(*storage.AddressSelector), modules.AddressSelector(nil)), +) + +func ConfigStorageMiner(c interface{}) Option { + cfg, ok := c.(*config.StorageMiner) + if !ok { + return Error(xerrors.Errorf("invalid config from repo, got: %T", c)) + } + + pricingConfig := cfg.Dealmaking.RetrievalPricing + if pricingConfig.Strategy == config.RetrievalPricingExternalMode { + if pricingConfig.External == nil { + return Error(xerrors.New("retrieval pricing policy has been set to external but external policy config is nil")) + } + + if pricingConfig.External.Path == "" { + return Error(xerrors.New("retrieval pricing policy has been set to external but external script path is empty")) + } + } else if pricingConfig.Strategy != config.RetrievalPricingDefaultMode { + return Error(xerrors.New("retrieval pricing policy must be either default or external")) + } + + enableLibp2pNode := cfg.Subsystems.EnableMarkets // we enable libp2p nodes if the storage market subsystem is enabled, otherwise we don't + + return Options( + ConfigCommon(&cfg.Common, enableLibp2pNode), + + Override(new(stores.LocalStorage), From(new(repo.LockedRepo))), + Override(new(*stores.Local), modules.LocalStorage), + Override(new(*stores.Remote), modules.RemoteStorage), + Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)), + + If(!cfg.Subsystems.EnableMining, + If(cfg.Subsystems.EnableSealing, Error(xerrors.Errorf("sealing can only be enabled on a mining node"))), + If(cfg.Subsystems.EnableSectorStorage, Error(xerrors.Errorf("sealing can only be enabled on a mining node"))), + ), + If(cfg.Subsystems.EnableMining, + If(!cfg.Subsystems.EnableSealing, Error(xerrors.Errorf("sealing can't be disabled on a mining node yet"))), + If(!cfg.Subsystems.EnableSectorStorage, Error(xerrors.Errorf("sealing can't be disabled on a 
mining node yet"))), + + // Sector storage: Proofs + Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), + Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver), + Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))), + + // Sealing (todo should be under EnableSealing, but storagefsm is currently bundled with storage.Miner) + Override(new(sealing.SectorIDCounter), modules.SectorIDCounter), + Override(GetParamsKey, modules.GetParams), + + Override(new(dtypes.SetSealingConfigFunc), modules.NewSetSealConfigFunc), + Override(new(dtypes.GetSealingConfigFunc), modules.NewGetSealConfigFunc), + + // Mining / proving + Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter), + Override(new(*storage.Miner), modules.StorageMiner(config.DefaultStorageMiner().Fees)), + Override(new(*miner.Miner), modules.SetupBlockProducer), + Override(new(gen.WinningPoStProver), storage.NewWinningPoStProver), + Override(new(*storage.Miner), modules.StorageMiner(cfg.Fees)), + Override(new(sectorblocks.SectorBuilder), From(new(*storage.Miner))), + ), + + If(cfg.Subsystems.EnableSectorStorage, + // Sector storage + Override(new(*stores.Index), stores.NewIndex), + Override(new(stores.SectorIndex), From(new(*stores.Index))), + Override(new(*sectorstorage.Manager), modules.SectorStorage), + Override(new(sectorstorage.Unsealer), From(new(*sectorstorage.Manager))), + Override(new(sectorstorage.SectorManager), From(new(*sectorstorage.Manager))), + Override(new(storiface.WorkerReturn), From(new(sectorstorage.SectorManager))), + ), + + If(!cfg.Subsystems.EnableSectorStorage, + Override(new(sectorstorage.StorageAuth), modules.StorageAuthWithURL(cfg.Subsystems.SectorIndexApiInfo)), + Override(new(modules.MinerStorageService), modules.ConnectStorageService(cfg.Subsystems.SectorIndexApiInfo)), + Override(new(sectorstorage.Unsealer), From(new(modules.MinerStorageService))), + Override(new(sectorblocks.SectorBuilder), From(new(modules.MinerStorageService))), + ), + 
If(!cfg.Subsystems.EnableSealing, + Override(new(modules.MinerSealingService), modules.ConnectSealingService(cfg.Subsystems.SealerApiInfo)), + Override(new(stores.SectorIndex), From(new(modules.MinerSealingService))), + ), + + If(cfg.Subsystems.EnableMarkets, + // Markets + Override(new(dtypes.StagingMultiDstore), modules.StagingMultiDatastore), + Override(new(dtypes.StagingBlockstore), modules.StagingBlockstore), + Override(new(dtypes.StagingDAG), modules.StagingDAG), + Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfers)), + Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore), + Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks), + + // Markets (retrieval deps) + Override(new(sectorstorage.PieceProvider), sectorstorage.NewPieceProvider), + Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(config.DealmakingConfig{ + RetrievalPricing: &config.RetrievalPricing{ + Strategy: config.RetrievalPricingDefaultMode, + Default: &config.RetrievalPricingDefault{}, + }, + })), + Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)), + + // Markets (retrieval) + Override(new(retrievalmarket.RetrievalProviderNode), retrievaladapter.NewRetrievalProviderNode), + Override(new(rmnet.RetrievalMarketNetwork), modules.RetrievalNetwork), + Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider), + Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)), + Override(HandleRetrievalKey, modules.HandleRetrieval), + + // Markets (storage) + Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDAGServiceDataTransfer), + Override(new(*storedask.StoredAsk), modules.NewStorageAsk), + Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(nil)), + Override(new(storagemarket.StorageProvider), modules.StorageProvider), + Override(new(*storageadapter.DealPublisher), 
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{})), + Override(HandleMigrateProviderFundsKey, modules.HandleMigrateProviderFunds), + Override(HandleDealsKey, modules.HandleDeals), + + // Config (todo: get a real property system) + Override(new(dtypes.ConsiderOnlineStorageDealsConfigFunc), modules.NewConsiderOnlineStorageDealsConfigFunc), + Override(new(dtypes.SetConsiderOnlineStorageDealsConfigFunc), modules.NewSetConsideringOnlineStorageDealsFunc), + Override(new(dtypes.ConsiderOnlineRetrievalDealsConfigFunc), modules.NewConsiderOnlineRetrievalDealsConfigFunc), + Override(new(dtypes.SetConsiderOnlineRetrievalDealsConfigFunc), modules.NewSetConsiderOnlineRetrievalDealsConfigFunc), + Override(new(dtypes.StorageDealPieceCidBlocklistConfigFunc), modules.NewStorageDealPieceCidBlocklistConfigFunc), + Override(new(dtypes.SetStorageDealPieceCidBlocklistConfigFunc), modules.NewSetStorageDealPieceCidBlocklistConfigFunc), + Override(new(dtypes.ConsiderOfflineStorageDealsConfigFunc), modules.NewConsiderOfflineStorageDealsConfigFunc), + Override(new(dtypes.SetConsiderOfflineStorageDealsConfigFunc), modules.NewSetConsideringOfflineStorageDealsFunc), + Override(new(dtypes.ConsiderOfflineRetrievalDealsConfigFunc), modules.NewConsiderOfflineRetrievalDealsConfigFunc), + Override(new(dtypes.SetConsiderOfflineRetrievalDealsConfigFunc), modules.NewSetConsiderOfflineRetrievalDealsConfigFunc), + Override(new(dtypes.ConsiderVerifiedStorageDealsConfigFunc), modules.NewConsiderVerifiedStorageDealsConfigFunc), + Override(new(dtypes.SetConsiderVerifiedStorageDealsConfigFunc), modules.NewSetConsideringVerifiedStorageDealsFunc), + Override(new(dtypes.ConsiderUnverifiedStorageDealsConfigFunc), modules.NewConsiderUnverifiedStorageDealsConfigFunc), + Override(new(dtypes.SetConsiderUnverifiedStorageDealsConfigFunc), modules.NewSetConsideringUnverifiedStorageDealsFunc), + Override(new(dtypes.SetExpectedSealDurationFunc), modules.NewSetExpectedSealDurationFunc), + 
Override(new(dtypes.GetExpectedSealDurationFunc), modules.NewGetExpectedSealDurationFunc), + Override(new(dtypes.SetMaxDealStartDelayFunc), modules.NewSetMaxDealStartDelayFunc), + Override(new(dtypes.GetMaxDealStartDelayFunc), modules.NewGetMaxDealStartDelayFunc), + + If(cfg.Dealmaking.Filter != "", + Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(dealfilter.CliStorageDealFilter(cfg.Dealmaking.Filter))), + ), + + If(cfg.Dealmaking.RetrievalFilter != "", + Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Dealmaking.RetrievalFilter))), + ), + Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(&cfg.Fees, storageadapter.PublishMsgConfig{ + Period: time.Duration(cfg.Dealmaking.PublishMsgPeriod), + MaxDealsPerMsg: cfg.Dealmaking.MaxDealsPerPublishMsg, + })), + Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees, &cfg.Dealmaking)), + ), + + Override(new(sectorstorage.SealerConfig), cfg.Storage), + Override(new(*storage.AddressSelector), modules.AddressSelector(&cfg.Addresses)), + ) +} + +func StorageMiner(out *api.StorageMiner, subsystemsCfg config.MinerSubsystemConfig) Option { + return Options( + ApplyIf(func(s *Settings) bool { return s.Config }, + Error(errors.New("the StorageMiner option must be set before Config option")), + ), + + func(s *Settings) error { + s.nodeType = repo.StorageMiner + s.enableLibp2pNode = subsystemsCfg.EnableMarkets + return nil + }, + + func(s *Settings) error { + resAPI := &impl.StorageMinerAPI{} + s.invokes[ExtractApiKey] = fx.Populate(resAPI) + *out = resAPI + return nil + }, + ) +} diff --git a/node/config/def.go b/node/config/def.go index 1298ed45a02..7042cf64493 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -6,13 +6,26 @@ import ( "github.com/ipfs/go-cid" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + miner5 
"github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" ) +const ( + // RetrievalPricingDefault configures the node to use the default retrieval pricing policy. + RetrievalPricingDefaultMode = "default" + // RetrievalPricingExternal configures the node to use the external retrieval pricing script + // configured by the user. + RetrievalPricingExternalMode = "external" +) + // Common is common config between full node and miner type Common struct { API API + Backup Backup Libp2p Libp2p Pubsub Pubsub } @@ -20,33 +33,90 @@ type Common struct { // FullNode is a full node config type FullNode struct { Common - Client Client - Metrics Metrics - Wallet Wallet + Client Client + Metrics Metrics + Wallet Wallet + Fees FeeConfig + Chainstore Chainstore } // // Common +type Backup struct { + DisableMetadataLog bool +} + // StorageMiner is a miner config type StorageMiner struct { Common + Subsystems MinerSubsystemConfig Dealmaking DealmakingConfig Sealing SealingConfig Storage sectorstorage.SealerConfig Fees MinerFeeConfig + Addresses MinerAddressConfig +} + +type MinerSubsystemConfig struct { + EnableMining bool + EnableSealing bool + EnableSectorStorage bool + EnableMarkets bool + + SealerApiInfo string // if EnableSealing == false + SectorIndexApiInfo string // if EnableSectorStorage == false } type DealmakingConfig struct { - ConsiderOnlineStorageDeals bool - ConsiderOfflineStorageDeals bool - ConsiderOnlineRetrievalDeals bool - ConsiderOfflineRetrievalDeals bool - PieceCidBlocklist []cid.Cid - ExpectedSealDuration Duration + ConsiderOnlineStorageDeals bool + ConsiderOfflineStorageDeals bool + ConsiderOnlineRetrievalDeals bool + ConsiderOfflineRetrievalDeals bool + ConsiderVerifiedStorageDeals bool + ConsiderUnverifiedStorageDeals bool + PieceCidBlocklist []cid.Cid + ExpectedSealDuration Duration + // Maximum amount of time proposed deal 
StartEpoch can be in future + MaxDealStartDelay Duration + // The amount of time to wait for more deals to arrive before + // publishing + PublishMsgPeriod Duration + // The maximum number of deals to include in a single PublishStorageDeals + // message + MaxDealsPerPublishMsg uint64 + // The maximum collateral that the provider will put up against a deal, + // as a multiplier of the minimum collateral bound + MaxProviderCollateralMultiplier uint64 + + // The maximum number of parallel online data transfers (storage+retrieval) + SimultaneousTransfers uint64 Filter string RetrievalFilter string + + RetrievalPricing *RetrievalPricing +} + +type RetrievalPricing struct { + Strategy string // possible values: "default", "external" + + Default *RetrievalPricingDefault + External *RetrievalPricingExternal +} + +type RetrievalPricingExternal struct { + // Path of the external script that will be run to price a retrieval deal. + // This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external". + Path string +} + +type RetrievalPricingDefault struct { + // VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal + // of a payloadCid that belongs to a verified storage deal. + // This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default". 
+ // default value is true + VerifiedDealsFreeTransfer bool } type SealingConfig struct { @@ -60,16 +130,91 @@ type SealingConfig struct { MaxSealingSectorsForDeals uint64 WaitDealsDelay Duration + + AlwaysKeepUnsealedCopy bool + + // Run sector finalization before submitting sector proof to the chain + FinalizeEarly bool + + // Whether to use available miner balance for sector collateral instead of sending it with each message + CollateralFromMinerBalance bool + // Minimum available balance to keep in the miner actor before sending it with messages + AvailableBalanceBuffer types.FIL + // Don't send collateral with messages even if there is no available balance in the miner actor + DisableCollateralFallback bool + + // enable / disable precommit batching (takes effect after nv13) + BatchPreCommits bool + // maximum precommit batch size - batches will be sent immediately above this size + MaxPreCommitBatch int + // how long to wait before submitting a batch after crossing the minimum batch size + PreCommitBatchWait Duration + // time buffer for forceful batch submission before sectors/deal in batch would start expiring + PreCommitBatchSlack Duration + + // enable / disable commit aggregation (takes effect after nv13) + AggregateCommits bool + // maximum batched commit size - batches will be sent immediately above this size + MinCommitBatch int + MaxCommitBatch int + // how long to wait before submitting a batch after crossing the minimum batch size + CommitBatchWait Duration + // time buffer for forceful batch submission before sectors/deals in batch would start expiring + CommitBatchSlack Duration + + // network BaseFee below which to stop doing commit aggregation, instead + // submitting proofs to the chain individually + AggregateAboveBaseFee types.FIL + + TerminateBatchMax uint64 + TerminateBatchMin uint64 + TerminateBatchWait Duration + + // Keep this many sectors in sealing pipeline, start CC if needed + // todo TargetSealingSectors uint64 + + // todo 
TargetSectors - stop auto-pleding new sectors after this many sectors are sealed, default CC upgrade for deals sectors if above +} + +type BatchFeeConfig struct { + Base types.FIL + PerSector types.FIL +} + +func (b *BatchFeeConfig) FeeForSectors(nSectors int) abi.TokenAmount { + return big.Add(big.Int(b.Base), big.Mul(big.NewInt(int64(nSectors)), big.Int(b.PerSector))) } type MinerFeeConfig struct { - MaxPreCommitGasFee types.FIL - MaxCommitGasFee types.FIL + MaxPreCommitGasFee types.FIL + MaxCommitGasFee types.FIL + + // maxBatchFee = maxBase + maxPerSector * nSectors + MaxPreCommitBatchGasFee BatchFeeConfig + MaxCommitBatchGasFee BatchFeeConfig + + MaxTerminateGasFee types.FIL MaxWindowPoStGasFee types.FIL MaxPublishDealsFee types.FIL MaxMarketBalanceAddFee types.FIL } +type MinerAddressConfig struct { + PreCommitControl []string + CommitControl []string + TerminateControl []string + DealPublishControl []string + + // DisableOwnerFallback disables usage of the owner address for messages + // sent automatically + DisableOwnerFallback bool + // DisableWorkerFallback disables usage of the worker address for messages + // sent automatically, if control addresses are configured. + // A control address that doesn't have enough funds will still be chosen + // over the worker address if this flag is set. 
+ DisableWorkerFallback bool +} + // API contains configs for API endpoint type API struct { ListenAddress string @@ -91,9 +236,23 @@ type Libp2p struct { } type Pubsub struct { - Bootstrapper bool - DirectPeers []string - RemoteTracer string + Bootstrapper bool + DirectPeers []string + IPColocationWhitelist []string + RemoteTracer string +} + +type Chainstore struct { + EnableSplitstore bool + Splitstore Splitstore +} + +type Splitstore struct { + ColdStoreType string + HotStoreType string + MarkSetType string + + HotStoreMessageRetention uint64 } // // Full Node @@ -104,9 +263,11 @@ type Metrics struct { } type Client struct { - UseIpfs bool - IpfsMAddr string - IpfsUseForRetrieval bool + UseIpfs bool + IpfsOnlineMode bool + IpfsMAddr string + IpfsUseForRetrieval bool + SimultaneousTransfers uint64 } type Wallet struct { @@ -115,6 +276,10 @@ type Wallet struct { DisableLocal bool } +type FeeConfig struct { + DefaultMaxFee types.FIL +} + func defCommon() Common { return Common{ API: API{ @@ -142,10 +307,27 @@ func defCommon() Common { } +var DefaultDefaultMaxFee = types.MustParseFIL("0.07") +var DefaultSimultaneousTransfers = uint64(20) + // DefaultFullNode returns the default config func DefaultFullNode() *FullNode { return &FullNode{ Common: defCommon(), + Fees: FeeConfig{ + DefaultMaxFee: DefaultDefaultMaxFee, + }, + Client: Client{ + SimultaneousTransfers: DefaultSimultaneousTransfers, + }, + Chainstore: Chainstore{ + EnableSplitstore: false, + Splitstore: Splitstore{ + ColdStoreType: "universal", + HotStoreType: "badger", + MarkSetType: "map", + }, + }, } } @@ -158,6 +340,29 @@ func DefaultStorageMiner() *StorageMiner { MaxSealingSectors: 0, MaxSealingSectorsForDeals: 0, WaitDealsDelay: Duration(time.Hour * 6), + AlwaysKeepUnsealedCopy: true, + FinalizeEarly: false, + + CollateralFromMinerBalance: false, + AvailableBalanceBuffer: types.FIL(big.Zero()), + DisableCollateralFallback: false, + + BatchPreCommits: true, + MaxPreCommitBatch: 
miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors + PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket + PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration + + AggregateCommits: true, + MinCommitBatch: miner5.MinAggregatedSectors, // per FIP13, we must have at least four proofs to aggregate, where 4 is the cross over point where aggregation wins out on single provecommit gas costs + MaxCommitBatch: miner5.MaxAggregatedSectors, // maximum 819 sectors, this is the maximum aggregation per FIP13 + CommitBatchWait: Duration(24 * time.Hour), // this can be up to 30 days + CommitBatchSlack: Duration(1 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration + + AggregateAboveBaseFee: types.FIL(types.BigMul(types.PicoFil, types.NewInt(150))), // 0.15 nFIL + + TerminateBatchMin: 1, + TerminateBatchMax: 100, + TerminateBatchWait: Duration(5 * time.Minute), }, Storage: sectorstorage.SealerConfig{ @@ -170,25 +375,71 @@ func DefaultStorageMiner() *StorageMiner { // Default to 10 - tcp should still be able to figure this out, and // it's the ratio between 10gbit / 1gbit ParallelFetchLimit: 10, + + // By default use the hardware resource filtering strategy. 
+ ResourceFiltering: sectorstorage.ResourceFilteringHardware, }, Dealmaking: DealmakingConfig{ - ConsiderOnlineStorageDeals: true, - ConsiderOfflineStorageDeals: true, - ConsiderOnlineRetrievalDeals: true, - ConsiderOfflineRetrievalDeals: true, - PieceCidBlocklist: []cid.Cid{}, + ConsiderOnlineStorageDeals: true, + ConsiderOfflineStorageDeals: true, + ConsiderOnlineRetrievalDeals: true, + ConsiderOfflineRetrievalDeals: true, + ConsiderVerifiedStorageDeals: true, + ConsiderUnverifiedStorageDeals: true, + PieceCidBlocklist: []cid.Cid{}, // TODO: It'd be nice to set this based on sector size - ExpectedSealDuration: Duration(time.Hour * 24), + MaxDealStartDelay: Duration(time.Hour * 24 * 14), + ExpectedSealDuration: Duration(time.Hour * 24), + PublishMsgPeriod: Duration(time.Hour), + MaxDealsPerPublishMsg: 8, + MaxProviderCollateralMultiplier: 2, + + SimultaneousTransfers: DefaultSimultaneousTransfers, + + RetrievalPricing: &RetrievalPricing{ + Strategy: RetrievalPricingDefaultMode, + Default: &RetrievalPricingDefault{ + VerifiedDealsFreeTransfer: true, + }, + External: &RetrievalPricingExternal{ + Path: "", + }, + }, + }, + + Subsystems: MinerSubsystemConfig{ + EnableMining: true, + EnableSealing: true, + EnableSectorStorage: true, + EnableMarkets: true, }, Fees: MinerFeeConfig{ - MaxPreCommitGasFee: types.MustParseFIL("0.025"), - MaxCommitGasFee: types.MustParseFIL("0.05"), + MaxPreCommitGasFee: types.MustParseFIL("0.025"), + MaxCommitGasFee: types.MustParseFIL("0.05"), + + MaxPreCommitBatchGasFee: BatchFeeConfig{ + Base: types.MustParseFIL("0"), + PerSector: types.MustParseFIL("0.02"), + }, + MaxCommitBatchGasFee: BatchFeeConfig{ + Base: types.MustParseFIL("0"), + PerSector: types.MustParseFIL("0.03"), // enough for 6 agg and 1nFIL base fee + }, + + MaxTerminateGasFee: types.MustParseFIL("0.5"), MaxWindowPoStGasFee: types.MustParseFIL("5"), MaxPublishDealsFee: types.MustParseFIL("0.05"), MaxMarketBalanceAddFee: types.MustParseFIL("0.007"), }, + + Addresses: 
MinerAddressConfig{ + PreCommitControl: []string{}, + CommitControl: []string{}, + TerminateControl: []string{}, + DealPublishControl: []string{}, + }, } cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http" cfg.Common.API.RemoteListenAddress = "127.0.0.1:2345" diff --git a/node/hello/cbor_gen.go b/node/hello/cbor_gen.go index 3b85e3a7466..7669f60ede1 100644 --- a/node/hello/cbor_gen.go +++ b/node/hello/cbor_gen.go @@ -5,6 +5,7 @@ package hello import ( "fmt" "io" + "sort" abi "github.com/filecoin-project/go-state-types/abi" cid "github.com/ipfs/go-cid" @@ -13,6 +14,8 @@ import ( ) var _ = xerrors.Errorf +var _ = cid.Undef +var _ = sort.Sort var lengthBufHelloMessage = []byte{132} diff --git a/node/hello/hello.go b/node/hello/hello.go index 05d53de06af..e31b7d25b47 100644 --- a/node/hello/hello.go +++ b/node/hello/hello.go @@ -5,7 +5,7 @@ import ( "time" "github.com/filecoin-project/go-state-types/abi" - xerrors "golang.org/x/xerrors" + "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/big" "github.com/ipfs/go-cid" @@ -13,7 +13,7 @@ import ( "github.com/libp2p/go-libp2p-core/host" inet "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" - protocol "github.com/libp2p/go-libp2p-core/protocol" + "github.com/libp2p/go-libp2p-core/protocol" cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/lotus/build" @@ -23,6 +23,8 @@ import ( "github.com/filecoin-project/lotus/lib/peermgr" ) +// TODO(TEST): missing test coverage. 
+ const ProtocolID = "/fil/hello/1.0.0" var log = logging.Logger("hello") @@ -33,12 +35,14 @@ type HelloMessage struct { HeaviestTipSetWeight big.Int GenesisHash cid.Cid } + type LatencyMessage struct { TArrival int64 TSent int64 } type NewStreamFunc func(context.Context, peer.ID, ...protocol.ID) (inet.Stream, error) + type Service struct { h host.Host @@ -62,7 +66,6 @@ func NewHelloService(h host.Host, cs *store.ChainStore, syncer *chain.Syncer, pm } func (hs *Service) HandleStream(s inet.Stream) { - var hmsg HelloMessage if err := cborutil.ReadCborRPC(s, &hmsg); err != nil { log.Infow("failed to read hello message, disconnecting", "error", err) @@ -77,7 +80,7 @@ func (hs *Service) HandleStream(s inet.Stream) { "hash", hmsg.GenesisHash) if hmsg.GenesisHash != hs.syncer.Genesis.Cids()[0] { - log.Warnf("other peer has different genesis! (%s)", hmsg.GenesisHash) + log.Debugf("other peer has different genesis! (%s)", hmsg.GenesisHash) _ = s.Conn().Close() return } @@ -118,10 +121,9 @@ func (hs *Service) HandleStream(s inet.Stream) { hs.h.ConnManager().TagPeer(s.Conn().RemotePeer(), "fcpeer", 10) // don't bother informing about genesis - log.Infof("Got new tipset through Hello: %s from %s", ts.Cids(), s.Conn().RemotePeer()) + log.Debugf("Got new tipset through Hello: %s from %s", ts.Cids(), s.Conn().RemotePeer()) hs.syncer.InformNewHead(s.Conn().RemotePeer(), ts) } - } func (hs *Service) SayHello(ctx context.Context, pid peer.ID) error { @@ -161,7 +163,7 @@ func (hs *Service) SayHello(ctx context.Context, pid peer.ID) error { _ = s.SetReadDeadline(build.Clock.Now().Add(10 * time.Second)) err := cborutil.ReadCborRPC(s, lmsg) if err != nil { - log.Infow("reading latency message", "error", err) + log.Debugw("reading latency message", "error", err) } t3 := build.Clock.Now() @@ -177,7 +179,9 @@ func (hs *Service) SayHello(ctx context.Context, pid peer.ID) error { t2 := time.Unix(0, lmsg.TSent) offset := t0.Sub(t1) + t3.Sub(t2) offset /= 2 - log.Infow("time offset", 
"offset", offset.Seconds(), "peerid", pid.String()) + if offset > 5*time.Second || offset < -5*time.Second { + log.Infow("time offset", "offset", offset.Seconds(), "peerid", pid.String()) + } } } }() diff --git a/node/impl/client/client.go b/node/impl/client/client.go index 1e337495059..7ba6463e607 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -6,6 +6,10 @@ import ( "fmt" "io" "os" + "sort" + "time" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "golang.org/x/xerrors" @@ -29,38 +33,43 @@ import ( "github.com/ipld/go-ipld-prime/traversal/selector/builder" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/peer" + "github.com/multiformats/go-multibase" mh "github.com/multiformats/go-multihash" "go.uber.org/fx" "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-commp-utils/ffiwrapper" + "github.com/filecoin-project/go-commp-utils/writer" datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-fil-markets/discovery" - "github.com/filecoin-project/go-fil-markets/pieceio" "github.com/filecoin-project/go-fil-markets/retrievalmarket" rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/shared" "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" "github.com/filecoin-project/go-multistore" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/v3/actors/builtin/market" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/commp" 
"github.com/filecoin-project/lotus/markets/utils" "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/impl/paych" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo/importmgr" + "github.com/filecoin-project/lotus/node/repo/retrievalstoremgr" ) var DefaultHashFunction = uint64(mh.BLAKE2B_MIN + 31) -const dealStartBufferHours uint64 = 49 +// 8 days ~= SealDuration + PreCommit + MaxProveCommitDuration + 8 hour buffer +const dealStartBufferHours uint64 = 8 * 24 type API struct { fx.In @@ -76,6 +85,7 @@ type API struct { Chain *store.ChainStore Imports dtypes.ClientImportMgr + Mds dtypes.ClientMultiDstore CombinedBstore dtypes.ClientBlockstore // TODO: try to remove RetrievalStoreMgr dtypes.ClientRetrievalStoreManager @@ -88,7 +98,13 @@ func calcDealExpiration(minDuration uint64, md *dline.Info, startEpoch abi.Chain minExp := startEpoch + abi.ChainEpoch(minDuration) // Align on miners ProvingPeriodBoundary - return minExp + md.WPoStProvingPeriod - (minExp % md.WPoStProvingPeriod) + (md.PeriodStart % md.WPoStProvingPeriod) - 1 + exp := minExp + md.WPoStProvingPeriod - (minExp % md.WPoStProvingPeriod) + (md.PeriodStart % md.WPoStProvingPeriod) - 1 + // Should only be possible for miners created around genesis + for exp < minExp { + exp += md.WPoStProvingPeriod + } + + return exp } func (a *API) imgr() *importmgr.Mgr { @@ -96,8 +112,23 @@ func (a *API) imgr() *importmgr.Mgr { } func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) { + return a.dealStarter(ctx, params, false) +} + +func (a *API) ClientStatelessDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) { + return a.dealStarter(ctx, params, true) +} + +func (a *API) dealStarter(ctx context.Context, params *api.StartDealParams, isStateless bool) (*cid.Cid, error) { var storeID *multistore.StoreID - if params.Data.TransferType == storagemarket.TTGraphsync { + if 
isStateless { + if params.Data.TransferType != storagemarket.TTManual { + return nil, xerrors.Errorf("invalid transfer type %s for stateless storage deal", params.Data.TransferType) + } + if !params.EpochPrice.IsZero() { + return nil, xerrors.New("stateless storage deals can only be initiated with storage price of 0") + } + } else if params.Data.TransferType == storagemarket.TTGraphsync { importIDs := a.imgr().List() for _, importID := range importIDs { info, err := a.imgr().Info(importID) @@ -120,12 +151,12 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) walletKey, err := a.StateAccountKey(ctx, params.Wallet, types.EmptyTSK) if err != nil { - return nil, xerrors.Errorf("failed resolving params.Wallet addr: %w", params.Wallet) + return nil, xerrors.Errorf("failed resolving params.Wallet addr (%s): %w", params.Wallet, err) } exist, err := a.WalletHas(ctx, walletKey) if err != nil { - return nil, xerrors.Errorf("failed getting addr from wallet: %w", params.Wallet) + return nil, xerrors.Errorf("failed getting addr from wallet (%s): %w", params.Wallet, err) } if !exist { return nil, xerrors.Errorf("provided address doesn't exist in wallet") @@ -141,17 +172,10 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) return nil, xerrors.Errorf("failed getting miner's deadline info: %w", err) } - rt, err := ffiwrapper.SealProofTypeFromSectorSize(mi.SectorSize) - if err != nil { - return nil, xerrors.Errorf("bad sector size: %w", err) - } - if uint64(params.Data.PieceSize.Padded()) > uint64(mi.SectorSize) { return nil, xerrors.New("data doesn't fit in a sector") } - providerInfo := utils.NewStorageProviderInfo(params.Miner, mi.Worker, mi.SectorSize, *mi.PeerId, mi.Multiaddrs) - dealStart := params.DealStartEpoch if dealStart <= 0 { // unset, or explicitly 'epoch undefined' ts, err := a.ChainHead(ctx) @@ -163,25 +187,122 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) dealStart = 
ts.Height() + abi.ChainEpoch(dealStartBufferHours*blocksPerHour) // TODO: Get this from storage ask } - result, err := a.SMDealClient.ProposeStorageDeal(ctx, storagemarket.ProposeStorageDealParams{ - Addr: params.Wallet, - Info: &providerInfo, - Data: params.Data, - StartEpoch: dealStart, - EndEpoch: calcDealExpiration(params.MinBlocksDuration, md, dealStart), - Price: params.EpochPrice, - Collateral: params.ProviderCollateral, - Rt: rt, - FastRetrieval: params.FastRetrieval, - VerifiedDeal: params.VerifiedDeal, - StoreID: storeID, - }) + networkVersion, err := a.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return nil, xerrors.Errorf("failed to get network version: %w", err) + } + + st, err := miner.PreferredSealProofTypeFromWindowPoStType(networkVersion, mi.WindowPoStProofType) + if err != nil { + return nil, xerrors.Errorf("failed to get seal proof type: %w", err) + } + + // regular flow + if !isStateless { + providerInfo := utils.NewStorageProviderInfo(params.Miner, mi.Worker, mi.SectorSize, *mi.PeerId, mi.Multiaddrs) + + result, err := a.SMDealClient.ProposeStorageDeal(ctx, storagemarket.ProposeStorageDealParams{ + Addr: params.Wallet, + Info: &providerInfo, + Data: params.Data, + StartEpoch: dealStart, + EndEpoch: calcDealExpiration(params.MinBlocksDuration, md, dealStart), + Price: params.EpochPrice, + Collateral: params.ProviderCollateral, + Rt: st, + FastRetrieval: params.FastRetrieval, + VerifiedDeal: params.VerifiedDeal, + StoreID: storeID, + }) + + if err != nil { + return nil, xerrors.Errorf("failed to start deal: %w", err) + } + + return &result.ProposalCid, nil + } + + // + // stateless flow from here to the end + // + dealProposal := &market.DealProposal{ + PieceCID: *params.Data.PieceCid, + PieceSize: params.Data.PieceSize.Padded(), + Client: walletKey, + Provider: params.Miner, + Label: params.Data.Root.Encode(multibase.MustNewEncoder('u')), + StartEpoch: dealStart, + EndEpoch: calcDealExpiration(params.MinBlocksDuration, md, 
dealStart), + StoragePricePerEpoch: big.Zero(), + ProviderCollateral: params.ProviderCollateral, + ClientCollateral: big.Zero(), + VerifiedDeal: params.VerifiedDeal, + } + + if dealProposal.ProviderCollateral.IsZero() { + networkCollateral, err := a.StateDealProviderCollateralBounds(ctx, params.Data.PieceSize.Padded(), params.VerifiedDeal, types.EmptyTSK) + if err != nil { + return nil, xerrors.Errorf("failed to determine minimum provider collateral: %w", err) + } + dealProposal.ProviderCollateral = networkCollateral.Min + } + + dealProposalSerialized, err := cborutil.Dump(dealProposal) if err != nil { - return nil, xerrors.Errorf("failed to start deal: %w", err) + return nil, xerrors.Errorf("failed to serialize deal proposal: %w", err) + } + + dealProposalSig, err := a.WalletSign(ctx, walletKey, dealProposalSerialized) + if err != nil { + return nil, xerrors.Errorf("failed to sign proposal : %w", err) + } + + dealProposalSigned := &market.ClientDealProposal{ + Proposal: *dealProposal, + ClientSignature: *dealProposalSig, + } + dStream, err := network.NewFromLibp2pHost(a.Host, + // params duplicated from .../node/modules/client.go + // https://github.com/filecoin-project/lotus/pull/5961#discussion_r629768011 + network.RetryParameters(time.Second, 5*time.Minute, 15, 5), + ).NewDealStream(ctx, *mi.PeerId) + if err != nil { + return nil, xerrors.Errorf("opening dealstream to %s/%s failed: %w", params.Miner, *mi.PeerId, err) + } + + if err = dStream.WriteDealProposal(network.Proposal{ + FastRetrieval: true, + DealProposal: dealProposalSigned, + Piece: &storagemarket.DataRef{ + TransferType: storagemarket.TTManual, + Root: params.Data.Root, + PieceCid: params.Data.PieceCid, + PieceSize: params.Data.PieceSize, + }, + }); err != nil { + return nil, xerrors.Errorf("sending deal proposal failed: %w", err) + } + + resp, _, err := dStream.ReadDealResponse() + if err != nil { + return nil, xerrors.Errorf("reading proposal response failed: %w", err) + } + + dealProposalIpld, 
err := cborutil.AsIpld(dealProposalSigned) + if err != nil { + return nil, xerrors.Errorf("serializing proposal node failed: %w", err) + } + + if !dealProposalIpld.Cid().Equals(resp.Response.Proposal) { + return nil, xerrors.Errorf("provider returned proposal cid %s but we expected %s", resp.Response.Proposal, dealProposalIpld.Cid()) + } + + if resp.Response.State != storagemarket.StorageDealWaitingForData { + return nil, xerrors.Errorf("provider returned unexpected state %d for proposal %s, with message: %s", resp.Response.State, resp.Response.Proposal, resp.Response.Message) } - return &result.ProposalCid, nil + return &resp.Response.Proposal, nil } func (a *API) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) { @@ -190,55 +311,57 @@ func (a *API) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) { return nil, err } + // Get a map of transfer ID => DataTransfer + dataTransfersByID, err := a.transfersByID(ctx) + if err != nil { + return nil, err + } + out := make([]api.DealInfo, len(deals)) for k, v := range deals { - out[k] = api.DealInfo{ - ProposalCid: v.ProposalCid, - DataRef: v.DataRef, - State: v.State, - Message: v.Message, - Provider: v.Proposal.Provider, - - PieceCID: v.Proposal.PieceCID, - Size: uint64(v.Proposal.PieceSize.Unpadded()), - - PricePerEpoch: v.Proposal.StoragePricePerEpoch, - Duration: uint64(v.Proposal.Duration()), - DealID: v.DealID, - CreationTime: v.CreationTime.Time(), - Verified: v.Proposal.VerifiedDeal, + // Find the data transfer associated with this deal + var transferCh *api.DataTransferChannel + if v.TransferChannelID != nil { + if ch, ok := dataTransfersByID[*v.TransferChannelID]; ok { + transferCh = &ch + } } + + out[k] = a.newDealInfoWithTransfer(transferCh, v) } return out, nil } +func (a *API) transfersByID(ctx context.Context) (map[datatransfer.ChannelID]api.DataTransferChannel, error) { + inProgressChannels, err := a.DataTransfer.InProgressChannels(ctx) + if err != nil { + return nil, err + } + + 
dataTransfersByID := make(map[datatransfer.ChannelID]api.DataTransferChannel, len(inProgressChannels)) + for id, channelState := range inProgressChannels { + ch := api.NewDataTransferChannel(a.Host.ID(), channelState) + dataTransfersByID[id] = ch + } + return dataTransfersByID, nil +} + func (a *API) ClientGetDealInfo(ctx context.Context, d cid.Cid) (*api.DealInfo, error) { v, err := a.SMDealClient.GetLocalDeal(ctx, d) if err != nil { return nil, err } - return &api.DealInfo{ - ProposalCid: v.ProposalCid, - State: v.State, - Message: v.Message, - Provider: v.Proposal.Provider, - PieceCID: v.Proposal.PieceCID, - Size: uint64(v.Proposal.PieceSize.Unpadded()), - PricePerEpoch: v.Proposal.StoragePricePerEpoch, - Duration: uint64(v.Proposal.Duration()), - DealID: v.DealID, - CreationTime: v.CreationTime.Time(), - Verified: v.Proposal.VerifiedDeal, - }, nil + di := a.newDealInfo(ctx, v) + return &di, nil } func (a *API) ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) { updates := make(chan api.DealInfo) unsub := a.SMDealClient.SubscribeToEvents(func(_ storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - updates <- newDealInfo(deal) + updates <- a.newDealInfo(ctx, deal) }) go func() { @@ -249,6 +372,45 @@ func (a *API) ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, er return updates, nil } +func (a *API) newDealInfo(ctx context.Context, v storagemarket.ClientDeal) api.DealInfo { + // Find the data transfer associated with this deal + var transferCh *api.DataTransferChannel + if v.TransferChannelID != nil { + state, err := a.DataTransfer.ChannelState(ctx, *v.TransferChannelID) + + // Note: If there was an error just ignore it, as the data transfer may + // be not found if it's no longer active + if err == nil { + ch := api.NewDataTransferChannel(a.Host.ID(), state) + ch.Stages = state.Stages() + transferCh = &ch + } + } + + di := a.newDealInfoWithTransfer(transferCh, v) + di.DealStages = v.DealStages + return di +} + +func 
(a *API) newDealInfoWithTransfer(transferCh *api.DataTransferChannel, v storagemarket.ClientDeal) api.DealInfo { + return api.DealInfo{ + ProposalCid: v.ProposalCid, + DataRef: v.DataRef, + State: v.State, + Message: v.Message, + Provider: v.Proposal.Provider, + PieceCID: v.Proposal.PieceCID, + Size: uint64(v.Proposal.PieceSize.Unpadded()), + PricePerEpoch: v.Proposal.StoragePricePerEpoch, + Duration: uint64(v.Proposal.Duration()), + DealID: v.DealID, + CreationTime: v.CreationTime.Time(), + Verified: v.Proposal.VerifiedDeal, + TransferChannelID: v.TransferChannelID, + DataTransfer: transferCh, + } +} + func (a *API) ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) { // TODO: check if we have the ENTIRE dag @@ -274,7 +436,19 @@ func (a *API) ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) if piece != nil && !piece.Equals(*p.PieceCID) { continue } - out = append(out, a.makeRetrievalQuery(ctx, p, root, piece, rm.QueryParams{})) + + // do not rely on local data with respect to peer id + // fetch an up-to-date miner peer id from chain + mi, err := a.StateMinerInfo(ctx, p.Address, types.EmptyTSK) + if err != nil { + return nil, err + } + pp := rm.RetrievalPeer{ + Address: p.Address, + ID: *mi.PeerId, + } + + out = append(out, a.makeRetrievalQuery(ctx, pp, root, piece, rm.QueryParams{})) } return out, nil @@ -432,6 +606,29 @@ func (a *API) ClientListImports(ctx context.Context) ([]api.Import, error) { return out, nil } +func (a *API) ClientCancelRetrievalDeal(ctx context.Context, dealID retrievalmarket.DealID) error { + cerr := make(chan error) + go func() { + err := a.Retrieval.CancelDeal(dealID) + + select { + case cerr <- err: + case <-ctx.Done(): + } + }() + + select { + case err := <-cerr: + if err != nil { + return xerrors.Errorf("failed to cancel retrieval deal: %w", err) + } + + return nil + case <-ctx.Done(): + return xerrors.Errorf("context timeout while canceling retrieval deal: %w", ctx.Err()) + } +} + func (a *API) 
ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error { events := make(chan marketevents.RetrievalEvent) go a.clientRetrieve(ctx, order, ref, events) @@ -495,6 +692,8 @@ func readSubscribeEvents(ctx context.Context, dealID retrievalmarket.DealID, sub return nil case rm.DealStatusRejected: return xerrors.Errorf("Retrieval Proposal Rejected: %s", state.Message) + case rm.DealStatusCancelled: + return xerrors.Errorf("Retrieval was cancelled externally: %s", state.Message) case rm.DealStatusDealNotFound, rm.DealStatusErrored: @@ -512,86 +711,107 @@ func (a *API) clientRetrieve(ctx context.Context, order api.RetrievalOrder, ref } } - if order.MinerPeer.ID == "" { - mi, err := a.StateMinerInfo(ctx, order.Miner, types.EmptyTSK) - if err != nil { - finish(err) - return - } + var store retrievalstoremgr.RetrievalStore - order.MinerPeer = retrievalmarket.RetrievalPeer{ - ID: *mi.PeerId, - Address: order.Miner, + if order.LocalStore == nil { + if order.MinerPeer == nil || order.MinerPeer.ID == "" { + mi, err := a.StateMinerInfo(ctx, order.Miner, types.EmptyTSK) + if err != nil { + finish(err) + return + } + + order.MinerPeer = &retrievalmarket.RetrievalPeer{ + ID: *mi.PeerId, + Address: order.Miner, + } } - } - if order.Size == 0 { - finish(xerrors.Errorf("cannot make retrieval deal for zero bytes")) - return - } + if order.Total.Int == nil { + finish(xerrors.Errorf("cannot make retrieval deal for null total")) + return + } - /*id, st, err := a.imgr().NewStore() - if err != nil { - return err - } - if err := a.imgr().AddLabel(id, "source", "retrieval"); err != nil { - return err - }*/ + if order.Size == 0 { + finish(xerrors.Errorf("cannot make retrieval deal for zero bytes")) + return + } - ppb := types.BigDiv(order.Total, types.NewInt(order.Size)) + /*id, st, err := a.imgr().NewStore() + if err != nil { + return err + } + if err := a.imgr().AddLabel(id, "source", "retrieval"); err != nil { + return err + }*/ - params, err := 
rm.NewParamsV1(ppb, order.PaymentInterval, order.PaymentIntervalIncrease, shared.AllSelector(), order.Piece, order.UnsealPrice) - if err != nil { - finish(xerrors.Errorf("Error in retrieval params: %s", err)) - return - } + ppb := types.BigDiv(order.Total, types.NewInt(order.Size)) - store, err := a.RetrievalStoreMgr.NewStore() - if err != nil { - finish(xerrors.Errorf("Error setting up new store: %w", err)) - return - } + params, err := rm.NewParamsV1(ppb, order.PaymentInterval, order.PaymentIntervalIncrease, shared.AllSelector(), order.Piece, order.UnsealPrice) + if err != nil { + finish(xerrors.Errorf("Error in retrieval params: %s", err)) + return + } - defer func() { - _ = a.RetrievalStoreMgr.ReleaseStore(store) - }() + store, err = a.RetrievalStoreMgr.NewStore() + if err != nil { + finish(xerrors.Errorf("Error setting up new store: %w", err)) + return + } - // Subscribe to events before retrieving to avoid losing events. - subscribeEvents := make(chan retrievalSubscribeEvent, 1) - subscribeCtx, cancel := context.WithCancel(ctx) - defer cancel() - unsubscribe := a.Retrieval.SubscribeToEvents(func(event rm.ClientEvent, state rm.ClientDealState) { - // We'll check the deal IDs inside readSubscribeEvents. - if state.PayloadCID.Equals(order.Root) { - select { - case <-subscribeCtx.Done(): - case subscribeEvents <- retrievalSubscribeEvent{event, state}: + defer func() { + _ = a.RetrievalStoreMgr.ReleaseStore(store) + }() + + // Subscribe to events before retrieving to avoid losing events. + subscribeEvents := make(chan retrievalSubscribeEvent, 1) + subscribeCtx, cancel := context.WithCancel(ctx) + defer cancel() + unsubscribe := a.Retrieval.SubscribeToEvents(func(event rm.ClientEvent, state rm.ClientDealState) { + // We'll check the deal IDs inside readSubscribeEvents. 
+ if state.PayloadCID.Equals(order.Root) { + select { + case <-subscribeCtx.Done(): + case subscribeEvents <- retrievalSubscribeEvent{event, state}: + } } + }) + + dealID, err := a.Retrieval.Retrieve( + ctx, + order.Root, + params, + order.Total, + *order.MinerPeer, + order.Client, + order.Miner, + store.StoreID()) + + if err != nil { + unsubscribe() + finish(xerrors.Errorf("Retrieve failed: %w", err)) + return } - }) - dealID, err := a.Retrieval.Retrieve( - ctx, - order.Root, - params, - order.Total, - order.MinerPeer, - order.Client, - order.Miner, - store.StoreID()) + err = readSubscribeEvents(ctx, dealID, subscribeEvents, events) - if err != nil { unsubscribe() - finish(xerrors.Errorf("Retrieve failed: %w", err)) - return - } - - err = readSubscribeEvents(ctx, dealID, subscribeEvents, events) + if err != nil { + finish(xerrors.Errorf("Retrieve: %w", err)) + return + } + } else { + // local retrieval + st, err := ((*multistore.MultiStore)(a.Mds)).Get(*order.LocalStore) + if err != nil { + finish(xerrors.Errorf("Retrieve: %w", err)) + return + } - unsubscribe() - if err != nil { - finish(xerrors.Errorf("Retrieve: %w", err)) - return + store = &multiStoreRetrievalStore{ + storeID: *order.LocalStore, + store: st, + } } // If ref is nil, it only fetches the data into the configured blockstore. 
@@ -631,6 +851,96 @@ func (a *API) clientRetrieve(ctx context.Context, order api.RetrievalOrder, ref return } +func (a *API) ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) { + deals, err := a.Retrieval.ListDeals() + if err != nil { + return nil, err + } + dataTransfersByID, err := a.transfersByID(ctx) + if err != nil { + return nil, err + } + out := make([]api.RetrievalInfo, 0, len(deals)) + for _, v := range deals { + // Find the data transfer associated with this deal + var transferCh *api.DataTransferChannel + if v.ChannelID != nil { + if ch, ok := dataTransfersByID[*v.ChannelID]; ok { + transferCh = &ch + } + } + out = append(out, a.newRetrievalInfoWithTransfer(transferCh, v)) + } + sort.Slice(out, func(a, b int) bool { + return out[a].ID < out[b].ID + }) + return out, nil +} + +func (a *API) ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) { + updates := make(chan api.RetrievalInfo) + + unsub := a.Retrieval.SubscribeToEvents(func(_ rm.ClientEvent, deal rm.ClientDealState) { + updates <- a.newRetrievalInfo(ctx, deal) + }) + + go func() { + defer unsub() + <-ctx.Done() + }() + + return updates, nil +} + +func (a *API) newRetrievalInfoWithTransfer(ch *api.DataTransferChannel, deal rm.ClientDealState) api.RetrievalInfo { + return api.RetrievalInfo{ + PayloadCID: deal.PayloadCID, + ID: deal.ID, + PieceCID: deal.PieceCID, + PricePerByte: deal.PricePerByte, + UnsealPrice: deal.UnsealPrice, + Status: deal.Status, + Message: deal.Message, + Provider: deal.Sender, + BytesReceived: deal.TotalReceived, + BytesPaidFor: deal.BytesPaidFor, + TotalPaid: deal.FundsSpent, + TransferChannelID: deal.ChannelID, + DataTransfer: ch, + } +} + +func (a *API) newRetrievalInfo(ctx context.Context, v rm.ClientDealState) api.RetrievalInfo { + // Find the data transfer associated with this deal + var transferCh *api.DataTransferChannel + if v.ChannelID != nil { + state, err := a.DataTransfer.ChannelState(ctx, *v.ChannelID) + + // 
Note: If there was an error just ignore it, as the data transfer may + // be not found if it's no longer active + if err == nil { + ch := api.NewDataTransferChannel(a.Host.ID(), state) + ch.Stages = state.Stages() + transferCh = &ch + } + } + + return a.newRetrievalInfoWithTransfer(transferCh, v) +} + +type multiStoreRetrievalStore struct { + storeID multistore.StoreID + store *multistore.Store +} + +func (mrs *multiStoreRetrievalStore) StoreID() *multistore.StoreID { + return &mrs.storeID +} + +func (mrs *multiStoreRetrievalStore) DAGService() ipld.DAGService { + return mrs.store.DAG +} + func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) { mi, err := a.StateMinerInfo(ctx, miner, types.EmptyTSK) if err != nil { @@ -647,20 +957,15 @@ func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Addre func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) { - // Hard-code the sector size to 32GiB, because: - // - pieceio.GeneratePieceCommitment requires a RegisteredSealProof + // Hard-code the sector type to 32GiBV1_1, because: + // - ffiwrapper.GeneratePieceCIDFromFile requires a RegisteredSealProof // - commP itself is sector-size independent, with rather low probability of that changing // ( note how the final rust call is identical for every RegSP type ) // https://github.com/filecoin-project/rust-filecoin-proofs-api/blob/v5.0.0/src/seal.rs#L1040-L1050 // // IF/WHEN this changes in the future we will have to be able to calculate // "old style" commP, and thus will need to introduce a version switch or similar - arbitrarySectorSize := abi.SectorSize(32 << 30) - - rt, err := ffiwrapper.SealProofTypeFromSectorSize(arbitrarySectorSize) - if err != nil { - return nil, xerrors.Errorf("bad sector size: %w", err) - } + arbitraryProofType := abi.RegisteredSealProof_StackedDrg32GiBV1_1 rdr, err := os.Open(inpath) if err != nil { @@ -673,7 +978,18 @@ func (a 
*API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet return nil, err } - commP, pieceSize, err := pieceio.GeneratePieceCommitment(rt, rdr, uint64(stat.Size())) + // check that the data is a car file; if it's not, retrieval won't work + _, _, err = car.ReadHeader(bufio.NewReader(rdr)) + if err != nil { + return nil, xerrors.Errorf("not a car file: %w", err) + } + + if _, err := rdr.Seek(0, io.SeekStart); err != nil { + return nil, xerrors.Errorf("seek to start: %w", err) + } + + pieceReader, pieceSize := padreader.New(rdr, uint64(stat.Size())) + commP, err := ffiwrapper.GeneratePieceCIDFromFile(arbitraryProofType, pieceReader, pieceSize) if err != nil { return nil, xerrors.Errorf("computing commP failed: %w", err) @@ -713,8 +1029,8 @@ func (a *API) ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, e func (a *API) ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) { dag := merkledag.NewDAGService(blockservice.New(a.CombinedBstore, offline.Exchange(a.CombinedBstore))) - w := &commp.Writer{} - bw := bufio.NewWriterSize(w, int(commp.CommPBuf)) + w := &writer.Writer{} + bw := bufio.NewWriterSize(w, int(writer.CommPBuf)) err := car.WriteCar(ctx, dag, []cid.Cid{root}, w) if err != nil { @@ -725,7 +1041,8 @@ func (a *API) ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCID return api.DataCIDSize{}, err } - return w.Sum() + dataCIDSize, err := w.Sum() + return api.DataCIDSize(dataCIDSize), err } func (a *API) ClientGenCar(ctx context.Context, ref api.FileRef, outputPath string) error { @@ -885,23 +1202,6 @@ func (a *API) ClientCancelDataTransfer(ctx context.Context, transferID datatrans return a.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID}) } -func newDealInfo(v storagemarket.ClientDeal) api.DealInfo { - return api.DealInfo{ - ProposalCid: v.ProposalCid, - DataRef: v.DataRef, - State: v.State, - Message: v.Message, - 
Provider: v.Proposal.Provider, - PieceCID: v.Proposal.PieceCID, - Size: uint64(v.Proposal.PieceSize.Unpadded()), - PricePerEpoch: v.Proposal.StoragePricePerEpoch, - Duration: uint64(v.Proposal.Duration()), - DealID: v.DealID, - CreationTime: v.CreationTime.Time(), - Verified: v.Proposal.VerifiedDeal, - } -} - func (a *API) ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error { return a.Retrieval.TryRestartInsufficientFunds(paymentChannel) } diff --git a/node/impl/common/common.go b/node/impl/common/common.go index 79478e489f9..a681e4a4a90 100644 --- a/node/impl/common/common.go +++ b/node/impl/common/common.go @@ -2,29 +2,18 @@ package common import ( "context" - "sort" - "strings" "github.com/gbrlsnchs/jwt/v3" "github.com/google/uuid" logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/host" - metrics "github.com/libp2p/go-libp2p-core/metrics" - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - protocol "github.com/libp2p/go-libp2p-core/protocol" - swarm "github.com/libp2p/go-libp2p-swarm" - basichost "github.com/libp2p/go-libp2p/p2p/host/basic" - ma "github.com/multiformats/go-multiaddr" "go.uber.org/fx" "golang.org/x/xerrors" "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/lotus/api" + apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/modules/lp2p" ) var session = uuid.New() @@ -33,11 +22,6 @@ type CommonAPI struct { fx.In APISecret *dtypes.APIAlg - RawHost lp2p.RawHost - Host host.Host - Router lp2p.BaseIpfsRouting - Reporter metrics.Reporter - Sk *dtypes.ScoreKeeper ShutdownChan dtypes.ShutdownChan } @@ -62,128 +46,17 @@ func (a *CommonAPI) AuthNew(ctx context.Context, perms []auth.Permission) ([]byt return jwt.Sign(&p, (*jwt.HMACSHA)(a.APISecret)) } -func (a *CommonAPI) 
NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) { - return a.Host.Network().Connectedness(pid), nil -} -func (a *CommonAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) { - scores := a.Sk.Get() - out := make([]api.PubsubScore, len(scores)) - i := 0 - for k, v := range scores { - out[i] = api.PubsubScore{ID: k, Score: v} - i++ - } - - sort.Slice(out, func(i, j int) bool { - return strings.Compare(string(out[i].ID), string(out[j].ID)) > 0 - }) - - return out, nil -} - -func (a *CommonAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) { - conns := a.Host.Network().Conns() - out := make([]peer.AddrInfo, len(conns)) - - for i, conn := range conns { - out[i] = peer.AddrInfo{ - ID: conn.RemotePeer(), - Addrs: []ma.Multiaddr{ - conn.RemoteMultiaddr(), - }, - } - } - - return out, nil +func (a *CommonAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) { + return build.OpenRPCDiscoverJSON_Full(), nil } -func (a *CommonAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error { - if swrm, ok := a.Host.Network().(*swarm.Swarm); ok { - swrm.Backoff().Clear(p.ID) - } - - return a.Host.Connect(ctx, p) -} - -func (a *CommonAPI) NetAddrsListen(context.Context) (peer.AddrInfo, error) { - return peer.AddrInfo{ - ID: a.Host.ID(), - Addrs: a.Host.Addrs(), - }, nil -} - -func (a *CommonAPI) NetDisconnect(ctx context.Context, p peer.ID) error { - return a.Host.Network().ClosePeer(p) -} - -func (a *CommonAPI) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) { - return a.Router.FindPeer(ctx, p) -} - -func (a *CommonAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) { - autonat := a.RawHost.(*basichost.BasicHost).AutoNat - - if autonat == nil { - return api.NatInfo{ - Reachability: network.ReachabilityUnknown, - }, nil - } - - var maddr string - if autonat.Status() == network.ReachabilityPublic { - pa, err := autonat.PublicAddr() - if err != nil { - return api.NatInfo{}, err - } - 
maddr = pa.String() - } - - return api.NatInfo{ - Reachability: autonat.Status(), - PublicAddr: maddr, - }, nil -} - -func (a *CommonAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) { - ag, err := a.Host.Peerstore().Get(p, "AgentVersion") - if err != nil { - return "", err - } - - if ag == nil { - return "unknown", nil - } - - return ag.(string), nil -} - -func (a *CommonAPI) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) { - return a.Reporter.GetBandwidthTotals(), nil -} - -func (a *CommonAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) { - out := make(map[string]metrics.Stats) - for p, s := range a.Reporter.GetBandwidthByPeer() { - out[p.String()] = s - } - return out, nil -} - -func (a *CommonAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) { - return a.Reporter.GetBandwidthByProtocol(), nil -} - -func (a *CommonAPI) ID(context.Context) (peer.ID, error) { - return a.Host.ID(), nil -} - -func (a *CommonAPI) Version(context.Context) (api.Version, error) { - v, err := build.VersionForType(build.RunningNodeType) +func (a *CommonAPI) Version(context.Context) (api.APIVersion, error) { + v, err := api.VersionForType(api.RunningNodeType) if err != nil { - return api.Version{}, err + return api.APIVersion{}, err } - return api.Version{ + return api.APIVersion{ Version: build.UserVersion(), APIVersion: v, @@ -211,5 +84,3 @@ func (a *CommonAPI) Session(ctx context.Context) (uuid.UUID, error) { func (a *CommonAPI) Closing(ctx context.Context) (<-chan struct{}, error) { return make(chan struct{}), nil // relies on jsonrpc closing } - -var _ api.Common = &CommonAPI{} diff --git a/node/impl/full.go b/node/impl/full.go index add40917c84..f9c83ded032 100644 --- a/node/impl/full.go +++ b/node/impl/full.go @@ -2,22 +2,29 @@ package impl import ( "context" + "time" + + "github.com/libp2p/go-libp2p-core/peer" logging "github.com/ipfs/go-log/v2" 
"github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/node/impl/client" "github.com/filecoin-project/lotus/node/impl/common" "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/impl/market" + "github.com/filecoin-project/lotus/node/impl/net" "github.com/filecoin-project/lotus/node/impl/paych" "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/lp2p" ) var log = logging.Logger("node") type FullNodeAPI struct { common.CommonAPI + net.NetAPI full.ChainAPI client.API full.MpoolAPI @@ -30,11 +37,86 @@ type FullNodeAPI struct { full.SyncAPI full.BeaconAPI - DS dtypes.MetadataDS + DS dtypes.MetadataDS + NetworkName dtypes.NetworkName } func (n *FullNodeAPI) CreateBackup(ctx context.Context, fpath string) error { return backup(n.DS, fpath) } +func (n *FullNodeAPI) NodeStatus(ctx context.Context, inclChainStatus bool) (status api.NodeStatus, err error) { + curTs, err := n.ChainHead(ctx) + if err != nil { + return status, err + } + + status.SyncStatus.Epoch = uint64(curTs.Height()) + timestamp := time.Unix(int64(curTs.MinTimestamp()), 0) + delta := time.Since(timestamp).Seconds() + status.SyncStatus.Behind = uint64(delta / 30) + + // get peers in the messages and blocks topics + peersMsgs := make(map[peer.ID]struct{}) + peersBlocks := make(map[peer.ID]struct{}) + + for _, p := range n.PubSub.ListPeers(build.MessagesTopic(n.NetworkName)) { + peersMsgs[p] = struct{}{} + } + + for _, p := range n.PubSub.ListPeers(build.BlocksTopic(n.NetworkName)) { + peersBlocks[p] = struct{}{} + } + + // get scores for all connected and recent peers + scores, err := n.NetPubsubScores(ctx) + if err != nil { + return status, err + } + + for _, score := range scores { + if score.Score.Score > lp2p.PublishScoreThreshold { + _, inMsgs := peersMsgs[score.ID] + if inMsgs { + status.PeerStatus.PeersToPublishMsgs++ + } + + _, inBlocks := 
peersBlocks[score.ID] + if inBlocks { + status.PeerStatus.PeersToPublishBlocks++ + } + } + } + + if inclChainStatus && status.SyncStatus.Epoch > uint64(build.Finality) { + blockCnt := 0 + ts := curTs + + for i := 0; i < 100; i++ { + blockCnt += len(ts.Blocks()) + tsk := ts.Parents() + ts, err = n.ChainGetTipSet(ctx, tsk) + if err != nil { + return status, err + } + } + + status.ChainStatus.BlocksPerTipsetLast100 = float64(blockCnt) / 100 + + for i := 100; i < int(build.Finality); i++ { + blockCnt += len(ts.Blocks()) + tsk := ts.Parents() + ts, err = n.ChainGetTipSet(ctx, tsk) + if err != nil { + return status, err + } + } + + status.ChainStatus.BlocksPerTipsetLastFinality = float64(blockCnt) / float64(build.Finality) + + } + + return status, nil +} + var _ api.FullNode = &FullNodeAPI{} diff --git a/node/impl/full/chain.go b/node/impl/full/chain.go index a3410b8db1d..33d14d3baee 100644 --- a/node/impl/full/chain.go +++ b/node/impl/full/chain.go @@ -10,6 +10,8 @@ import ( "strings" "sync" + "github.com/filecoin-project/lotus/build" + "go.uber.org/fx" "golang.org/x/xerrors" @@ -18,7 +20,7 @@ import ( offline "github.com/ipfs/go-ipfs-exchange-offline" cbor "github.com/ipfs/go-ipld-cbor" ipld "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "github.com/ipfs/go-merkledag" "github.com/ipfs/go-path" "github.com/ipfs/go-path/resolver" @@ -31,10 +33,11 @@ import ( "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/node/modules/dtypes" ) var log = logging.Logger("fullnode") @@ -50,6 +53,8 @@ type ChainModuleAPI interface { ChainReadObj(context.Context, cid.Cid) ([]byte, error) } +var _ 
ChainModuleAPI = *new(api.FullNode) + // ChainModule provides a default implementation of ChainModuleAPI. // It can be swapped out with another implementation through Dependency // Injection (for example with a thin RPC client). @@ -57,6 +62,11 @@ type ChainModule struct { fx.In Chain *store.ChainStore + + // ExposedBlockstore is the global monolith blockstore that is safe to + // expose externally. In the future, this will be segregated into two + // blockstores. + ExposedBlockstore dtypes.ExposedBlockstore } var _ ChainModuleAPI = (*ChainModule)(nil) @@ -68,6 +78,11 @@ type ChainAPI struct { ChainModuleAPI Chain *store.ChainStore + + // ExposedBlockstore is the global monolith blockstore that is safe to + // expose externally. In the future, this will be segregated into two + // blockstores. + ExposedBlockstore dtypes.ExposedBlockstore } func (m *ChainModule) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { @@ -84,7 +99,12 @@ func (a *ChainAPI) ChainGetRandomnessFromTickets(ctx context.Context, tsk types. 
return nil, xerrors.Errorf("loading tipset key: %w", err) } - return a.Chain.GetChainRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy) + // Doing this here is slightly nicer than doing it in the chainstore directly, but it's still bad for ChainAPI to reason about network upgrades + if randEpoch > build.UpgradeHyperdriveHeight { + return a.Chain.GetChainRandomnessLookingForward(ctx, pts.Cids(), personalization, randEpoch, entropy) + } + + return a.Chain.GetChainRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy) } func (a *ChainAPI) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { @@ -93,7 +113,12 @@ func (a *ChainAPI) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.T return nil, xerrors.Errorf("loading tipset key: %w", err) } - return a.Chain.GetBeaconRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy) + // Doing this here is slightly nicer than doing it in the chainstore directly, but it's still bad for ChainAPI to reason about network upgrades + if randEpoch > build.UpgradeHyperdriveHeight { + return a.Chain.GetBeaconRandomnessLookingForward(ctx, pts.Cids(), personalization, randEpoch, entropy) + } + + return a.Chain.GetBeaconRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy) } func (a *ChainAPI) ChainGetBlock(ctx context.Context, msg cid.Cid) (*types.BlockHeader, error) { @@ -203,6 +228,33 @@ func (a *ChainAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([] return out, nil } +func (a *ChainAPI) ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]api.Message, error) { + ts, err := a.Chain.GetTipSetFromKey(tsk) + if err != nil { + return nil, err + } + + // genesis block has no parent messages... 
+ if ts.Height() == 0 { + return nil, nil + } + + cm, err := a.Chain.MessagesForTipset(ts) + if err != nil { + return nil, err + } + + var out []api.Message + for _, m := range cm { + out = append(out, api.Message{ + Cid: m.Cid(), + Message: m.VMMessage(), + }) + } + + return out, nil +} + func (m *ChainModule) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { @@ -212,7 +264,7 @@ func (m *ChainModule) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpo } func (m *ChainModule) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, error) { - blk, err := m.Chain.Blockstore().Get(obj) + blk, err := m.ExposedBlockstore.Get(obj) if err != nil { return nil, xerrors.Errorf("blockstore get: %w", err) } @@ -221,15 +273,15 @@ func (m *ChainModule) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, er } func (a *ChainAPI) ChainDeleteObj(ctx context.Context, obj cid.Cid) error { - return a.Chain.Blockstore().DeleteBlock(obj) + return a.ExposedBlockstore.DeleteBlock(obj) } func (m *ChainModule) ChainHasObj(ctx context.Context, obj cid.Cid) (bool, error) { - return m.Chain.Blockstore().Has(obj) + return m.ExposedBlockstore.Has(obj) } func (a *ChainAPI) ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (api.ObjStat, error) { - bs := a.Chain.Blockstore() + bs := a.ExposedBlockstore bsvc := blockservice.New(bs, offline.Exchange(bs)) dag := merkledag.NewDAGService(bsvc) @@ -514,7 +566,7 @@ func (a *ChainAPI) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, return nil, xerrors.Errorf("parsing path: %w", err) } - bs := a.Chain.Blockstore() + bs := a.ExposedBlockstore bsvc := blockservice.New(bs, offline.Exchange(bs)) dag := merkledag.NewDAGService(bsvc) diff --git a/node/impl/full/gas.go b/node/impl/full/gas.go index e0cbd2192db..edf53ff6333 100644 --- a/node/impl/full/gas.go +++ b/node/impl/full/gas.go @@ -8,6 +8,7 @@ import ( 
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" + lru "github.com/hashicorp/golang-lru" "go.uber.org/fx" "golang.org/x/xerrors" @@ -23,20 +24,26 @@ import ( "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/modules/dtypes" ) type GasModuleAPI interface { GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) } +var _ GasModuleAPI = *new(api.FullNode) + // GasModule provides a default implementation of GasModuleAPI. // It can be swapped out with another implementation through Dependency // Injection (for example with a thin RPC client). type GasModule struct { fx.In - Stmgr *stmgr.StateManager - Chain *store.ChainStore - Mpool *messagepool.MessagePool + Stmgr *stmgr.StateManager + Chain *store.ChainStore + Mpool *messagepool.MessagePool + GetMaxFee dtypes.DefaultMaxFeeFunc + + PriceCache *GasPriceCache } var _ GasModuleAPI = (*GasModule)(nil) @@ -49,6 +56,53 @@ type GasAPI struct { Stmgr *stmgr.StateManager Chain *store.ChainStore Mpool *messagepool.MessagePool + + PriceCache *GasPriceCache +} + +func NewGasPriceCache() *GasPriceCache { + // 50 because we usually won't access more than 40 + c, err := lru.New2Q(50) + if err != nil { + // err only if parameter is bad + panic(err) + } + + return &GasPriceCache{ + c: c, + } +} + +type GasPriceCache struct { + c *lru.TwoQueueCache +} + +type GasMeta struct { + Price big.Int + Limit int64 +} + +func (g *GasPriceCache) GetTSGasStats(cstore *store.ChainStore, ts *types.TipSet) ([]GasMeta, error) { + i, has := g.c.Get(ts.Key()) + if has { + return i.([]GasMeta), nil + } + + var prices []GasMeta + msgs, err := cstore.MessagesForTipset(ts) + if err != nil { + return nil, xerrors.Errorf("loading messages: %w", err) + } + for _, msg := range msgs 
{ + prices = append(prices, GasMeta{ + Price: msg.VMMessage().GasPremium, + Limit: msg.VMMessage().GasLimit, + }) + } + + g.c.Add(ts.Key(), prices) + + return prices, nil } const MinGasPremium = 100e3 @@ -86,22 +140,19 @@ func gasEstimateFeeCap(cstore *store.ChainStore, msg *types.Message, maxqueueblk return out, nil } -type gasMeta struct { - price big.Int - limit int64 -} - -func medianGasPremium(prices []gasMeta, blocks int) abi.TokenAmount { +// finds 55th percntile instead of median to put negative pressure on gas price +func medianGasPremium(prices []GasMeta, blocks int) abi.TokenAmount { sort.Slice(prices, func(i, j int) bool { // sort desc by price - return prices[i].price.GreaterThan(prices[j].price) + return prices[i].Price.GreaterThan(prices[j].Price) }) - at := build.BlockGasTarget * int64(blocks) / 2 + at := build.BlockGasTarget * int64(blocks) / 2 // 50th + at += build.BlockGasTarget * int64(blocks) / (2 * 20) // move 5% further prev1, prev2 := big.Zero(), big.Zero() for _, price := range prices { - prev1, prev2 = price.price, prev1 - at -= price.limit + prev1, prev2 = price.Price, prev1 + at -= price.Limit if at < 0 { break } @@ -122,7 +173,7 @@ func (a *GasAPI) GasEstimateGasPremium( gaslimit int64, _ types.TipSetKey, ) (types.BigInt, error) { - return gasEstimateGasPremium(a.Chain, nblocksincl) + return gasEstimateGasPremium(a.Chain, a.PriceCache, nblocksincl) } func (m *GasModule) GasEstimateGasPremium( ctx context.Context, @@ -131,14 +182,14 @@ func (m *GasModule) GasEstimateGasPremium( gaslimit int64, _ types.TipSetKey, ) (types.BigInt, error) { - return gasEstimateGasPremium(m.Chain, nblocksincl) + return gasEstimateGasPremium(m.Chain, m.PriceCache, nblocksincl) } -func gasEstimateGasPremium(cstore *store.ChainStore, nblocksincl uint64) (types.BigInt, error) { +func gasEstimateGasPremium(cstore *store.ChainStore, cache *GasPriceCache, nblocksincl uint64) (types.BigInt, error) { if nblocksincl == 0 { nblocksincl = 1 } - var prices []gasMeta + 
var prices []GasMeta var blocks int ts := cstore.GetHeaviestTipSet() @@ -153,17 +204,11 @@ func gasEstimateGasPremium(cstore *store.ChainStore, nblocksincl uint64) (types. } blocks += len(pts.Blocks()) - - msgs, err := cstore.MessagesForTipset(pts) + meta, err := cache.GetTSGasStats(cstore, pts) if err != nil { - return types.BigInt{}, xerrors.Errorf("loading messages: %w", err) - } - for _, msg := range msgs { - prices = append(prices, gasMeta{ - price: msg.VMMessage().GasPremium, - limit: msg.VMMessage().GasLimit, - }) + return types.BigInt{}, err } + prices = append(prices, meta...) ts = pts } @@ -190,11 +235,19 @@ func gasEstimateGasPremium(cstore *store.ChainStore, nblocksincl uint64) (types. return premium, nil } -func (a *GasAPI) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, _ types.TipSetKey) (int64, error) { - return gasEstimateGasLimit(ctx, a.Chain, a.Stmgr, a.Mpool, msgIn) +func (a *GasAPI) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) { + ts, err := a.Chain.GetTipSetFromKey(tsk) + if err != nil { + return -1, xerrors.Errorf("getting tipset: %w", err) + } + return gasEstimateGasLimit(ctx, a.Chain, a.Stmgr, a.Mpool, msgIn, ts) } -func (m *GasModule) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, _ types.TipSetKey) (int64, error) { - return gasEstimateGasLimit(ctx, m.Chain, m.Stmgr, m.Mpool, msgIn) +func (m *GasModule) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) { + ts, err := m.Chain.GetTipSetFromKey(tsk) + if err != nil { + return -1, xerrors.Errorf("getting tipset: %w", err) + } + return gasEstimateGasLimit(ctx, m.Chain, m.Stmgr, m.Mpool, msgIn, ts) } func gasEstimateGasLimit( ctx context.Context, @@ -202,21 +255,24 @@ func gasEstimateGasLimit( smgr *stmgr.StateManager, mpool *messagepool.MessagePool, msgIn *types.Message, + currTs *types.TipSet, ) (int64, error) { msg := *msgIn msg.GasLimit = build.BlockGasLimit 
msg.GasFeeCap = types.NewInt(uint64(build.MinimumBaseFee) + 1) msg.GasPremium = types.NewInt(1) - currTs := cstore.GetHeaviestTipSet() fromA, err := smgr.ResolveToKeyAddress(ctx, msgIn.From, currTs) if err != nil { return -1, xerrors.Errorf("getting key address: %w", err) } - pending, ts := mpool.PendingFor(fromA) + pending, ts := mpool.PendingFor(ctx, fromA) priorMsgs := make([]types.ChainMsg, 0, len(pending)) for _, m := range pending { + if m.Message.Nonce == msg.Nonce { + break + } priorMsgs = append(priorMsgs, m) } @@ -268,7 +324,7 @@ func gasEstimateGasLimit( func (m *GasModule) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, _ types.TipSetKey) (*types.Message, error) { if msg.GasLimit == 0 { - gasLimit, err := m.GasEstimateGasLimit(ctx, msg, types.TipSetKey{}) + gasLimit, err := m.GasEstimateGasLimit(ctx, msg, types.EmptyTSK) if err != nil { return nil, xerrors.Errorf("estimating gas used: %w", err) } @@ -276,7 +332,7 @@ func (m *GasModule) GasEstimateMessageGas(ctx context.Context, msg *types.Messag } if msg.GasPremium == types.EmptyInt || types.BigCmp(msg.GasPremium, types.NewInt(0)) == 0 { - gasPremium, err := m.GasEstimateGasPremium(ctx, 10, msg.From, msg.GasLimit, types.TipSetKey{}) + gasPremium, err := m.GasEstimateGasPremium(ctx, 10, msg.From, msg.GasLimit, types.EmptyTSK) if err != nil { return nil, xerrors.Errorf("estimating gas price: %w", err) } @@ -291,7 +347,7 @@ func (m *GasModule) GasEstimateMessageGas(ctx context.Context, msg *types.Messag msg.GasFeeCap = feeCap } - messagepool.CapGasFee(msg, spec.Get().MaxFee) + messagepool.CapGasFee(m.GetMaxFee, msg, spec) return msg, nil } diff --git a/node/impl/full/gas_test.go b/node/impl/full/gas_test.go index 2452ab8077b..028e039ce4f 100644 --- a/node/impl/full/gas_test.go +++ b/node/impl/full/gas_test.go @@ -12,27 +12,27 @@ import ( ) func TestMedian(t *testing.T) { - require.Equal(t, types.NewInt(5), medianGasPremium([]gasMeta{ + require.Equal(t, 
types.NewInt(5), medianGasPremium([]GasMeta{ {big.NewInt(5), build.BlockGasTarget}, }, 1)) - require.Equal(t, types.NewInt(10), medianGasPremium([]gasMeta{ + require.Equal(t, types.NewInt(10), medianGasPremium([]GasMeta{ {big.NewInt(5), build.BlockGasTarget}, {big.NewInt(10), build.BlockGasTarget}, }, 1)) - require.Equal(t, types.NewInt(15), medianGasPremium([]gasMeta{ + require.Equal(t, types.NewInt(15), medianGasPremium([]GasMeta{ {big.NewInt(10), build.BlockGasTarget / 2}, {big.NewInt(20), build.BlockGasTarget / 2}, }, 1)) - require.Equal(t, types.NewInt(25), medianGasPremium([]gasMeta{ + require.Equal(t, types.NewInt(25), medianGasPremium([]GasMeta{ {big.NewInt(10), build.BlockGasTarget / 2}, {big.NewInt(20), build.BlockGasTarget / 2}, {big.NewInt(30), build.BlockGasTarget / 2}, }, 1)) - require.Equal(t, types.NewInt(15), medianGasPremium([]gasMeta{ + require.Equal(t, types.NewInt(15), medianGasPremium([]GasMeta{ {big.NewInt(10), build.BlockGasTarget / 2}, {big.NewInt(20), build.BlockGasTarget / 2}, {big.NewInt(30), build.BlockGasTarget / 2}, diff --git a/node/impl/full/mpool.go b/node/impl/full/mpool.go index b1e9f94f994..bd91387a2be 100644 --- a/node/impl/full/mpool.go +++ b/node/impl/full/mpool.go @@ -20,6 +20,8 @@ type MpoolModuleAPI interface { MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) } +var _ MpoolModuleAPI = *new(api.FullNode) + // MpoolModule provides a default implementation of MpoolModuleAPI. // It can be swapped out with another implementation through Dependency // Injection (for example with a thin RPC client). 
@@ -58,7 +60,7 @@ func (a *MpoolAPI) MpoolSelect(ctx context.Context, tsk types.TipSetKey, ticketQ return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - return a.Mpool.SelectMessages(ts, ticketQuality) + return a.Mpool.SelectMessages(ctx, ts, ticketQuality) } func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) { @@ -66,7 +68,7 @@ func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*ty if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - pending, mpts := a.Mpool.Pending() + pending, mpts := a.Mpool.Pending(ctx) haveCids := map[cid.Cid]struct{}{} for _, m := range pending { @@ -120,16 +122,16 @@ func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*ty } func (a *MpoolAPI) MpoolClear(ctx context.Context, local bool) error { - a.Mpool.Clear(local) + a.Mpool.Clear(ctx, local) return nil } func (m *MpoolModule) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) { - return m.Mpool.Push(smsg) + return m.Mpool.Push(ctx, smsg) } func (a *MpoolAPI) MpoolPushUntrusted(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) { - return a.Mpool.PushUntrusted(smsg) + return a.Mpool.PushUntrusted(ctx, smsg) } func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) { @@ -190,7 +192,7 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe func (a *MpoolAPI) MpoolBatchPush(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) { var messageCids []cid.Cid for _, smsg := range smsgs { - smsgCid, err := a.Mpool.Push(smsg) + smsgCid, err := a.Mpool.Push(ctx, smsg) if err != nil { return messageCids, err } @@ -202,7 +204,7 @@ func (a *MpoolAPI) MpoolBatchPush(ctx context.Context, smsgs []*types.SignedMess func (a *MpoolAPI) MpoolBatchPushUntrusted(ctx context.Context, smsgs 
[]*types.SignedMessage) ([]cid.Cid, error) { var messageCids []cid.Cid for _, smsg := range smsgs { - smsgCid, err := a.Mpool.PushUntrusted(smsg) + smsgCid, err := a.Mpool.PushUntrusted(ctx, smsg) if err != nil { return messageCids, err } @@ -223,8 +225,20 @@ func (a *MpoolAPI) MpoolBatchPushMessage(ctx context.Context, msgs []*types.Mess return smsgs, nil } +func (a *MpoolAPI) MpoolCheckMessages(ctx context.Context, protos []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) { + return a.Mpool.CheckMessages(ctx, protos) +} + +func (a *MpoolAPI) MpoolCheckPendingMessages(ctx context.Context, from address.Address) ([][]api.MessageCheckStatus, error) { + return a.Mpool.CheckPendingMessages(ctx, from) +} + +func (a *MpoolAPI) MpoolCheckReplaceMessages(ctx context.Context, msgs []*types.Message) ([][]api.MessageCheckStatus, error) { + return a.Mpool.CheckReplaceMessages(ctx, msgs) +} + func (a *MpoolAPI) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) { - return a.Mpool.GetNonce(addr) + return a.Mpool.GetNonce(ctx, addr, types.EmptyTSK) } func (a *MpoolAPI) MpoolSub(ctx context.Context) (<-chan api.MpoolUpdate, error) { diff --git a/node/impl/full/multisig.go b/node/impl/full/multisig.go index 9c5f683c469..e44509d7cbb 100644 --- a/node/impl/full/multisig.go +++ b/node/impl/full/multisig.go @@ -14,7 +14,6 @@ import ( multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" - "github.com/ipfs/go-cid" "go.uber.org/fx" "golang.org/x/xerrors" ) @@ -37,134 +36,129 @@ func (a *MsigAPI) messageBuilder(ctx context.Context, from address.Address) (mul // TODO: remove gp (gasPrice) from arguments // TODO: Add "vesting start" to arguments. 
-func (a *MsigAPI) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) { +func (a *MsigAPI) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (*api.MessagePrototype, error) { mb, err := a.messageBuilder(ctx, src) if err != nil { - return cid.Undef, err + return nil, err } msg, err := mb.Create(addrs, req, 0, duration, val) if err != nil { - return cid.Undef, err - } - - // send the message out to the network - smsg, err := a.MpoolAPI.MpoolPushMessage(ctx, msg, nil) - if err != nil { - return cid.Undef, err + return nil, err } - return smsg.Cid(), nil + return &api.MessagePrototype{ + Message: *msg, + ValidNonce: false, + }, nil } -func (a *MsigAPI) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { +func (a *MsigAPI) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) { mb, err := a.messageBuilder(ctx, src) if err != nil { - return cid.Undef, err + return nil, err } msg, err := mb.Propose(msig, to, amt, abi.MethodNum(method), params) if err != nil { - return cid.Undef, xerrors.Errorf("failed to create proposal: %w", err) - } - - smsg, err := a.MpoolAPI.MpoolPushMessage(ctx, msg, nil) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to push message: %w", err) + return nil, xerrors.Errorf("failed to create proposal: %w", err) } - return smsg.Cid(), nil + return &api.MessagePrototype{ + Message: *msg, + ValidNonce: false, + }, nil } -func (a *MsigAPI) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) { +func (a *MsigAPI) 
MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (*api.MessagePrototype, error) { enc, actErr := serializeAddParams(newAdd, inc) if actErr != nil { - return cid.Undef, actErr + return nil, actErr } return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) } -func (a *MsigAPI) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) { +func (a *MsigAPI) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (*api.MessagePrototype, error) { enc, actErr := serializeAddParams(newAdd, inc) if actErr != nil { - return cid.Undef, actErr + return nil, actErr } return a.MsigApproveTxnHash(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) } -func (a *MsigAPI) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) { +func (a *MsigAPI) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (*api.MessagePrototype, error) { enc, actErr := serializeAddParams(newAdd, inc) if actErr != nil { - return cid.Undef, actErr + return nil, actErr } return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) } -func (a *MsigAPI) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { +func (a *MsigAPI) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (*api.MessagePrototype, error) { enc, actErr := serializeSwapParams(oldAdd, newAdd) if actErr != nil { - return cid.Undef, actErr + 
return nil, actErr } return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) } -func (a *MsigAPI) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { +func (a *MsigAPI) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (*api.MessagePrototype, error) { enc, actErr := serializeSwapParams(oldAdd, newAdd) if actErr != nil { - return cid.Undef, actErr + return nil, actErr } return a.MsigApproveTxnHash(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) } -func (a *MsigAPI) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { +func (a *MsigAPI) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (*api.MessagePrototype, error) { enc, actErr := serializeSwapParams(oldAdd, newAdd) if actErr != nil { - return cid.Undef, actErr + return nil, actErr } return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) } -func (a *MsigAPI) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) { +func (a *MsigAPI) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*api.MessagePrototype, error) { return a.msigApproveOrCancelSimple(ctx, api.MsigApprove, msig, txID, src) } -func (a *MsigAPI) MsigApproveTxnHash(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { +func (a *MsigAPI) MsigApproveTxnHash(ctx 
context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) { return a.msigApproveOrCancelTxnHash(ctx, api.MsigApprove, msig, txID, proposer, to, amt, src, method, params) } -func (a *MsigAPI) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { +func (a *MsigAPI) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) { return a.msigApproveOrCancelTxnHash(ctx, api.MsigCancel, msig, txID, src, to, amt, src, method, params) } -func (a *MsigAPI) MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) { +func (a *MsigAPI) MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (*api.MessagePrototype, error) { enc, actErr := serializeRemoveParams(toRemove, decrease) if actErr != nil { - return cid.Undef, actErr + return nil, actErr } return a.MsigPropose(ctx, msig, msig, types.NewInt(0), proposer, uint64(multisig.Methods.RemoveSigner), enc) } -func (a *MsigAPI) msigApproveOrCancelSimple(ctx context.Context, operation api.MsigProposeResponse, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) { +func (a *MsigAPI) msigApproveOrCancelSimple(ctx context.Context, operation api.MsigProposeResponse, msig address.Address, txID uint64, src address.Address) (*api.MessagePrototype, error) { if msig == address.Undef { - return cid.Undef, xerrors.Errorf("must provide multisig address") + return nil, xerrors.Errorf("must provide multisig address") } if src == address.Undef { - return cid.Undef, xerrors.Errorf("must 
provide source address") + return nil, xerrors.Errorf("must provide source address") } mb, err := a.messageBuilder(ctx, src) if err != nil { - return cid.Undef, err + return nil, err } var msg *types.Message @@ -174,34 +168,31 @@ func (a *MsigAPI) msigApproveOrCancelSimple(ctx context.Context, operation api.M case api.MsigCancel: msg, err = mb.Cancel(msig, txID, nil) default: - return cid.Undef, xerrors.Errorf("Invalid operation for msigApproveOrCancel") + return nil, xerrors.Errorf("Invalid operation for msigApproveOrCancel") } if err != nil { - return cid.Undef, err - } - - smsg, err := a.MpoolAPI.MpoolPushMessage(ctx, msg, nil) - if err != nil { - return cid.Undef, err + return nil, err } - return smsg.Cid(), nil - + return &api.MessagePrototype{ + Message: *msg, + ValidNonce: false, + }, nil } -func (a *MsigAPI) msigApproveOrCancelTxnHash(ctx context.Context, operation api.MsigProposeResponse, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { +func (a *MsigAPI) msigApproveOrCancelTxnHash(ctx context.Context, operation api.MsigProposeResponse, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) { if msig == address.Undef { - return cid.Undef, xerrors.Errorf("must provide multisig address") + return nil, xerrors.Errorf("must provide multisig address") } if src == address.Undef { - return cid.Undef, xerrors.Errorf("must provide source address") + return nil, xerrors.Errorf("must provide source address") } if proposer.Protocol() != address.ID { proposerID, err := a.StateAPI.StateLookupID(ctx, proposer, types.EmptyTSK) if err != nil { - return cid.Undef, err + return nil, err } proposer = proposerID } @@ -216,7 +207,7 @@ func (a *MsigAPI) msigApproveOrCancelTxnHash(ctx context.Context, operation api. 
mb, err := a.messageBuilder(ctx, src) if err != nil { - return cid.Undef, err + return nil, err } var msg *types.Message @@ -226,18 +217,16 @@ func (a *MsigAPI) msigApproveOrCancelTxnHash(ctx context.Context, operation api. case api.MsigCancel: msg, err = mb.Cancel(msig, txID, &p) default: - return cid.Undef, xerrors.Errorf("Invalid operation for msigApproveOrCancel") - } - if err != nil { - return cid.Undef, err + return nil, xerrors.Errorf("Invalid operation for msigApproveOrCancel") } - - smsg, err := a.MpoolAPI.MpoolPushMessage(ctx, msg, nil) if err != nil { - return cid.Undef, err + return nil, err } - return smsg.Cid(), nil + return &api.MessagePrototype{ + Message: *msg, + ValidNonce: false, + }, nil } func serializeAddParams(new address.Address, inc bool) ([]byte, error) { diff --git a/node/impl/full/state.go b/node/impl/full/state.go index 9a6c772ae3c..d8545ae13fe 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -6,7 +6,6 @@ import ( "strconv" cid "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" "go.uber.org/fx" "golang.org/x/xerrors" @@ -35,17 +34,16 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/lotus/lib/bufbstore" "github.com/filecoin-project/lotus/node/modules/dtypes" ) type StateModuleAPI interface { MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) + MsigGetPending(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.MsigTransaction, error) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) 
(api.DealCollateralBounds, error) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) - StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) @@ -54,10 +52,14 @@ type StateModuleAPI interface { StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error) + StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) - StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) + StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) } +var _ StateModuleAPI = *new(api.FullNode) + // StateModule provides a default implementation of StateModuleAPI. // It can be swapped out with another implementation through Dependency // Injection (for example with a thin RPC client). @@ -75,7 +77,7 @@ type StateAPI struct { // TODO: the wallet here is only needed because we have the MinerCreateBlock // API attached to the state API. 
It probably should live somewhere better - Wallet api.WalletAPI + Wallet api.Wallet DefWallet wallet.Default StateModuleAPI @@ -91,25 +93,26 @@ func (a *StateAPI) StateNetworkName(ctx context.Context) (dtypes.NetworkName, er } func (a *StateAPI) StateMinerSectors(ctx context.Context, addr address.Address, sectorNos *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + act, err := a.StateManager.LoadActorTsk(ctx, addr, tsk) if err != nil { - return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) + return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - return stmgr.GetMinerSectorSet(ctx, a.StateManager, ts, addr, sectorNos) -} -func (a *StateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { // TODO: only used in cli - ts, err := a.Chain.GetTipSetFromKey(tsk) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { - return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) + return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } + return mas.LoadSectors(sectorNos) +} + +func (a *StateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { // TODO: only used in cli act, err := a.StateManager.LoadActorTsk(ctx, maddr, tsk) if err != nil { return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -119,21 +122,30 @@ func (a *StateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Ad return nil, xerrors.Errorf("merge partition active sets: %w", err) } - return stmgr.GetMinerSectorSet(ctx, a.StateManager, ts, maddr, 
&activeSectors) + return mas.LoadSectors(&activeSectors) } func (m *StateModule) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { - act, err := m.StateManager.LoadActorTsk(ctx, actor, tsk) + ts, err := m.Chain.GetTipSetFromKey(tsk) + if err != nil { + return miner.MinerInfo{}, xerrors.Errorf("failed to load tipset: %w", err) + } + + act, err := m.StateManager.LoadActor(ctx, actor, ts) if err != nil { return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(m.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(m.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor state: %w", err) } - return mas.Info() + info, err := mas.Info() + if err != nil { + return miner.MinerInfo{}, err + } + return info, nil } func (a *StateAPI) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]api.Deadline, error) { @@ -142,7 +154,7 @@ func (a *StateAPI) StateMinerDeadlines(ctx context.Context, m address.Address, t return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -154,13 +166,19 @@ func (a *StateAPI) StateMinerDeadlines(ctx context.Context, m address.Address, t out := make([]api.Deadline, deadlines) if err := mas.ForEachDeadline(func(i uint64, dl miner.Deadline) error { - ps, err := dl.PostSubmissions() + ps, err := dl.PartitionsPoSted() + if err != nil { + return err + } + + l, err := dl.DisputableProofCount() if err != nil { return err } out[i] = api.Deadline{ - PostSubmissions: ps, + PostSubmissions: ps, + DisputableProofCount: l, } return nil }); err != nil { @@ -175,7 +193,7 @@ func (a *StateAPI) 
StateMinerPartitions(ctx context.Context, m address.Address, return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -236,7 +254,7 @@ func (m *StateModule) StateMinerProvingDeadline(ctx context.Context, addr addres return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(m.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(m.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -255,7 +273,7 @@ func (a *StateAPI) StateMinerFaults(ctx context.Context, addr address.Address, t return bitfield.BitField{}, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return bitfield.BitField{}, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -312,7 +330,7 @@ func (a *StateAPI) StateMinerRecoveries(ctx context.Context, addr address.Addres return bitfield.BitField{}, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return bitfield.BitField{}, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -361,7 +379,7 @@ func (a *StateAPI) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid. 
var ts *types.TipSet var err error if tsk == types.EmptyTSK { - mlkp, err := a.StateSearchMsg(ctx, mc) + mlkp, err := a.StateSearchMsg(ctx, types.EmptyTSK, mc, stmgr.LookbackNoLimit, true) if err != nil { return nil, xerrors.Errorf("searching for msg %s: %w", mc, err) } @@ -408,38 +426,12 @@ func (a *StateAPI) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid. }, nil } -func stateForTs(ctx context.Context, ts *types.TipSet, cstore *store.ChainStore, smgr *stmgr.StateManager) (*state.StateTree, error) { - if ts == nil { - ts = cstore.GetHeaviestTipSet() - } - - st, _, err := smgr.TipSetState(ctx, ts) - if err != nil { - return nil, err - } - - buf := bufbstore.NewBufferedBstore(cstore.Blockstore()) - cst := cbor.NewCborStore(buf) - return state.LoadStateTree(cst, st) -} -func (a *StateAPI) stateForTs(ctx context.Context, ts *types.TipSet) (*state.StateTree, error) { - return stateForTs(ctx, ts, a.Chain, a.StateManager) -} -func (m *StateModule) stateForTs(ctx context.Context, ts *types.TipSet) (*state.StateTree, error) { - return stateForTs(ctx, ts, m.Chain, m.StateManager) -} - -func (m *StateModule) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { +func (m *StateModule) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (a *types.Actor, err error) { ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - state, err := m.stateForTs(ctx, ts) - if err != nil { - return nil, xerrors.Errorf("computing tipset state failed: %w", err) - } - - return state.GetActor(actor) + return m.StateManager.LoadActor(ctx, actor, ts) } func (m *StateModule) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { @@ -465,17 +457,12 @@ func (a *StateAPI) StateReadState(ctx context.Context, actor address.Address, ts if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", 
tsk, err) } - state, err := a.stateForTs(ctx, ts) - if err != nil { - return nil, xerrors.Errorf("getting state for tipset: %w", err) - } - - act, err := state.GetActor(actor) + act, err := a.StateManager.LoadActor(ctx, actor, ts) if err != nil { return nil, xerrors.Errorf("getting actor: %w", err) } - blk, err := state.Store.(*cbor.BasicIpldStore).Blocks.Get(act.Head) + blk, err := a.Chain.StateBlockstore().Get(act.Head) if err != nil { return nil, xerrors.Errorf("getting actor head: %w", err) } @@ -487,12 +474,32 @@ func (a *StateAPI) StateReadState(ctx context.Context, actor address.Address, ts return &api.ActorState{ Balance: act.Balance, + Code: act.Code, State: oif, }, nil } +func (a *StateAPI) StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) { + act, err := a.StateGetActor(ctx, toAddr, tsk) + if err != nil { + return nil, xerrors.Errorf("getting actor: %w", err) + } + + paramType, err := stmgr.GetParamType(act.Code, method) + if err != nil { + return nil, xerrors.Errorf("getting params type: %w", err) + } + + if err = paramType.UnmarshalCBOR(bytes.NewReader(params)); err != nil { + return nil, err + } + + return paramType, nil +} + // This is on StateAPI because miner.Miner requires this, and MinerAPI requires miner.Miner func (a *StateAPI) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) { + // XXX: Gets the state by computing the tipset state, instead of looking at the parent. 
return stmgr.MinerGetBaseInfo(ctx, a.StateManager, a.Beacon, tsk, epoch, maddr, a.ProofVerifier) } @@ -514,28 +521,22 @@ func (a *StateAPI) MinerCreateBlock(ctx context.Context, bt *api.BlockTemplate) return &out, nil } -func (m *StateModule) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) { - return stateWaitMsgLimited(ctx, m.StateManager, m.Chain, msg, confidence, stmgr.LookbackNoLimit) -} -func (a *StateAPI) StateWaitMsgLimited(ctx context.Context, msg cid.Cid, confidence uint64, lookbackLimit abi.ChainEpoch) (*api.MsgLookup, error) { - return stateWaitMsgLimited(ctx, a.StateManager, a.Chain, msg, confidence, lookbackLimit) -} -func stateWaitMsgLimited(ctx context.Context, smgr *stmgr.StateManager, cstore *store.ChainStore, msg cid.Cid, confidence uint64, lookbackLimit abi.ChainEpoch) (*api.MsgLookup, error) { - ts, recpt, found, err := smgr.WaitForMessage(ctx, msg, confidence, lookbackLimit) +func (m *StateModule) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { + ts, recpt, found, err := m.StateManager.WaitForMessage(ctx, msg, confidence, lookbackLimit, allowReplaced) if err != nil { return nil, err } var returndec interface{} if recpt.ExitCode == 0 && len(recpt.Return) > 0 { - cmsg, err := cstore.GetCMessage(msg) + cmsg, err := m.Chain.GetCMessage(msg) if err != nil { return nil, xerrors.Errorf("failed to load message after successful receipt search: %w", err) } vmsg := cmsg.VMMessage() - t, err := stmgr.GetReturnType(ctx, smgr, vmsg.To, vmsg.Method, ts) + t, err := stmgr.GetReturnType(ctx, m.StateManager, vmsg.To, vmsg.Method, ts) if err != nil { return nil, xerrors.Errorf("failed to get return type: %w", err) } @@ -556,8 +557,13 @@ func stateWaitMsgLimited(ctx context.Context, smgr *stmgr.StateManager, cstore * }, nil } -func (a *StateAPI) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) { - ts, 
recpt, found, err := a.StateManager.SearchForMessage(ctx, msg) +func (m *StateModule) StateSearchMsg(ctx context.Context, tsk types.TipSetKey, msg cid.Cid, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { + fromTs, err := m.Chain.GetTipSetFromKey(tsk) + if err != nil { + return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) + } + + ts, recpt, found, err := m.StateManager.SearchForMessage(ctx, fromTs, msg, lookbackLimit, allowReplaced) if err != nil { return nil, err } @@ -573,14 +579,6 @@ func (a *StateAPI) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLoo return nil, nil } -func (m *StateModule) StateGetReceipt(ctx context.Context, msg cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) - if err != nil { - return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) - } - return m.StateManager.GetReceipt(ctx, msg, ts) -} - func (m *StateModule) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { @@ -695,7 +693,7 @@ func (m *StateModule) StateMarketStorageDeal(ctx context.Context, dealId abi.Dea } func (a *StateAPI) StateChangedActors(ctx context.Context, old cid.Cid, new cid.Cid) (map[string]types.Actor, error) { - store := a.Chain.Store(ctx) + store := a.Chain.ActorStore(ctx) oldTree, err := state.LoadStateTree(store, old) if err != nil { @@ -707,7 +705,7 @@ func (a *StateAPI) StateChangedActors(ctx context.Context, old cid.Cid, new cid. 
return nil, xerrors.Errorf("failed to load new state tree: %w", err) } - return state.Diff(oldTree, newTree) + return state.Diff(ctx, oldTree, newTree) } func (a *StateAPI) StateMinerSectorCount(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MinerSectors, error) { @@ -715,7 +713,7 @@ func (a *StateAPI) StateMinerSectorCount(ctx context.Context, addr address.Addre if err != nil { return api.MinerSectors{}, err } - mas, err := miner.Load(a.Chain.Store(ctx), act) + mas, err := miner.Load(a.Chain.ActorStore(ctx), act) if err != nil { return api.MinerSectors{}, err } @@ -760,17 +758,19 @@ func (a *StateAPI) StateSectorPreCommitInfo(ctx context.Context, maddr address.A pci, err := stmgr.PreCommitInfo(ctx, a.StateManager, maddr, n, ts) if err != nil { return miner.SectorPreCommitOnChainInfo{}, err + } else if pci == nil { + return miner.SectorPreCommitOnChainInfo{}, xerrors.Errorf("precommit info is not exists") } return *pci, err } -func (a *StateAPI) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) +func (m *StateModule) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) { + ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - return stmgr.MinerSectorInfo(ctx, a.StateManager, maddr, n, ts) + return stmgr.MinerSectorInfo(ctx, m.StateManager, maddr, n, ts) } func (a *StateAPI) StateSectorExpiration(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorExpiration, error) { @@ -778,7 +778,7 @@ func (a *StateAPI) StateSectorExpiration(ctx context.Context, maddr address.Addr if err != nil { return nil, err } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := 
miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, err } @@ -790,7 +790,7 @@ func (a *StateAPI) StateSectorPartition(ctx context.Context, maddr address.Addre if err != nil { return nil, err } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, err } @@ -808,8 +808,31 @@ func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatc if match.To == address.Undef && match.From == address.Undef { return nil, xerrors.Errorf("must specify at least To or From in message filter") + } else if match.To != address.Undef { + _, err := a.StateLookupID(ctx, match.To, tsk) + + // if the recipient doesn't exist at the start point, we're not gonna find any matches + if xerrors.Is(err, types.ErrActorNotFound) { + return nil, nil + } + + if err != nil { + return nil, xerrors.Errorf("looking up match.To: %w", err) + } + } else if match.From != address.Undef { + _, err := a.StateLookupID(ctx, match.From, tsk) + + // if the sender doesn't exist at the start point, we're not gonna find any matches + if xerrors.Is(err, types.ErrActorNotFound) { + return nil, nil + } + + if err != nil { + return nil, xerrors.Errorf("looking up match.From: %w", err) + } } + // TODO: This should probably match on both ID and robust address, no? matchFunc := func(msg *types.Message) bool { if match.From != address.Undef && match.From != msg.From { return false @@ -876,7 +899,7 @@ func (m *StateModule) MsigGetAvailableBalance(ctx context.Context, addr address. 
if err != nil { return types.EmptyInt, xerrors.Errorf("failed to load multisig actor: %w", err) } - msas, err := multisig.Load(m.Chain.Store(ctx), act) + msas, err := multisig.Load(m.Chain.ActorStore(ctx), act) if err != nil { return types.EmptyInt, xerrors.Errorf("failed to load multisig actor state: %w", err) } @@ -898,7 +921,7 @@ func (a *StateAPI) MsigGetVestingSchedule(ctx context.Context, addr address.Addr return api.EmptyVesting, xerrors.Errorf("failed to load multisig actor: %w", err) } - msas, err := multisig.Load(a.Chain.Store(ctx), act) + msas, err := multisig.Load(a.Chain.ActorStore(ctx), act) if err != nil { return api.EmptyVesting, xerrors.Errorf("failed to load multisig actor state: %w", err) } @@ -947,7 +970,7 @@ func (m *StateModule) MsigGetVested(ctx context.Context, addr address.Address, s return types.EmptyInt, xerrors.Errorf("failed to load multisig actor at end epoch: %w", err) } - msas, err := multisig.Load(m.Chain.Store(ctx), act) + msas, err := multisig.Load(m.Chain.ActorStore(ctx), act) if err != nil { return types.EmptyInt, xerrors.Errorf("failed to load multisig actor state: %w", err) } @@ -965,6 +988,40 @@ func (m *StateModule) MsigGetVested(ctx context.Context, addr address.Address, s return types.BigSub(startLk, endLk), nil } +func (m *StateModule) MsigGetPending(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.MsigTransaction, error) { + ts, err := m.Chain.GetTipSetFromKey(tsk) + if err != nil { + return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) + } + + act, err := m.StateManager.LoadActor(ctx, addr, ts) + if err != nil { + return nil, xerrors.Errorf("failed to load multisig actor: %w", err) + } + msas, err := multisig.Load(m.Chain.ActorStore(ctx), act) + if err != nil { + return nil, xerrors.Errorf("failed to load multisig actor state: %w", err) + } + + var out = []*api.MsigTransaction{} + if err := msas.ForEachPendingTxn(func(id int64, txn multisig.Transaction) error { + out = append(out, 
&api.MsigTransaction{ + ID: id, + To: txn.To, + Value: txn.Value, + Method: txn.Method, + Params: txn.Params, + + Approved: txn.Approved, + }) + return nil + }); err != nil { + return nil, err + } + + return out, nil +} + var initialPledgeNum = types.NewInt(110) var initialPledgeDen = types.NewInt(100) @@ -984,7 +1041,7 @@ func (a *StateAPI) StateMinerPreCommitDepositForPower(ctx context.Context, maddr return types.EmptyInt, xerrors.Errorf("failed to get resolve size: %w", err) } - store := a.Chain.Store(ctx) + store := a.Chain.ActorStore(ctx) var sectorWeight abi.StoragePower if act, err := state.GetActor(market.Address); err != nil { @@ -1045,13 +1102,13 @@ func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr return types.EmptyInt, xerrors.Errorf("failed to get resolve size: %w", err) } - store := a.Chain.Store(ctx) + store := a.Chain.ActorStore(ctx) var sectorWeight abi.StoragePower if act, err := state.GetActor(market.Address); err != nil { - return types.EmptyInt, xerrors.Errorf("loading miner actor %s: %w", maddr, err) + return types.EmptyInt, xerrors.Errorf("loading market actor: %w", err) } else if s, err := market.Load(store, act); err != nil { - return types.EmptyInt, xerrors.Errorf("loading market actor state %s: %w", maddr, err) + return types.EmptyInt, xerrors.Errorf("loading market actor state: %w", err) } else if w, vw, err := s.VerifyDealsForActivation(maddr, pci.DealIDs, ts.Height(), pci.Expiration); err != nil { return types.EmptyInt, xerrors.Errorf("verifying deals for activation: %w", err) } else { @@ -1065,7 +1122,7 @@ func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr pledgeCollateral abi.TokenAmount ) if act, err := state.GetActor(power.Address); err != nil { - return types.EmptyInt, xerrors.Errorf("loading miner actor: %w", err) + return types.EmptyInt, xerrors.Errorf("loading power actor: %w", err) } else if s, err := power.Load(store, act); err != nil { return types.EmptyInt, 
xerrors.Errorf("loading power actor state: %w", err) } else if p, err := s.TotalPowerSmoothed(); err != nil { @@ -1079,7 +1136,7 @@ func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr rewardActor, err := state.GetActor(reward.Address) if err != nil { - return types.EmptyInt, xerrors.Errorf("loading miner actor: %w", err) + return types.EmptyInt, xerrors.Errorf("loading reward actor: %w", err) } rewardState, err := reward.Load(store, rewardActor) @@ -1116,7 +1173,7 @@ func (a *StateAPI) StateMinerAvailableBalance(ctx context.Context, maddr address return types.EmptyInt, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return types.EmptyInt, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -1145,7 +1202,7 @@ func (a *StateAPI) StateMinerSectorAllocated(ctx context.Context, maddr address. 
return false, xerrors.Errorf("failed to load miner actor: %w", err) } - mas, err := miner.Load(a.StateManager.ChainStore().Store(ctx), act) + mas, err := miner.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return false, xerrors.Errorf("failed to load miner actor state: %w", err) } @@ -1168,7 +1225,7 @@ func (a *StateAPI) StateVerifierStatus(ctx context.Context, addr address.Address return nil, err } - vrs, err := verifreg.Load(a.StateManager.ChainStore().Store(ctx), act) + vrs, err := verifreg.Load(a.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load verified registry state: %w", err) } @@ -1199,7 +1256,7 @@ func (m *StateModule) StateVerifiedClientStatus(ctx context.Context, addr addres return nil, err } - vrs, err := verifreg.Load(m.StateManager.ChainStore().Store(ctx), act) + vrs, err := verifreg.Load(m.StateManager.ChainStore().ActorStore(ctx), act) if err != nil { return nil, xerrors.Errorf("failed to load verified registry state: %w", err) } @@ -1221,7 +1278,7 @@ func (a *StateAPI) StateVerifiedRegistryRootKey(ctx context.Context, tsk types.T return address.Undef, err } - vst, err := verifreg.Load(a.StateManager.ChainStore().Store(ctx), vact) + vst, err := verifreg.Load(a.StateManager.ChainStore().ActorStore(ctx), vact) if err != nil { return address.Undef, err } @@ -1250,12 +1307,12 @@ func (m *StateModule) StateDealProviderCollateralBounds(ctx context.Context, siz return api.DealCollateralBounds{}, xerrors.Errorf("failed to load reward actor: %w", err) } - pst, err := power.Load(m.StateManager.ChainStore().Store(ctx), pact) + pst, err := power.Load(m.StateManager.ChainStore().ActorStore(ctx), pact) if err != nil { return api.DealCollateralBounds{}, xerrors.Errorf("failed to load power actor state: %w", err) } - rst, err := reward.Load(m.StateManager.ChainStore().Store(ctx), ract) + rst, err := reward.Load(m.StateManager.ChainStore().ActorStore(ctx), ract) if err != nil { return 
api.DealCollateralBounds{}, xerrors.Errorf("failed to load reward actor state: %w", err) } @@ -1294,11 +1351,11 @@ func (a *StateAPI) StateCirculatingSupply(ctx context.Context, tsk types.TipSetK return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - sTree, err := a.stateForTs(ctx, ts) + sTree, err := a.StateManager.ParentState(ts) if err != nil { return types.EmptyInt, err } - return a.StateManager.GetCirculatingSupply(ctx, ts.Height(), sTree) + return a.StateManager.GetCirculatingSupply(ctx, ts.Height()-1, sTree) } func (a *StateAPI) StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) { @@ -1315,7 +1372,7 @@ func stateVMCirculatingSupplyInternal( return api.CirculatingSupply{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - sTree, err := stateForTs(ctx, ts, cstore, smgr) + sTree, err := smgr.ParentState(ts) if err != nil { return api.CirculatingSupply{}, err } @@ -1329,5 +1386,7 @@ func (m *StateModule) StateNetworkVersion(ctx context.Context, tsk types.TipSetK return network.VersionMax, xerrors.Errorf("loading tipset %s: %w", tsk, err) } + // TODO: Height-1 to be consistent with the rest of the APIs? + // But that's likely going to break a bunch of stuff. 
return m.StateManager.GetNtwkVersion(ctx, ts.Height()), nil } diff --git a/node/impl/full/sync.go b/node/impl/full/sync.go index 05d4c9cb740..2c697483bdd 100644 --- a/node/impl/full/sync.go +++ b/node/impl/full/sync.go @@ -37,13 +37,14 @@ func (a *SyncAPI) SyncState(ctx context.Context) (*api.SyncState, error) { for i := range states { ss := &states[i] out.ActiveSyncs = append(out.ActiveSyncs, api.ActiveSync{ - Base: ss.Base, - Target: ss.Target, - Stage: ss.Stage, - Height: ss.Height, - Start: ss.Start, - End: ss.End, - Message: ss.Message, + WorkerID: ss.WorkerID, + Base: ss.Base, + Target: ss.Target, + Stage: ss.Stage, + Height: ss.Height, + Start: ss.Start, + End: ss.End, + Message: ss.Message, }) } return out, nil @@ -103,7 +104,7 @@ func (a *SyncAPI) SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHe func (a *SyncAPI) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error { log.Warnf("Marking tipset %s as checkpoint", tsk) - return a.Syncer.SetCheckpoint(tsk) + return a.Syncer.SyncCheckpoint(ctx, tsk) } func (a *SyncAPI) SyncMarkBad(ctx context.Context, bcid cid.Cid) error { diff --git a/node/impl/full/wallet.go b/node/impl/full/wallet.go index 05a7a576876..ae2550d77c7 100644 --- a/node/impl/full/wallet.go +++ b/node/impl/full/wallet.go @@ -22,7 +22,7 @@ type WalletAPI struct { StateManagerAPI stmgr.StateManagerAPI Default wallet.Default - api.WalletAPI + api.Wallet } func (a *WalletAPI) WalletBalance(ctx context.Context, addr address.Address) (types.BigInt, error) { @@ -40,7 +40,7 @@ func (a *WalletAPI) WalletSign(ctx context.Context, k address.Address, msg []byt if err != nil { return nil, xerrors.Errorf("failed to resolve ID address: %w", keyAddr) } - return a.WalletAPI.WalletSign(ctx, keyAddr, msg, api.MsgMeta{ + return a.Wallet.WalletSign(ctx, keyAddr, msg, api.MsgMeta{ Type: api.MTUnknown, }) } @@ -56,7 +56,7 @@ func (a *WalletAPI) WalletSignMessage(ctx context.Context, k address.Address, ms return nil, xerrors.Errorf("serializing 
message: %w", err) } - sig, err := a.WalletAPI.WalletSign(ctx, k, mb.Cid().Bytes(), api.MsgMeta{ + sig, err := a.Wallet.WalletSign(ctx, keyAddr, mb.Cid().Bytes(), api.MsgMeta{ Type: api.MTChainMsg, Extra: mb.RawData(), }) diff --git a/node/impl/market/market.go b/node/impl/market/market.go index 26d4a9edc2e..b62f2b40e4f 100644 --- a/node/impl/market/market.go +++ b/node/impl/market/market.go @@ -3,20 +3,57 @@ package market import ( "context" + "github.com/ipfs/go-cid" "go.uber.org/fx" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/actors" + marketactor "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/market" "github.com/filecoin-project/lotus/chain/types" - "github.com/ipfs/go-cid" + "github.com/filecoin-project/lotus/node/impl/full" ) type MarketAPI struct { fx.In - FMgr *market.FundMgr + full.MpoolAPI + FMgr *market.FundManager +} + +func (a *MarketAPI) MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) { + params, err := actors.SerializeParams(&addr) + if err != nil { + return cid.Undef, err + } + + smsg, aerr := a.MpoolPushMessage(ctx, &types.Message{ + To: marketactor.Address, + From: wallet, + Value: amt, + Method: marketactor.Methods.AddBalance, + Params: params, + }, nil) + + if aerr != nil { + return cid.Undef, aerr + } + + return smsg.Cid(), nil +} + +func (a *MarketAPI) MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) { + return a.FMgr.GetReserved(addr), nil +} + +func (a *MarketAPI) MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) { + return a.FMgr.Reserve(ctx, wallet, addr, amt) +} + +func (a *MarketAPI) MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error { + return a.FMgr.Release(addr, amt) } -func (a *MarketAPI) MarketEnsureAvailable(ctx context.Context, addr, wallet 
address.Address, amt types.BigInt) (cid.Cid, error) { - return a.FMgr.EnsureAvailable(ctx, addr, wallet, amt) +func (a *MarketAPI) MarketWithdraw(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) { + return a.FMgr.Withdraw(ctx, wallet, addr, amt) } diff --git a/node/impl/net/conngater.go b/node/impl/net/conngater.go new file mode 100644 index 00000000000..07e9784d977 --- /dev/null +++ b/node/impl/net/conngater.go @@ -0,0 +1,136 @@ +package net + +import ( + "context" + "net" + + "golang.org/x/xerrors" + + logging "github.com/ipfs/go-log/v2" + manet "github.com/multiformats/go-multiaddr/net" + + "github.com/filecoin-project/lotus/api" +) + +var cLog = logging.Logger("conngater") + +func (a *NetAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error { + for _, p := range acl.Peers { + err := a.ConnGater.BlockPeer(p) + if err != nil { + return xerrors.Errorf("error blocking peer %s: %w", p, err) + } + + for _, c := range a.Host.Network().ConnsToPeer(p) { + err = c.Close() + if err != nil { + // just log this, don't fail + cLog.Warnf("error closing connection to %s: %s", p, err) + } + } + } + + for _, addr := range acl.IPAddrs { + ip := net.ParseIP(addr) + if ip == nil { + return xerrors.Errorf("error parsing IP address %s", addr) + } + + err := a.ConnGater.BlockAddr(ip) + if err != nil { + return xerrors.Errorf("error blocking IP address %s: %w", addr, err) + } + + for _, c := range a.Host.Network().Conns() { + remote := c.RemoteMultiaddr() + remoteIP, err := manet.ToIP(remote) + if err != nil { + continue + } + + if ip.Equal(remoteIP) { + err = c.Close() + if err != nil { + // just log this, don't fail + cLog.Warnf("error closing connection to %s: %s", remoteIP, err) + } + } + } + } + + for _, subnet := range acl.IPSubnets { + _, cidr, err := net.ParseCIDR(subnet) + if err != nil { + return xerrors.Errorf("error parsing subnet %s: %w", subnet, err) + } + + err = a.ConnGater.BlockSubnet(cidr) + if err != nil { + return 
xerrors.Errorf("error blocking subunet %s: %w", subnet, err) + } + + for _, c := range a.Host.Network().Conns() { + remote := c.RemoteMultiaddr() + remoteIP, err := manet.ToIP(remote) + if err != nil { + continue + } + + if cidr.Contains(remoteIP) { + err = c.Close() + if err != nil { + // just log this, don't fail + cLog.Warnf("error closing connection to %s: %s", remoteIP, err) + } + } + } + } + + return nil +} + +func (a *NetAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error { + for _, p := range acl.Peers { + err := a.ConnGater.UnblockPeer(p) + if err != nil { + return xerrors.Errorf("error unblocking peer %s: %w", p, err) + } + } + + for _, addr := range acl.IPAddrs { + ip := net.ParseIP(addr) + if ip == nil { + return xerrors.Errorf("error parsing IP address %s", addr) + } + + err := a.ConnGater.UnblockAddr(ip) + if err != nil { + return xerrors.Errorf("error unblocking IP address %s: %w", addr, err) + } + } + + for _, subnet := range acl.IPSubnets { + _, cidr, err := net.ParseCIDR(subnet) + if err != nil { + return xerrors.Errorf("error parsing subnet %s: %w", subnet, err) + } + + err = a.ConnGater.UnblockSubnet(cidr) + if err != nil { + return xerrors.Errorf("error unblocking subunet %s: %w", subnet, err) + } + } + + return nil +} + +func (a *NetAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) { + result.Peers = a.ConnGater.ListBlockedPeers() + for _, ip := range a.ConnGater.ListBlockedAddrs() { + result.IPAddrs = append(result.IPAddrs, ip.String()) + } + for _, subnet := range a.ConnGater.ListBlockedSubnets() { + result.IPSubnets = append(result.IPSubnets, subnet.String()) + } + return +} diff --git a/node/impl/net/net.go b/node/impl/net/net.go new file mode 100644 index 00000000000..a1003ffe5f2 --- /dev/null +++ b/node/impl/net/net.go @@ -0,0 +1,183 @@ +package net + +import ( + "context" + "sort" + "strings" + + "go.uber.org/fx" + + "github.com/libp2p/go-libp2p-core/host" + 
"github.com/libp2p/go-libp2p-core/metrics" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" + swarm "github.com/libp2p/go-libp2p-swarm" + basichost "github.com/libp2p/go-libp2p/p2p/host/basic" + "github.com/libp2p/go-libp2p/p2p/net/conngater" + ma "github.com/multiformats/go-multiaddr" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/lp2p" +) + +type NetAPI struct { + fx.In + + RawHost lp2p.RawHost + Host host.Host + Router lp2p.BaseIpfsRouting + ConnGater *conngater.BasicConnectionGater + Reporter metrics.Reporter + Sk *dtypes.ScoreKeeper +} + +func (a *NetAPI) ID(context.Context) (peer.ID, error) { + return a.Host.ID(), nil +} + +func (a *NetAPI) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) { + return a.Host.Network().Connectedness(pid), nil +} + +func (a *NetAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) { + scores := a.Sk.Get() + out := make([]api.PubsubScore, len(scores)) + i := 0 + for k, v := range scores { + out[i] = api.PubsubScore{ID: k, Score: v} + i++ + } + + sort.Slice(out, func(i, j int) bool { + return strings.Compare(string(out[i].ID), string(out[j].ID)) > 0 + }) + + return out, nil +} + +func (a *NetAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) { + conns := a.Host.Network().Conns() + out := make([]peer.AddrInfo, len(conns)) + + for i, conn := range conns { + out[i] = peer.AddrInfo{ + ID: conn.RemotePeer(), + Addrs: []ma.Multiaddr{ + conn.RemoteMultiaddr(), + }, + } + } + + return out, nil +} + +func (a *NetAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) { + info := &api.ExtendedPeerInfo{ID: p} + + agent, err := a.Host.Peerstore().Get(p, "AgentVersion") + if err == nil { + info.Agent = agent.(string) + } + + for _, a := range a.Host.Peerstore().Addrs(p) { + info.Addrs = 
append(info.Addrs, a.String()) + } + sort.Strings(info.Addrs) + + protocols, err := a.Host.Peerstore().GetProtocols(p) + if err == nil { + sort.Strings(protocols) + info.Protocols = protocols + } + + if cm := a.Host.ConnManager().GetTagInfo(p); cm != nil { + info.ConnMgrMeta = &api.ConnMgrInfo{ + FirstSeen: cm.FirstSeen, + Value: cm.Value, + Tags: cm.Tags, + Conns: cm.Conns, + } + } + + return info, nil +} + +func (a *NetAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error { + if swrm, ok := a.Host.Network().(*swarm.Swarm); ok { + swrm.Backoff().Clear(p.ID) + } + + return a.Host.Connect(ctx, p) +} + +func (a *NetAPI) NetAddrsListen(context.Context) (peer.AddrInfo, error) { + return peer.AddrInfo{ + ID: a.Host.ID(), + Addrs: a.Host.Addrs(), + }, nil +} + +func (a *NetAPI) NetDisconnect(ctx context.Context, p peer.ID) error { + return a.Host.Network().ClosePeer(p) +} + +func (a *NetAPI) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) { + return a.Router.FindPeer(ctx, p) +} + +func (a *NetAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) { + autonat := a.RawHost.(*basichost.BasicHost).GetAutoNat() + + if autonat == nil { + return api.NatInfo{ + Reachability: network.ReachabilityUnknown, + }, nil + } + + var maddr string + if autonat.Status() == network.ReachabilityPublic { + pa, err := autonat.PublicAddr() + if err != nil { + return api.NatInfo{}, err + } + maddr = pa.String() + } + + return api.NatInfo{ + Reachability: autonat.Status(), + PublicAddr: maddr, + }, nil +} + +func (a *NetAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) { + ag, err := a.Host.Peerstore().Get(p, "AgentVersion") + if err != nil { + return "", err + } + + if ag == nil { + return "unknown", nil + } + + return ag.(string), nil +} + +func (a *NetAPI) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) { + return a.Reporter.GetBandwidthTotals(), nil +} + +func (a *NetAPI) NetBandwidthStatsByPeer(ctx context.Context) 
(map[string]metrics.Stats, error) { + out := make(map[string]metrics.Stats) + for p, s := range a.Reporter.GetBandwidthByPeer() { + out[p.String()] = s + } + return out, nil +} + +func (a *NetAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) { + return a.Reporter.GetBandwidthByProtocol(), nil +} + +var _ api.Net = &NetAPI{} diff --git a/node/impl/remoteworker.go b/node/impl/remoteworker.go index b6ef43c7c59..d27b3baff42 100644 --- a/node/impl/remoteworker.go +++ b/node/impl/remoteworker.go @@ -16,7 +16,7 @@ import ( ) type remoteWorker struct { - api.WorkerAPI + api.Worker closer jsonrpc.ClientCloser } @@ -33,11 +33,21 @@ func connectRemoteWorker(ctx context.Context, fa api.Common, url string) (*remot headers := http.Header{} headers.Add("Authorization", "Bearer "+string(token)) - wapi, closer, err := client.NewWorkerRPC(context.TODO(), url, headers) + wapi, closer, err := client.NewWorkerRPCV0(context.TODO(), url, headers) if err != nil { return nil, xerrors.Errorf("creating jsonrpc client: %w", err) } + wver, err := wapi.Version(ctx) + if err != nil { + closer() + return nil, err + } + + if !wver.EqMajorMinor(api.WorkerAPIVersion0) { + return nil, xerrors.Errorf("unsupported worker api version: %s (expected %s)", wver, api.WorkerAPIVersion0) + } + return &remoteWorker{wapi, closer}, nil } diff --git a/node/impl/storminer.go b/node/impl/storminer.go index dee48f5de2a..9db6a3775ce 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -8,10 +8,16 @@ import ( "strconv" "time" + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/gen" + + "github.com/filecoin-project/lotus/build" "github.com/google/uuid" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/peer" + "go.uber.org/fx" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -19,72 +25,89 @@ import ( 
"github.com/filecoin-project/go-fil-markets/piecestore" retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apistruct" + apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/markets/storageadapter" "github.com/filecoin-project/lotus/miner" - "github.com/filecoin-project/lotus/node/impl/common" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/storage" "github.com/filecoin-project/lotus/storage/sectorblocks" + sto "github.com/filecoin-project/specs-storage/storage" ) type StorageMinerAPI struct { - common.CommonAPI - - ProofsConfig *ffiwrapper.Config - SectorBlocks *sectorblocks.SectorBlocks - - PieceStore dtypes.ProviderPieceStore - StorageProvider storagemarket.StorageProvider - RetrievalProvider retrievalmarket.RetrievalProvider - Miner *storage.Miner - BlockMiner *miner.Miner - Full api.FullNode - StorageMgr *sectorstorage.Manager `optional:"true"` - IStorageMgr sectorstorage.SectorManager - *stores.Index - storiface.WorkerReturn - DataTransfer dtypes.ProviderDataTransfer - Host host.Host - - DS dtypes.MetadataDS - - 
ConsiderOnlineStorageDealsConfigFunc dtypes.ConsiderOnlineStorageDealsConfigFunc - SetConsiderOnlineStorageDealsConfigFunc dtypes.SetConsiderOnlineStorageDealsConfigFunc - ConsiderOnlineRetrievalDealsConfigFunc dtypes.ConsiderOnlineRetrievalDealsConfigFunc - SetConsiderOnlineRetrievalDealsConfigFunc dtypes.SetConsiderOnlineRetrievalDealsConfigFunc - StorageDealPieceCidBlocklistConfigFunc dtypes.StorageDealPieceCidBlocklistConfigFunc - SetStorageDealPieceCidBlocklistConfigFunc dtypes.SetStorageDealPieceCidBlocklistConfigFunc - ConsiderOfflineStorageDealsConfigFunc dtypes.ConsiderOfflineStorageDealsConfigFunc - SetConsiderOfflineStorageDealsConfigFunc dtypes.SetConsiderOfflineStorageDealsConfigFunc - ConsiderOfflineRetrievalDealsConfigFunc dtypes.ConsiderOfflineRetrievalDealsConfigFunc - SetConsiderOfflineRetrievalDealsConfigFunc dtypes.SetConsiderOfflineRetrievalDealsConfigFunc - SetSealingConfigFunc dtypes.SetSealingConfigFunc - GetSealingConfigFunc dtypes.GetSealingConfigFunc - GetExpectedSealDurationFunc dtypes.GetExpectedSealDurationFunc - SetExpectedSealDurationFunc dtypes.SetExpectedSealDurationFunc -} - -func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) { - if !auth.HasPerm(r.Context(), nil, apistruct.PermAdmin) { - w.WriteHeader(401) - _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) - return - } - - sm.StorageMgr.ServeHTTP(w, r) + fx.In + + api.Common + api.Net + + Full api.FullNode + LocalStore *stores.Local + RemoteStore *stores.Remote + + // Markets + PieceStore dtypes.ProviderPieceStore `optional:"true"` + StorageProvider storagemarket.StorageProvider `optional:"true"` + RetrievalProvider retrievalmarket.RetrievalProvider `optional:"true"` + DataTransfer dtypes.ProviderDataTransfer `optional:"true"` + DealPublisher *storageadapter.DealPublisher `optional:"true"` + SectorBlocks *sectorblocks.SectorBlocks `optional:"true"` + Host host.Host `optional:"true"` + + // Miner / storage 
+ Miner *storage.Miner `optional:"true"` + BlockMiner *miner.Miner `optional:"true"` + StorageMgr *sectorstorage.Manager `optional:"true"` + IStorageMgr sectorstorage.SectorManager `optional:"true"` + stores.SectorIndex + storiface.WorkerReturn `optional:"true"` + AddrSel *storage.AddressSelector + + Epp gen.WinningPoStProver `optional:"true"` + DS dtypes.MetadataDS + + ConsiderOnlineStorageDealsConfigFunc dtypes.ConsiderOnlineStorageDealsConfigFunc `optional:"true"` + SetConsiderOnlineStorageDealsConfigFunc dtypes.SetConsiderOnlineStorageDealsConfigFunc `optional:"true"` + ConsiderOnlineRetrievalDealsConfigFunc dtypes.ConsiderOnlineRetrievalDealsConfigFunc `optional:"true"` + SetConsiderOnlineRetrievalDealsConfigFunc dtypes.SetConsiderOnlineRetrievalDealsConfigFunc `optional:"true"` + StorageDealPieceCidBlocklistConfigFunc dtypes.StorageDealPieceCidBlocklistConfigFunc `optional:"true"` + SetStorageDealPieceCidBlocklistConfigFunc dtypes.SetStorageDealPieceCidBlocklistConfigFunc `optional:"true"` + ConsiderOfflineStorageDealsConfigFunc dtypes.ConsiderOfflineStorageDealsConfigFunc `optional:"true"` + SetConsiderOfflineStorageDealsConfigFunc dtypes.SetConsiderOfflineStorageDealsConfigFunc `optional:"true"` + ConsiderOfflineRetrievalDealsConfigFunc dtypes.ConsiderOfflineRetrievalDealsConfigFunc `optional:"true"` + SetConsiderOfflineRetrievalDealsConfigFunc dtypes.SetConsiderOfflineRetrievalDealsConfigFunc `optional:"true"` + ConsiderVerifiedStorageDealsConfigFunc dtypes.ConsiderVerifiedStorageDealsConfigFunc `optional:"true"` + SetConsiderVerifiedStorageDealsConfigFunc dtypes.SetConsiderVerifiedStorageDealsConfigFunc `optional:"true"` + ConsiderUnverifiedStorageDealsConfigFunc dtypes.ConsiderUnverifiedStorageDealsConfigFunc `optional:"true"` + SetConsiderUnverifiedStorageDealsConfigFunc dtypes.SetConsiderUnverifiedStorageDealsConfigFunc `optional:"true"` + SetSealingConfigFunc dtypes.SetSealingConfigFunc `optional:"true"` + GetSealingConfigFunc 
dtypes.GetSealingConfigFunc `optional:"true"` + GetExpectedSealDurationFunc dtypes.GetExpectedSealDurationFunc `optional:"true"` + SetExpectedSealDurationFunc dtypes.SetExpectedSealDurationFunc `optional:"true"` +} + +func (sm *StorageMinerAPI) ServeRemote(perm bool) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + if perm == true { + if !auth.HasPerm(r.Context(), nil, api.PermAdmin) { + w.WriteHeader(401) + _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) + return + } + } + + sm.StorageMgr.ServeHTTP(w, r) + } } func (sm *StorageMinerAPI) WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) { @@ -115,65 +138,36 @@ func (sm *StorageMinerAPI) ActorSectorSize(ctx context.Context, addr address.Add return mi.SectorSize, nil } -func (sm *StorageMinerAPI) PledgeSector(ctx context.Context) error { - return sm.Miner.PledgeSector() -} - -func (sm *StorageMinerAPI) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { - info, err := sm.Miner.GetSectorInfo(sid) +func (sm *StorageMinerAPI) PledgeSector(ctx context.Context) (abi.SectorID, error) { + sr, err := sm.Miner.PledgeSector(ctx) if err != nil { - return api.SectorInfo{}, err + return abi.SectorID{}, err } - deals := make([]abi.DealID, len(info.Pieces)) - for i, piece := range info.Pieces { - if piece.DealInfo == nil { - continue + // wait for the sector to enter the Packing state + // TODO: instead of polling implement some pubsub-type thing in storagefsm + for { + info, err := sm.Miner.SectorsStatus(ctx, sr.ID.Number, false) + if err != nil { + return abi.SectorID{}, xerrors.Errorf("getting pledged sector info: %w", err) } - deals[i] = piece.DealInfo.DealID - } - log := make([]api.SectorLog, len(info.Log)) - for i, l := range info.Log { - log[i] = api.SectorLog{ - Kind: l.Kind, - Timestamp: l.Timestamp, - Trace: l.Trace, - Message: l.Message, + if 
info.State != api.SectorState(sealing.UndefinedSectorState) { + return sr.ID, nil + } + + select { + case <-time.After(10 * time.Millisecond): + case <-ctx.Done(): + return abi.SectorID{}, ctx.Err() } } +} - sInfo := api.SectorInfo{ - SectorID: sid, - State: api.SectorState(info.State), - CommD: info.CommD, - CommR: info.CommR, - Proof: info.Proof, - Deals: deals, - Ticket: api.SealTicket{ - Value: info.TicketValue, - Epoch: info.TicketEpoch, - }, - Seed: api.SealSeed{ - Value: info.SeedValue, - Epoch: info.SeedEpoch, - }, - PreCommitMsg: info.PreCommitMessage, - CommitMsg: info.CommitMessage, - Retries: info.InvalidProofs, - ToUpgrade: sm.Miner.IsMarkedForUpgrade(sid), - - LastErr: info.LastErr, - Log: log, - // on chain info - SealProof: 0, - Activation: 0, - Expiration: 0, - DealWeight: big.Zero(), - VerifiedDealWeight: big.Zero(), - InitialPledge: big.Zero(), - OnTime: 0, - Early: 0, +func (sm *StorageMinerAPI) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { + sInfo, err := sm.Miner.SectorsStatus(ctx, sid, false) + if err != nil { + return api.SectorInfo{}, err } if !showOnChainInfo { @@ -204,6 +198,14 @@ func (sm *StorageMinerAPI) SectorsStatus(ctx context.Context, sid abi.SectorNumb return sInfo, nil } +func (sm *StorageMinerAPI) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r sto.Data, d api.PieceDealInfo) (api.SectorOffset, error) { + return sm.Miner.SectorAddPieceToAny(ctx, size, r, d) +} + +func (sm *StorageMinerAPI) SectorsUnsealPiece(ctx context.Context, sector sto.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error { + return sm.StorageMgr.SectorsUnsealPiece(ctx, sector, offset, size, randomness, commd) +} + // List all staged sectors func (sm *StorageMinerAPI) SectorsList(context.Context) ([]abi.SectorNumber, error) { sectors, err := sm.Miner.ListSectors() @@ -211,15 +213,72 @@ func (sm 
*StorageMinerAPI) SectorsList(context.Context) ([]abi.SectorNumber, err return nil, err } - out := make([]abi.SectorNumber, len(sectors)) - for i, sector := range sectors { - out[i] = sector.SectorNumber + out := make([]abi.SectorNumber, 0, len(sectors)) + for _, sector := range sectors { + if sector.State == sealing.UndefinedSectorState { + continue // sector ID not set yet + } + + out = append(out, sector.SectorNumber) + } + return out, nil +} + +func (sm *StorageMinerAPI) SectorsListInStates(ctx context.Context, states []api.SectorState) ([]abi.SectorNumber, error) { + filterStates := make(map[sealing.SectorState]struct{}) + for _, state := range states { + st := sealing.SectorState(state) + if _, ok := sealing.ExistSectorStateList[st]; !ok { + continue + } + filterStates[st] = struct{}{} + } + + var sns []abi.SectorNumber + if len(filterStates) == 0 { + return sns, nil + } + + sectors, err := sm.Miner.ListSectors() + if err != nil { + return nil, err + } + + for i := range sectors { + if _, ok := filterStates[sectors[i].State]; ok { + sns = append(sns, sectors[i].SectorNumber) + } + } + return sns, nil +} + +func (sm *StorageMinerAPI) SectorsSummary(ctx context.Context) (map[api.SectorState]int, error) { + sectors, err := sm.Miner.ListSectors() + if err != nil { + return nil, err } + + out := make(map[api.SectorState]int) + for i := range sectors { + state := api.SectorState(sectors[i].State) + out[state]++ + } + return out, nil } func (sm *StorageMinerAPI) StorageLocal(ctx context.Context) (map[stores.ID]string, error) { - return sm.StorageMgr.StorageLocal(ctx) + l, err := sm.LocalStore.Local(ctx) + if err != nil { + return nil, err + } + + out := map[stores.ID]string{} + for _, st := range l { + out[st.ID] = st.LocalPath + } + + return out, nil } func (sm *StorageMinerAPI) SectorsRefs(context.Context) (map[string][]api.SealedRef, error) { @@ -239,7 +298,7 @@ func (sm *StorageMinerAPI) SectorsRefs(context.Context) (map[string][]api.Sealed } func (sm 
*StorageMinerAPI) StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) { - return sm.StorageMgr.FsStat(ctx, id) + return sm.RemoteStore.FsStat(ctx, id) } func (sm *StorageMinerAPI) SectorStartSealing(ctx context.Context, number abi.SectorNumber) error { @@ -281,10 +340,38 @@ func (sm *StorageMinerAPI) SectorRemove(ctx context.Context, id abi.SectorNumber return sm.Miner.RemoveSector(ctx, id) } +func (sm *StorageMinerAPI) SectorTerminate(ctx context.Context, id abi.SectorNumber) error { + return sm.Miner.TerminateSector(ctx, id) +} + +func (sm *StorageMinerAPI) SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) { + return sm.Miner.TerminateFlush(ctx) +} + +func (sm *StorageMinerAPI) SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) { + return sm.Miner.TerminatePending(ctx) +} + +func (sm *StorageMinerAPI) SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) { + return sm.Miner.SectorPreCommitFlush(ctx) +} + +func (sm *StorageMinerAPI) SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) { + return sm.Miner.SectorPreCommitPending(ctx) +} + func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error { return sm.Miner.MarkForUpgrade(id) } +func (sm *StorageMinerAPI) SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) { + return sm.Miner.CommitFlush(ctx) +} + +func (sm *StorageMinerAPI) SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) { + return sm.Miner.CommitPending(ctx) +} + func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error { w, err := connectRemoteWorker(ctx, sm, url) if err != nil { @@ -296,8 +383,12 @@ func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error return sm.StorageMgr.AddWorker(ctx, w) } -func (sm *StorageMinerAPI) SealingSchedDiag(ctx context.Context) (interface{}, error) { - return sm.StorageMgr.SchedDiag(ctx) +func (sm *StorageMinerAPI) 
SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) { + return sm.StorageMgr.SchedDiag(ctx, doSched) +} + +func (sm *StorageMinerAPI) SealingAbort(ctx context.Context, call storiface.CallID) error { + return sm.StorageMgr.Abort(ctx, call) } func (sm *StorageMinerAPI) MarketImportDealData(ctx context.Context, propCid cid.Cid, path string) error { @@ -341,6 +432,11 @@ func (sm *StorageMinerAPI) MarketListRetrievalDeals(ctx context.Context) ([]retr deals := sm.RetrievalProvider.ListDeals() for _, deal := range deals { + if deal.ChannelID != nil { + if deal.ChannelID.Initiator == "" || deal.ChannelID.Responder == "" { + deal.ChannelID = nil // don't try to push unparsable peer IDs over jsonrpc + } + } out = append(out, deal) } @@ -438,6 +534,15 @@ func (sm *StorageMinerAPI) MarketDataTransferUpdates(ctx context.Context) (<-cha return channels, nil } +func (sm *StorageMinerAPI) MarketPendingDeals(ctx context.Context) (api.PendingDealInfo, error) { + return sm.DealPublisher.PendingDeals(), nil +} + +func (sm *StorageMinerAPI) MarketPublishPendingDeals(ctx context.Context) error { + sm.DealPublisher.ForcePublishPendingDeals() + return nil +} + func (sm *StorageMinerAPI) DealsList(ctx context.Context) ([]api.MarketDeal, error) { return sm.listDeals(ctx) } @@ -478,6 +583,22 @@ func (sm *StorageMinerAPI) DealsSetConsiderOfflineRetrievalDeals(ctx context.Con return sm.SetConsiderOfflineRetrievalDealsConfigFunc(b) } +func (sm *StorageMinerAPI) DealsConsiderVerifiedStorageDeals(ctx context.Context) (bool, error) { + return sm.ConsiderVerifiedStorageDealsConfigFunc() +} + +func (sm *StorageMinerAPI) DealsSetConsiderVerifiedStorageDeals(ctx context.Context, b bool) error { + return sm.SetConsiderVerifiedStorageDealsConfigFunc(b) +} + +func (sm *StorageMinerAPI) DealsConsiderUnverifiedStorageDeals(ctx context.Context) (bool, error) { + return sm.ConsiderUnverifiedStorageDealsConfigFunc() +} + +func (sm *StorageMinerAPI) 
DealsSetConsiderUnverifiedStorageDeals(ctx context.Context, b bool) error { + return sm.SetConsiderUnverifiedStorageDealsConfigFunc(b) +} + func (sm *StorageMinerAPI) DealsGetExpectedSealDurationFunc(ctx context.Context) (time.Duration, error) { return sm.GetExpectedSealDurationFunc() } @@ -541,4 +662,45 @@ func (sm *StorageMinerAPI) CreateBackup(ctx context.Context, fpath string) error return backup(sm.DS, fpath) } +func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []sto.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) { + var rg storiface.RGetter + if expensive { + rg = func(ctx context.Context, id abi.SectorID) (cid.Cid, error) { + si, err := sm.Miner.SectorsStatus(ctx, id.Number, false) + if err != nil { + return cid.Undef, err + } + if si.CommR == nil { + return cid.Undef, xerrors.Errorf("commr is nil") + } + + return *si.CommR, nil + } + } + + bad, err := sm.StorageMgr.CheckProvable(ctx, pp, sectors, rg) + if err != nil { + return nil, err + } + + var out = make(map[abi.SectorNumber]string) + for sid, err := range bad { + out[sid.Number] = err + } + + return out, nil +} + +func (sm *StorageMinerAPI) ActorAddressConfig(ctx context.Context) (api.AddressConfig, error) { + return sm.AddrSel.AddressConfig, nil +} + +func (sm *StorageMinerAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) { + return build.OpenRPCDiscoverJSON_Miner(), nil +} + +func (sm *StorageMinerAPI) ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) { + return sm.Epp.ComputeProof(ctx, ssi, rand) +} + var _ api.StorageMiner = &StorageMinerAPI{} diff --git a/node/modules/blockstore.go b/node/modules/blockstore.go new file mode 100644 index 00000000000..2588e3f98b1 --- /dev/null +++ b/node/modules/blockstore.go @@ -0,0 +1,144 @@ +package modules + +import ( + "context" + "io" + "os" + "path/filepath" + + bstore "github.com/ipfs/go-ipfs-blockstore" + 
"go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/blockstore" + badgerbs "github.com/filecoin-project/lotus/blockstore/badger" + "github.com/filecoin-project/lotus/blockstore/splitstore" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/helpers" + "github.com/filecoin-project/lotus/node/repo" +) + +// UniversalBlockstore returns a single universal blockstore that stores both +// chain data and state data. It can be backed by a blockstore directly +// (e.g. Badger), or by a Splitstore. +func UniversalBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.UniversalBlockstore, error) { + bs, err := r.Blockstore(helpers.LifecycleCtx(mctx, lc), repo.UniversalBlockstore) + if err != nil { + return nil, err + } + if c, ok := bs.(io.Closer); ok { + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return c.Close() + }, + }) + } + return bs, err +} + +func DiscardColdBlockstore(lc fx.Lifecycle, bs dtypes.UniversalBlockstore) (dtypes.ColdBlockstore, error) { + return blockstore.NewDiscardStore(bs), nil +} + +func BadgerHotBlockstore(lc fx.Lifecycle, r repo.LockedRepo) (dtypes.HotBlockstore, error) { + path, err := r.SplitstorePath() + if err != nil { + return nil, err + } + + path = filepath.Join(path, "hot.badger") + if err := os.MkdirAll(path, 0755); err != nil { + return nil, err + } + + opts, err := repo.BadgerBlockstoreOptions(repo.HotBlockstore, path, r.Readonly()) + if err != nil { + return nil, err + } + + bs, err := badgerbs.Open(opts) + if err != nil { + return nil, err + } + + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return bs.Close() + }}) + + return bs, nil +} + +func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.LockedRepo, ds dtypes.MetadataDS, cold dtypes.ColdBlockstore, hot dtypes.HotBlockstore) (dtypes.SplitBlockstore, error) { + return 
func(lc fx.Lifecycle, r repo.LockedRepo, ds dtypes.MetadataDS, cold dtypes.ColdBlockstore, hot dtypes.HotBlockstore) (dtypes.SplitBlockstore, error) { + path, err := r.SplitstorePath() + if err != nil { + return nil, err + } + + cfg := &splitstore.Config{ + MarkSetType: cfg.Splitstore.MarkSetType, + DiscardColdBlocks: cfg.Splitstore.ColdStoreType == "discard", + HotStoreMessageRetention: cfg.Splitstore.HotStoreMessageRetention, + } + ss, err := splitstore.Open(path, ds, hot, cold, cfg) + if err != nil { + return nil, err + } + lc.Append(fx.Hook{ + OnStop: func(context.Context) error { + return ss.Close() + }, + }) + + return ss, err + } +} + +func SplitBlockstoreGCReferenceProtector(_ fx.Lifecycle, s dtypes.SplitBlockstore) dtypes.GCReferenceProtector { + return s.(dtypes.GCReferenceProtector) +} + +func NoopGCReferenceProtector(_ fx.Lifecycle) dtypes.GCReferenceProtector { + return dtypes.NoopGCReferenceProtector{} +} + +func ExposedSplitBlockstore(_ fx.Lifecycle, s dtypes.SplitBlockstore) dtypes.ExposedBlockstore { + return s.(*splitstore.SplitStore).Expose() +} + +func StateFlatBlockstore(_ fx.Lifecycle, _ helpers.MetricsCtx, bs dtypes.UniversalBlockstore) (dtypes.BasicStateBlockstore, error) { + return bs, nil +} + +func StateSplitBlockstore(_ fx.Lifecycle, _ helpers.MetricsCtx, bs dtypes.SplitBlockstore) (dtypes.BasicStateBlockstore, error) { + return bs, nil +} + +func ChainFlatBlockstore(_ fx.Lifecycle, _ helpers.MetricsCtx, bs dtypes.UniversalBlockstore) (dtypes.ChainBlockstore, error) { + return bs, nil +} + +func ChainSplitBlockstore(_ fx.Lifecycle, _ helpers.MetricsCtx, bs dtypes.SplitBlockstore) (dtypes.ChainBlockstore, error) { + return bs, nil +} + +func FallbackChainBlockstore(cbs dtypes.BasicChainBlockstore) dtypes.ChainBlockstore { + return &blockstore.FallbackStore{Blockstore: cbs} +} + +func FallbackStateBlockstore(sbs dtypes.BasicStateBlockstore) dtypes.StateBlockstore { + return &blockstore.FallbackStore{Blockstore: sbs} +} + +func 
InitFallbackBlockstores(cbs dtypes.ChainBlockstore, sbs dtypes.StateBlockstore, rem dtypes.ChainBitswap) error { + for _, bs := range []bstore.Blockstore{cbs, sbs} { + if fbs, ok := bs.(*blockstore.FallbackStore); ok { + fbs.SetFallback(rem.GetBlock) + continue + } + return xerrors.Errorf("expected a FallbackStore") + } + return nil +} diff --git a/node/modules/chain.go b/node/modules/chain.go index d1414b307f2..c4017b8c0bf 100644 --- a/node/modules/chain.go +++ b/node/modules/chain.go @@ -1,25 +1,19 @@ package modules import ( - "bytes" "context" - "os" "time" "github.com/ipfs/go-bitswap" "github.com/ipfs/go-bitswap/network" "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-datastore" - "github.com/ipld/go-car" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/routing" - pubsub "github.com/libp2p/go-libp2p-pubsub" "go.uber.org/fx" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/journal" - + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/blockstore/splitstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain" "github.com/filecoin-project/lotus/chain/beacon" @@ -28,17 +22,15 @@ import ( "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/lib/blockstore" - "github.com/filecoin-project/lotus/lib/bufbstore" - "github.com/filecoin-project/lotus/lib/timedbs" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" - "github.com/filecoin-project/lotus/node/repo" ) -func ChainBitswap(mctx 
helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.Routing, bs dtypes.ChainGCBlockstore) dtypes.ChainBitswap { +// ChainBitswap uses a blockstore that bypasses all caches. +func ChainBitswap(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.Routing, bs dtypes.ExposedBlockstore) dtypes.ChainBitswap { // prefix protocol for chain bitswap // (so bitswap uses /chain/ipfs/bitswap/1.0.0 internally for chain sync stuff) bitswapNetwork := network.NewFromIpfsHost(host, rt, network.Prefix("/chain")) @@ -46,10 +38,10 @@ func ChainBitswap(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt r // Write all incoming bitswap blocks into a temporary blockstore for two // block times. If they validate, they'll be persisted later. - cache := timedbs.NewTimedCacheBS(2 * time.Duration(build.BlockDelaySecs) * time.Second) + cache := blockstore.NewTimedCacheBlockstore(2 * time.Duration(build.BlockDelaySecs) * time.Second) lc.Append(fx.Hook{OnStop: cache.Stop, OnStart: cache.Start}) - bitswapBs := bufbstore.NewTieredBstore(bs, cache) + bitswapBs := blockstore.NewTieredBstore(bs, cache) // Use just exch.Close(), closing the context is not needed exch := bitswap.New(mctx, bitswapNetwork, bitswapBs, bitswapOptions...) 
@@ -62,8 +54,11 @@ func ChainBitswap(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt r return exch } -func MessagePool(lc fx.Lifecycle, sm *stmgr.StateManager, ps *pubsub.PubSub, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal) (*messagepool.MessagePool, error) { - mpp := messagepool.NewProvider(sm, ps) +func ChainBlockService(bs dtypes.ExposedBlockstore, rem dtypes.ChainBitswap) dtypes.ChainBlockService { + return blockservice.New(bs, rem) +} + +func MessagePool(lc fx.Lifecycle, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal, protector dtypes.GCReferenceProtector) (*messagepool.MessagePool, error) { mp, err := messagepool.New(mpp, ds, nn, j) if err != nil { return nil, xerrors.Errorf("constructing mpool: %w", err) @@ -73,99 +68,36 @@ func MessagePool(lc fx.Lifecycle, sm *stmgr.StateManager, ps *pubsub.PubSub, ds return mp.Close() }, }) + protector.AddProtector(mp.ForEachPendingMessage) return mp, nil } -func ChainBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.ChainBlockstore, error) { - blocks, err := r.Datastore("/chain") - if err != nil { - return nil, err - } - - bs := blockstore.NewBlockstore(blocks) - cbs, err := blockstore.CachedBlockstore(helpers.LifecycleCtx(mctx, lc), bs, blockstore.DefaultCacheOpts()) - if err != nil { - return nil, err - } - - return cbs, nil -} - -func ChainGCBlockstore(bs dtypes.ChainBlockstore, gcl dtypes.ChainGCLocker) dtypes.ChainGCBlockstore { - return blockstore.NewGCBlockstore(bs, gcl) -} - -func ChainBlockService(bs dtypes.ChainBlockstore, rem dtypes.ChainBitswap) dtypes.ChainBlockService { - return blockservice.New(bs, rem) -} - -func ChainStore(lc fx.Lifecycle, bs dtypes.ChainBlockstore, ds dtypes.MetadataDS, syscalls vm.SyscallBuilder, j journal.Journal) *store.ChainStore { - chain := store.NewChainStore(bs, ds, syscalls, j) +func ChainStore(lc fx.Lifecycle, cbs dtypes.ChainBlockstore, sbs dtypes.StateBlockstore, ds 
dtypes.MetadataDS, basebs dtypes.BaseBlockstore, syscalls vm.SyscallBuilder, j journal.Journal) *store.ChainStore { + chain := store.NewChainStore(cbs, sbs, ds, syscalls, j) if err := chain.Load(); err != nil { log.Warnf("loading chain state from disk: %s", err) } - return chain -} - -func ErrorGenesis() Genesis { - return func() (header *types.BlockHeader, e error) { - return nil, xerrors.New("No genesis block provided, provide the file with 'lotus daemon --genesis=[genesis file]'") - } -} - -func LoadGenesis(genBytes []byte) func(dtypes.ChainBlockstore) Genesis { - return func(bs dtypes.ChainBlockstore) Genesis { - return func() (header *types.BlockHeader, e error) { - c, err := car.LoadCar(bs, bytes.NewReader(genBytes)) - if err != nil { - return nil, xerrors.Errorf("loading genesis car file failed: %w", err) - } - if len(c.Roots) != 1 { - return nil, xerrors.New("expected genesis file to have one root") - } - root, err := bs.Get(c.Roots[0]) - if err != nil { - return nil, err - } - - h, err := types.DecodeBlock(root.RawData()) - if err != nil { - return nil, xerrors.Errorf("decoding block failed: %w", err) - } - return h, nil - } - } -} - -func DoSetGenesis(_ dtypes.AfterGenesisSet) {} - -func SetGenesis(cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) { - genFromRepo, err := cs.GetGenesis() - if err == nil { - if os.Getenv("LOTUS_SKIP_GENESIS_CHECK") != "_yes_" { - expectedGenesis, err := g() + var startHook func(context.Context) error + if ss, ok := basebs.(*splitstore.SplitStore); ok { + startHook = func(_ context.Context) error { + err := ss.Start(chain) if err != nil { - return dtypes.AfterGenesisSet{}, xerrors.Errorf("getting expected genesis failed: %w", err) - } - - if genFromRepo.Cid() != expectedGenesis.Cid() { - return dtypes.AfterGenesisSet{}, xerrors.Errorf("genesis in the repo is not the one expected by this version of Lotus!") + err = xerrors.Errorf("error starting splitstore: %w", err) } + return err } - return 
dtypes.AfterGenesisSet{}, nil // already set, noop - } - if err != datastore.ErrNotFound { - return dtypes.AfterGenesisSet{}, xerrors.Errorf("getting genesis block failed: %w", err) } - genesis, err := g() - if err != nil { - return dtypes.AfterGenesisSet{}, xerrors.Errorf("genesis func failed: %w", err) - } + lc.Append(fx.Hook{ + OnStart: startHook, + OnStop: func(_ context.Context) error { + return chain.Close() + }, + }) - return dtypes.AfterGenesisSet{}, cs.SetGenesis(genesis) + return chain } func NetworkName(mctx helpers.MetricsCtx, lc fx.Lifecycle, cs *store.ChainStore, us stmgr.UpgradeSchedule, _ dtypes.AfterGenesisSet) (dtypes.NetworkName, error) { diff --git a/node/modules/client.go b/node/modules/client.go index f1380bc97d8..e0bcc13c768 100644 --- a/node/modules/client.go +++ b/node/modules/client.go @@ -1,14 +1,16 @@ package modules import ( + "bytes" "context" + "os" + "path/filepath" "time" - "github.com/filecoin-project/go-multistore" - "golang.org/x/xerrors" - "go.uber.org/fx" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-data-transfer/channelmonitor" dtimpl "github.com/filecoin-project/go-data-transfer/impl" dtnet "github.com/filecoin-project/go-data-transfer/network" dtgstransport "github.com/filecoin-project/go-data-transfer/transport/graphsync" @@ -19,31 +21,68 @@ import ( rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" "github.com/filecoin-project/go-fil-markets/storagemarket" storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/funds" "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network" - "github.com/filecoin-project/go-storedcounter" + "github.com/filecoin-project/go-multistore" + "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" 
"github.com/libp2p/go-libp2p-core/host" + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/market" "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/markets" marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/markets/retrievaladapter" "github.com/filecoin-project/lotus/node/impl/full" payapi "github.com/filecoin-project/lotus/node/impl/paych" "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/node/repo/importmgr" "github.com/filecoin-project/lotus/node/repo/retrievalstoremgr" ) -func ClientMultiDatastore(lc fx.Lifecycle, r repo.LockedRepo) (dtypes.ClientMultiDstore, error) { - ds, err := r.Datastore("/client") +func HandleMigrateClientFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, wallet full.WalletAPI, fundMgr *market.FundManager) { + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + addr, err := wallet.WalletDefaultAddress(ctx) + // nothing to be done if there is no default address + if err != nil { + return nil + } + b, err := ds.Get(datastore.NewKey("/marketfunds/client")) + if err != nil { + if xerrors.Is(err, datastore.ErrNotFound) { + return nil + } + log.Errorf("client funds migration - getting datastore value: %v", err) + return nil + } + + var value abi.TokenAmount + if err = value.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + log.Errorf("client funds migration - unmarshalling datastore value: %v", err) + return nil + } + _, err = fundMgr.Reserve(ctx, addr, addr, value) + if err != nil { + log.Errorf("client funds migration - reserving funds (wallet %s, addr %s, funds %d): %v", + addr, addr, value, err) + return nil + } + + return ds.Delete(datastore.NewKey("/marketfunds/client")) + }, + }) +} + +func ClientMultiDatastore(lc 
fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.ClientMultiDstore, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + ds, err := r.Datastore(ctx, "/client") if err != nil { - return nil, xerrors.Errorf("getting datastore out of reop: %w", err) + return nil, xerrors.Errorf("getting datastore out of repo: %w", err) } mds, err := multistore.NewMultiDstore(ds) @@ -80,13 +119,38 @@ func RegisterClientValidator(crv dtypes.ClientRequestValidator, dtm dtypes.Clien // NewClientGraphsyncDataTransfer returns a data transfer manager that just // uses the clients's Client DAG service for transfers -func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Graphsync, ds dtypes.MetadataDS) (dtypes.ClientDataTransfer, error) { - sc := storedcounter.New(ds, datastore.NewKey("/datatransfer/client/counter")) - net := dtnet.NewFromLibp2pHost(h) +func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Graphsync, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ClientDataTransfer, error) { + // go-data-transfer protocol retries: + // 1s, 5s, 25s, 2m5s, 5m x 11 ~= 1 hour + dtRetryParams := dtnet.RetryParameters(time.Second, 5*time.Minute, 15, 5) + net := dtnet.NewFromLibp2pHost(h, dtRetryParams) dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/client/transfers")) transport := dtgstransport.NewTransport(h.ID(), gs) - dt, err := dtimpl.NewDataTransfer(dtDs, net, transport, sc) + err := os.MkdirAll(filepath.Join(r.Path(), "data-transfer"), 0755) //nolint: gosec + if err != nil && !os.IsExist(err) { + return nil, err + } + + // data-transfer push / pull channel restart configuration: + dtRestartConfig := dtimpl.ChannelRestartConfig(channelmonitor.Config{ + // Disable Accept and Complete timeouts until this issue is resolved: + // https://github.com/filecoin-project/lotus/issues/6343# + // Wait for the other side to respond to an Open channel message + AcceptTimeout: 0, + // Wait for the other side to send a Complete message 
once all + // data has been sent / received + CompleteTimeout: 0, + + // When an error occurs, wait a little while until all related errors + // have fired before sending a restart message + RestartDebounce: 10 * time.Second, + // After sending a restart, wait for at least 1 minute before sending another + RestartBackoff: time.Minute, + // After trying to restart 3 times, give up and fail the transfer + MaxConsecutiveRestarts: 3, + }) + dt, err := dtimpl.NewDataTransfer(dtDs, filepath.Join(r.Path(), "data-transfer"), net, transport, dtRestartConfig) if err != nil { return nil, err } @@ -94,6 +158,7 @@ func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Grap dt.OnReady(marketevents.ReadyLogger("client data transfer")) lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { + dt.SubscribeToEvents(marketevents.DataTransferLogger) return dt.Start(ctx) }, OnStop: func(ctx context.Context) error { @@ -108,15 +173,13 @@ func NewClientDatastore(ds dtypes.MetadataDS) dtypes.ClientDatastore { return namespace.Wrap(ds, datastore.NewKey("/deals/client")) } -type ClientDealFunds funds.DealFunds - -func NewClientDealFunds(ds dtypes.MetadataDS) (ClientDealFunds, error) { - return funds.NewDealFunds(ds, datastore.NewKey("/marketfunds/client")) -} +func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, mds dtypes.ClientMultiDstore, r repo.LockedRepo, dataTransfer dtypes.ClientDataTransfer, discovery *discoveryimpl.Local, deals dtypes.ClientDatastore, scn storagemarket.StorageClientNode, j journal.Journal) (storagemarket.StorageClient, error) { + // go-fil-markets protocol retries: + // 1s, 5s, 25s, 2m5s, 5m x 11 ~= 1 hour + marketsRetryParams := smnet.RetryParameters(time.Second, 5*time.Minute, 15, 5) + net := smnet.NewFromLibp2pHost(h, marketsRetryParams) -func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, mds dtypes.ClientMultiDstore, r repo.LockedRepo, dataTransfer dtypes.ClientDataTransfer, 
discovery *discoveryimpl.Local, deals dtypes.ClientDatastore, scn storagemarket.StorageClientNode, dealFunds ClientDealFunds, j journal.Journal) (storagemarket.StorageClient, error) { - net := smnet.NewFromLibp2pHost(h) - c, err := storageimpl.NewClient(net, ibs, mds, dataTransfer, discovery, deals, scn, dealFunds, storageimpl.DealPollingInterval(time.Second)) + c, err := storageimpl.NewClient(net, ibs, mds, dataTransfer, discovery, deals, scn, storageimpl.DealPollingInterval(time.Second)) if err != nil { return nil, err } @@ -141,8 +204,7 @@ func StorageClient(lc fx.Lifecycle, h host.Host, ibs dtypes.ClientBlockstore, md func RetrievalClient(lc fx.Lifecycle, h host.Host, mds dtypes.ClientMultiDstore, dt dtypes.ClientDataTransfer, payAPI payapi.PaychAPI, resolver discovery.PeerResolver, ds dtypes.MetadataDS, chainAPI full.ChainAPI, stateAPI full.StateAPI, j journal.Journal) (retrievalmarket.RetrievalClient, error) { adapter := retrievaladapter.NewRetrievalClientNode(payAPI, chainAPI, stateAPI) network := rmnet.NewFromLibp2pHost(h) - sc := storedcounter.New(ds, datastore.NewKey("/retr")) - client, err := retrievalimpl.NewClient(network, mds, dt, adapter, resolver, namespace.Wrap(ds, datastore.NewKey("/retrievals/client")), sc) + client, err := retrievalimpl.NewClient(network, mds, dt, adapter, resolver, namespace.Wrap(ds, datastore.NewKey("/retrievals/client"))) if err != nil { return nil, err } diff --git a/node/modules/core.go b/node/modules/core.go index a695d865147..e089333e71c 100644 --- a/node/modules/core.go +++ b/node/modules/core.go @@ -6,25 +6,48 @@ import ( "errors" "io" "io/ioutil" + "os" + "path/filepath" + "time" "github.com/gbrlsnchs/jwt/v3" logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" record "github.com/libp2p/go-libp2p-record" + "github.com/raulk/go-watchdog" + "go.uber.org/fx" "golang.org/x/xerrors" "github.com/filecoin-project/go-jsonrpc/auth" + 
"github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api/apistruct" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/addrutil" + "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/lotus/system" ) -var log = logging.Logger("modules") +const ( + // EnvWatchdogDisabled is an escape hatch to disable the watchdog explicitly + // in case an OS/kernel appears to report incorrect information. The + // watchdog will be disabled if the value of this env variable is 1. + EnvWatchdogDisabled = "LOTUS_DISABLE_WATCHDOG" +) + +const ( + JWTSecretName = "auth-jwt-private" //nolint:gosec + KTJwtHmacSecret = "jwt-hmac-secret" //nolint:gosec +) + +var ( + log = logging.Logger("modules") + logWatchdog = logging.Logger("watchdog") +) type Genesis func() (*types.BlockHeader, error) @@ -35,8 +58,84 @@ func RecordValidator(ps peerstore.Peerstore) record.Validator { } } -const JWTSecretName = "auth-jwt-private" //nolint:gosec -const KTJwtHmacSecret = "jwt-hmac-secret" //nolint:gosec +// MemoryConstraints returns the memory constraints configured for this system. +func MemoryConstraints() system.MemoryConstraints { + constraints := system.GetMemoryConstraints() + log.Infow("memory limits initialized", + "max_mem_heap", constraints.MaxHeapMem, + "total_system_mem", constraints.TotalSystemMem, + "effective_mem_limit", constraints.EffectiveMemLimit) + return constraints +} + +// MemoryWatchdog starts the memory watchdog, applying the computed resource +// constraints. 
+func MemoryWatchdog(lr repo.LockedRepo, lc fx.Lifecycle, constraints system.MemoryConstraints) { + if os.Getenv(EnvWatchdogDisabled) == "1" { + log.Infof("memory watchdog is disabled via %s", EnvWatchdogDisabled) + return + } + + // configure heap profile capture so that one is captured per episode where + // utilization climbs over 90% of the limit. A maximum of 10 heapdumps + // will be captured during life of this process. + watchdog.HeapProfileDir = filepath.Join(lr.Path(), "heapprof") + watchdog.HeapProfileMaxCaptures = 10 + watchdog.HeapProfileThreshold = 0.9 + watchdog.Logger = logWatchdog + + policy := watchdog.NewWatermarkPolicy(0.50, 0.60, 0.70, 0.85, 0.90, 0.925, 0.95) + + // Try to initialize a watchdog in the following order of precedence: + // 1. If a max heap limit has been provided, initialize a heap-driven watchdog. + // 2. Else, try to initialize a cgroup-driven watchdog. + // 3. Else, try to initialize a system-driven watchdog. + // 4. Else, log a warning that the system is flying solo, and return. + + addStopHook := func(stopFn func()) { + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + stopFn() + return nil + }, + }) + } + + // 1. If user has set max heap limit, apply it. + if maxHeap := constraints.MaxHeapMem; maxHeap != 0 { + const minGOGC = 10 + err, stopFn := watchdog.HeapDriven(maxHeap, minGOGC, policy) + if err == nil { + log.Infof("initialized heap-driven watchdog; max heap: %d bytes", maxHeap) + addStopHook(stopFn) + return + } + log.Warnf("failed to initialize heap-driven watchdog; err: %s", err) + log.Warnf("trying a cgroup-driven watchdog") + } + + // 2. cgroup-driven watchdog. + err, stopFn := watchdog.CgroupDriven(5*time.Second, policy) + if err == nil { + log.Infof("initialized cgroup-driven watchdog") + addStopHook(stopFn) + return + } + log.Warnf("failed to initialize cgroup-driven watchdog; err: %s", err) + log.Warnf("trying a system-driven watchdog") + + // 3. system-driven watchdog. 
+ err, stopFn = watchdog.SystemDriven(0, 5*time.Second, policy) // 0 calculates the limit automatically. + if err == nil { + log.Infof("initialized system-driven watchdog") + addStopHook(stopFn) + return + } + + // 4. log the failure + log.Warnf("failed to initialize system-driven watchdog; err: %s", err) + log.Warnf("system running without a memory watchdog") +} type JwtPayload struct { Allow []auth.Permission @@ -64,7 +163,7 @@ func APISecret(keystore types.KeyStore, lr repo.LockedRepo) (*dtypes.APIAlg, err // TODO: make this configurable p := JwtPayload{ - Allow: apistruct.AllPermissions, + Allow: api.AllPermissions, } cliToken, err := jwt.Sign(&p, jwt.NewHS256(key.PrivateKey)) @@ -94,14 +193,39 @@ func BuiltinBootstrap() (dtypes.BootstrapPeers, error) { func DrandBootstrap(ds dtypes.DrandSchedule) (dtypes.DrandBootstrap, error) { // TODO: retry resolving, don't fail if at least one resolve succeeds - res := []peer.AddrInfo{} + var res []peer.AddrInfo for _, d := range ds { addrs, err := addrutil.ParseAddresses(context.TODO(), d.Config.Relays) if err != nil { log.Errorf("reoslving drand relays addresses: %+v", err) - return res, nil + continue } res = append(res, addrs...) 
} return res, nil } + +func NewDefaultMaxFeeFunc(r repo.LockedRepo) dtypes.DefaultMaxFeeFunc { + return func() (out abi.TokenAmount, err error) { + err = readNodeCfg(r, func(cfg *config.FullNode) { + out = abi.TokenAmount(cfg.Fees.DefaultMaxFee) + }) + return + } +} + +func readNodeCfg(r repo.LockedRepo, accessor func(node *config.FullNode)) error { + raw, err := r.Config() + if err != nil { + return err + } + + cfg, ok := raw.(*config.FullNode) + if !ok { + return xerrors.New("expected config.FullNode") + } + + accessor(cfg) + + return nil +} diff --git a/node/modules/dtypes/miner.go b/node/modules/dtypes/miner.go index 1ef157b7ed2..9a391223dc9 100644 --- a/node/modules/dtypes/miner.go +++ b/node/modules/dtypes/miner.go @@ -58,10 +58,28 @@ type ConsiderOfflineRetrievalDealsConfigFunc func() (bool, error) // disable or enable retrieval deal acceptance. type SetConsiderOfflineRetrievalDealsConfigFunc func(bool) error -// SetSealingDelay sets how long a sector waits for more deals before sealing begins. +// ConsiderVerifiedStorageDealsConfigFunc is a function which reads from miner +// config to determine if the user has disabled verified storage deals (or not). +type ConsiderVerifiedStorageDealsConfigFunc func() (bool, error) + +// SetConsiderVerifiedStorageDealsConfigFunc is a function which is used to +// disable or enable verified storage deal acceptance. +type SetConsiderVerifiedStorageDealsConfigFunc func(bool) error + +// ConsiderUnverifiedStorageDealsConfigFunc is a function which reads from miner +// config to determine if the user has disabled unverified storage deals (or not). +type ConsiderUnverifiedStorageDealsConfigFunc func() (bool, error) + +// SetConsiderUnverifiedStorageDealsConfigFunc is a function which is used to +// disable or enable unverified storage deal acceptance. +type SetConsiderUnverifiedStorageDealsConfigFunc func(bool) error + +// SetSealingConfigFunc is a function which is used to +// sets the sealing config. 
type SetSealingConfigFunc func(sealiface.Config) error -// GetSealingDelay returns how long a sector waits for more deals before sealing begins. +// GetSealingConfigFunc is a function which is used to +// get the sealing config. type GetSealingConfigFunc func() (sealiface.Config, error) // SetExpectedSealDurationFunc is a function which is used to set how long sealing is expected to take. @@ -72,5 +90,10 @@ type SetExpectedSealDurationFunc func(time.Duration) error // too determine how long sealing is expected to take type GetExpectedSealDurationFunc func() (time.Duration, error) +type SetMaxDealStartDelayFunc func(time.Duration) error +type GetMaxDealStartDelayFunc func() (time.Duration, error) + type StorageDealFilter func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) type RetrievalDealFilter func(ctx context.Context, deal retrievalmarket.ProviderDealState) (bool, string, error) + +type RetrievalPricingFunc func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error) diff --git a/node/modules/dtypes/mpool.go b/node/modules/dtypes/mpool.go index 1c64449f881..df96b8d0ebd 100644 --- a/node/modules/dtypes/mpool.go +++ b/node/modules/dtypes/mpool.go @@ -5,6 +5,7 @@ import ( "sync" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" ) type MpoolLocker struct { @@ -33,3 +34,5 @@ func (ml *MpoolLocker) TakeLock(ctx context.Context, a address.Address) (func(), <-lk }, nil } + +type DefaultMaxFeeFunc func() (abi.TokenAmount, error) diff --git a/node/modules/dtypes/protector.go b/node/modules/dtypes/protector.go new file mode 100644 index 00000000000..0d9625fc1cd --- /dev/null +++ b/node/modules/dtypes/protector.go @@ -0,0 +1,13 @@ +package dtypes + +import ( + cid "github.com/ipfs/go-cid" +) + +type GCReferenceProtector interface { + AddProtector(func(func(cid.Cid) error) error) +} + +type NoopGCReferenceProtector struct{} + +func (p NoopGCReferenceProtector) 
AddProtector(func(func(cid.Cid) error) error) {} diff --git a/node/modules/dtypes/storage.go b/node/modules/dtypes/storage.go index 13defda8dec..b4420f701f8 100644 --- a/node/modules/dtypes/storage.go +++ b/node/modules/dtypes/storage.go @@ -14,25 +14,66 @@ import ( "github.com/filecoin-project/go-fil-markets/piecestore" "github.com/filecoin-project/go-statestore" - "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/node/repo/importmgr" "github.com/filecoin-project/lotus/node/repo/retrievalstoremgr" ) -// MetadataDS stores metadata -// dy default it's namespaced under /metadata in main repo datastore +// MetadataDS stores metadata. By default it's namespaced under /metadata in +// main repo datastore. type MetadataDS datastore.Batching -type ChainBlockstore blockstore.Blockstore +type ( + // UniversalBlockstore is the universal blockstore backend. + UniversalBlockstore blockstore.Blockstore + + // ColdBlockstore is the Cold blockstore abstraction for the splitstore + ColdBlockstore blockstore.Blockstore + + // HotBlockstore is the Hot blockstore abstraction for the splitstore + HotBlockstore blockstore.Blockstore + + // SplitBlockstore is the hot/cold blockstore that sits on top of the ColdBlockstore. + SplitBlockstore blockstore.Blockstore + + // BaseBlockstore is something, coz DI + BaseBlockstore blockstore.Blockstore + + // BasicChainBlockstore is like ChainBlockstore, but without the optional + // network fallback support + BasicChainBlockstore blockstore.Blockstore + + // ChainBlockstore is a blockstore to store chain data (tipsets, blocks, + // messages). It is physically backed by the BareMonolithBlockstore, but it + // has a cache on top that is specially tuned for chain data access + // patterns. 
+ ChainBlockstore blockstore.Blockstore + + // BasicStateBlockstore is like StateBlockstore, but without the optional + // network fallback support + BasicStateBlockstore blockstore.Blockstore + + // StateBlockstore is a blockstore to store state data (state tree). It is + // physically backed by the BareMonolithBlockstore, but it has a cache on + // top that is specially tuned for state data access patterns. + StateBlockstore blockstore.Blockstore + + // ExposedBlockstore is a blockstore that interfaces directly with the + // network or with users, from which queries are served, and where incoming + // data is deposited. For security reasons, this store is disconnected from + // any internal caches. If blocks are added to this store in a way that + // could render caches dirty (e.g. a block is added when an existence cache + // holds a 'false' for that block), the process should signal so by calling + // blockstore.AllCaches.Dirty(cid). + ExposedBlockstore blockstore.Blockstore +) -type ChainGCLocker blockstore.GCLocker -type ChainGCBlockstore blockstore.GCBlockstore type ChainBitswap exchange.Interface type ChainBlockService bserv.BlockService type ClientMultiDstore *multistore.MultiStore type ClientImportMgr *importmgr.Mgr -type ClientBlockstore blockstore.Blockstore +type ClientBlockstore blockstore.BasicBlockstore type ClientDealStore *statestore.StateStore type ClientRequestValidator *requestvalidation.UnifiedRequestValidator type ClientDatastore datastore.Batching @@ -45,12 +86,13 @@ type ClientDataTransfer datatransfer.Manager type ProviderDealStore *statestore.StateStore type ProviderPieceStore piecestore.PieceStore + type ProviderRequestValidator *requestvalidation.UnifiedRequestValidator // ProviderDataTransfer is a data transfer manager for the provider type ProviderDataTransfer datatransfer.Manager type StagingDAG format.DAGService -type StagingBlockstore blockstore.Blockstore +type StagingBlockstore blockstore.BasicBlockstore type StagingGraphsync 
graphsync.GraphExchange type StagingMultiDstore *multistore.MultiStore diff --git a/node/modules/genesis.go b/node/modules/genesis.go new file mode 100644 index 00000000000..43443b125a8 --- /dev/null +++ b/node/modules/genesis.go @@ -0,0 +1,73 @@ +package modules + +import ( + "bytes" + "os" + + "github.com/ipfs/go-datastore" + "github.com/ipld/go-car" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +func ErrorGenesis() Genesis { + return func() (header *types.BlockHeader, e error) { + return nil, xerrors.New("No genesis block provided, provide the file with 'lotus daemon --genesis=[genesis file]'") + } +} + +func LoadGenesis(genBytes []byte) func(dtypes.ChainBlockstore) Genesis { + return func(bs dtypes.ChainBlockstore) Genesis { + return func() (header *types.BlockHeader, e error) { + c, err := car.LoadCar(bs, bytes.NewReader(genBytes)) + if err != nil { + return nil, xerrors.Errorf("loading genesis car file failed: %w", err) + } + if len(c.Roots) != 1 { + return nil, xerrors.New("expected genesis file to have one root") + } + root, err := bs.Get(c.Roots[0]) + if err != nil { + return nil, err + } + + h, err := types.DecodeBlock(root.RawData()) + if err != nil { + return nil, xerrors.Errorf("decoding block failed: %w", err) + } + return h, nil + } + } +} + +func DoSetGenesis(_ dtypes.AfterGenesisSet) {} + +func SetGenesis(cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) { + genFromRepo, err := cs.GetGenesis() + if err == nil { + if os.Getenv("LOTUS_SKIP_GENESIS_CHECK") != "_yes_" { + expectedGenesis, err := g() + if err != nil { + return dtypes.AfterGenesisSet{}, xerrors.Errorf("getting expected genesis failed: %w", err) + } + + if genFromRepo.Cid() != expectedGenesis.Cid() { + return dtypes.AfterGenesisSet{}, xerrors.Errorf("genesis in the repo is not the one expected by this version of Lotus!") + } + } + 
return dtypes.AfterGenesisSet{}, nil // already set, noop + } + if err != datastore.ErrNotFound { + return dtypes.AfterGenesisSet{}, xerrors.Errorf("getting genesis block failed: %w", err) + } + + genesis, err := g() + if err != nil { + return dtypes.AfterGenesisSet{}, xerrors.Errorf("genesis func failed: %w", err) + } + + return dtypes.AfterGenesisSet{}, cs.SetGenesis(genesis) +} diff --git a/node/modules/graphsync.go b/node/modules/graphsync.go index 9bdc9bcca1c..a7f62db76ce 100644 --- a/node/modules/graphsync.go +++ b/node/modules/graphsync.go @@ -3,6 +3,7 @@ package modules import ( "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" + "github.com/filecoin-project/lotus/node/repo" "github.com/ipfs/go-graphsync" graphsyncimpl "github.com/ipfs/go-graphsync/impl" gsnet "github.com/ipfs/go-graphsync/network" @@ -13,31 +14,34 @@ import ( ) // Graphsync creates a graphsync instance from the given loader and storer -func Graphsync(mctx helpers.MetricsCtx, lc fx.Lifecycle, clientBs dtypes.ClientBlockstore, chainBs dtypes.ChainBlockstore, h host.Host) (dtypes.Graphsync, error) { - graphsyncNetwork := gsnet.NewFromLibp2pHost(h) - loader := storeutil.LoaderForBlockstore(clientBs) - storer := storeutil.StorerForBlockstore(clientBs) - gs := graphsyncimpl.New(helpers.LifecycleCtx(mctx, lc), graphsyncNetwork, loader, storer, graphsyncimpl.RejectAllRequestsByDefault()) - chainLoader := storeutil.LoaderForBlockstore(chainBs) - chainStorer := storeutil.StorerForBlockstore(chainBs) - err := gs.RegisterPersistenceOption("chainstore", chainLoader, chainStorer) - if err != nil { - return nil, err - } - gs.RegisterIncomingRequestHook(func(p peer.ID, requestData graphsync.RequestData, hookActions graphsync.IncomingRequestHookActions) { - _, has := requestData.Extension("chainsync") - if has { - // TODO: we should confirm the selector is a reasonable one before we validate - // TODO: this code will get more complicated and 
should probably not live here eventually - hookActions.ValidateRequest() - hookActions.UsePersistenceOption("chainstore") - } - }) - gs.RegisterOutgoingRequestHook(func(p peer.ID, requestData graphsync.RequestData, hookActions graphsync.OutgoingRequestHookActions) { - _, has := requestData.Extension("chainsync") - if has { - hookActions.UsePersistenceOption("chainstore") +func Graphsync(parallelTransfers uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, r repo.LockedRepo, clientBs dtypes.ClientBlockstore, chainBs dtypes.ExposedBlockstore, h host.Host) (dtypes.Graphsync, error) { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, r repo.LockedRepo, clientBs dtypes.ClientBlockstore, chainBs dtypes.ExposedBlockstore, h host.Host) (dtypes.Graphsync, error) { + graphsyncNetwork := gsnet.NewFromLibp2pHost(h) + loader := storeutil.LoaderForBlockstore(clientBs) + storer := storeutil.StorerForBlockstore(clientBs) + + gs := graphsyncimpl.New(helpers.LifecycleCtx(mctx, lc), graphsyncNetwork, loader, storer, graphsyncimpl.RejectAllRequestsByDefault(), graphsyncimpl.MaxInProgressRequests(parallelTransfers)) + chainLoader := storeutil.LoaderForBlockstore(chainBs) + chainStorer := storeutil.StorerForBlockstore(chainBs) + err := gs.RegisterPersistenceOption("chainstore", chainLoader, chainStorer) + if err != nil { + return nil, err } - }) - return gs, nil + gs.RegisterIncomingRequestHook(func(p peer.ID, requestData graphsync.RequestData, hookActions graphsync.IncomingRequestHookActions) { + _, has := requestData.Extension("chainsync") + if has { + // TODO: we should confirm the selector is a reasonable one before we validate + // TODO: this code will get more complicated and should probably not live here eventually + hookActions.ValidateRequest() + hookActions.UsePersistenceOption("chainstore") + } + }) + gs.RegisterOutgoingRequestHook(func(p peer.ID, requestData graphsync.RequestData, hookActions graphsync.OutgoingRequestHookActions) { + _, has := 
requestData.Extension("chainsync") + if has { + hookActions.UsePersistenceOption("chainstore") + } + }) + return gs, nil + } } diff --git a/node/modules/ipfsclient.go b/node/modules/ipfsclient.go index 24e595fdbed..24c5c96783e 100644 --- a/node/modules/ipfsclient.go +++ b/node/modules/ipfsclient.go @@ -6,8 +6,7 @@ import ( "github.com/multiformats/go-multiaddr" - "github.com/filecoin-project/lotus/lib/blockstore" - "github.com/filecoin-project/lotus/lib/ipfsbstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" ) @@ -16,19 +15,19 @@ import ( // If ipfsMaddr is empty, a local IPFS node is assumed considering IPFS_PATH configuration. // If ipfsMaddr is not empty, it will connect to the remote IPFS node with the provided multiaddress. // The flag useForRetrieval indicates if the IPFS node will also be used for storing retrieving deals. -func IpfsClientBlockstore(ipfsMaddr string) func(helpers.MetricsCtx, fx.Lifecycle, dtypes.ClientImportMgr) (dtypes.ClientBlockstore, error) { +func IpfsClientBlockstore(ipfsMaddr string, onlineMode bool) func(helpers.MetricsCtx, fx.Lifecycle, dtypes.ClientImportMgr) (dtypes.ClientBlockstore, error) { return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, localStore dtypes.ClientImportMgr) (dtypes.ClientBlockstore, error) { var err error - var ipfsbs blockstore.Blockstore + var ipfsbs blockstore.BasicBlockstore if ipfsMaddr != "" { var ma multiaddr.Multiaddr ma, err = multiaddr.NewMultiaddr(ipfsMaddr) if err != nil { return nil, xerrors.Errorf("parsing ipfs multiaddr: %w", err) } - ipfsbs, err = ipfsbstore.NewRemoteIpfsBstore(helpers.LifecycleCtx(mctx, lc), ma) + ipfsbs, err = blockstore.NewRemoteIPFSBlockstore(helpers.LifecycleCtx(mctx, lc), ma, onlineMode) } else { - ipfsbs, err = ipfsbstore.NewIpfsBstore(helpers.LifecycleCtx(mctx, lc)) + ipfsbs, err = blockstore.NewLocalIPFSBlockstore(helpers.LifecycleCtx(mctx, lc), 
onlineMode) } if err != nil { return nil, xerrors.Errorf("constructing ipfs blockstore: %w", err) diff --git a/node/modules/lp2p/conngater.go b/node/modules/lp2p/conngater.go new file mode 100644 index 00000000000..29087a1bc28 --- /dev/null +++ b/node/modules/lp2p/conngater.go @@ -0,0 +1,17 @@ +package lp2p + +import ( + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/p2p/net/conngater" + + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +func ConnGater(ds dtypes.MetadataDS) (*conngater.BasicConnectionGater, error) { + return conngater.NewBasicConnectionGater(ds) +} + +func ConnGaterOption(cg *conngater.BasicConnectionGater) (opts Libp2pOpts, err error) { + opts.Opts = append(opts.Opts, libp2p.ConnectionGater(cg)) + return +} diff --git a/node/modules/lp2p/pubsub.go b/node/modules/lp2p/pubsub.go index 9724eb3b4cf..32b85daf347 100644 --- a/node/modules/lp2p/pubsub.go +++ b/node/modules/lp2p/pubsub.go @@ -3,6 +3,7 @@ package lp2p import ( "context" "encoding/json" + "net" "time" host "github.com/libp2p/go-libp2p-core/host" @@ -35,6 +36,15 @@ func init() { pubsub.GossipSubHistoryLength = 10 pubsub.GossipSubGossipFactor = 0.1 } + +const ( + GossipScoreThreshold = -500 + PublishScoreThreshold = -1000 + GraylistScoreThreshold = -2500 + AcceptPXScoreThreshold = 1000 + OpportunisticGraftScoreThreshold = 3.5 +) + func ScoreKeeper() *dtypes.ScoreKeeper { return new(dtypes.ScoreKeeper) } @@ -198,6 +208,16 @@ func GossipSub(in GossipIn) (service *pubsub.PubSub, err error) { drandTopics = append(drandTopics, topic) } + // IP colocation whitelist + var ipcoloWhitelist []*net.IPNet + for _, cidr := range in.Cfg.IPColocationWhitelist { + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return nil, xerrors.Errorf("error parsing IPColocation subnet %s: %w", cidr, err) + } + ipcoloWhitelist = append(ipcoloWhitelist, ipnet) + } + options := []pubsub.Option{ // Gossipsubv1.1 configuration pubsub.WithFloodPublish(true), @@ -228,8 +248,7 @@ func 
GossipSub(in GossipIn) (service *pubsub.PubSub, err error) { // This sets the IP colocation threshold to 5 peers before we apply penalties IPColocationFactorThreshold: 5, IPColocationFactorWeight: -100, - // TODO we want to whitelist IPv6 /64s that belong to datacenters etc - // IPColocationFactorWhitelist: map[string]struct{}{}, + IPColocationFactorWhitelist: ipcoloWhitelist, // P7: behavioural penalties, decay after 1hr BehaviourPenaltyThreshold: 6, @@ -246,11 +265,11 @@ func GossipSub(in GossipIn) (service *pubsub.PubSub, err error) { Topics: topicParams, }, &pubsub.PeerScoreThresholds{ - GossipThreshold: -500, - PublishThreshold: -1000, - GraylistThreshold: -2500, - AcceptPXThreshold: 1000, - OpportunisticGraftThreshold: 3.5, + GossipThreshold: GossipScoreThreshold, + PublishThreshold: PublishScoreThreshold, + GraylistThreshold: GraylistScoreThreshold, + AcceptPXThreshold: AcceptPXScoreThreshold, + OpportunisticGraftThreshold: OpportunisticGraftScoreThreshold, }, ), pubsub.WithPeerScoreInspect(in.Sk.Update, 10*time.Second), diff --git a/node/modules/mpoolnonceapi.go b/node/modules/mpoolnonceapi.go index 294f4d95478..67f512960ec 100644 --- a/node/modules/mpoolnonceapi.go +++ b/node/modules/mpoolnonceapi.go @@ -2,8 +2,10 @@ package modules import ( "context" + "strings" "go.uber.org/fx" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/node/impl/full" @@ -18,16 +20,89 @@ import ( type MpoolNonceAPI struct { fx.In - StateAPI full.StateAPI + ChainModule full.ChainModuleAPI + StateModule full.StateModuleAPI } -// GetNonce gets the nonce from actor state -func (a *MpoolNonceAPI) GetNonce(addr address.Address) (uint64, error) { - act, err := a.StateAPI.StateGetActor(context.Background(), addr, types.EmptyTSK) +// GetNonce gets the nonce from current chain head. 
+func (a *MpoolNonceAPI) GetNonce(ctx context.Context, addr address.Address, tsk types.TipSetKey) (uint64, error) { + var err error + var ts *types.TipSet + if tsk == types.EmptyTSK { + // we need consistent tsk + ts, err = a.ChainModule.ChainHead(ctx) + if err != nil { + return 0, xerrors.Errorf("getting head: %w", err) + } + tsk = ts.Key() + } else { + ts, err = a.ChainModule.ChainGetTipSet(ctx, tsk) + if err != nil { + return 0, xerrors.Errorf("getting tipset: %w", err) + } + } + + keyAddr := addr + + if addr.Protocol() == address.ID { + // make sure we have a key address so we can compare with messages + keyAddr, err = a.StateModule.StateAccountKey(ctx, addr, tsk) + if err != nil { + return 0, xerrors.Errorf("getting account key: %w", err) + } + } else { + addr, err = a.StateModule.StateLookupID(ctx, addr, types.EmptyTSK) + if err != nil { + log.Infof("failed to look up id addr for %s: %w", addr, err) + addr = address.Undef + } + } + + // Load the last nonce from the state, if it exists. 
+ highestNonce := uint64(0) + act, err := a.StateModule.StateGetActor(ctx, keyAddr, ts.Key()) + if err != nil { + if strings.Contains(err.Error(), types.ErrActorNotFound.Error()) { + return 0, xerrors.Errorf("getting actor converted: %w", types.ErrActorNotFound) + } + return 0, xerrors.Errorf("getting actor: %w", err) + } + highestNonce = act.Nonce + + apply := func(msg *types.Message) { + if msg.From != addr && msg.From != keyAddr { + return + } + if msg.Nonce == highestNonce { + highestNonce = msg.Nonce + 1 + } + } + + for _, b := range ts.Blocks() { + msgs, err := a.ChainModule.ChainGetBlockMessages(ctx, b.Cid()) + if err != nil { + return 0, xerrors.Errorf("getting block messages: %w", err) + } + if keyAddr.Protocol() == address.BLS { + for _, m := range msgs.BlsMessages { + apply(m) + } + } else { + for _, sm := range msgs.SecpkMessages { + apply(&sm.Message) + } + } + } + return highestNonce, nil +} + +func (a *MpoolNonceAPI) GetActor(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error) { + act, err := a.StateModule.StateGetActor(ctx, addr, tsk) if err != nil { - return 0, err + return nil, xerrors.Errorf("calling StateGetActor: %w", err) } - return act.Nonce, nil + + return act, nil } var _ messagesigner.MpoolNonceAPI = (*MpoolNonceAPI)(nil) diff --git a/node/modules/paych.go b/node/modules/paych.go new file mode 100644 index 00000000000..905590057f9 --- /dev/null +++ b/node/modules/paych.go @@ -0,0 +1,47 @@ +package modules + +import ( + "context" + + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/node/impl/full" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/helpers" + "github.com/filecoin-project/lotus/paychmgr" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + "go.uber.org/fx" +) + +func NewManager(mctx helpers.MetricsCtx, lc fx.Lifecycle, sm stmgr.StateManagerAPI, pchstore *paychmgr.Store, api 
paychmgr.PaychAPI) *paychmgr.Manager { + ctx := helpers.LifecycleCtx(mctx, lc) + ctx, shutdown := context.WithCancel(ctx) + + return paychmgr.NewManager(ctx, shutdown, sm, pchstore, api) +} + +func NewPaychStore(ds dtypes.MetadataDS) *paychmgr.Store { + ds = namespace.Wrap(ds, datastore.NewKey("/paych/")) + return paychmgr.NewStore(ds) +} + +type PaychAPI struct { + fx.In + + full.MpoolAPI + full.StateAPI +} + +var _ paychmgr.PaychAPI = &PaychAPI{} + +// HandlePaychManager is called by dependency injection to set up hooks +func HandlePaychManager(lc fx.Lifecycle, pm *paychmgr.Manager) { + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + return pm.Start() + }, + OnStop: func(context.Context) error { + return pm.Stop() + }, + }) +} diff --git a/node/modules/services.go b/node/modules/services.go index e0a7c2edab3..011b8916313 100644 --- a/node/modules/services.go +++ b/node/modules/services.go @@ -2,6 +2,9 @@ package modules import ( "context" + "os" + "strconv" + "time" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" @@ -25,6 +28,7 @@ import ( "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/sub" + "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/lib/peermgr" marketevents "github.com/filecoin-project/lotus/markets/loggers" @@ -34,6 +38,19 @@ import ( "github.com/filecoin-project/lotus/node/repo" ) +var pubsubMsgsSyncEpochs = 10 + +func init() { + if s := os.Getenv("LOTUS_MSGS_SYNC_EPOCHS"); s != "" { + val, err := strconv.Atoi(s) + if err != nil { + log.Errorf("failed to parse LOTUS_MSGS_SYNC_EPOCHS: %s", err) + return + } + pubsubMsgsSyncEpochs = val + } +} + func RunHello(mctx helpers.MetricsCtx, lc fx.Lifecycle, h host.Host, svc *hello.Service) error { h.SetStreamHandler(hello.ProtocolID, svc.HandleStream) @@ -82,14 +99,45 @@ func RunChainExchange(h host.Host, 
svc exchange.Server) { h.SetStreamHandler(exchange.ChainExchangeProtocolID, svc.HandleStream) // new } -func HandleIncomingBlocks(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, s *chain.Syncer, bserv dtypes.ChainBlockService, chain *store.ChainStore, stmgr *stmgr.StateManager, h host.Host, nn dtypes.NetworkName) { - ctx := helpers.LifecycleCtx(mctx, lc) +func waitForSync(stmgr *stmgr.StateManager, epochs int, subscribe func()) { + nearsync := time.Duration(epochs*int(build.BlockDelaySecs)) * time.Second - blocksub, err := ps.Subscribe(build.BlocksTopic(nn)) //nolint - if err != nil { - panic(err) + // early check, are we synced at start up? + ts := stmgr.ChainStore().GetHeaviestTipSet() + timestamp := ts.MinTimestamp() + timestampTime := time.Unix(int64(timestamp), 0) + if build.Clock.Since(timestampTime) < nearsync { + subscribe() + return } + // we are not synced, subscribe to head changes and wait for sync + stmgr.ChainStore().SubscribeHeadChanges(func(rev, app []*types.TipSet) error { + if len(app) == 0 { + return nil + } + + latest := app[0].MinTimestamp() + for _, ts := range app[1:] { + timestamp := ts.MinTimestamp() + if timestamp > latest { + latest = timestamp + } + } + + latestTime := time.Unix(int64(latest), 0) + if build.Clock.Since(latestTime) < nearsync { + subscribe() + return store.ErrNotifeeDone + } + + return nil + }) +} + +func HandleIncomingBlocks(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, s *chain.Syncer, bserv dtypes.ChainBlockService, chain *store.ChainStore, stmgr *stmgr.StateManager, h host.Host, nn dtypes.NetworkName) { + ctx := helpers.LifecycleCtx(mctx, lc) + v := sub.NewBlockValidator( h.ID(), chain, stmgr, func(p peer.ID) { @@ -101,24 +149,43 @@ func HandleIncomingBlocks(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.P panic(err) } - go sub.HandleIncomingBlocks(ctx, blocksub, s, bserv, h.ConnManager()) -} - -func HandleIncomingMessages(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, 
mpool *messagepool.MessagePool, h host.Host, nn dtypes.NetworkName) { - ctx := helpers.LifecycleCtx(mctx, lc) + log.Infof("subscribing to pubsub topic %s", build.BlocksTopic(nn)) - msgsub, err := ps.Subscribe(build.MessagesTopic(nn)) //nolint:staticcheck + blocksub, err := ps.Subscribe(build.BlocksTopic(nn)) //nolint if err != nil { panic(err) } + go sub.HandleIncomingBlocks(ctx, blocksub, s, bserv, h.ConnManager()) +} + +func HandleIncomingMessages(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, stmgr *stmgr.StateManager, mpool *messagepool.MessagePool, h host.Host, nn dtypes.NetworkName, bootstrapper dtypes.Bootstrapper) { + ctx := helpers.LifecycleCtx(mctx, lc) + v := sub.NewMessageValidator(h.ID(), mpool) if err := ps.RegisterTopicValidator(build.MessagesTopic(nn), v.Validate); err != nil { panic(err) } - go sub.HandleIncomingMessages(ctx, mpool, msgsub) + subscribe := func() { + log.Infof("subscribing to pubsub topic %s", build.MessagesTopic(nn)) + + msgsub, err := ps.Subscribe(build.MessagesTopic(nn)) //nolint + if err != nil { + panic(err) + } + + go sub.HandleIncomingMessages(ctx, mpool, msgsub) + } + + if bootstrapper { + subscribe() + return + } + + // wait until we are synced within 10 epochs -- env var can override + waitForSync(stmgr, pubsubMsgsSyncEpochs, subscribe) } func NewLocalDiscovery(lc fx.Lifecycle, ds dtypes.MetadataDS) (*discoveryimpl.Local, error) { diff --git a/node/modules/stmgr.go b/node/modules/stmgr.go new file mode 100644 index 00000000000..9d3917b856f --- /dev/null +++ b/node/modules/stmgr.go @@ -0,0 +1,20 @@ +package modules + +import ( + "go.uber.org/fx" + + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" +) + +func StateManager(lc fx.Lifecycle, cs *store.ChainStore, us stmgr.UpgradeSchedule) (*stmgr.StateManager, error) { + sm, err := stmgr.NewStateManagerWithUpgradeSchedule(cs, us) + if err != nil { + return nil, err + } + lc.Append(fx.Hook{ + OnStart: sm.Start, + 
OnStop: sm.Stop, + }) + return sm, nil +} diff --git a/node/modules/storage.go b/node/modules/storage.go index 9c1a18368ce..cb30eb8c29d 100644 --- a/node/modules/storage.go +++ b/node/modules/storage.go @@ -2,12 +2,15 @@ package modules import ( "context" + "path/filepath" "go.uber.org/fx" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/backupds" "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/filecoin-project/lotus/node/repo" ) @@ -27,11 +30,30 @@ func KeyStore(lr repo.LockedRepo) (types.KeyStore, error) { return lr.KeyStore() } -func Datastore(r repo.LockedRepo) (dtypes.MetadataDS, error) { - mds, err := r.Datastore("/metadata") - if err != nil { - return nil, err - } +func Datastore(disableLog bool) func(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.MetadataDS, error) { + return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.MetadataDS, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + mds, err := r.Datastore(ctx, "/metadata") + if err != nil { + return nil, err + } + + var logdir string + if !disableLog { + logdir = filepath.Join(r.Path(), "kvlog/metadata") + } + + bds, err := backupds.Wrap(mds, logdir) + if err != nil { + return nil, xerrors.Errorf("opening backupds: %w", err) + } - return backupds.Wrap(mds), nil + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return bds.CloseLog() + }, + }) + + return bds, nil + } } diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index e9f5db00826..3a3914e0c85 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -1,12 +1,17 @@ package modules import ( + "bytes" "context" "errors" "fmt" "net/http" + "os" + "path/filepath" + "strings" "time" + "github.com/filecoin-project/lotus/markets/pricing" "go.uber.org/fx" "go.uber.org/multierr" "golang.org/x/xerrors" @@ -36,34 
+41,34 @@ import ( "github.com/filecoin-project/go-fil-markets/shared" "github.com/filecoin-project/go-fil-markets/storagemarket" storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/funds" "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-multistore" - paramfetch "github.com/filecoin-project/go-paramfetch" + "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-statestore" "github.com/filecoin-project/go-storedcounter" + "github.com/filecoin-project/lotus/api" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/stores" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" - lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/gen/slashfilter" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/markets" marketevents "github.com/filecoin-project/lotus/markets/loggers" - "github.com/filecoin-project/lotus/markets/retrievaladapter" - "github.com/filecoin-project/lotus/miner" + lotusminer 
"github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" @@ -82,8 +87,8 @@ func minerAddrFromDS(ds dtypes.MetadataDS) (address.Address, error) { return address.NewFromBytes(maddrb) } -func GetParams(sbc *ffiwrapper.Config) error { - ssize, err := sbc.SealProofType.SectorSize() +func GetParams(spt abi.RegisteredSealProof) error { + ssize, err := spt.SectorSize() if err != nil { return err } @@ -94,7 +99,8 @@ func GetParams(sbc *ffiwrapper.Config) error { return nil } - if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), uint64(ssize)); err != nil { + // TODO: We should fetch the params for the actual proof type, not just based on the size. + if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil { return xerrors.Errorf("fetching proof parameters: %w", err) } @@ -111,29 +117,24 @@ func MinerID(ma dtypes.MinerAddress) (dtypes.MinerID, error) { return dtypes.MinerID(id), err } -func StorageNetworkName(ctx helpers.MetricsCtx, a lapi.FullNode) (dtypes.NetworkName, error) { +func StorageNetworkName(ctx helpers.MetricsCtx, a v1api.FullNode) (dtypes.NetworkName, error) { if !build.Devnet { return "testnetnet", nil } return a.StateNetworkName(ctx) } -func ProofsConfig(maddr dtypes.MinerAddress, fnapi lapi.FullNode) (*ffiwrapper.Config, error) { +func SealProofType(maddr dtypes.MinerAddress, fnapi v1api.FullNode) (abi.RegisteredSealProof, error) { mi, err := fnapi.StateMinerInfo(context.TODO(), address.Address(maddr), types.EmptyTSK) if err != nil { - return nil, err + return 0, err } - - spt, err := ffiwrapper.SealProofTypeFromSectorSize(mi.SectorSize) + networkVersion, err := fnapi.StateNetworkVersion(context.TODO(), types.EmptyTSK) if err != nil { - return nil, xerrors.Errorf("bad sector size: %w", err) + return 0, err } - sb := &ffiwrapper.Config{ - 
SealProofType: spt, - } - - return sb, nil + return miner.PreferredSealProofTypeFromWindowPoStType(networkVersion, mi.WindowPoStProofType) } type sidsc struct { @@ -150,19 +151,70 @@ func SectorIDCounter(ds dtypes.MetadataDS) sealing.SectorIDCounter { return &sidsc{sc} } +func AddressSelector(addrConf *config.MinerAddressConfig) func() (*storage.AddressSelector, error) { + return func() (*storage.AddressSelector, error) { + as := &storage.AddressSelector{} + if addrConf == nil { + return as, nil + } + + as.DisableOwnerFallback = addrConf.DisableOwnerFallback + as.DisableWorkerFallback = addrConf.DisableWorkerFallback + + for _, s := range addrConf.PreCommitControl { + addr, err := address.NewFromString(s) + if err != nil { + return nil, xerrors.Errorf("parsing precommit control address: %w", err) + } + + as.PreCommitControl = append(as.PreCommitControl, addr) + } + + for _, s := range addrConf.CommitControl { + addr, err := address.NewFromString(s) + if err != nil { + return nil, xerrors.Errorf("parsing commit control address: %w", err) + } + + as.CommitControl = append(as.CommitControl, addr) + } + + for _, s := range addrConf.TerminateControl { + addr, err := address.NewFromString(s) + if err != nil { + return nil, xerrors.Errorf("parsing terminate control address: %w", err) + } + + as.TerminateControl = append(as.TerminateControl, addr) + } + + for _, s := range addrConf.DealPublishControl { + addr, err := address.NewFromString(s) + if err != nil { + return nil, xerrors.Errorf("parsing deal publishing control address: %w", err) + } + + as.DealPublishControl = append(as.DealPublishControl, addr) + } + + return as, nil + } +} + type StorageMinerParams struct { fx.In Lifecycle fx.Lifecycle MetricsCtx helpers.MetricsCtx - API lapi.FullNode - Host host.Host + API v1api.FullNode MetadataDS dtypes.MetadataDS Sealer sectorstorage.SectorManager SectorIDCounter sealing.SectorIDCounter Verifier ffiwrapper.Verifier + Prover ffiwrapper.Prover GetSealingConfigFn 
dtypes.GetSealingConfigFunc Journal journal.Journal + AddrSel *storage.AddressSelector } func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*storage.Miner, error) { @@ -173,11 +225,12 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st lc = params.Lifecycle api = params.API sealer = params.Sealer - h = params.Host sc = params.SectorIDCounter verif = params.Verifier + prover = params.Prover gsd = params.GetSealingConfigFn j = params.Journal + as = params.AddrSel ) maddr, err := minerAddrFromDS(ds) @@ -187,12 +240,12 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st ctx := helpers.LifecycleCtx(mctx, lc) - fps, err := storage.NewWindowedPoStScheduler(api, fc, sealer, sealer, j, maddr) + fps, err := storage.NewWindowedPoStScheduler(api, fc, as, sealer, verif, sealer, j, maddr) if err != nil { return nil, err } - sm, err := storage.NewMiner(api, maddr, h, ds, sealer, sc, verif, gsd, fc, j) + sm, err := storage.NewMiner(api, maddr, ds, sealer, sc, verif, prover, gsd, fc, j, as) if err != nil { return nil, err } @@ -245,15 +298,58 @@ func HandleDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, h sto }) } +func HandleMigrateProviderFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, node api.FullNode, minerAddress dtypes.MinerAddress) { + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + b, err := ds.Get(datastore.NewKey("/marketfunds/provider")) + if err != nil { + if xerrors.Is(err, datastore.ErrNotFound) { + return nil + } + return err + } + + var value abi.TokenAmount + if err = value.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return err + } + ts, err := node.ChainHead(ctx) + if err != nil { + log.Errorf("provider funds migration - getting chain head: %v", err) + return nil + } + + mi, err := node.StateMinerInfo(ctx, address.Address(minerAddress), ts.Key()) + if err != nil { + log.Errorf("provider funds migration - getting miner info %s: %v", 
minerAddress, err) + return nil + } + + _, err = node.MarketReserveFunds(ctx, mi.Worker, address.Address(minerAddress), value) + if err != nil { + log.Errorf("provider funds migration - reserving funds (wallet %s, addr %s, funds %d): %v", + mi.Worker, minerAddress, value, err) + return nil + } + + return ds.Delete(datastore.NewKey("/marketfunds/provider")) + }, + }) +} + // NewProviderDAGServiceDataTransfer returns a data transfer manager that just // uses the provider's Staging DAG service for transfers -func NewProviderDAGServiceDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.StagingGraphsync, ds dtypes.MetadataDS) (dtypes.ProviderDataTransfer, error) { - sc := storedcounter.New(ds, datastore.NewKey("/datatransfer/provider/counter")) +func NewProviderDAGServiceDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.StagingGraphsync, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ProviderDataTransfer, error) { net := dtnet.NewFromLibp2pHost(h) dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/provider/transfers")) transport := dtgstransport.NewTransport(h.ID(), gs) - dt, err := dtimpl.NewDataTransfer(dtDs, net, transport, sc) + err := os.MkdirAll(filepath.Join(r.Path(), "data-transfer"), 0755) //nolint: gosec + if err != nil && !os.IsExist(err) { + return nil, err + } + + dt, err := dtimpl.NewDataTransfer(dtDs, filepath.Join(r.Path(), "data-transfer"), net, transport) if err != nil { return nil, err } @@ -261,6 +357,7 @@ func NewProviderDAGServiceDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.S dt.OnReady(marketevents.ReadyLogger("provider data transfer")) lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { + dt.SubscribeToEvents(marketevents.DataTransferLogger) return dt.Start(ctx) }, OnStop: func(ctx context.Context) error { @@ -286,8 +383,9 @@ func NewProviderPieceStore(lc fx.Lifecycle, ds dtypes.MetadataDS) (dtypes.Provid return ps, nil } -func StagingMultiDatastore(lc fx.Lifecycle, r repo.LockedRepo) (dtypes.StagingMultiDstore, 
error) { - ds, err := r.Datastore("/staging") +func StagingMultiDatastore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.StagingMultiDstore, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + ds, err := r.Datastore(ctx, "/staging") if err != nil { return nil, xerrors.Errorf("getting datastore out of reop: %w", err) } @@ -308,13 +406,14 @@ func StagingMultiDatastore(lc fx.Lifecycle, r repo.LockedRepo) (dtypes.StagingMu // StagingBlockstore creates a blockstore for staging blocks for a miner // in a storage deal, prior to sealing -func StagingBlockstore(r repo.LockedRepo) (dtypes.StagingBlockstore, error) { - stagingds, err := r.Datastore("/staging") +func StagingBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.StagingBlockstore, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + stagingds, err := r.Datastore(ctx, "/staging") if err != nil { return nil, err } - return blockstore.NewBlockstore(stagingds), nil + return blockstore.FromDatastore(stagingds), nil } // StagingDAG is a DAGService for the StagingBlockstore @@ -339,22 +438,24 @@ func StagingDAG(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBloc // StagingGraphsync creates a graphsync instance which reads and writes blocks // to the StagingBlockstore -func StagingGraphsync(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { - graphsyncNetwork := gsnet.NewFromLibp2pHost(h) - loader := storeutil.LoaderForBlockstore(ibs) - storer := storeutil.StorerForBlockstore(ibs) - gs := graphsync.New(helpers.LifecycleCtx(mctx, lc), graphsyncNetwork, loader, storer, graphsync.RejectAllRequestsByDefault()) - - return gs +func StagingGraphsync(parallelTransfers uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { 
+ graphsyncNetwork := gsnet.NewFromLibp2pHost(h) + loader := storeutil.LoaderForBlockstore(ibs) + storer := storeutil.StorerForBlockstore(ibs) + gs := graphsync.New(helpers.LifecycleCtx(mctx, lc), graphsyncNetwork, loader, storer, graphsync.RejectAllRequestsByDefault(), graphsync.MaxInProgressRequests(parallelTransfers)) + + return gs + } } -func SetupBlockProducer(lc fx.Lifecycle, ds dtypes.MetadataDS, api lapi.FullNode, epp gen.WinningPoStProver, sf *slashfilter.SlashFilter, j journal.Journal) (*miner.Miner, error) { +func SetupBlockProducer(lc fx.Lifecycle, ds dtypes.MetadataDS, api v1api.FullNode, epp gen.WinningPoStProver, sf *slashfilter.SlashFilter, j journal.Journal) (*lotusminer.Miner, error) { minerAddr, err := minerAddrFromDS(ds) if err != nil { return nil, err } - m := miner.NewMiner(api, epp, minerAddr, sf, j) + m := lotusminer.NewMiner(api, epp, minerAddr, sf, j) lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { @@ -371,7 +472,7 @@ func SetupBlockProducer(lc fx.Lifecycle, ds dtypes.MetadataDS, api lapi.FullNode return m, nil } -func NewStorageAsk(ctx helpers.MetricsCtx, fapi lapi.FullNode, ds dtypes.MetadataDS, minerAddress dtypes.MinerAddress, spn storagemarket.StorageProviderNode) (*storedask.StoredAsk, error) { +func NewStorageAsk(ctx helpers.MetricsCtx, fapi v1api.FullNode, ds dtypes.MetadataDS, minerAddress dtypes.MinerAddress, spn storagemarket.StorageProviderNode) (*storedask.StoredAsk, error) { mi, err := fapi.StateMinerInfo(ctx, address.Address(minerAddress), types.EmptyTSK) if err != nil { @@ -384,32 +485,25 @@ func NewStorageAsk(ctx helpers.MetricsCtx, fapi lapi.FullNode, ds dtypes.Metadat if err != nil { return nil, err } - storedAsk, err := storedask.NewStoredAsk(namespace.Wrap(providerDs, datastore.NewKey("/storage-ask")), datastore.NewKey("latest"), spn, address.Address(minerAddress), + return storedask.NewStoredAsk(namespace.Wrap(providerDs, datastore.NewKey("/storage-ask")), datastore.NewKey("latest"), spn, 
address.Address(minerAddress), storagemarket.MaxPieceSize(abi.PaddedPieceSize(mi.SectorSize))) - if err != nil { - return nil, err - } - if err != nil { - return storedAsk, err - } - return storedAsk, nil -} - -type ProviderDealFunds funds.DealFunds - -func NewProviderDealFunds(ds dtypes.MetadataDS) (ProviderDealFunds, error) { - return funds.NewDealFunds(ds, datastore.NewKey("/marketfunds/provider")) } func BasicDealFilter(user dtypes.StorageDealFilter) func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc, offlineOk dtypes.ConsiderOfflineStorageDealsConfigFunc, + verifiedOk dtypes.ConsiderVerifiedStorageDealsConfigFunc, + unverifiedOk dtypes.ConsiderUnverifiedStorageDealsConfigFunc, blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc, expectedSealTimeFunc dtypes.GetExpectedSealDurationFunc, + startDelay dtypes.GetMaxDealStartDelayFunc, spn storagemarket.StorageProviderNode) dtypes.StorageDealFilter { return func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc, offlineOk dtypes.ConsiderOfflineStorageDealsConfigFunc, + verifiedOk dtypes.ConsiderVerifiedStorageDealsConfigFunc, + unverifiedOk dtypes.ConsiderUnverifiedStorageDealsConfigFunc, blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc, expectedSealTimeFunc dtypes.GetExpectedSealDurationFunc, + startDelay dtypes.GetMaxDealStartDelayFunc, spn storagemarket.StorageProviderNode) dtypes.StorageDealFilter { return func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) { @@ -433,6 +527,26 @@ func BasicDealFilter(user dtypes.StorageDealFilter) func(onlineOk dtypes.Conside return false, "miner is not accepting offline storage deals", nil } + b, err = verifiedOk() + if err != nil { + return false, "miner error", err + } + + if deal.Proposal.VerifiedDeal && !b { + log.Warnf("verified storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String()) + return false, "miner is not accepting verified storage deals", nil + } + + b, err 
= unverifiedOk() + if err != nil { + return false, "miner error", err + } + + if !deal.Proposal.VerifiedDeal && !b { + log.Warnf("unverified storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String()) + return false, "miner is not accepting unverified storage deals", nil + } + blocklist, err := blocklistFunc() if err != nil { return false, "miner error", err @@ -461,9 +575,14 @@ func BasicDealFilter(user dtypes.StorageDealFilter) func(onlineOk dtypes.Conside return false, fmt.Sprintf("cannot seal a sector before %s", deal.Proposal.StartEpoch), nil } + sd, err := startDelay() + if err != nil { + return false, "miner error", err + } + // Reject if it's more than 7 days in the future // TODO: read from cfg - maxStartEpoch := earliest + abi.ChainEpoch(7*builtin.SecondsInDay/build.BlockDelaySecs) + maxStartEpoch := earliest + abi.ChainEpoch(uint64(sd.Seconds())/build.BlockDelaySecs) if deal.Proposal.StartEpoch > maxStartEpoch { return false, fmt.Sprintf("deal start epoch is too far in the future: %s > %s", deal.Proposal.StartEpoch, maxStartEpoch), nil } @@ -478,7 +597,6 @@ func BasicDealFilter(user dtypes.StorageDealFilter) func(onlineOk dtypes.Conside } func StorageProvider(minerAddress dtypes.MinerAddress, - ffiConfig *ffiwrapper.Config, storedAsk *storedask.StoredAsk, h host.Host, ds dtypes.MetadataDS, mds dtypes.StagingMultiDstore, @@ -487,7 +605,6 @@ func StorageProvider(minerAddress dtypes.MinerAddress, dataTransfer dtypes.ProviderDataTransfer, spn storagemarket.StorageProviderNode, df dtypes.StorageDealFilter, - funds ProviderDealFunds, ) (storagemarket.StorageProvider, error) { net := smnet.NewFromLibp2pHost(h) store, err := piecefilestore.NewLocalFileStore(piecefilestore.OsPath(r.Path())) @@ -497,7 +614,7 @@ func StorageProvider(minerAddress dtypes.MinerAddress, opt := storageimpl.CustomDealDecisionLogic(storageimpl.DealDeciderFunc(df)) - return storageimpl.NewProvider(net, namespace.Wrap(ds, 
datastore.NewKey("/deals/provider")), store, mds, pieceStore, dataTransfer, spn, address.Address(minerAddress), ffiConfig.SealProofType, storedAsk, funds, opt) + return storageimpl.NewProvider(net, namespace.Wrap(ds, datastore.NewKey("/deals/provider")), store, mds, pieceStore, dataTransfer, spn, address.Address(minerAddress), storedAsk, opt) } func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc, @@ -533,42 +650,60 @@ func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dt } } +func RetrievalNetwork(h host.Host) rmnet.RetrievalMarketNetwork { + return rmnet.NewFromLibp2pHost(h) +} + +// RetrievalPricingFunc configures the pricing function to use for retrieval deals. +func RetrievalPricingFunc(cfg config.DealmakingConfig) func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc, + _ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc { + + return func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc, + _ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc { + if cfg.RetrievalPricing.Strategy == config.RetrievalPricingExternalMode { + return pricing.ExternalRetrievalPricingFunc(cfg.RetrievalPricing.External.Path) + } + + return retrievalimpl.DefaultPricingFunc(cfg.RetrievalPricing.Default.VerifiedDealsFreeTransfer) + } +} + // RetrievalProvider creates a new retrieval provider attached to the provider blockstore -func RetrievalProvider(h host.Host, - miner *storage.Miner, - sealer sectorstorage.SectorManager, - full lapi.FullNode, +func RetrievalProvider( + maddr dtypes.MinerAddress, + adapter retrievalmarket.RetrievalProviderNode, + netwk rmnet.RetrievalMarketNetwork, ds dtypes.MetadataDS, pieceStore dtypes.ProviderPieceStore, mds dtypes.StagingMultiDstore, dt dtypes.ProviderDataTransfer, - onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc, - offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc, + pricingFnc 
dtypes.RetrievalPricingFunc, userFilter dtypes.RetrievalDealFilter, ) (retrievalmarket.RetrievalProvider, error) { - adapter := retrievaladapter.NewRetrievalProviderNode(miner, sealer, full) - - maddr, err := minerAddrFromDS(ds) - if err != nil { - return nil, err - } - - netwk := rmnet.NewFromLibp2pHost(h) opt := retrievalimpl.DealDeciderOpt(retrievalimpl.DealDecider(userFilter)) - - return retrievalimpl.NewProvider(maddr, adapter, netwk, pieceStore, mds, dt, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")), opt) + return retrievalimpl.NewProvider(address.Address(maddr), adapter, netwk, pieceStore, mds, dt, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")), + retrievalimpl.RetrievalPricingFunc(pricingFnc), opt) } var WorkerCallsPrefix = datastore.NewKey("/worker/calls") var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls") -func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth, ds dtypes.MetadataDS) (*sectorstorage.Manager, error) { +func LocalStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, urls stores.URLs) (*stores.Local, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + return stores.NewLocal(ctx, ls, si, urls) +} + +func RemoteStorage(lstor *stores.Local, si stores.SectorIndex, sa sectorstorage.StorageAuth, sc sectorstorage.SealerConfig) *stores.Remote { + return stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit, &stores.DefaultPartialFileHandler{}) +} + +func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, lstor *stores.Local, stor *stores.Remote, ls stores.LocalStorage, si stores.SectorIndex, sc sectorstorage.SealerConfig, ds dtypes.MetadataDS) (*sectorstorage.Manager, error) { ctx := helpers.LifecycleCtx(mctx, lc) wsts := statestore.New(namespace.Wrap(ds, WorkerCallsPrefix)) smsts := 
statestore.New(namespace.Wrap(ds, ManagerWorkPrefix)) - sst, err := sectorstorage.New(ctx, ls, si, cfg, sc, urls, sa, wsts, smsts) + sst, err := sectorstorage.New(ctx, lstor, stor, ls, si, sc, wsts, smsts) if err != nil { return nil, err } @@ -580,7 +715,7 @@ func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStor return sst, nil } -func StorageAuth(ctx helpers.MetricsCtx, ca lapi.Common) (sectorstorage.StorageAuth, error) { +func StorageAuth(ctx helpers.MetricsCtx, ca v0api.Common) (sectorstorage.StorageAuth, error) { token, err := ca.AuthNew(ctx, []auth.Permission{"admin"}) if err != nil { return nil, xerrors.Errorf("creating storage auth header: %w", err) @@ -591,6 +726,18 @@ func StorageAuth(ctx helpers.MetricsCtx, ca lapi.Common) (sectorstorage.StorageA return sectorstorage.StorageAuth(headers), nil } +func StorageAuthWithURL(apiInfo string) func(ctx helpers.MetricsCtx, ca v0api.Common) (sectorstorage.StorageAuth, error) { + return func(ctx helpers.MetricsCtx, ca v0api.Common) (sectorstorage.StorageAuth, error) { + s := strings.Split(apiInfo, ":") + if len(s) != 2 { + return nil, errors.New("unexpected format of `apiInfo`") + } + headers := http.Header{} + headers.Add("Authorization", "Bearer "+s[0]) + return sectorstorage.StorageAuth(headers), nil + } +} + func NewConsiderOnlineStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOnlineStorageDealsConfigFunc, error) { return func() (out bool, err error) { err = readCfg(r, func(cfg *config.StorageMiner) { @@ -681,28 +828,113 @@ func NewSetConsiderOfflineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.Se }, nil } +func NewConsiderVerifiedStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderVerifiedStorageDealsConfigFunc, error) { + return func() (out bool, err error) { + err = readCfg(r, func(cfg *config.StorageMiner) { + out = cfg.Dealmaking.ConsiderVerifiedStorageDeals + }) + return + }, nil +} + +func NewSetConsideringVerifiedStorageDealsFunc(r repo.LockedRepo) 
(dtypes.SetConsiderVerifiedStorageDealsConfigFunc, error) { + return func(b bool) (err error) { + err = mutateCfg(r, func(cfg *config.StorageMiner) { + cfg.Dealmaking.ConsiderVerifiedStorageDeals = b + }) + return + }, nil +} + +func NewConsiderUnverifiedStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderUnverifiedStorageDealsConfigFunc, error) { + return func() (out bool, err error) { + err = readCfg(r, func(cfg *config.StorageMiner) { + out = cfg.Dealmaking.ConsiderUnverifiedStorageDeals + }) + return + }, nil +} + +func NewSetConsideringUnverifiedStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderUnverifiedStorageDealsConfigFunc, error) { + return func(b bool) (err error) { + err = mutateCfg(r, func(cfg *config.StorageMiner) { + cfg.Dealmaking.ConsiderUnverifiedStorageDeals = b + }) + return + }, nil +} + func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error) { return func(cfg sealiface.Config) (err error) { err = mutateCfg(r, func(c *config.StorageMiner) { c.Sealing = config.SealingConfig{ - MaxWaitDealsSectors: cfg.MaxWaitDealsSectors, - MaxSealingSectors: cfg.MaxSealingSectors, - WaitDealsDelay: config.Duration(cfg.WaitDealsDelay), + MaxWaitDealsSectors: cfg.MaxWaitDealsSectors, + MaxSealingSectors: cfg.MaxSealingSectors, + MaxSealingSectorsForDeals: cfg.MaxSealingSectorsForDeals, + WaitDealsDelay: config.Duration(cfg.WaitDealsDelay), + AlwaysKeepUnsealedCopy: cfg.AlwaysKeepUnsealedCopy, + FinalizeEarly: cfg.FinalizeEarly, + + CollateralFromMinerBalance: cfg.CollateralFromMinerBalance, + AvailableBalanceBuffer: types.FIL(cfg.AvailableBalanceBuffer), + DisableCollateralFallback: cfg.DisableCollateralFallback, + + BatchPreCommits: cfg.BatchPreCommits, + MaxPreCommitBatch: cfg.MaxPreCommitBatch, + PreCommitBatchWait: config.Duration(cfg.PreCommitBatchWait), + PreCommitBatchSlack: config.Duration(cfg.PreCommitBatchSlack), + + AggregateCommits: cfg.AggregateCommits, + MinCommitBatch: cfg.MinCommitBatch, + MaxCommitBatch: 
cfg.MaxCommitBatch, + CommitBatchWait: config.Duration(cfg.CommitBatchWait), + CommitBatchSlack: config.Duration(cfg.CommitBatchSlack), + AggregateAboveBaseFee: types.FIL(cfg.AggregateAboveBaseFee), + + TerminateBatchMax: cfg.TerminateBatchMax, + TerminateBatchMin: cfg.TerminateBatchMin, + TerminateBatchWait: config.Duration(cfg.TerminateBatchWait), } }) return }, nil } +func ToSealingConfig(cfg *config.StorageMiner) sealiface.Config { + return sealiface.Config{ + MaxWaitDealsSectors: cfg.Sealing.MaxWaitDealsSectors, + MaxSealingSectors: cfg.Sealing.MaxSealingSectors, + MaxSealingSectorsForDeals: cfg.Sealing.MaxSealingSectorsForDeals, + WaitDealsDelay: time.Duration(cfg.Sealing.WaitDealsDelay), + AlwaysKeepUnsealedCopy: cfg.Sealing.AlwaysKeepUnsealedCopy, + FinalizeEarly: cfg.Sealing.FinalizeEarly, + + CollateralFromMinerBalance: cfg.Sealing.CollateralFromMinerBalance, + AvailableBalanceBuffer: types.BigInt(cfg.Sealing.AvailableBalanceBuffer), + DisableCollateralFallback: cfg.Sealing.DisableCollateralFallback, + + BatchPreCommits: cfg.Sealing.BatchPreCommits, + MaxPreCommitBatch: cfg.Sealing.MaxPreCommitBatch, + PreCommitBatchWait: time.Duration(cfg.Sealing.PreCommitBatchWait), + PreCommitBatchSlack: time.Duration(cfg.Sealing.PreCommitBatchSlack), + + AggregateCommits: cfg.Sealing.AggregateCommits, + MinCommitBatch: cfg.Sealing.MinCommitBatch, + MaxCommitBatch: cfg.Sealing.MaxCommitBatch, + CommitBatchWait: time.Duration(cfg.Sealing.CommitBatchWait), + CommitBatchSlack: time.Duration(cfg.Sealing.CommitBatchSlack), + AggregateAboveBaseFee: types.BigInt(cfg.Sealing.AggregateAboveBaseFee), + + TerminateBatchMax: cfg.Sealing.TerminateBatchMax, + TerminateBatchMin: cfg.Sealing.TerminateBatchMin, + TerminateBatchWait: time.Duration(cfg.Sealing.TerminateBatchWait), + } +} + func NewGetSealConfigFunc(r repo.LockedRepo) (dtypes.GetSealingConfigFunc, error) { return func() (out sealiface.Config, err error) { err = readCfg(r, func(cfg *config.StorageMiner) { - out = 
sealiface.Config{ - MaxWaitDealsSectors: cfg.Sealing.MaxWaitDealsSectors, - MaxSealingSectors: cfg.Sealing.MaxSealingSectors, - MaxSealingSectorsForDeals: cfg.Sealing.MaxSealingSectorsForDeals, - WaitDealsDelay: time.Duration(cfg.Sealing.WaitDealsDelay), - } + out = ToSealingConfig(cfg) }) return }, nil @@ -726,6 +958,24 @@ func NewGetExpectedSealDurationFunc(r repo.LockedRepo) (dtypes.GetExpectedSealDu }, nil } +func NewSetMaxDealStartDelayFunc(r repo.LockedRepo) (dtypes.SetMaxDealStartDelayFunc, error) { + return func(delay time.Duration) (err error) { + err = mutateCfg(r, func(cfg *config.StorageMiner) { + cfg.Dealmaking.MaxDealStartDelay = config.Duration(delay) + }) + return + }, nil +} + +func NewGetMaxDealStartDelayFunc(r repo.LockedRepo) (dtypes.GetMaxDealStartDelayFunc, error) { + return func() (out time.Duration, err error) { + err = readCfg(r, func(cfg *config.StorageMiner) { + out = time.Duration(cfg.Dealmaking.MaxDealStartDelay) + }) + return + }, nil +} + func readCfg(r repo.LockedRepo, accessor func(*config.StorageMiner)) error { raw, err := r.Config() if err != nil { diff --git a/node/modules/storageminer_svc.go b/node/modules/storageminer_svc.go new file mode 100644 index 00000000000..0a4be219212 --- /dev/null +++ b/node/modules/storageminer_svc.go @@ -0,0 +1,71 @@ +package modules + +import ( + "context" + + "github.com/filecoin-project/lotus/storage/sectorblocks" + + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/client" + cliutil "github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/node/modules/helpers" +) + +type MinerSealingService api.StorageMiner +type MinerStorageService api.StorageMiner + +var _ sectorblocks.SectorBuilder = *new(MinerSealingService) + +func connectMinerService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (api.StorageMiner, error) { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) 
(api.StorageMiner, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + info := cliutil.ParseApiInfo(apiInfo) + addr, err := info.DialArgs("v0") + if err != nil { + return nil, xerrors.Errorf("could not get DialArgs: %w", err) + } + + log.Infof("Checking (svc) api version of %s", addr) + + mapi, closer, err := client.NewStorageMinerRPCV0(ctx, addr, info.AuthHeader()) + if err != nil { + return nil, err + } + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + v, err := mapi.Version(ctx) + if err != nil { + return xerrors.Errorf("checking version: %w", err) + } + + if !v.APIVersion.EqMajorMinor(api.MinerAPIVersion0) { + return xerrors.Errorf("remote service API version didn't match (expected %s, remote %s)", api.MinerAPIVersion0, v.APIVersion) + } + + return nil + }, + OnStop: func(context.Context) error { + closer() + return nil + }}) + + return mapi, nil + } +} + +func ConnectSealingService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerSealingService, error) { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerSealingService, error) { + log.Info("Connecting sealing service to miner") + return connectMinerService(apiInfo)(mctx, lc) + } +} + +func ConnectStorageService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerStorageService, error) { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerStorageService, error) { + log.Info("Connecting storage service to miner") + return connectMinerService(apiInfo)(mctx, lc) + } +} diff --git a/node/modules/testing/genesis.go b/node/modules/testing/genesis.go index fa9e0cff786..a3d25e36a1d 100644 --- a/node/modules/testing/genesis.go +++ b/node/modules/testing/genesis.go @@ -81,7 +81,7 @@ func MakeGenesis(outFile, genesisTemplate string) func(bs dtypes.ChainBlockstore fmt.Printf("GENESIS MINER ADDRESS: t0%d\n", genesis2.MinerStart) - f, err := os.OpenFile(outFile, os.O_CREATE|os.O_WRONLY, 0644) + f, err := os.OpenFile(outFile, 
os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { return nil, err } diff --git a/node/node_test.go b/node/node_test.go deleted file mode 100644 index e553e83b2f0..00000000000 --- a/node/node_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package node_test - -import ( - "os" - "testing" - "time" - - builder "github.com/filecoin-project/lotus/node/test" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/lib/lotuslog" - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/lotus/api/test" - "github.com/filecoin-project/lotus/chain/actors/policy" -) - -func init() { - _ = logging.SetLogLevel("*", "INFO") - - policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) - policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) - policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) -} - -func TestAPI(t *testing.T) { - test.TestApis(t, builder.Builder) -} - -func TestAPIRPC(t *testing.T) { - test.TestApis(t, builder.RPCBuilder) -} - -func TestAPIDealFlow(t *testing.T) { - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - t.Run("TestDealFlow", func(t *testing.T) { - test.TestDealFlow(t, builder.MockSbBuilder, 10*time.Millisecond, false, false) - }) - t.Run("WithExportedCAR", func(t *testing.T) { - test.TestDealFlow(t, builder.MockSbBuilder, 10*time.Millisecond, true, false) - }) - t.Run("TestDoubleDealFlow", func(t *testing.T) { - test.TestDoubleDealFlow(t, builder.MockSbBuilder, 10*time.Millisecond) - }) - t.Run("TestFastRetrievalDealFlow", func(t *testing.T) { - test.TestFastRetrievalDealFlow(t, builder.MockSbBuilder, 10*time.Millisecond) - }) -} - -func TestAPIDealFlowReal(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - lotuslog.SetupLogLevels() - logging.SetLogLevel("miner", "ERROR") - 
logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - // TODO: just set this globally? - oldDelay := policy.GetPreCommitChallengeDelay() - policy.SetPreCommitChallengeDelay(5) - t.Cleanup(func() { - policy.SetPreCommitChallengeDelay(oldDelay) - }) - - t.Run("basic", func(t *testing.T) { - test.TestDealFlow(t, builder.Builder, time.Second, false, false) - }) - - t.Run("fast-retrieval", func(t *testing.T) { - test.TestDealFlow(t, builder.Builder, time.Second, false, true) - }) - - t.Run("retrieval-second", func(t *testing.T) { - test.TestSenondDealRetrieval(t, builder.Builder, time.Second) - }) -} - -func TestDealMining(t *testing.T) { - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - test.TestDealMining(t, builder.MockSbBuilder, 50*time.Millisecond, false) -} - -func TestPledgeSectors(t *testing.T) { - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - t.Run("1", func(t *testing.T) { - test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 1) - }) - - t.Run("100", func(t *testing.T) { - test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 100) - }) - - t.Run("1000", func(t *testing.T) { - if testing.Short() { // takes ~16s - t.Skip("skipping test in short mode") - } - - test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 1000) - }) -} - -func TestTapeFix(t *testing.T) { - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - 
test.TestTapeFix(t, builder.MockSbBuilder, 2*time.Millisecond) -} - -func TestWindowedPost(t *testing.T) { - if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" { - t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") - } - - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - test.TestWindowPost(t, builder.MockSbBuilder, 2*time.Millisecond, 10) -} - -func TestCCUpgrade(t *testing.T) { - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - test.TestCCUpgrade(t, builder.MockSbBuilder, 5*time.Millisecond) -} - -func TestPaymentChannels(t *testing.T) { - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("pubsub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - test.TestPaymentChannels(t, builder.MockSbBuilder, 5*time.Millisecond) -} diff --git a/node/repo/blockstore_opts.go b/node/repo/blockstore_opts.go new file mode 100644 index 00000000000..1705217d304 --- /dev/null +++ b/node/repo/blockstore_opts.go @@ -0,0 +1,46 @@ +package repo + +import badgerbs "github.com/filecoin-project/lotus/blockstore/badger" + +// BadgerBlockstoreOptions returns the badger options to apply for the provided +// domain. +func BadgerBlockstoreOptions(domain BlockstoreDomain, path string, readonly bool) (badgerbs.Options, error) { + opts := badgerbs.DefaultOptions(path) + + // Due to legacy usage of blockstore.Blockstore, over a datastore, all + // blocks are prefixed with this namespace. In the future, this can go away, + // in order to shorten keys, but it'll require a migration. 
+ opts.Prefix = "/blocks/" + + // Blockstore values are immutable; therefore we do not expect any + // conflicts to emerge. + opts.DetectConflicts = false + + // This is to optimize the database on close so it can be opened + // read-only and efficiently queried. + opts.CompactL0OnClose = true + + // The alternative is "crash on start and tell the user to fix it". This + // will truncate corrupt and unsynced data, which we don't guarantee to + // persist anyways. + opts.Truncate = true + + // We mmap the index and the value logs; this is important to enable + // zero-copy value access. + opts.ValueLogLoadingMode = badgerbs.MemoryMap + opts.TableLoadingMode = badgerbs.MemoryMap + + // Embed only values < 128 bytes in the LSM tree; larger values are stored + // in value logs. + opts.ValueThreshold = 128 + + // Default table size is already 64MiB. This is here to make it explicit. + opts.MaxTableSize = 64 << 20 + + // NOTE: The chain blockstore doesn't require any GC (blocks are never + // deleted). This will change if we move to a tiered blockstore. 
+ + opts.ReadOnly = readonly + + return opts, nil +} diff --git a/node/repo/fsrepo.go b/node/repo/fsrepo.go index c1b6b5233b1..9323410ddd0 100644 --- a/node/repo/fsrepo.go +++ b/node/repo/fsrepo.go @@ -2,6 +2,7 @@ package repo import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -12,6 +13,7 @@ import ( "sync" "github.com/BurntSushi/toml" + "github.com/ipfs/go-datastore" fslock "github.com/ipfs/go-fs-lock" logging "github.com/ipfs/go-log/v2" @@ -20,6 +22,8 @@ import ( "github.com/multiformats/go-multiaddr" "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/blockstore" + badgerbs "github.com/filecoin-project/lotus/blockstore/badger" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/stores" @@ -257,10 +261,21 @@ type fsLockedRepo struct { dsErr error dsOnce sync.Once + bs blockstore.Blockstore + bsErr error + bsOnce sync.Once + ssPath string + ssErr error + ssOnce sync.Once + storageLk sync.Mutex configLk sync.Mutex } +func (fsr *fsLockedRepo) Readonly() bool { + return fsr.readonly +} + func (fsr *fsLockedRepo) Path() string { return fsr.path } @@ -279,11 +294,80 @@ func (fsr *fsLockedRepo) Close() error { } } + // type assertion will return ok=false if fsr.bs is nil altogether. + if c, ok := fsr.bs.(io.Closer); ok && c != nil { + if err := c.Close(); err != nil { + return xerrors.Errorf("could not close blockstore: %w", err) + } + } + err = fsr.closer.Close() fsr.closer = nil return err } +// Blockstore returns a blockstore for the provided data domain. 
+func (fsr *fsLockedRepo) Blockstore(ctx context.Context, domain BlockstoreDomain) (blockstore.Blockstore, error) { + if domain != UniversalBlockstore { + return nil, ErrInvalidBlockstoreDomain + } + + fsr.bsOnce.Do(func() { + path := fsr.join(filepath.Join(fsDatastore, "chain")) + readonly := fsr.readonly + + if err := os.MkdirAll(path, 0755); err != nil { + fsr.bsErr = err + return + } + + opts, err := BadgerBlockstoreOptions(domain, path, readonly) + if err != nil { + fsr.bsErr = err + return + } + + // + // Tri-state environment variable LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC + // - unset == the default (currently fsync enabled) + // - set with a false-y value == fsync enabled no matter what a future default is + // - set with any other value == fsync is disabled ignored defaults (recommended for day-to-day use) + // + if nosyncBs, nosyncBsSet := os.LookupEnv("LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC"); nosyncBsSet { + nosyncBs = strings.ToLower(nosyncBs) + if nosyncBs == "" || nosyncBs == "0" || nosyncBs == "false" || nosyncBs == "no" { + opts.SyncWrites = true + } else { + opts.SyncWrites = false + } + } + + bs, err := badgerbs.Open(opts) + if err != nil { + fsr.bsErr = err + return + } + fsr.bs = blockstore.WrapIDStore(bs) + }) + + return fsr.bs, fsr.bsErr +} + +func (fsr *fsLockedRepo) SplitstorePath() (string, error) { + fsr.ssOnce.Do(func() { + path := fsr.join(filepath.Join(fsDatastore, "splitstore")) + + if err := os.MkdirAll(path, 0755); err != nil { + fsr.ssErr = err + return + } + + fsr.ssPath = path + }) + + return fsr.ssPath, fsr.ssErr +} + // join joins path elements with fsr.path func (fsr *fsLockedRepo) join(paths ...string) string { return filepath.Join(append([]string{fsr.path}, paths...)...) 
@@ -476,17 +560,31 @@ func (fsr *fsLockedRepo) Get(name string) (types.KeyInfo, error) { return res, nil } +const KTrashPrefix = "trash-" + // Put saves key info under given name func (fsr *fsLockedRepo) Put(name string, info types.KeyInfo) error { + return fsr.put(name, info, 0) +} + +func (fsr *fsLockedRepo) put(rawName string, info types.KeyInfo, retries int) error { if err := fsr.stillValid(); err != nil { return err } + name := rawName + if retries > 0 { + name = fmt.Sprintf("%s-%d", rawName, retries) + } + encName := base32.RawStdEncoding.EncodeToString([]byte(name)) keyPath := fsr.join(fsKeystore, encName) _, err := os.Stat(keyPath) - if err == nil { + if err == nil && strings.HasPrefix(name, KTrashPrefix) { + // retry writing the trash-prefixed file with a number suffix + return fsr.put(rawName, info, retries+1) + } else if err == nil { return xerrors.Errorf("checking key before put '%s': %w", name, types.ErrKeyExists) } else if !os.IsNotExist(err) { return xerrors.Errorf("checking key before put '%s': %w", name, err) diff --git a/node/repo/fsrepo_ds.go b/node/repo/fsrepo_ds.go index aa91d2514d0..09fb854606f 100644 --- a/node/repo/fsrepo_ds.go +++ b/node/repo/fsrepo_ds.go @@ -1,23 +1,23 @@ package repo import ( + "context" "os" "path/filepath" - "github.com/ipfs/go-datastore" + dgbadger "github.com/dgraph-io/badger/v2" + ldbopts "github.com/syndtr/goleveldb/leveldb/opt" "golang.org/x/xerrors" - dgbadger "github.com/dgraph-io/badger/v2" + "github.com/ipfs/go-datastore" badger "github.com/ipfs/go-ds-badger2" levelds "github.com/ipfs/go-ds-leveldb" measure "github.com/ipfs/go-ds-measure" - ldbopts "github.com/syndtr/goleveldb/leveldb/opt" ) type dsCtor func(path string, readonly bool) (datastore.Batching, error) var fsDatastores = map[string]dsCtor{ - "chain": chainBadgerDs, "metadata": levelDs, // Those need to be fast for large writes... 
but also need a really good GC :c @@ -26,24 +26,12 @@ var fsDatastores = map[string]dsCtor{ "client": badgerDs, // client specific } -func chainBadgerDs(path string, readonly bool) (datastore.Batching, error) { - opts := badger.DefaultOptions - opts.GcInterval = 0 // disable GC for chain datastore - opts.ReadOnly = readonly - - opts.Options = dgbadger.DefaultOptions("").WithTruncate(true). - WithValueThreshold(1 << 10) - - return badger.NewDatastore(path, &opts) -} - func badgerDs(path string, readonly bool) (datastore.Batching, error) { opts := badger.DefaultOptions opts.ReadOnly = readonly opts.Options = dgbadger.DefaultOptions("").WithTruncate(true). WithValueThreshold(1 << 10) - return badger.NewDatastore(path, &opts) } @@ -80,7 +68,7 @@ func (fsr *fsLockedRepo) openDatastores(readonly bool) (map[string]datastore.Bat return out, nil } -func (fsr *fsLockedRepo) Datastore(ns string) (datastore.Batching, error) { +func (fsr *fsLockedRepo) Datastore(_ context.Context, ns string) (datastore.Batching, error) { fsr.dsOnce.Do(func() { fsr.ds, fsr.dsErr = fsr.openDatastores(fsr.readonly) }) diff --git a/node/repo/importmgr/mgr.go b/node/repo/importmgr/mgr.go index 31991617add..936d9b60662 100644 --- a/node/repo/importmgr/mgr.go +++ b/node/repo/importmgr/mgr.go @@ -7,7 +7,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-multistore" - "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" ) @@ -16,7 +16,7 @@ type Mgr struct { mds *multistore.MultiStore ds datastore.Batching - Blockstore blockstore.Blockstore + Blockstore blockstore.BasicBlockstore } type Label string @@ -31,7 +31,7 @@ const ( func New(mds *multistore.MultiStore, ds datastore.Batching) *Mgr { return &Mgr{ mds: mds, - Blockstore: mds.MultiReadBlockstore(), + Blockstore: blockstore.Adapt(mds.MultiReadBlockstore()), ds: datastore.NewLogDatastore(namespace.Wrap(ds, 
datastore.NewKey("/stores")), "storess"), } diff --git a/node/repo/interface.go b/node/repo/interface.go index c25bcb53485..b169ee5cc78 100644 --- a/node/repo/interface.go +++ b/node/repo/interface.go @@ -1,22 +1,40 @@ package repo import ( + "context" "errors" "github.com/ipfs/go-datastore" "github.com/multiformats/go-multiaddr" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/chain/types" ) +// BlockstoreDomain represents the domain of a blockstore. +type BlockstoreDomain string + +const ( + // UniversalBlockstore represents the blockstore domain for all data. + // Right now, this includes chain objects (tipsets, blocks, messages), as + // well as state. In the future, they may get segregated into different + // domains. + UniversalBlockstore = BlockstoreDomain("universal") + HotBlockstore = BlockstoreDomain("hot") +) + var ( ErrNoAPIEndpoint = errors.New("API not running (no endpoint)") ErrNoAPIToken = errors.New("API token not set") ErrRepoAlreadyLocked = errors.New("repo is already locked (lotus daemon already running)") ErrClosedRepo = errors.New("repo is no longer open") + + // ErrInvalidBlockstoreDomain is returned by LockedRepo#Blockstore() when + // an unrecognized domain is requested. + ErrInvalidBlockstoreDomain = errors.New("invalid blockstore domain") ) type Repo interface { @@ -35,7 +53,19 @@ type LockedRepo interface { Close() error // Returns datastore defined in this repo. - Datastore(namespace string) (datastore.Batching, error) + // The supplied context must only be used to initialize the datastore. + // The implementation should not retain the context for usage throughout + // the lifecycle. + Datastore(ctx context.Context, namespace string) (datastore.Batching, error) + + // Blockstore returns an IPLD blockstore for the requested domain. 
+ // The supplied context must only be used to initialize the blockstore. + // The implementation should not retain the context for usage throughout + // the lifecycle. + Blockstore(ctx context.Context, domain BlockstoreDomain) (blockstore.Blockstore, error) + + // SplitstorePath returns the path for the SplitStore + SplitstorePath() (string, error) // Returns config in this repo Config() (interface{}, error) @@ -58,4 +88,7 @@ type LockedRepo interface { // Path returns absolute path of the repo Path() string + + // Readonly returns true if the repo is readonly + Readonly() bool } diff --git a/node/repo/memrepo.go b/node/repo/memrepo.go index 34e3637ebb0..00ea32b88b5 100644 --- a/node/repo/memrepo.go +++ b/node/repo/memrepo.go @@ -1,6 +1,7 @@ package repo import ( + "context" "encoding/json" "io/ioutil" "os" @@ -14,9 +15,9 @@ import ( "github.com/multiformats/go-multiaddr" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" - + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/node/config" ) @@ -31,8 +32,9 @@ type MemRepo struct { repoLock chan struct{} token *byte - datastore datastore.Datastore - keystore map[string]types.KeyInfo + datastore datastore.Datastore + keystore map[string]types.KeyInfo + blockstore blockstore.Blockstore // given a repo type, produce the default config configF func(t RepoType) interface{} @@ -158,11 +160,11 @@ func NewMemory(opts *MemRepoOptions) *MemRepo { } return &MemRepo{ - repoLock: make(chan struct{}, 1), - - datastore: opts.Ds, - configF: opts.ConfigF, - keystore: opts.KeyStore, + repoLock: make(chan struct{}, 1), + blockstore: blockstore.WrapIDStore(blockstore.NewMemorySync()), + datastore: opts.Ds, + configF: opts.ConfigF, + keystore: opts.KeyStore, } } @@ -199,6 +201,10 @@ func 
(mem *MemRepo) Lock(t RepoType) (LockedRepo, error) { }, nil } +func (lmem *lockedMemRepo) Readonly() bool { + return false +} + func (lmem *lockedMemRepo) checkToken() error { lmem.RLock() defer lmem.RUnlock() @@ -235,7 +241,7 @@ func (lmem *lockedMemRepo) Close() error { } -func (lmem *lockedMemRepo) Datastore(ns string) (datastore.Batching, error) { +func (lmem *lockedMemRepo) Datastore(_ context.Context, ns string) (datastore.Batching, error) { if err := lmem.checkToken(); err != nil { return nil, err } @@ -243,6 +249,17 @@ func (lmem *lockedMemRepo) Datastore(ns string) (datastore.Batching, error) { return namespace.Wrap(lmem.mem.datastore, datastore.NewKey(ns)), nil } +func (lmem *lockedMemRepo) Blockstore(ctx context.Context, domain BlockstoreDomain) (blockstore.Blockstore, error) { + if domain != UniversalBlockstore { + return nil, ErrInvalidBlockstoreDomain + } + return lmem.mem.blockstore, nil +} + +func (lmem *lockedMemRepo) SplitstorePath() (string, error) { + return ioutil.TempDir("", "splitstore.*") +} + func (lmem *lockedMemRepo) ListDatastores(ns string) ([]int64, error) { return nil, nil } diff --git a/node/repo/retrievalstoremgr/retrievalstoremgr.go b/node/repo/retrievalstoremgr/retrievalstoremgr.go index e791150d953..ba86ccee540 100644 --- a/node/repo/retrievalstoremgr/retrievalstoremgr.go +++ b/node/repo/retrievalstoremgr/retrievalstoremgr.go @@ -4,7 +4,7 @@ import ( "errors" "github.com/filecoin-project/go-multistore" - "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/node/repo/importmgr" "github.com/ipfs/go-blockservice" offline "github.com/ipfs/go-ipfs-exchange-offline" @@ -73,13 +73,13 @@ func (mrs *multiStoreRetrievalStore) DAGService() ipldformat.DAGService { // BlockstoreRetrievalStoreManager manages a single blockstore as if it were multiple stores type BlockstoreRetrievalStoreManager struct { - bs blockstore.Blockstore + bs blockstore.BasicBlockstore } 
var _ RetrievalStoreManager = &BlockstoreRetrievalStoreManager{} // NewBlockstoreRetrievalStoreManager returns a new blockstore based RetrievalStoreManager -func NewBlockstoreRetrievalStoreManager(bs blockstore.Blockstore) RetrievalStoreManager { +func NewBlockstoreRetrievalStoreManager(bs blockstore.BasicBlockstore) RetrievalStoreManager { return &BlockstoreRetrievalStoreManager{ bs: bs, } diff --git a/node/repo/retrievalstoremgr/retrievalstoremgr_test.go b/node/repo/retrievalstoremgr/retrievalstoremgr_test.go index 044a8cc272e..0a44fa0729e 100644 --- a/node/repo/retrievalstoremgr/retrievalstoremgr_test.go +++ b/node/repo/retrievalstoremgr/retrievalstoremgr_test.go @@ -9,13 +9,13 @@ import ( "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" dss "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" format "github.com/ipfs/go-ipld-format" dag "github.com/ipfs/go-merkledag" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-multistore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/node/repo/importmgr" "github.com/filecoin-project/lotus/node/repo/retrievalstoremgr" ) @@ -71,7 +71,7 @@ func TestMultistoreRetrievalStoreManager(t *testing.T) { func TestBlockstoreRetrievalStoreManager(t *testing.T) { ctx := context.Background() ds := dss.MutexWrap(datastore.NewMapDatastore()) - bs := blockstore.NewBlockstore(ds) + bs := blockstore.FromDatastore(ds) retrievalStoreMgr := retrievalstoremgr.NewBlockstoreRetrievalStoreManager(bs) var stores []retrievalstoremgr.RetrievalStore var cids []cid.Cid diff --git a/node/rpc.go b/node/rpc.go new file mode 100644 index 00000000000..b283f6ac10a --- /dev/null +++ b/node/rpc.go @@ -0,0 +1,196 @@ +package node + +import ( + "context" + "encoding/json" + "net" + "net/http" + _ "net/http/pprof" + "runtime" + "strconv" + + "github.com/gorilla/mux" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + 
"github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "go.opencensus.io/tag" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/go-jsonrpc/auth" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/lib/rpcenc" + "github.com/filecoin-project/lotus/metrics" + "github.com/filecoin-project/lotus/node/impl" +) + +var rpclog = logging.Logger("rpc") + +// ServeRPC serves an HTTP handler over the supplied listen multiaddr. +// +// This function spawns a goroutine to run the server, and returns immediately. +// It returns the stop function to be called to terminate the endpoint. +// +// The supplied ID is used in tracing, by inserting a tag in the context. +func ServeRPC(h http.Handler, id string, addr multiaddr.Multiaddr) (StopFunc, error) { + // Start listening to the addr; if invalid or occupied, we will fail early. + lst, err := manet.Listen(addr) + if err != nil { + return nil, xerrors.Errorf("could not listen: %w", err) + } + + // Instantiate the server and start listening. + srv := &http.Server{ + Handler: h, + BaseContext: func(listener net.Listener) context.Context { + ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, id)) + return ctx + }, + } + + go func() { + err = srv.Serve(manet.NetListener(lst)) + if err != http.ErrServerClosed { + rpclog.Warnf("rpc server failed: %s", err) + } + }() + + return srv.Shutdown, err +} + +// FullNodeHandler returns a full node handler, to be mounted as-is on the server. +func FullNodeHandler(a v1api.FullNode, permissioned bool, opts ...jsonrpc.ServerOption) (http.Handler, error) { + m := mux.NewRouter() + + serveRpc := func(path string, hnd interface{}) { + rpcServer := jsonrpc.NewServer(opts...) 
+ rpcServer.Register("Filecoin", hnd) + + var handler http.Handler = rpcServer + if permissioned { + handler = &auth.Handler{Verify: a.AuthVerify, Next: rpcServer.ServeHTTP} + } + + m.Handle(path, handler) + } + + fnapi := metrics.MetricedFullAPI(a) + if permissioned { + fnapi = api.PermissionedFullAPI(fnapi) + } + + serveRpc("/rpc/v1", fnapi) + serveRpc("/rpc/v0", &v0api.WrapperV1Full{FullNode: fnapi}) + + // Import handler + handleImportFunc := handleImport(a.(*impl.FullNodeAPI)) + if permissioned { + importAH := &auth.Handler{ + Verify: a.AuthVerify, + Next: handleImportFunc, + } + m.Handle("/rest/v0/import", importAH) + } else { + m.HandleFunc("/rest/v0/import", handleImportFunc) + } + + // debugging + m.Handle("/debug/metrics", metrics.Exporter()) + m.Handle("/debug/pprof-set/block", handleFractionOpt("BlockProfileRate", runtime.SetBlockProfileRate)) + m.Handle("/debug/pprof-set/mutex", handleFractionOpt("MutexProfileFraction", func(x int) { + runtime.SetMutexProfileFraction(x) + })) + m.PathPrefix("/").Handler(http.DefaultServeMux) // pprof + + return m, nil +} + +// MinerHandler returns a miner handler, to be mounted as-is on the server. 
+func MinerHandler(a api.StorageMiner, permissioned bool) (http.Handler, error) { + m := mux.NewRouter() + + mapi := metrics.MetricedStorMinerAPI(a) + if permissioned { + mapi = api.PermissionedStorMinerAPI(mapi) + } + + readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder() + rpcServer := jsonrpc.NewServer(readerServerOpt) + rpcServer.Register("Filecoin", mapi) + + m.Handle("/rpc/v0", rpcServer) + m.Handle("/rpc/streams/v0/push/{uuid}", readerHandler) + m.PathPrefix("/remote").HandlerFunc(a.(*impl.StorageMinerAPI).ServeRemote(permissioned)) + + // debugging + m.Handle("/debug/metrics", metrics.Exporter()) + m.PathPrefix("/").Handler(http.DefaultServeMux) // pprof + + if !permissioned { + return m, nil + } + + ah := &auth.Handler{ + Verify: a.AuthVerify, + Next: m.ServeHTTP, + } + return ah, nil +} + +func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method != "PUT" { + w.WriteHeader(404) + return + } + if !auth.HasPerm(r.Context(), nil, api.PermWrite) { + w.WriteHeader(401) + _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) + return + } + + c, err := a.ClientImportLocal(r.Context(), r.Body) + if err != nil { + w.WriteHeader(500) + _ = json.NewEncoder(w).Encode(struct{ Error string }{err.Error()}) + return + } + w.WriteHeader(200) + err = json.NewEncoder(w).Encode(struct{ Cid cid.Cid }{c}) + if err != nil { + rpclog.Errorf("/rest/v0/import: Writing response failed: %+v", err) + return + } + } +} + +func handleFractionOpt(name string, setter func(int)) http.HandlerFunc { + return func(rw http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(rw, "only POST allowed", http.StatusMethodNotAllowed) + return + } + if err := r.ParseForm(); err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + + asfr := r.Form.Get("x") + if len(asfr) == 0 { + http.Error(rw, 
"parameter 'x' must be set", http.StatusBadRequest) + return + } + + fr, err := strconv.Atoi(asfr) + if err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + rpclog.Infof("setting %s to %d", name, fr) + setter(fr) + } +} diff --git a/node/shutdown.go b/node/shutdown.go new file mode 100644 index 00000000000..e630031dac7 --- /dev/null +++ b/node/shutdown.go @@ -0,0 +1,56 @@ +package node + +import ( + "context" + "os" + "os/signal" + "syscall" +) + +type ShutdownHandler struct { + Component string + StopFunc StopFunc +} + +// MonitorShutdown manages shutdown requests, by watching signals and invoking +// the supplied handlers in order. +// +// It watches SIGTERM and SIGINT OS signals, as well as the trigger channel. +// When any of them fire, it calls the supplied handlers in order. If any of +// them errors, it merely logs the error. +// +// Once the shutdown has completed, it closes the returned channel. The caller +// can watch this channel +func MonitorShutdown(triggerCh <-chan struct{}, handlers ...ShutdownHandler) <-chan struct{} { + sigCh := make(chan os.Signal, 2) + out := make(chan struct{}) + + go func() { + select { + case sig := <-sigCh: + log.Warnw("received shutdown", "signal", sig) + case <-triggerCh: + log.Warn("received shutdown") + } + + log.Warn("Shutting down...") + + // Call all the handlers, logging on failure and success. + for _, h := range handlers { + if err := h.StopFunc(context.TODO()); err != nil { + log.Errorf("shutting down %s failed: %s", h.Component, err) + continue + } + log.Infof("%s shut down successfully ", h.Component) + } + + log.Warn("Graceful shutdown successful") + + // Sync all loggers. 
+ _ = log.Sync() //nolint:errcheck + close(out) + }() + + signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT) + return out +} diff --git a/node/shutdown_test.go b/node/shutdown_test.go new file mode 100644 index 00000000000..15e2af93e5e --- /dev/null +++ b/node/shutdown_test.go @@ -0,0 +1,36 @@ +package node + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestMonitorShutdown(t *testing.T) { + signalCh := make(chan struct{}) + + // Three shutdown handlers. + var wg sync.WaitGroup + wg.Add(3) + h := ShutdownHandler{ + Component: "handler", + StopFunc: func(_ context.Context) error { + wg.Done() + return nil + }, + } + + finishCh := MonitorShutdown(signalCh, h, h, h) + + // Nothing here after 10ms. + time.Sleep(10 * time.Millisecond) + require.Len(t, finishCh, 0) + + // Now trigger the shutdown. + close(signalCh) + wg.Wait() + <-finishCh +} diff --git a/node/test/builder.go b/node/test/builder.go deleted file mode 100644 index ea9a8222048..00000000000 --- a/node/test/builder.go +++ /dev/null @@ -1,543 +0,0 @@ -package test - -import ( - "bytes" - "context" - "crypto/rand" - "io/ioutil" - "net" - "net/http/httptest" - "strings" - "sync" - "testing" - "time" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-storedcounter" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/api/test" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/actors/policy" - "github.com/filecoin-project/lotus/chain/gen" - genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis" - 
"github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" - sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/extern/sector-storage/mock" - "github.com/filecoin-project/lotus/genesis" - lotusminer "github.com/filecoin-project/lotus/miner" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/modules" - testing2 "github.com/filecoin-project/lotus/node/modules/testing" - "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/storage/mockstorage" - miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" - "github.com/ipfs/go-datastore" - "github.com/libp2p/go-libp2p-core/crypto" - "github.com/libp2p/go-libp2p-core/peer" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/multiformats/go-multiaddr" - "github.com/stretchr/testify/require" -) - -func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Address, act address.Address, pk crypto.PrivKey, tnd test.TestNode, mn mocknet.Mocknet, opts node.Option) test.TestStorageNode { - r := repo.NewMemory(nil) - - lr, err := r.Lock(repo.StorageMiner) - require.NoError(t, err) - - ks, err := lr.KeyStore() - require.NoError(t, err) - - kbytes, err := pk.Bytes() - require.NoError(t, err) - - err = ks.Put("libp2p-host", types.KeyInfo{ - Type: "libp2p-host", - PrivateKey: kbytes, - }) - require.NoError(t, err) - - ds, err := lr.Datastore("/metadata") - require.NoError(t, err) - err = ds.Put(datastore.NewKey("miner-address"), act.Bytes()) - require.NoError(t, err) - - nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix)) - for i := 0; i < test.GenesisPreseals; i++ { - _, err := nic.Next() - require.NoError(t, err) - } - _, err = nic.Next() - require.NoError(t, err) - - err = 
lr.Close() - require.NoError(t, err) - - peerid, err := peer.IDFromPrivateKey(pk) - require.NoError(t, err) - - enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(peerid)}) - require.NoError(t, err) - - msg := &types.Message{ - To: act, - From: waddr, - Method: miner.Methods.ChangePeerID, - Params: enc, - Value: types.NewInt(0), - } - - _, err = tnd.MpoolPushMessage(ctx, msg, nil) - require.NoError(t, err) - - // start node - var minerapi api.StorageMiner - - mineBlock := make(chan lotusminer.MineReq) - stop, err := node.New(ctx, - node.StorageMiner(&minerapi), - node.Online(), - node.Repo(r), - node.Test(), - - node.MockHost(mn), - - node.Override(new(api.FullNode), tnd), - node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, act)), - - opts, - ) - if err != nil { - t.Fatalf("failed to construct node: %v", err) - } - - t.Cleanup(func() { _ = stop(context.Background()) }) - - /*// Bootstrap with full node - remoteAddrs, err := tnd.NetAddrsListen(ctx) - require.NoError(t, err) - - err = minerapi.NetConnect(ctx, remoteAddrs) - require.NoError(t, err)*/ - mineOne := func(ctx context.Context, req lotusminer.MineReq) error { - select { - case mineBlock <- req: - return nil - case <-ctx.Done(): - return ctx.Err() - } - } - - return test.TestStorageNode{StorageMiner: minerapi, MineOne: mineOne} -} - -func Builder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) { - return mockBuilderOpts(t, fullOpts, storage, false) -} - -func MockSbBuilder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) { - return mockSbBuilderOpts(t, fullOpts, storage, false) -} - -func RPCBuilder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) { - return mockBuilderOpts(t, fullOpts, storage, true) -} - -func RPCMockSbBuilder(t *testing.T, fullOpts 
[]test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) { - return mockSbBuilderOpts(t, fullOpts, storage, true) -} - -func mockBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner, rpc bool) ([]test.TestNode, []test.TestStorageNode) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - mn := mocknet.New(ctx) - - fulls := make([]test.TestNode, len(fullOpts)) - storers := make([]test.TestStorageNode, len(storage)) - - pk, _, err := crypto.GenerateEd25519Key(rand.Reader) - require.NoError(t, err) - - minerPid, err := peer.IDFromPrivateKey(pk) - require.NoError(t, err) - - var genbuf bytes.Buffer - - if len(storage) > 1 { - panic("need more peer IDs") - } - // PRESEAL SECTION, TRY TO REPLACE WITH BETTER IN THE FUTURE - // TODO: would be great if there was a better way to fake the preseals - - var genms []genesis.Miner - var maddrs []address.Address - var genaccs []genesis.Actor - var keys []*wallet.Key - - var presealDirs []string - for i := 0; i < len(storage); i++ { - maddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(i)) - if err != nil { - t.Fatal(err) - } - tdir, err := ioutil.TempDir("", "preseal-memgen") - if err != nil { - t.Fatal(err) - } - genm, k, err := seed.PreSeal(maddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, test.GenesisPreseals, tdir, []byte("make genesis mem random"), nil, true) - if err != nil { - t.Fatal(err) - } - genm.PeerId = minerPid - - wk, err := wallet.NewKey(*k) - if err != nil { - return nil, nil - } - - genaccs = append(genaccs, genesis.Actor{ - Type: genesis.TAccount, - Balance: big.Mul(big.NewInt(400000000), types.NewInt(build.FilecoinPrecision)), - Meta: (&genesis.AccountMeta{Owner: wk.Address}).ActorMeta(), - }) - - keys = append(keys, wk) - presealDirs = append(presealDirs, tdir) - maddrs = append(maddrs, maddr) - genms = append(genms, *genm) - } - templ := &genesis.Template{ - Accounts: genaccs, - Miners: genms, - 
NetworkName: "test", - Timestamp: uint64(time.Now().Unix() - 10000), // some time sufficiently far in the past - VerifregRootKey: gen.DefaultVerifregRootkeyActor, - RemainderAccount: gen.DefaultRemainderAccountActor, - } - - // END PRESEAL SECTION - - for i := 0; i < len(fullOpts); i++ { - var genesis node.Option - if i == 0 { - genesis = node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&genbuf, *templ)) - } else { - genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genbuf.Bytes())) - } - - stop, err := node.New(ctx, - node.FullAPI(&fulls[i].FullNode, node.Lite(fullOpts[i].Lite)), - node.Online(), - node.Repo(repo.NewMemory(nil)), - node.MockHost(mn), - node.Test(), - - genesis, - - fullOpts[i].Opts(fulls), - ) - if err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { _ = stop(context.Background()) }) - - if rpc { - fulls[i] = fullRpc(t, fulls[i]) - } - } - - for i, def := range storage { - // TODO: support non-bootstrap miners - if i != 0 { - t.Fatal("only one storage node supported") - } - if def.Full != 0 { - t.Fatal("storage nodes only supported on the first full node") - } - - f := fulls[def.Full] - if _, err := f.FullNode.WalletImport(ctx, &keys[i].KeyInfo); err != nil { - t.Fatal(err) - } - if err := f.FullNode.WalletSetDefault(ctx, keys[i].Address); err != nil { - t.Fatal(err) - } - - genMiner := maddrs[i] - wa := genms[i].Worker - - storers[i] = CreateTestStorageNode(ctx, t, wa, genMiner, pk, f, mn, node.Options()) - if err := storers[i].StorageAddLocal(ctx, presealDirs[i]); err != nil { - t.Fatalf("%+v", err) - } - /* - sma := storers[i].StorageMiner.(*impl.StorageMinerAPI) - - psd := presealDirs[i] - */ - if rpc { - storers[i] = storerRpc(t, storers[i]) - } - } - - if err := mn.LinkAll(); err != nil { - t.Fatal(err) - } - - if len(storers) > 0 { - // Mine 2 blocks to setup some CE stuff in some actors - var wait sync.Mutex - wait.Lock() - - test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(epoch abi.ChainEpoch) { - 
wait.Unlock() - }) - - wait.Lock() - test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(epoch abi.ChainEpoch) { - wait.Unlock() - }) - wait.Lock() - } - - return fulls, storers -} - -func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner, rpc bool) ([]test.TestNode, []test.TestStorageNode) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - mn := mocknet.New(ctx) - - fulls := make([]test.TestNode, len(fullOpts)) - storers := make([]test.TestStorageNode, len(storage)) - - var genbuf bytes.Buffer - - // PRESEAL SECTION, TRY TO REPLACE WITH BETTER IN THE FUTURE - // TODO: would be great if there was a better way to fake the preseals - - var genms []genesis.Miner - var genaccs []genesis.Actor - var maddrs []address.Address - var keys []*wallet.Key - var pidKeys []crypto.PrivKey - for i := 0; i < len(storage); i++ { - maddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(i)) - if err != nil { - t.Fatal(err) - } - - preseals := storage[i].Preseal - if preseals == test.PresealGenesis { - preseals = test.GenesisPreseals - } - - genm, k, err := mockstorage.PreSeal(2048, maddr, preseals) - if err != nil { - t.Fatal(err) - } - - pk, _, err := crypto.GenerateEd25519Key(rand.Reader) - require.NoError(t, err) - - minerPid, err := peer.IDFromPrivateKey(pk) - require.NoError(t, err) - - genm.PeerId = minerPid - - wk, err := wallet.NewKey(*k) - if err != nil { - return nil, nil - } - - genaccs = append(genaccs, genesis.Actor{ - Type: genesis.TAccount, - Balance: big.Mul(big.NewInt(400000000), types.NewInt(build.FilecoinPrecision)), - Meta: (&genesis.AccountMeta{Owner: wk.Address}).ActorMeta(), - }) - - keys = append(keys, wk) - pidKeys = append(pidKeys, pk) - maddrs = append(maddrs, maddr) - genms = append(genms, *genm) - } - templ := &genesis.Template{ - Accounts: genaccs, - Miners: genms, - NetworkName: "test", - Timestamp: uint64(time.Now().Unix()) - (build.BlockDelaySecs * 20000), - 
VerifregRootKey: gen.DefaultVerifregRootkeyActor, - RemainderAccount: gen.DefaultRemainderAccountActor, - } - - // END PRESEAL SECTION - - for i := 0; i < len(fullOpts); i++ { - var genesis node.Option - if i == 0 { - genesis = node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&genbuf, *templ)) - } else { - genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genbuf.Bytes())) - } - - stop, err := node.New(ctx, - node.FullAPI(&fulls[i].FullNode, node.Lite(fullOpts[i].Lite)), - node.Online(), - node.Repo(repo.NewMemory(nil)), - node.MockHost(mn), - node.Test(), - - node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), - - genesis, - - fullOpts[i].Opts(fulls), - ) - if err != nil { - t.Fatalf("%+v", err) - } - - t.Cleanup(func() { _ = stop(context.Background()) }) - - if rpc { - fulls[i] = fullRpc(t, fulls[i]) - } - } - - for i, def := range storage { - // TODO: support non-bootstrap miners - - minerID := abi.ActorID(genesis2.MinerStart + uint64(i)) - - if def.Full != 0 { - t.Fatal("storage nodes only supported on the first full node") - } - - f := fulls[def.Full] - if _, err := f.FullNode.WalletImport(ctx, &keys[i].KeyInfo); err != nil { - return nil, nil - } - if err := f.FullNode.WalletSetDefault(ctx, keys[i].Address); err != nil { - return nil, nil - } - - sectors := make([]abi.SectorID, len(genms[i].Sectors)) - for i, sector := range genms[i].Sectors { - sectors[i] = abi.SectorID{ - Miner: minerID, - Number: sector.SectorID, - } - } - - storers[i] = CreateTestStorageNode(ctx, t, genms[i].Worker, maddrs[i], pidKeys[i], f, mn, node.Options( - node.Override(new(sectorstorage.SectorManager), func() (sectorstorage.SectorManager, error) { - return mock.NewMockSectorMgr(policy.GetDefaultSectorSize(), sectors), nil - }), - node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), - node.Unset(new(*sectorstorage.Manager)), - )) - - if rpc { - storers[i] = storerRpc(t, storers[i]) - } - } - - if err := mn.LinkAll(); err != nil { - 
t.Fatal(err) - } - - if len(storers) > 0 { - // Mine 2 blocks to setup some CE stuff in some actors - var wait sync.Mutex - wait.Lock() - - test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(abi.ChainEpoch) { - wait.Unlock() - }) - wait.Lock() - test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(abi.ChainEpoch) { - wait.Unlock() - }) - wait.Lock() - } - - return fulls, storers -} - -func fullRpc(t *testing.T, nd test.TestNode) test.TestNode { - ma, listenAddr, err := CreateRPCServer(nd) - require.NoError(t, err) - - var full test.TestNode - full.FullNode, _, err = client.NewFullNodeRPC(context.Background(), listenAddr, nil) - require.NoError(t, err) - - full.ListenAddr = ma - return full -} - -func storerRpc(t *testing.T, nd test.TestStorageNode) test.TestStorageNode { - ma, listenAddr, err := CreateRPCServer(nd) - require.NoError(t, err) - - var storer test.TestStorageNode - storer.StorageMiner, _, err = client.NewStorageMinerRPC(context.Background(), listenAddr, nil) - require.NoError(t, err) - - storer.ListenAddr = ma - storer.MineOne = nd.MineOne - return storer -} - -func CreateRPCServer(handler interface{}) (multiaddr.Multiaddr, string, error) { - rpcServer := jsonrpc.NewServer() - rpcServer.Register("Filecoin", handler) - testServ := httptest.NewServer(rpcServer) // todo: close - - addr := testServ.Listener.Addr() - listenAddr := "ws://" + addr.String() - ma, err := parseWSMultiAddr(addr) - if err != nil { - return nil, "", err - } - return ma, listenAddr, err -} - -func parseWSMultiAddr(addr net.Addr) (multiaddr.Multiaddr, error) { - host, port, err := net.SplitHostPort(addr.String()) - if err != nil { - return nil, err - } - ma, err := multiaddr.NewMultiaddr("/ip4/" + host + "/" + addr.Network() + "/" + port + "/ws") - if err != nil { - return nil, err - } - return ma, nil -} - -func WSMultiAddrToString(addr multiaddr.Multiaddr) (string, error) { - parts := strings.Split(addr.String(), "/") - if len(parts) != 6 || parts[0] != "" { - return "", 
xerrors.Errorf("Malformed ws multiaddr %s", addr) - } - - host := parts[2] - port := parts[4] - proto := parts[5] - - return proto + "://" + host + ":" + port + "/rpc/v0", nil -} diff --git a/node/testopts.go b/node/testopts.go index f348fc55510..ca1e8112759 100644 --- a/node/testopts.go +++ b/node/testopts.go @@ -10,8 +10,8 @@ import ( func MockHost(mn mocknet.Mocknet) Option { return Options( - ApplyIf(func(s *Settings) bool { return !s.Online }, - Error(errors.New("MockHost must be specified after Online")), + ApplyIf(func(s *Settings) bool { return !s.Base }, + Error(errors.New("MockHost must be specified after Base")), ), Override(new(lp2p.RawHost), lp2p.MockHost), diff --git a/paychmgr/cbor_gen.go b/paychmgr/cbor_gen.go index 4c9259f06cf..f25183db8ab 100644 --- a/paychmgr/cbor_gen.go +++ b/paychmgr/cbor_gen.go @@ -5,14 +5,18 @@ package paychmgr import ( "fmt" "io" + "sort" address "github.com/filecoin-project/go-address" paych "github.com/filecoin-project/specs-actors/actors/builtin/paych" + cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf +var _ = cid.Undef +var _ = sort.Sort func (t *VoucherInfo) MarshalCBOR(w io.Writer) error { if t == nil { @@ -178,7 +182,8 @@ func (t *VoucherInfo) UnmarshalCBOR(r io.Reader) error { } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) } } @@ -647,7 +652,8 @@ func (t *ChannelInfo) UnmarshalCBOR(r io.Reader) error { } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) } } @@ -832,7 +838,8 @@ func (t *MsgInfo) UnmarshalCBOR(r io.Reader) error { } default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) } } diff --git 
a/paychmgr/manager.go b/paychmgr/manager.go index 5e0aa88cea8..6f6efa7ea91 100644 --- a/paychmgr/manager.go +++ b/paychmgr/manager.go @@ -8,10 +8,10 @@ import ( "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" - "go.uber.org/fx" xerrors "golang.org/x/xerrors" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" @@ -19,22 +19,12 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/impl/full" - "github.com/filecoin-project/lotus/node/modules/helpers" ) var log = logging.Logger("paych") var errProofNotSupported = errors.New("payment channel proof parameter is not supported") -// PaychAPI is used by dependency injection to pass the consituent APIs to NewManager() -type PaychAPI struct { - fx.In - - full.MpoolAPI - full.StateAPI -} - // stateManagerAPI defines the methods needed from StateManager type stateManagerAPI interface { ResolveToKeyAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) @@ -43,9 +33,9 @@ type stateManagerAPI interface { } // paychAPI defines the API methods needed by the payment channel manager -type paychAPI interface { +type PaychAPI interface { StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) - StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) MpoolPushMessage(ctx context.Context, msg *types.Message, maxFee *api.MessageSendSpec) (*types.SignedMessage, error) WalletHas(ctx context.Context, addr address.Address) (bool, error) WalletSign(ctx 
context.Context, k address.Address, msg []byte) (*crypto.Signature, error) @@ -55,13 +45,13 @@ type paychAPI interface { // managerAPI defines all methods needed by the manager type managerAPI interface { stateManagerAPI - paychAPI + PaychAPI } // managerAPIImpl is used to create a composite that implements managerAPI type managerAPIImpl struct { stmgr.StateManagerAPI - paychAPI + PaychAPI } type Manager struct { @@ -77,11 +67,8 @@ type Manager struct { channels map[string]*channelAccessor } -func NewManager(mctx helpers.MetricsCtx, lc fx.Lifecycle, sm stmgr.StateManagerAPI, pchstore *Store, api PaychAPI) *Manager { - ctx := helpers.LifecycleCtx(mctx, lc) - ctx, shutdown := context.WithCancel(ctx) - - impl := &managerAPIImpl{StateManagerAPI: sm, paychAPI: &api} +func NewManager(ctx context.Context, shutdown func(), sm stmgr.StateManagerAPI, pchstore *Store, api PaychAPI) *Manager { + impl := &managerAPIImpl{StateManagerAPI: sm, PaychAPI: api} return &Manager{ ctx: ctx, shutdown: shutdown, @@ -103,18 +90,6 @@ func newManager(pchstore *Store, pchapi managerAPI) (*Manager, error) { return pm, pm.Start() } -// HandleManager is called by dependency injection to set up hooks -func HandleManager(lc fx.Lifecycle, pm *Manager) { - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - return pm.Start() - }, - OnStop: func(context.Context) error { - return pm.Stop() - }, - }) -} - // Start restarts tracking of any messages that were sent to chain. 
func (pm *Manager) Start() error { return pm.restartPending() diff --git a/paychmgr/mock_test.go b/paychmgr/mock_test.go index 3393a3072e8..2c891803bcd 100644 --- a/paychmgr/mock_test.go +++ b/paychmgr/mock_test.go @@ -8,6 +8,7 @@ import ( "github.com/ipfs/go-cid" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" @@ -132,7 +133,7 @@ func newMockPaychAPI() *mockPaychAPI { } } -func (pchapi *mockPaychAPI) StateWaitMsg(ctx context.Context, mcid cid.Cid, confidence uint64) (*api.MsgLookup, error) { +func (pchapi *mockPaychAPI) StateWaitMsg(ctx context.Context, mcid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { pchapi.lk.Lock() response := make(chan types.MessageReceipt) diff --git a/paychmgr/paych_test.go b/paychmgr/paych_test.go index 8557dfb63f8..04ed5ce5c08 100644 --- a/paychmgr/paych_test.go +++ b/paychmgr/paych_test.go @@ -23,6 +23,7 @@ import ( paychmock "github.com/filecoin-project/lotus/chain/actors/builtin/paych/mock" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/sigs" + _ "github.com/filecoin-project/lotus/lib/sigs/secp" ) func TestCheckVoucherValid(t *testing.T) { diff --git a/paychmgr/settler/settler.go b/paychmgr/settler/settler.go index d2a0900b5ae..ce31ab223b0 100644 --- a/paychmgr/settler/settler.go +++ b/paychmgr/settler/settler.go @@ -21,6 +21,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/impl/full" payapi "github.com/filecoin-project/lotus/node/impl/paych" + "github.com/filecoin-project/lotus/node/modules/helpers" ) var log = logging.Logger("payment-channel-settler") @@ -40,7 +41,7 @@ type settlerAPI interface { PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) PaychVoucherList(context.Context, 
address.Address) ([]*paych.SignedVoucher, error) PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) - StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) } type paymentChannelSettler struct { @@ -50,11 +51,12 @@ type paymentChannelSettler struct { // SettlePaymentChannels checks the chain for events related to payment channels settling and // submits any vouchers for inbound channels tracked for this node -func SettlePaymentChannels(lc fx.Lifecycle, api API) error { +func SettlePaymentChannels(mctx helpers.MetricsCtx, lc fx.Lifecycle, papi API) error { + ctx := helpers.LifecycleCtx(mctx, lc) lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - pcs := newPaymentChannelSettler(ctx, &api) - ev := events.NewEvents(ctx, &api) + OnStart: func(context.Context) error { + pcs := newPaymentChannelSettler(ctx, &papi) + ev := events.NewEvents(ctx, papi) return ev.Called(pcs.check, pcs.messageHandler, pcs.revertHandler, int(build.MessageConfidence+1), events.NoTimeout, pcs.matcher) }, }) @@ -73,6 +75,11 @@ func (pcs *paymentChannelSettler) check(ts *types.TipSet) (done bool, more bool, } func (pcs *paymentChannelSettler) messageHandler(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) { + // Ignore unsuccessful settle messages + if rec.ExitCode != 0 { + return true, nil + } + bestByLane, err := paychmgr.BestSpendableByLane(pcs.ctx, pcs.api, msg.To) if err != nil { return true, err @@ -86,9 +93,10 @@ func (pcs *paymentChannelSettler) messageHandler(msg *types.Message, rec *types. 
} go func(voucher *paych.SignedVoucher, submitMessageCID cid.Cid) { defer wg.Done() - msgLookup, err := pcs.api.StateWaitMsg(pcs.ctx, submitMessageCID, build.MessageConfidence) + msgLookup, err := pcs.api.StateWaitMsg(pcs.ctx, submitMessageCID, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { log.Errorf("submitting voucher: %s", err.Error()) + return } if msgLookup.Receipt.ExitCode != 0 { log.Errorf("failed submitting voucher: %+v", voucher) @@ -103,27 +111,27 @@ func (pcs *paymentChannelSettler) revertHandler(ctx context.Context, ts *types.T return nil } -func (pcs *paymentChannelSettler) matcher(msg *types.Message) (matchOnce bool, matched bool, err error) { +func (pcs *paymentChannelSettler) matcher(msg *types.Message) (matched bool, err error) { // Check if this is a settle payment channel message if msg.Method != paych.Methods.Settle { - return false, false, nil + return false, nil } // Check if this payment channel is of concern to this node (i.e. tracked in payment channel store), // and its inbound (i.e. 
we're getting vouchers that we may need to redeem) trackedAddresses, err := pcs.api.PaychList(pcs.ctx) if err != nil { - return false, false, err + return false, err } for _, addr := range trackedAddresses { if msg.To == addr { status, err := pcs.api.PaychStatus(pcs.ctx, addr) if err != nil { - return false, false, err + return false, err } if status.Direction == api.PCHInbound { - return false, true, nil + return true, nil } } } - return false, false, nil + return false, nil } diff --git a/paychmgr/simple.go b/paychmgr/simple.go index afa1ae1f704..f93c6d5bd20 100644 --- a/paychmgr/simple.go +++ b/paychmgr/simple.go @@ -36,8 +36,6 @@ type fundsReq struct { lk sync.Mutex // merge parent, if this req is part of a merge merge *mergedFundsReq - // whether the req's context has been cancelled - active bool } func newFundsReq(ctx context.Context, amt types.BigInt) *fundsReq { @@ -46,7 +44,6 @@ func newFundsReq(ctx context.Context, amt types.BigInt) *fundsReq { ctx: ctx, promise: promise, amt: amt, - active: true, } } @@ -61,25 +58,18 @@ func (r *fundsReq) onComplete(res *paychFundsRes) { // cancel is called when the req's context is cancelled func (r *fundsReq) cancel() { r.lk.Lock() - - r.active = false - m := r.merge - - r.lk.Unlock() + defer r.lk.Unlock() // If there's a merge parent, tell the merge parent to check if it has any // active reqs left - if m != nil { - m.checkActive() + if r.merge != nil { + r.merge.checkActive() } } // isActive indicates whether the req's context has been cancelled func (r *fundsReq) isActive() bool { - r.lk.Lock() - defer r.lk.Unlock() - - return r.active + return r.ctx.Err() == nil } // setMergeParent sets the merge that this req is part of @@ -423,9 +413,9 @@ func (ca *channelAccessor) waitForPaychCreateMsg(channelID string, mcid cid.Cid) } func (ca *channelAccessor) waitPaychCreateMsg(channelID string, mcid cid.Cid) error { - mwait, err := ca.api.StateWaitMsg(ca.chctx, mcid, build.MessageConfidence) + mwait, err := 
ca.api.StateWaitMsg(ca.chctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { - log.Errorf("wait msg: %w", err) + log.Errorf("wait msg: %v", err) return err } @@ -509,7 +499,7 @@ func (ca *channelAccessor) waitForAddFundsMsg(channelID string, mcid cid.Cid) { } func (ca *channelAccessor) waitAddFundsMsg(channelID string, mcid cid.Cid) error { - mwait, err := ca.api.StateWaitMsg(ca.chctx, mcid, build.MessageConfidence) + mwait, err := ca.api.StateWaitMsg(ca.chctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { log.Error(err) return err diff --git a/paychmgr/store.go b/paychmgr/store.go index a17ad1fcd80..343149f932e 100644 --- a/paychmgr/store.go +++ b/paychmgr/store.go @@ -14,14 +14,12 @@ import ( cborutil "github.com/filecoin-project/go-cbor-util" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" dsq "github.com/ipfs/go-datastore/query" "github.com/filecoin-project/go-address" cborrpc "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" - "github.com/filecoin-project/lotus/node/modules/dtypes" ) var ErrChannelNotTracked = errors.New("channel not tracked") @@ -30,8 +28,7 @@ type Store struct { ds datastore.Batching } -func NewStore(ds dtypes.MetadataDS) *Store { - ds = namespace.Wrap(ds, datastore.NewKey("/paych/")) +func NewStore(ds datastore.Batching) *Store { return &Store{ ds: ds, } diff --git a/scripts/bash-completion/lotus b/scripts/bash-completion/lotus index 20c312b6ce6..b572ab32002 100644 --- a/scripts/bash-completion/lotus +++ b/scripts/bash-completion/lotus @@ -1,10 +1,18 @@ #!/usr/bin/env bash + _cli_bash_autocomplete() { - local cur opts base; - COMPREPLY=(); - cur="${COMP_WORDS[COMP_CWORD]}"; - opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-completion ); - COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ); - return 0; -}; -complete -F _cli_bash_autocomplete lotus \ No newline at end of file + if [[ 
"${COMP_WORDS[0]}" != "source" ]]; then + local cur opts base + COMPREPLY=() + cur="${COMP_WORDS[COMP_CWORD]}" + if [[ "$cur" == "-"* ]]; then + opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} ${cur} --generate-bash-completion ) + else + opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion ) + fi + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 + fi +} + +complete -o bashdefault -o default -o nospace -F _cli_bash_autocomplete lotus lotus-miner lotus-worker diff --git a/scripts/bash-completion/lotus-miner b/scripts/bash-completion/lotus-miner deleted file mode 100644 index df5cc01cc71..00000000000 --- a/scripts/bash-completion/lotus-miner +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash -_cli_bash_autocomplete() { - local cur opts base; - COMPREPLY=(); - cur="${COMP_WORDS[COMP_CWORD]}"; - opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-completion ); - COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ); - return 0; -}; -complete -F _cli_bash_autocomplete lotus-miner \ No newline at end of file diff --git a/scripts/build-bundle.sh b/scripts/build-bundle.sh index 7d37edff87e..fe1c886114e 100755 --- a/scripts/build-bundle.sh +++ b/scripts/build-bundle.sh @@ -49,4 +49,7 @@ do ipfs add -q "lotus_${CIRCLE_TAG}_${ARCH}-amd64.tar.gz" > "lotus_${CIRCLE_TAG}_${ARCH}-amd64.tar.gz.cid" done +cp "../appimage/Lotus-${CIRCLE_TAG}-x86_64.AppImage" . 
+sha512sum "Lotus-${CIRCLE_TAG}-x86_64.AppImage" > "Lotus-${CIRCLE_TAG}-x86_64.AppImage.sha512" +ipfs add -q "Lotus-${CIRCLE_TAG}-x86_64.AppImage" > "Lotus-${CIRCLE_TAG}-x86_64.AppImage.cid" popd diff --git a/scripts/devnet.bash b/scripts/devnet.bash index 8ffddaea406..a53cbc7b960 100755 --- a/scripts/devnet.bash +++ b/scripts/devnet.bash @@ -64,7 +64,7 @@ cat > "${BASEDIR}/scripts/create_miner.bash" <&1 | tee -a ${BASEDIR}/daemon.log" C-m @@ -186,7 +187,7 @@ export LOTUS_PATH="${BASEDIR}/.lotus" ${BASEDIR}/bin/lotus wait-api tmux send-keys -t $session:$wminer "${BASEDIR}/scripts/create_miner.bash" C-m -tmux send-keys -t $session:$wminer "lotus-miner run --api 48020 --nosync 2>&1 | tee -a ${BASEDIR}/miner.log" C-m +tmux send-keys -t $session:$wminer "lotus-miner run --miner-api 48020 --nosync 2>&1 | tee -a ${BASEDIR}/miner.log" C-m tmux send-keys -t $session:$wcli "${BASEDIR}/scripts/monitor.bash" C-m tmux send-keys -t $session:$wpleding "${BASEDIR}/scripts/pledge_sectors.bash" C-m diff --git a/scripts/generate-lotus-cli.py b/scripts/generate-lotus-cli.py new file mode 100644 index 00000000000..8018962e9b7 --- /dev/null +++ b/scripts/generate-lotus-cli.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# Generate lotus command lines documents as text and markdown in folder "lotus/documentation/en". 
+# Python 2.7 + +import os + + +def generate_lotus_cli(prog): + output_folder = 'documentation/en' + md_file = open('%s/cli-%s.md' % (output_folder, prog), 'w') # set the name of md output + + def get_cmd_recursively(cur_cmd): + depth = cur_cmd.count(' ') + md_file.writelines(('\n' * min(depth, 1)) + ('#' * depth) + '# ' + cur_cmd[2:] + '\n') + + cmd_flag = False + + print('> ' + cur_cmd) + cmd_help_output = os.popen(cur_cmd + ' -h') + cmd_help_output_lines = cmd_help_output.readlines() + + md_file.writelines('```\n') + md_file.writelines(cmd_help_output_lines) + md_file.writelines('```\n') + + for line in cmd_help_output_lines: + try: + line = line.strip() + if line == 'COMMANDS:': + cmd_flag = True + if cmd_flag is True and line == '': + cmd_flag = False + if cmd_flag is True and line[-1] != ':' and 'help, h' not in line: + gap_pos = 0 + sub_cmd = line + if ' ' in line: + gap_pos = sub_cmd.index(' ') + if gap_pos: + sub_cmd = cur_cmd + ' ' + sub_cmd[:gap_pos] + get_cmd_recursively(sub_cmd) + except Exception as e: + print('Fail to deal with "%s" with error:\n%s' % (line, e)) + + get_cmd_recursively('./' + prog) + md_file.close() + + +if __name__ == "__main__": + os.putenv("LOTUS_VERSION_IGNORE_COMMIT", "1") + generate_lotus_cli('lotus') + generate_lotus_cli('lotus-miner') + generate_lotus_cli('lotus-worker') diff --git a/scripts/make-completions.sh b/scripts/make-completions.sh deleted file mode 100755 index 1bfd59bf38b..00000000000 --- a/scripts/make-completions.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash - -# scripts/make-completions.sh [progname] - -echo '#!/usr/bin/env bash' > "scripts/bash-completion/$1" -echo '#!/usr/bin/env zsh' > "scripts/zsh-completion/$1" - -$1 --init-completion=bash >> "scripts/bash-completion/$1" -$1 --init-completion=zsh >> "scripts/zsh-completion/$1" diff --git a/scripts/mkreleaselog b/scripts/mkreleaselog new file mode 100755 index 00000000000..c9eaef4fb47 --- /dev/null +++ b/scripts/mkreleaselog @@ -0,0 +1,234 @@ 
+#!/bin/zsh +set -euo pipefail +export GO111MODULE=on +export GOPATH="$(go env GOPATH)" + +alias jq="jq --unbuffered" + +AUTHORS=(filecoin-project) + +[[ -n "${REPO_FILTER+x}" ]] || REPO_FILTER="github.com/(${$(printf "|%s" "${AUTHORS[@]}"):1})" + +[[ -n "${IGNORED_FILES+x}" ]] || IGNORED_FILES='^\(\.gx\|package\.json\|\.travis\.yml\|go.mod\|go\.sum|\.github|\.circleci\)$' + +NL=$'\n' + +ROOT_DIR="$(git rev-parse --show-toplevel)" + +msg() { + echo "$*" >&2 +} + +statlog() { + local module="$1" + local rpath="$GOPATH/src/$(strip_version "$module")" + local start="${2:-}" + local end="${3:-HEAD}" + local mailmap_file="$rpath/.mailmap" + if ! [[ -e "$mailmap_file" ]]; then + mailmap_file="$ROOT_DIR/.mailmap" + fi + + local stack=() + git -C "$rpath" -c mailmap.file="$mailmap_file" log --use-mailmap --shortstat --no-merges --pretty="tformat:%H%x09%aN%x09%aE" "$start..$end" | while read -r line; do + if [[ -n "$line" ]]; then + stack+=("$line") + continue + fi + + read -r changes + + changed=0 + insertions=0 + deletions=0 + while read count event; do + if [[ "$event" =~ ^file ]]; then + changed=$count + elif [[ "$event" =~ ^insertion ]]; then + insertions=$count + elif [[ "$event" =~ ^deletion ]]; then + deletions=$count + else + echo "unknown event $event" >&2 + exit 1 + fi + done<<<"${changes//,/$NL}" + + for author in "${stack[@]}"; do + IFS=$'\t' read -r hash name email <<<"$author" + jq -n \ + --arg "hash" "$hash" \ + --arg "name" "$name" \ + --arg "email" "$email" \ + --argjson "changed" "$changed" \ + --argjson "insertions" "$insertions" \ + --argjson "deletions" "$deletions" \ + '{Commit: $hash, Author: $name, Email: $email, Files: $changed, Insertions: $insertions, Deletions: $deletions}' + done + stack=() + done +} + +# Returns a stream of deps changed between $1 and $2. 
+dep_changes() { + { + <"$1" + <"$2" + } | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)' +} + +# resolve_commits resolves a git ref for each version. +resolve_commits() { + jq '. + {Ref: (.Version|capture("^((?.*)\\+incompatible|v.*-(0\\.)?[0-9]{14}-(?[a-f0-9]{12})|(?v.*))$") | .ref1 // .ref2 // .ref3)}' +} + +pr_link() { + local repo="$1" + local prnum="$2" + local ghname="${repo##github.com/}" + printf -- "[%s#%s](https://%s/pull/%s)" "$ghname" "$prnum" "$repo" "$prnum" +} + +# Generate a release log for a range of commits in a single repo. +release_log() { + setopt local_options BASH_REMATCH + + local module="$1" + local start="$2" + local end="${3:-HEAD}" + local repo="$(strip_version "$1")" + local dir="$GOPATH/src/$repo" + + local commit pr + git -C "$dir" log \ + --format='tformat:%H %s' \ + --first-parent \ + "$start..$end" | + while read commit subject; do + # Skip gx-only PRs. + git -C "$dir" diff-tree --no-commit-id --name-only "$commit^" "$commit" | + grep -v "${IGNORED_FILES}" >/dev/null || continue + + if [[ "$subject" =~ '^Merge pull request #([0-9]+) from' ]]; then + local prnum="${BASH_REMATCH[2]}" + local desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)" + printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")" + elif [[ "$subject" =~ '\(#([0-9]+)\)$' ]]; then + local prnum="${BASH_REMATCH[2]}" + printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")" + else + printf -- "- %s\n" "$subject" + fi + done +} + +indent() { + sed -e 's/^/ /' +} + +mod_deps() { + go list -mod=mod -json -m all | jq 'select(.Version != null)' +} + +ensure() { + local repo="$(strip_version "$1")" + local commit="$2" + local rpath="$GOPATH/src/$repo" + if [[ ! -d "$rpath" ]]; then + msg "Cloning $repo..." + git clone "http://$repo" "$rpath" >&2 + fi + + if ! 
git -C "$rpath" rev-parse --verify "$commit" >/dev/null; then + msg "Fetching $repo..." + git -C "$rpath" fetch --all >&2 + fi + + git -C "$rpath" rev-parse --verify "$commit" >/dev/null || return 1 +} + +statsummary() { + jq -s 'group_by(.Author)[] | {Author: .[0].Author, Commits: (. | length), Insertions: (map(.Insertions) | add), Deletions: (map(.Deletions) | add), Files: (map(.Files) | add)}' | + jq '. + {Lines: (.Deletions + .Insertions)}' +} + +strip_version() { + local repo="$1" + if [[ "$repo" =~ '.*/v[0-9]+$' ]]; then + repo="$(dirname "$repo")" + fi + echo "$repo" +} + +recursive_release_log() { + local start="${1:-$(git tag -l | sort -V | grep -v -- '-rc' | grep 'v'| tail -n1)}" + local end="${2:-$(git rev-parse HEAD)}" + local repo_root="$(git rev-parse --show-toplevel)" + local module="$(go list -m)" + local dir="$(go list -m -f '{{.Dir}}')" + + if [[ "${GOPATH}/${module}" -ef "${dir}" ]]; then + echo "This script requires the target module and all dependencies to live in a GOPATH." + return 1 + fi + + ( + local result=0 + local workspace="$(mktemp -d)" + trap "$(printf 'rm -rf "%q"' "$workspace")" INT TERM EXIT + cd "$workspace" + + mkdir extern + ln -s "$repo_root"/extern/filecoin-ffi extern/filecoin-ffi + ln -s "$repo_root"/extern/test-vectors extern/test-vectors + + echo "Computing old deps..." >&2 + git -C "$repo_root" show "$start:go.mod" >go.mod + mod_deps | resolve_commits | jq -s > old_deps.json + + echo "Computing new deps..." 
>&2 + git -C "$repo_root" show "$end:go.mod" >go.mod + mod_deps | resolve_commits | jq -s > new_deps.json + + rm -f go.mod go.sum + + printf -- "Generating Changelog for %s %s..%s\n" "$module" "$start" "$end" >&2 + + printf -- "- %s:\n" "$module" + release_log "$module" "$start" "$end" | indent + + + statlog "$module" "$start" "$end" > statlog.json + + dep_changes old_deps.json new_deps.json | + jq --arg filter "$REPO_FILTER" 'select(.Path | match($filter))' | + # Compute changelogs + jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"' | + while read module new new_ref old old_ref; do + if ! ensure "$module" "$new_ref"; then + result=1 + local changelog="failed to fetch repo" + else + statlog "$module" "$old_ref" "$new_ref" >> statlog.json + local changelog="$(release_log "$module" "$old_ref" "$new_ref")" + fi + if [[ -n "$changelog" ]]; then + printf -- "- %s (%s -> %s):\n" "$module" "$old" "$new" + echo "$changelog" | indent + fi + done + + echo + echo "Contributors" + echo + + echo "| Contributor | Commits | Lines ± | Files Changed |" + echo "|-------------|---------|---------|---------------|" + statsummary \ No newline at end of file diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml new file mode 100644 index 00000000000..472621c2a48 --- /dev/null +++ b/snap/snapcraft.yaml @@ -0,0 +1,76 @@ +name: lotus-filecoin +base: core20 +version: latest +summary: filecoin daemon/client +icon: snap/local/icon.svg +description: | + Filecoin is a peer-to-peer network that stores files on the internet + with built-in economic incentives to ensure files are stored reliably over time + + For documentation and additional information, please see the following resources + + https://filecoin.io + + https://fil.org + + https://docs.filecoin.io + + https://github.com/filecoin-project/lotus + +grade: devel +confinement: strict + +parts: + lotus: + plugin: make + source: ./ + build-snaps: + - go + - rustup + build-packages: + - git + - jq + - 
libhwloc-dev + - ocl-icd-opencl-dev + - pkg-config + stage-packages: + - libhwloc15 + - ocl-icd-libopencl1 + override-build: | + LDFLAGS="" make lotus lotus-miner lotus-worker + cp lotus lotus-miner lotus-worker $SNAPCRAFT_PART_INSTALL + +apps: + lotus: + command: lotus + plugs: + - network + - network-bind + - home + environment: + FIL_PROOFS_PARAMETER_CACHE: $SNAP_USER_COMMON/filecoin-proof-parameters + LOTUS_PATH: $SNAP_USER_COMMON/lotus + LOTUS_MINER_PATH: $SNAP_USER_COMMON/lotus-miner + LOTUS_WORKER_PATH: $SNAP_USER_COMMON/lotus-worker + lotus-miner: + command: lotus-miner + plugs: + - network + - network-bind + - opengl + environment: + FIL_PROOFS_PARAMETER_CACHE: $SNAP_USER_COMMON/filecoin-proof-parameters + LOTUS_PATH: $SNAP_USER_COMMON/lotus + LOTUS_MINER_PATH: $SNAP_USER_COMMON/lotus-miner + LOTUS_WORKER_PATH: $SNAP_USER_COMMON/lotus-worker + lotus-worker: + command: lotus-worker + plugs: + - network + - network-bind + - opengl + environment: + FIL_PROOFS_PARAMETER_CACHE: $SNAP_USER_COMMON/filecoin-proof-parameters + LOTUS_PATH: $SNAP_USER_COMMON/lotus + LOTUS_MINER_PATH: $SNAP_USER_COMMON/lotus-miner + LOTUS_WORKER_PATH: $SNAP_USER_COMMON/lotus-worker diff --git a/storage/adapter_storage_miner.go b/storage/adapter_storage_miner.go index 2279a92018d..531fe2d03a4 100644 --- a/storage/adapter_storage_miner.go +++ b/storage/adapter_storage_miner.go @@ -4,8 +4,6 @@ import ( "bytes" "context" - "github.com/filecoin-project/go-state-types/network" - "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" @@ -14,11 +12,14 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + market5 
"github.com/filecoin-project/specs-actors/v5/actors/builtin/market" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/market" @@ -31,21 +32,16 @@ import ( var _ sealing.SealingAPI = new(SealingAPIAdapter) type SealingAPIAdapter struct { - delegate storageMinerApi + delegate fullNodeFilteredAPI } -func NewSealingAPIAdapter(api storageMinerApi) SealingAPIAdapter { +func NewSealingAPIAdapter(api fullNodeFilteredAPI) SealingAPIAdapter { return SealingAPIAdapter{delegate: api} } func (s SealingAPIAdapter) StateMinerSectorSize(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (abi.SectorSize, error) { - tsk, err := types.TipSetKeyFromBytes(tok) - if err != nil { - return 0, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) - } - // TODO: update storage-fsm to just StateMinerInfo - mi, err := s.delegate.StateMinerInfo(ctx, maddr, tsk) + mi, err := s.StateMinerInfo(ctx, maddr, tok) if err != nil { return 0, err } @@ -70,14 +66,28 @@ func (s SealingAPIAdapter) StateMinerInitialPledgeCollateral(ctx context.Context return s.delegate.StateMinerInitialPledgeCollateral(ctx, a, pci, tsk) } -func (s SealingAPIAdapter) StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (address.Address, error) { +func (s SealingAPIAdapter) StateMinerInfo(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (miner.MinerInfo, error) { tsk, err := types.TipSetKeyFromBytes(tok) if err != nil { - return address.Undef, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) + return miner.MinerInfo{}, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) } // TODO: update storage-fsm to just StateMinerInfo - mi, err := 
s.delegate.StateMinerInfo(ctx, maddr, tsk) + return s.delegate.StateMinerInfo(ctx, maddr, tsk) +} + +func (s SealingAPIAdapter) StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (big.Int, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) + } + + return s.delegate.StateMinerAvailableBalance(ctx, maddr, tsk) +} + +func (s SealingAPIAdapter) StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (address.Address, error) { + // TODO: update storage-fsm to just StateMinerInfo + mi, err := s.StateMinerInfo(ctx, maddr, tok) if err != nil { return address.Undef, err } @@ -103,7 +113,7 @@ func (s SealingAPIAdapter) StateMinerSectorAllocated(ctx context.Context, maddr } func (s SealingAPIAdapter) StateWaitMsg(ctx context.Context, mcid cid.Cid) (sealing.MsgLookup, error) { - wmsg, err := s.delegate.StateWaitMsg(ctx, mcid, build.MessageConfidence) + wmsg, err := s.delegate.StateWaitMsg(ctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { return sealing.MsgLookup{}, err } @@ -120,7 +130,7 @@ func (s SealingAPIAdapter) StateWaitMsg(ctx context.Context, mcid cid.Cid) (seal } func (s SealingAPIAdapter) StateSearchMsg(ctx context.Context, c cid.Cid) (*sealing.MsgLookup, error) { - wmsg, err := s.delegate.StateSearchMsg(ctx, c) + wmsg, err := s.delegate.StateSearchMsg(ctx, types.EmptyTSK, c, api.LookbackNoLimit, true) if err != nil { return nil, err } @@ -146,10 +156,28 @@ func (s SealingAPIAdapter) StateComputeDataCommitment(ctx context.Context, maddr return cid.Undef, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) } - ccparams, err := actors.SerializeParams(&market2.ComputeDataCommitmentParams{ - DealIDs: deals, - SectorType: sectorType, - }) + nv, err := s.delegate.StateNetworkVersion(ctx, tsk) + if err != nil { + return cid.Cid{}, err 
+ } + + var ccparams []byte + if nv < network.Version13 { + ccparams, err = actors.SerializeParams(&market2.ComputeDataCommitmentParams{ + DealIDs: deals, + SectorType: sectorType, + }) + } else { + ccparams, err = actors.SerializeParams(&market5.ComputeDataCommitmentParams{ + Inputs: []*market5.SectorDataSpec{ + { + DealIDs: deals, + SectorType: sectorType, + }, + }, + }) + } + if err != nil { return cid.Undef, xerrors.Errorf("computing params for ComputeDataCommitment: %w", err) } @@ -169,12 +197,25 @@ func (s SealingAPIAdapter) StateComputeDataCommitment(ctx context.Context, maddr return cid.Undef, xerrors.Errorf("receipt for ComputeDataCommitment had exit code %d", r.MsgRct.ExitCode) } - var c cbg.CborCid - if err := c.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil { + if nv < network.Version13 { + var c cbg.CborCid + if err := c.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil { + return cid.Undef, xerrors.Errorf("failed to unmarshal CBOR to CborCid: %w", err) + } + + return cid.Cid(c), nil + } + + var cr market5.ComputeDataCommitmentReturn + if err := cr.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil { return cid.Undef, xerrors.Errorf("failed to unmarshal CBOR to CborCid: %w", err) } - return cid.Cid(c), nil + if len(cr.CommDs) != 1 { + return cid.Undef, xerrors.Errorf("CommD output must have 1 entry") + } + + return cid.Cid(cr.CommDs[0]), nil } func (s SealingAPIAdapter) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok sealing.TipSetToken) (*miner.SectorPreCommitOnChainInfo, error) { @@ -188,7 +229,7 @@ func (s SealingAPIAdapter) StateSectorPreCommitInfo(ctx context.Context, maddr a return nil, xerrors.Errorf("handleSealFailed(%d): temp error: %+v", sectorNumber, err) } - stor := store.ActorStore(ctx, apibstore.NewAPIBlockstore(s.delegate)) + stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(s.delegate)) state, err := miner.Load(stor, act) if err != nil { @@ 
-243,7 +284,34 @@ func (s SealingAPIAdapter) StateSectorPartition(ctx context.Context, maddr addre return nil, nil // not found } -func (s SealingAPIAdapter) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, tok sealing.TipSetToken) (market.DealProposal, error) { +func (s SealingAPIAdapter) StateMinerPartitions(ctx context.Context, maddr address.Address, dlIdx uint64, tok sealing.TipSetToken) ([]api.Partition, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return nil, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) + } + + return s.delegate.StateMinerPartitions(ctx, maddr, dlIdx, tsk) +} + +func (s SealingAPIAdapter) StateLookupID(ctx context.Context, addr address.Address, tok sealing.TipSetToken) (address.Address, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return address.Undef, err + } + + return s.delegate.StateLookupID(ctx, addr, tsk) +} + +func (s SealingAPIAdapter) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, tok sealing.TipSetToken) (*api.MarketDeal, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return nil, err + } + + return s.delegate.StateMarketStorageDeal(ctx, dealID, tsk) +} + +func (s SealingAPIAdapter) StateMarketStorageDealProposal(ctx context.Context, dealID abi.DealID, tok sealing.TipSetToken) (market.DealProposal, error) { tsk, err := types.TipSetKeyFromBytes(tok) if err != nil { return market.DealProposal{}, err @@ -266,6 +334,15 @@ func (s SealingAPIAdapter) StateNetworkVersion(ctx context.Context, tok sealing. 
return s.delegate.StateNetworkVersion(ctx, tsk) } +func (s SealingAPIAdapter) StateMinerProvingDeadline(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (*dline.Info, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return nil, err + } + + return s.delegate.StateMinerProvingDeadline(ctx, maddr, tsk) +} + func (s SealingAPIAdapter) SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) { msg := types.Message{ To: to, @@ -292,6 +369,24 @@ func (s SealingAPIAdapter) ChainHead(ctx context.Context) (sealing.TipSetToken, return head.Key().Bytes(), head.Height(), nil } +func (s SealingAPIAdapter) ChainBaseFee(ctx context.Context, tok sealing.TipSetToken) (abi.TokenAmount, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return big.Zero(), err + } + + ts, err := s.delegate.ChainGetTipSet(ctx, tsk) + if err != nil { + return big.Zero(), err + } + + return ts.Blocks()[0].ParentBaseFee, nil +} + +func (s SealingAPIAdapter) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { + return s.delegate.ChainGetMessage(ctx, mc) +} + func (s SealingAPIAdapter) ChainGetRandomnessFromBeacon(ctx context.Context, tok sealing.TipSetToken, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { tsk, err := types.TipSetKeyFromBytes(tok) if err != nil { diff --git a/storage/addresses.go b/storage/addresses.go index f5640794ef3..f8f06ed9813 100644 --- a/storage/addresses.go +++ b/storage/addresses.go @@ -3,92 +3,154 @@ package storage import ( "context" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - - "golang.org/x/xerrors" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + 
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" ) -type AddrUse int - -const ( - PreCommitAddr AddrUse = iota - CommitAddr - PoStAddr -) - type addrSelectApi interface { WalletBalance(context.Context, address.Address) (types.BigInt, error) WalletHas(context.Context, address.Address) (bool, error) StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) + StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) } -func AddressFor(ctx context.Context, a addrSelectApi, mi miner.MinerInfo, use AddrUse, minFunds abi.TokenAmount) (address.Address, error) { - switch use { - case PreCommitAddr, CommitAddr: - // always use worker, at least for now - return mi.Worker, nil +type AddressSelector struct { + api.AddressConfig +} + +func (as *AddressSelector) AddressFor(ctx context.Context, a addrSelectApi, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { + if as == nil { + // should only happen in some tests + log.Warnw("smart address selection disabled, using worker address") + return mi.Worker, big.Zero(), nil } - for _, addr := range mi.ControlAddresses { - b, err := a.WalletBalance(ctx, addr) - if err != nil { - return address.Undef, xerrors.Errorf("checking control address balance: %w", err) + var addrs []address.Address + switch use { + case api.PreCommitAddr: + addrs = append(addrs, as.PreCommitControl...) + case api.CommitAddr: + addrs = append(addrs, as.CommitControl...) + case api.TerminateSectorsAddr: + addrs = append(addrs, as.TerminateControl...) + case api.DealPublishAddr: + addrs = append(addrs, as.DealPublishControl...) 
+ default: + defaultCtl := map[address.Address]struct{}{} + for _, a := range mi.ControlAddresses { + defaultCtl[a] = struct{}{} } - - if b.GreaterThanEqual(minFunds) { - k, err := a.StateAccountKey(ctx, addr, types.EmptyTSK) - if err != nil { - log.Errorw("getting account key", "error", err) - continue + delete(defaultCtl, mi.Owner) + delete(defaultCtl, mi.Worker) + + configCtl := append([]address.Address{}, as.PreCommitControl...) + configCtl = append(configCtl, as.CommitControl...) + configCtl = append(configCtl, as.TerminateControl...) + configCtl = append(configCtl, as.DealPublishControl...) + + for _, addr := range configCtl { + if addr.Protocol() != address.ID { + var err error + addr, err = a.StateLookupID(ctx, addr, types.EmptyTSK) + if err != nil { + log.Warnw("looking up control address", "address", addr, "error", err) + continue + } } - have, err := a.WalletHas(ctx, k) - if err != nil { - return address.Undef, xerrors.Errorf("failed to check control address: %w", err) - } + delete(defaultCtl, addr) + } + + for a := range defaultCtl { + addrs = append(addrs, a) + } + } + + if len(addrs) == 0 || !as.DisableWorkerFallback { + addrs = append(addrs, mi.Worker) + } + if !as.DisableOwnerFallback { + addrs = append(addrs, mi.Owner) + } + + return pickAddress(ctx, a, mi, goodFunds, minFunds, addrs) +} + +func pickAddress(ctx context.Context, a addrSelectApi, mi miner.MinerInfo, goodFunds, minFunds abi.TokenAmount, addrs []address.Address) (address.Address, abi.TokenAmount, error) { + leastBad := mi.Worker + bestAvail := minFunds - if !have { - log.Errorw("don't have key", "key", k) + ctl := map[address.Address]struct{}{} + for _, a := range append(mi.ControlAddresses, mi.Owner, mi.Worker) { + ctl[a] = struct{}{} + } + + for _, addr := range addrs { + if addr.Protocol() != address.ID { + var err error + addr, err = a.StateLookupID(ctx, addr, types.EmptyTSK) + if err != nil { + log.Warnw("looking up control address", "address", addr, "error", err) continue } + } - 
return addr, nil + if _, ok := ctl[addr]; !ok { + log.Warnw("non-control address configured for sending messages", "address", addr) + continue } - log.Warnw("control address didn't have enough funds for window post message", "address", addr, "required", types.FIL(minFunds), "balance", types.FIL(b)) + if maybeUseAddress(ctx, a, addr, goodFunds, &leastBad, &bestAvail) { + return leastBad, bestAvail, nil + } } - // Try to use the owner account if we can, fallback to worker if we can't + log.Warnw("No address had enough funds to for full message Fee, selecting least bad address", "address", leastBad, "balance", types.FIL(bestAvail), "optimalFunds", types.FIL(goodFunds), "minFunds", types.FIL(minFunds)) + + return leastBad, bestAvail, nil +} - b, err := a.WalletBalance(ctx, mi.Owner) +func maybeUseAddress(ctx context.Context, a addrSelectApi, addr address.Address, goodFunds abi.TokenAmount, leastBad *address.Address, bestAvail *abi.TokenAmount) bool { + b, err := a.WalletBalance(ctx, addr) if err != nil { - return address.Undef, xerrors.Errorf("checking owner balance: %w", err) + log.Errorw("checking control address balance", "addr", addr, "error", err) + return false } - if !b.GreaterThanEqual(minFunds) { - return mi.Worker, nil - } + if b.GreaterThanEqual(goodFunds) { + k, err := a.StateAccountKey(ctx, addr, types.EmptyTSK) + if err != nil { + log.Errorw("getting account key", "error", err) + return false + } - k, err := a.StateAccountKey(ctx, mi.Owner, types.EmptyTSK) - if err != nil { - log.Errorw("getting owner account key", "error", err) - return mi.Worker, nil - } + have, err := a.WalletHas(ctx, k) + if err != nil { + log.Errorw("failed to check control address", "addr", addr, "error", err) + return false + } - have, err := a.WalletHas(ctx, k) - if err != nil { - return address.Undef, xerrors.Errorf("failed to check owner address: %w", err) + if !have { + log.Errorw("don't have key", "key", k, "address", addr) + return false + } + + *leastBad = addr + *bestAvail 
= b + return true } - if !have { - return mi.Worker, nil + if b.GreaterThan(*bestAvail) { + *leastBad = addr + *bestAvail = b } - return mi.Owner, nil + log.Warnw("address didn't have enough funds to send message", "address", addr, "required", types.FIL(goodFunds), "balance", types.FIL(b)) + return false } diff --git a/storage/miner.go b/storage/miner.go index 378c12b849d..cdacc273492 100644 --- a/storage/miner.go +++ b/storage/miner.go @@ -5,26 +5,25 @@ import ( "errors" "time" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/go-bitfield" - "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/host" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/specs-storage/storage" + sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -40,14 +39,23 @@ import ( var log = logging.Logger("storageminer") +// Miner is the central miner entrypoint object inside Lotus. It is +// instantiated in the node builder, along with the WindowPoStScheduler. +// +// This object is the owner of the sealing pipeline. Most of the actual logic +// lives in the storage-sealing module (sealing.Sealing), and the Miner object +// exposes it to the rest of the system by proxying calls. 
+// +// Miner#Run starts the sealing FSM. type Miner struct { - api storageMinerApi - feeCfg config.MinerFeeConfig - h host.Host - sealer sectorstorage.SectorManager - ds datastore.Batching - sc sealing.SectorIDCounter - verif ffiwrapper.Verifier + api fullNodeFilteredAPI + feeCfg config.MinerFeeConfig + sealer sectorstorage.SectorManager + ds datastore.Batching + sc sealing.SectorIDCounter + verif ffiwrapper.Verifier + prover ffiwrapper.Prover + addrSel *AddressSelector maddr address.Address @@ -68,7 +76,9 @@ type SealingStateEvt struct { Error string } -type storageMinerApi interface { +// fullNodeFilteredAPI is the subset of the full node API the Miner needs from +// a Lotus full node. +type fullNodeFilteredAPI interface { // Call a read only method on actors (no interaction with the chain required) StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) @@ -76,25 +86,28 @@ type storageMinerApi interface { StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) + StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tok types.TipSetKey) (types.BigInt, error) StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error) StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) 
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) - StateSearchMsg(context.Context, cid.Cid) (*api.MsgLookup, error) - StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) // TODO: removeme eventually + StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) - StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) + StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error) GasEstimateMessageGas(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) + GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) + GasEstimateGasPremium(_ context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) ChainHead(context.Context) (*types.TipSet, error) ChainNotify(context.Context) (<-chan 
[]*api.HeadChange, error) @@ -102,6 +115,7 @@ type storageMinerApi interface { ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) + ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) ChainReadObj(context.Context, cid.Cid) ([]byte, error) ChainHasObj(context.Context, cid.Cid) (bool, error) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) @@ -111,15 +125,27 @@ type storageMinerApi interface { WalletHas(context.Context, address.Address) (bool, error) } -func NewMiner(api storageMinerApi, maddr address.Address, h host.Host, ds datastore.Batching, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, gsd dtypes.GetSealingConfigFunc, feeCfg config.MinerFeeConfig, journal journal.Journal) (*Miner, error) { +// NewMiner creates a new Miner object. +func NewMiner(api fullNodeFilteredAPI, + maddr address.Address, + ds datastore.Batching, + sealer sectorstorage.SectorManager, + sc sealing.SectorIDCounter, + verif ffiwrapper.Verifier, + prover ffiwrapper.Prover, + gsd dtypes.GetSealingConfigFunc, + feeCfg config.MinerFeeConfig, + journal journal.Journal, + as *AddressSelector) (*Miner, error) { m := &Miner{ - api: api, - feeCfg: feeCfg, - h: h, - sealer: sealer, - ds: ds, - sc: sc, - verif: verif, + api: api, + feeCfg: feeCfg, + sealer: sealer, + ds: ds, + sc: sc, + verif: verif, + prover: prover, + addrSel: as, maddr: maddr, getSealConfig: gsd, @@ -130,6 +156,7 @@ func NewMiner(api storageMinerApi, maddr address.Address, h host.Host, ds datast return m, nil } +// Run starts the sealing FSM in the background, running preliminary checks first. 
func (m *Miner) Run(ctx context.Context) error { if err := m.runPreflightChecks(ctx); err != nil { return xerrors.Errorf("miner preflight checks failed: %w", err) @@ -140,17 +167,37 @@ func (m *Miner) Run(ctx context.Context) error { return xerrors.Errorf("getting miner info: %w", err) } - fc := sealing.FeeConfig{ - MaxPreCommitGasFee: abi.TokenAmount(m.feeCfg.MaxPreCommitGasFee), - MaxCommitGasFee: abi.TokenAmount(m.feeCfg.MaxCommitGasFee), - } + var ( + // consumer of chain head changes. + evts = events.NewEvents(ctx, m.api) + evtsAdapter = NewEventsAdapter(evts) + + // Create a shim to glue the API required by the sealing component + // with the API that Lotus is capable of providing. + // The shim translates between "tipset tokens" and tipset keys, and + // provides extra methods. + adaptedAPI = NewSealingAPIAdapter(m.api) - evts := events.NewEvents(ctx, m.api) - adaptedAPI := NewSealingAPIAdapter(m.api) - // TODO: Maybe we update this policy after actor upgrades? - pcp := sealing.NewBasicPreCommitPolicy(adaptedAPI, policy.GetMaxSectorExpirationExtension()-(md.WPoStProvingPeriod*2), md.PeriodStart%md.WPoStProvingPeriod) - m.sealing = sealing.New(adaptedAPI, fc, NewEventsAdapter(evts), m.maddr, m.ds, m.sealer, m.sc, m.verif, &pcp, sealing.GetSealingConfigFunc(m.getSealConfig), m.handleSealingNotifications) + // Instantiate a precommit policy. + defaultDuration = policy.GetMaxSectorExpirationExtension() - (md.WPoStProvingPeriod * 2) + provingBoundary = md.PeriodStart % md.WPoStProvingPeriod + // TODO: Maybe we update this policy after actor upgrades? + pcp = sealing.NewBasicPreCommitPolicy(adaptedAPI, defaultDuration, provingBoundary) + + // address selector. + as = func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { + return m.addrSel.AddressFor(ctx, m.api, mi, use, goodFunds, minFunds) + } + + // sealing configuration. 
+ cfg = sealing.GetSealingConfigFunc(m.getSealConfig) + ) + + // Instantiate the sealing FSM. + m.sealing = sealing.New(ctx, adaptedAPI, m.feeCfg, evtsAdapter, m.maddr, m.ds, m.sealer, m.sc, m.verif, m.prover, &pcp, cfg, m.handleSealingNotifications, as) + + // Run the sealing FSM. go m.sealing.Run(ctx) //nolint:errcheck // logged intside the function return nil @@ -172,6 +219,7 @@ func (m *Miner) Stop(ctx context.Context) error { return m.sealing.Stop(ctx) } +// runPreflightChecks verifies that preconditions to run the miner are satisfied. func (m *Miner) runPreflightChecks(ctx context.Context) error { mi, err := m.api.StateMinerInfo(ctx, m.maddr, types.EmptyTSK) if err != nil { @@ -203,7 +251,7 @@ type StorageWpp struct { winnRpt abi.RegisteredPoStProof } -func NewWinningPoStProver(api api.FullNode, prover storage.Prover, verifier ffiwrapper.Verifier, miner dtypes.MinerID) (*StorageWpp, error) { +func NewWinningPoStProver(api v1api.FullNode, prover storage.Prover, verifier ffiwrapper.Verifier, miner dtypes.MinerID) (*StorageWpp, error) { ma, err := address.NewIDAddress(uint64(miner)) if err != nil { return nil, err @@ -214,23 +262,13 @@ func NewWinningPoStProver(api api.FullNode, prover storage.Prover, verifier ffiw return nil, xerrors.Errorf("getting sector size: %w", err) } - spt, err := ffiwrapper.SealProofTypeFromSectorSize(mi.SectorSize) - if err != nil { - return nil, err - } - - wpt, err := spt.RegisteredWinningPoStProof() - if err != nil { - return nil, err - } - if build.InsecurePoStValidation { log.Warn("*****************************************************************************") log.Warn(" Generating fake PoSt proof! You should only see this while running tests! 
") log.Warn("*****************************************************************************") } - return &StorageWpp{prover, verifier, abi.ActorID(miner), wpt}, nil + return &StorageWpp{prover, verifier, abi.ActorID(miner), mi.WindowPoStProofType}, nil } var _ gen.WinningPoStProver = (*StorageWpp)(nil) diff --git a/storage/miner_sealing.go b/storage/miner_sealing.go new file mode 100644 index 00000000000..38b24e8c13c --- /dev/null +++ b/storage/miner_sealing.go @@ -0,0 +1,150 @@ +package storage + +import ( + "context" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/api" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/storage/sectorblocks" +) + +// TODO: refactor this to be direct somehow + +func (m *Miner) Address() address.Address { + return m.sealing.Address() +} + +func (m *Miner) StartPackingSector(sectorNum abi.SectorNumber) error { + return m.sealing.StartPacking(sectorNum) +} + +func (m *Miner) ListSectors() ([]sealing.SectorInfo, error) { + return m.sealing.ListSectors() +} + +func (m *Miner) PledgeSector(ctx context.Context) (storage.SectorRef, error) { + return m.sealing.PledgeSector(ctx) +} + +func (m *Miner) ForceSectorState(ctx context.Context, id abi.SectorNumber, state sealing.SectorState) error { + return m.sealing.ForceSectorState(ctx, id, state) +} + +func (m *Miner) RemoveSector(ctx context.Context, id abi.SectorNumber) error { + return m.sealing.Remove(ctx, id) +} + +func (m *Miner) TerminateSector(ctx context.Context, id abi.SectorNumber) error { + return m.sealing.Terminate(ctx, id) +} + +func (m *Miner) TerminateFlush(ctx context.Context) (*cid.Cid, error) { + return 
m.sealing.TerminateFlush(ctx) +} + +func (m *Miner) TerminatePending(ctx context.Context) ([]abi.SectorID, error) { + return m.sealing.TerminatePending(ctx) +} + +func (m *Miner) SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) { + return m.sealing.SectorPreCommitFlush(ctx) +} + +func (m *Miner) SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) { + return m.sealing.SectorPreCommitPending(ctx) +} + +func (m *Miner) CommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) { + return m.sealing.CommitFlush(ctx) +} + +func (m *Miner) CommitPending(ctx context.Context) ([]abi.SectorID, error) { + return m.sealing.CommitPending(ctx) +} + +func (m *Miner) MarkForUpgrade(id abi.SectorNumber) error { + return m.sealing.MarkForUpgrade(id) +} + +func (m *Miner) IsMarkedForUpgrade(id abi.SectorNumber) bool { + return m.sealing.IsMarkedForUpgrade(id) +} + +func (m *Miner) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d api.PieceDealInfo) (api.SectorOffset, error) { + return m.sealing.SectorAddPieceToAny(ctx, size, r, d) +} + +func (m *Miner) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { + if showOnChainInfo { + return api.SectorInfo{}, xerrors.Errorf("on-chain info not supported") + } + + info, err := m.sealing.GetSectorInfo(sid) + if err != nil { + return api.SectorInfo{}, err + } + + deals := make([]abi.DealID, len(info.Pieces)) + for i, piece := range info.Pieces { + if piece.DealInfo == nil { + continue + } + deals[i] = piece.DealInfo.DealID + } + + log := make([]api.SectorLog, len(info.Log)) + for i, l := range info.Log { + log[i] = api.SectorLog{ + Kind: l.Kind, + Timestamp: l.Timestamp, + Trace: l.Trace, + Message: l.Message, + } + } + + sInfo := api.SectorInfo{ + SectorID: sid, + State: api.SectorState(info.State), + CommD: info.CommD, + CommR: info.CommR, + Proof: info.Proof, + Deals: deals, + Ticket: 
api.SealTicket{ + Value: info.TicketValue, + Epoch: info.TicketEpoch, + }, + Seed: api.SealSeed{ + Value: info.SeedValue, + Epoch: info.SeedEpoch, + }, + PreCommitMsg: info.PreCommitMessage, + CommitMsg: info.CommitMessage, + Retries: info.InvalidProofs, + ToUpgrade: m.IsMarkedForUpgrade(sid), + + LastErr: info.LastErr, + Log: log, + // on chain info + SealProof: info.SectorType, + Activation: 0, + Expiration: 0, + DealWeight: big.Zero(), + VerifiedDealWeight: big.Zero(), + InitialPledge: big.Zero(), + OnTime: 0, + Early: 0, + } + + return sInfo, nil +} + +var _ sectorblocks.SectorBuilder = &Miner{} diff --git a/storage/mockstorage/preseal.go b/storage/mockstorage/preseal.go index 0417405c867..66a2a5054b9 100644 --- a/storage/mockstorage/preseal.go +++ b/storage/mockstorage/preseal.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-commp-utils/zerocomm" commcid "github.com/filecoin-project/go-fil-commcid" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -13,17 +14,20 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/extern/sector-storage/zerocomm" "github.com/filecoin-project/lotus/genesis" ) -func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis.Miner, *types.KeyInfo, error) { +func PreSeal(spt abi.RegisteredSealProof, maddr address.Address, sectors int) (*genesis.Miner, *types.KeyInfo, error) { k, err := wallet.GenerateKey(types.KTBLS) if err != nil { return nil, nil, err } + ssize, err := spt.SectorSize() + if err != nil { + return nil, nil, err + } + genm := &genesis.Miner{ ID: maddr, Owner: k.Address, @@ -34,15 +38,10 @@ func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis Sectors: make([]*genesis.PreSeal, sectors), } - st, err := 
ffiwrapper.SealProofTypeFromSectorSize(ssize) - if err != nil { - return nil, nil, err - } - for i := range genm.Sectors { preseal := &genesis.PreSeal{} - preseal.ProofType = st + preseal.ProofType = spt preseal.CommD = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()) d, _ := commcid.CIDToPieceCommitmentV1(preseal.CommD) r := mock.CommDR(d) diff --git a/storage/sealing.go b/storage/sealing.go deleted file mode 100644 index 2cd454e5b33..00000000000 --- a/storage/sealing.go +++ /dev/null @@ -1,53 +0,0 @@ -package storage - -import ( - "context" - "io" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - sealing "github.com/filecoin-project/lotus/extern/storage-sealing" -) - -// TODO: refactor this to be direct somehow - -func (m *Miner) Address() address.Address { - return m.sealing.Address() -} - -func (m *Miner) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d sealing.DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { - return m.sealing.AddPieceToAnySector(ctx, size, r, d) -} - -func (m *Miner) StartPackingSector(sectorNum abi.SectorNumber) error { - return m.sealing.StartPacking(sectorNum) -} - -func (m *Miner) ListSectors() ([]sealing.SectorInfo, error) { - return m.sealing.ListSectors() -} - -func (m *Miner) GetSectorInfo(sid abi.SectorNumber) (sealing.SectorInfo, error) { - return m.sealing.GetSectorInfo(sid) -} - -func (m *Miner) PledgeSector() error { - return m.sealing.PledgeSector() -} - -func (m *Miner) ForceSectorState(ctx context.Context, id abi.SectorNumber, state sealing.SectorState) error { - return m.sealing.ForceSectorState(ctx, id, state) -} - -func (m *Miner) RemoveSector(ctx context.Context, id abi.SectorNumber) error { - return m.sealing.Remove(ctx, id) -} - -func (m *Miner) MarkForUpgrade(id abi.SectorNumber) error { - return m.sealing.MarkForUpgrade(id) -} - -func (m *Miner) IsMarkedForUpgrade(id abi.SectorNumber) bool { - return 
m.sealing.IsMarkedForUpgrade(id) -} diff --git a/storage/sectorblocks/blocks.go b/storage/sectorblocks/blocks.go index bc8456a1f28..ad4ffc0db8a 100644 --- a/storage/sectorblocks/blocks.go +++ b/storage/sectorblocks/blocks.go @@ -16,11 +16,10 @@ import ( cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/go-state-types/abi" - sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage" ) type SealSerialization uint8 @@ -48,17 +47,22 @@ func DsKeyToDealID(key datastore.Key) (uint64, error) { return dealID, nil } +type SectorBuilder interface { + SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d api.PieceDealInfo) (api.SectorOffset, error) + SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) +} + type SectorBlocks struct { - *storage.Miner + SectorBuilder keys datastore.Batching keyLk sync.Mutex } -func NewSectorBlocks(miner *storage.Miner, ds dtypes.MetadataDS) *SectorBlocks { +func NewSectorBlocks(sb SectorBuilder, ds dtypes.MetadataDS) *SectorBlocks { sbc := &SectorBlocks{ - Miner: miner, - keys: namespace.Wrap(ds, dsPrefix), + SectorBuilder: sb, + keys: namespace.Wrap(ds, dsPrefix), } return sbc @@ -96,19 +100,19 @@ func (st *SectorBlocks) writeRef(dealID abi.DealID, sectorID abi.SectorNumber, o return st.keys.Put(DealIDToDsKey(dealID), newRef) // TODO: batch somehow } -func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d sealing.DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { - sn, offset, err := st.Miner.AddPieceToAnySector(ctx, size, r, d) +func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d api.PieceDealInfo) (abi.SectorNumber, 
abi.PaddedPieceSize, error) { + so, err := st.SectorBuilder.SectorAddPieceToAny(ctx, size, r, d) if err != nil { return 0, 0, err } // TODO: DealID has very low finality here - err = st.writeRef(d.DealID, sn, offset, size) + err = st.writeRef(d.DealID, so.Sector, so.Offset, size) if err != nil { return 0, 0, xerrors.Errorf("writeRef: %w", err) } - return sn, offset, nil + return so.Sector, so.Offset, nil } func (st *SectorBlocks) List() (map[uint64][]api.SealedRef, error) { diff --git a/storage/wdpost_changehandler.go b/storage/wdpost_changehandler.go index 2859957570e..7b80f2744a8 100644 --- a/storage/wdpost_changehandler.go +++ b/storage/wdpost_changehandler.go @@ -13,27 +13,33 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -const SubmitConfidence = 4 +const ( + SubmitConfidence = 4 + ChallengeConfidence = 10 +) type CompleteGeneratePoSTCb func(posts []miner.SubmitWindowedPoStParams, err error) type CompleteSubmitPoSTCb func(err error) -type changeHandlerAPI interface { +// wdPoStCommands is the subset of the WindowPoStScheduler + full node APIs used +// by the changeHandler to execute actions and query state. 
+type wdPoStCommands interface { StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) + startGeneratePoST(ctx context.Context, ts *types.TipSet, deadline *dline.Info, onComplete CompleteGeneratePoSTCb) context.CancelFunc startSubmitPoST(ctx context.Context, ts *types.TipSet, deadline *dline.Info, posts []miner.SubmitWindowedPoStParams, onComplete CompleteSubmitPoSTCb) context.CancelFunc onAbort(ts *types.TipSet, deadline *dline.Info) - failPost(err error, ts *types.TipSet, deadline *dline.Info) + recordPoStFailure(err error, ts *types.TipSet, deadline *dline.Info) } type changeHandler struct { - api changeHandlerAPI + api wdPoStCommands actor address.Address proveHdlr *proveHandler submitHdlr *submitHandler } -func newChangeHandler(api changeHandlerAPI, actor address.Address) *changeHandler { +func newChangeHandler(api wdPoStCommands, actor address.Address) *changeHandler { posts := newPostsCache() p := newProver(api, posts) s := newSubmitter(api, posts) @@ -143,7 +149,7 @@ type postResult struct { // proveHandler generates proofs type proveHandler struct { - api changeHandlerAPI + api wdPoStCommands posts *postsCache postResults chan *postResult @@ -160,7 +166,7 @@ type proveHandler struct { } func newProver( - api changeHandlerAPI, + api wdPoStCommands, posts *postsCache, ) *proveHandler { ctx, cancel := context.WithCancel(context.Background()) @@ -230,7 +236,7 @@ func (p *proveHandler) processHeadChange(ctx context.Context, newTS *types.TipSe } // Check if the chain is above the Challenge height for the post window - if newTS.Height() < di.Challenge { + if newTS.Height() < di.Challenge+ChallengeConfidence { return } @@ -245,7 +251,7 @@ func (p *proveHandler) processPostResult(res *postResult) { di := res.currPost.di if res.err != nil { // Proving failed so inform the API - p.api.failPost(res.err, res.ts, di) + p.api.recordPoStFailure(res.err, res.ts, di) log.Warnf("Aborted window post Proving (Deadline: %+v)", di) 
p.api.onAbort(res.ts, di) @@ -292,7 +298,7 @@ type postInfo struct { // submitHandler submits proofs on-chain type submitHandler struct { - api changeHandlerAPI + api wdPoStCommands posts *postsCache submitResults chan *submitResult @@ -316,7 +322,7 @@ type submitHandler struct { } func newSubmitter( - api changeHandlerAPI, + api wdPoStCommands, posts *postsCache, ) *submitHandler { ctx, cancel := context.WithCancel(context.Background()) @@ -485,7 +491,7 @@ func (s *submitHandler) submitIfReady(ctx context.Context, advance *types.TipSet func (s *submitHandler) processSubmitResult(res *submitResult) { if res.err != nil { // Submit failed so inform the API and go back to the start state - s.api.failPost(res.err, res.pw.ts, res.pw.di) + s.api.recordPoStFailure(res.err, res.pw.ts, res.pw.di) log.Warnf("Aborted window post Submitting (Deadline: %+v)", res.pw.di) s.api.onAbort(res.pw.ts, res.pw.di) diff --git a/storage/wdpost_changehandler_test.go b/storage/wdpost_changehandler_test.go index 6479c0d7ef3..a2283cb7cc2 100644 --- a/storage/wdpost_changehandler_test.go +++ b/storage/wdpost_changehandler_test.go @@ -191,7 +191,7 @@ func (m *mockAPI) wasAbortCalled() bool { return m.abortCalled } -func (m *mockAPI) failPost(err error, ts *types.TipSet, deadline *dline.Info) { +func (m *mockAPI) recordPoStFailure(err error, ts *types.TipSet, deadline *dline.Info) { } func (m *mockAPI) setChangeHandler(ch *changeHandler) { @@ -395,7 +395,7 @@ func TestChangeHandlerStartProvingNextDeadline(t *testing.T) { // Trigger a head change currentEpoch := abi.ChainEpoch(1) - go triggerHeadAdvance(t, s, currentEpoch) + go triggerHeadAdvance(t, s, currentEpoch+ChallengeConfidence) // Should start proving <-s.ch.proveHdlr.processedHeadChanges @@ -405,7 +405,7 @@ func TestChangeHandlerStartProvingNextDeadline(t *testing.T) { // Trigger a head change that advances the chain beyond the submit // confidence currentEpoch = 1 + SubmitConfidence - go triggerHeadAdvance(t, s, currentEpoch) + go 
triggerHeadAdvance(t, s, currentEpoch+ChallengeConfidence) // Should be no change to state yet <-s.ch.proveHdlr.processedHeadChanges @@ -424,7 +424,7 @@ func TestChangeHandlerStartProvingNextDeadline(t *testing.T) { // the next deadline go func() { di = nextDeadline(di) - currentEpoch = di.Challenge + currentEpoch = di.Challenge + ChallengeConfidence triggerHeadAdvance(t, s, currentEpoch) }() @@ -446,7 +446,7 @@ func TestChangeHandlerProvingRounds(t *testing.T) { for currentEpoch := abi.ChainEpoch(1); currentEpoch < miner.WPoStChallengeWindow*5; currentEpoch++ { // Trigger a head change di := mock.getDeadline(currentEpoch) - go triggerHeadAdvance(t, s, currentEpoch) + go triggerHeadAdvance(t, s, currentEpoch+ChallengeConfidence) // Wait for prover to process head change <-s.ch.proveHdlr.processedHeadChanges @@ -913,7 +913,7 @@ func TestChangeHandlerSubmitRevertTwoEpochs(t *testing.T) { // Move to the challenge epoch for the next deadline diE2 := nextDeadline(diE1) - currentEpoch = diE2.Challenge + currentEpoch = diE2.Challenge + ChallengeConfidence go triggerHeadAdvance(t, s, currentEpoch) // Should move to submitting state for epoch 1 @@ -1014,7 +1014,7 @@ func TestChangeHandlerSubmitRevertAdvanceLess(t *testing.T) { // Move to the challenge epoch for the next deadline diE2 := nextDeadline(diE1) - currentEpoch = diE2.Challenge + currentEpoch = diE2.Challenge + ChallengeConfidence go triggerHeadAdvance(t, s, currentEpoch) // Should move to submitting state for epoch 1 diff --git a/storage/wdpost_run.go b/storage/wdpost_run.go index 87438fec3ce..51a0729aff0 100644 --- a/storage/wdpost_run.go +++ b/storage/wdpost_run.go @@ -6,6 +6,7 @@ import ( "time" "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" @@ -19,16 +20,19 @@ import ( "golang.org/x/xerrors" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + 
"github.com/filecoin-project/specs-actors/v3/actors/runtime/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/types" ) -func (s *WindowPoStScheduler) failPost(err error, ts *types.TipSet, deadline *dline.Info) { +// recordPoStFailure records a failure in the journal. +func (s *WindowPoStScheduler) recordPoStFailure(err error, ts *types.TipSet, deadline *dline.Info) { s.journal.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} { c := evtCommon{Error: err} if ts != nil { @@ -42,7 +46,7 @@ func (s *WindowPoStScheduler) failPost(err error, ts *types.TipSet, deadline *dl } }) - log.Errorf("Got err %w - TODO handle errors", err) + log.Errorf("Got err %+v - TODO handle errors", err) /*s.failLk.Lock() if eps > s.failed { s.failed = eps @@ -96,9 +100,9 @@ func (s *WindowPoStScheduler) runGeneratePoST( ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.generatePoST") defer span.End() - posts, err := s.runPost(ctx, *deadline, ts) + posts, err := s.runPoStCycle(ctx, *deadline, ts) if err != nil { - log.Errorf("runPost failed: %+v", err) + log.Errorf("runPoStCycle failed: %+v", err) return nil, err } @@ -164,7 +168,7 @@ func (s *WindowPoStScheduler) runSubmitPoST( commRand, err := s.api.ChainGetRandomnessFromTickets(ctx, ts.Key(), crypto.DomainSeparationTag_PoStChainCommit, commEpoch, nil) if err != nil { err = xerrors.Errorf("failed to get chain randomness from tickets for windowPost (ts=%d; deadline=%d): %w", ts.Height(), commEpoch, err) - log.Errorf("submitPost failed: %+v", err) + log.Errorf("submitPoStMessage failed: %+v", err) return err } @@ -177,7 +181,7 @@ func (s *WindowPoStScheduler) runSubmitPoST( post.ChainCommitRand = commRand // Submit PoST - 
sm, submitErr := s.submitPost(ctx, post) + sm, submitErr := s.submitPoStMessage(ctx, post) if submitErr != nil { log.Errorf("submit window post failed: %+v", submitErr) } else { @@ -188,48 +192,67 @@ func (s *WindowPoStScheduler) runSubmitPoST( return submitErr } -func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.BitField) (bitfield.BitField, error) { +func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.BitField, tsk types.TipSetKey) (bitfield.BitField, error) { mid, err := address.IDFromAddress(s.actor) if err != nil { return bitfield.BitField{}, err } - sectors := make(map[abi.SectorID]struct{}) - var tocheck []abi.SectorID - err = check.ForEach(func(snum uint64) error { - s := abi.SectorID{ - Miner: abi.ActorID(mid), - Number: abi.SectorNumber(snum), - } - - tocheck = append(tocheck, s) - sectors[s] = struct{}{} - return nil - }) + sectorInfos, err := s.api.StateMinerSectors(ctx, s.actor, &check, tsk) if err != nil { - return bitfield.BitField{}, xerrors.Errorf("iterating over bitfield: %w", err) + return bitfield.BitField{}, err + } + + sectors := make(map[abi.SectorNumber]struct{}) + var tocheck []storage.SectorRef + for _, info := range sectorInfos { + sectors[info.SectorNumber] = struct{}{} + tocheck = append(tocheck, storage.SectorRef{ + ProofType: info.SealProof, + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: info.SectorNumber, + }, + }) } - bad, err := s.faultTracker.CheckProvable(ctx, s.proofType, tocheck) + bad, err := s.faultTracker.CheckProvable(ctx, s.proofType, tocheck, nil) if err != nil { return bitfield.BitField{}, xerrors.Errorf("checking provable sectors: %w", err) } - for _, id := range bad { - delete(sectors, id) + for id := range bad { + delete(sectors, id.Number) } log.Warnw("Checked sectors", "checked", len(tocheck), "good", len(sectors)) sbf := bitfield.New() for s := range sectors { - sbf.Set(uint64(s.Number)) + sbf.Set(uint64(s)) } return sbf, nil } -func (s 
*WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []api.Partition) ([]miner.RecoveryDeclaration, *types.SignedMessage, error) { - ctx, span := trace.StartSpan(ctx, "storage.checkNextRecoveries") +// declareRecoveries identifies sectors that were previously marked as faulty +// for our miner, but are now recovered (i.e. are now provable again) and +// still not reported as such. +// +// It then reports the recovery on chain via a `DeclareFaultsRecovered` +// message to our miner actor. +// +// This is always invoked ahead of time, before the deadline for the evaluated +// sectors arrives. That way, recoveries are declared in preparation for those +// sectors to be proven. +// +// If a declaration is made, it awaits for build.MessageConfidence confirmations +// on chain before returning. +// +// TODO: the waiting should happen in the background. Right now this +// is blocking/delaying the actual generation and submission of WindowPoSts in +// this deadline! 
+func (s *WindowPoStScheduler) declareRecoveries(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.RecoveryDeclaration, *types.SignedMessage, error) { + ctx, span := trace.StartSpan(ctx, "storage.declareRecoveries") defer span.End() faulty := uint64(0) @@ -254,7 +277,7 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin faulty += uc - recovered, err := s.checkSectors(ctx, unrecovered) + recovered, err := s.checkSectors(ctx, unrecovered, tsk) if err != nil { return nil, nil, xerrors.Errorf("checking unrecovered sectors: %w", err) } @@ -297,7 +320,7 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin Value: types.NewInt(0), } spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)} - if err := s.setSender(ctx, msg, spec); err != nil { + if err := s.prepareMessage(ctx, msg, spec); err != nil { return recoveries, nil, err } @@ -308,7 +331,7 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin log.Warnw("declare faults recovered Message CID", "cid", sm.Cid()) - rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence) + rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { return recoveries, sm, xerrors.Errorf("declare faults recovered wait error: %w", err) } @@ -320,8 +343,21 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin return recoveries, sm, nil } -func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition) ([]miner.FaultDeclaration, *types.SignedMessage, error) { - ctx, span := trace.StartSpan(ctx, "storage.checkNextFaults") +// declareFaults identifies the sectors on the specified proving deadline that +// are faulty, and reports the faults on chain via the `DeclareFaults` message +// to our miner actor. 
+// +// This is always invoked ahead of time, before the deadline for the evaluated +// sectors arrives. That way, faults are declared before a penalty is accrued. +// +// If a declaration is made, it awaits for build.MessageConfidence confirmations +// on chain before returning. +// +// TODO: the waiting should happen in the background. Right now this +// is blocking/delaying the actual generation and submission of WindowPoSts in +// this deadline! +func (s *WindowPoStScheduler) declareFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.FaultDeclaration, *types.SignedMessage, error) { + ctx, span := trace.StartSpan(ctx, "storage.declareFaults") defer span.End() bad := uint64(0) @@ -335,7 +371,7 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, return nil, nil, xerrors.Errorf("determining non faulty sectors: %w", err) } - good, err := s.checkSectors(ctx, nonFaulty) + good, err := s.checkSectors(ctx, nonFaulty, tsk) if err != nil { return nil, nil, xerrors.Errorf("checking sectors: %w", err) } @@ -382,7 +418,7 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, Value: types.NewInt(0), // TODO: Is there a fee? 
} spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)} - if err := s.setSender(ctx, msg, spec); err != nil { + if err := s.prepareMessage(ctx, msg, spec); err != nil { return faults, nil, err } @@ -393,7 +429,7 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, log.Warnw("declare faults Message CID", "cid", sm.Cid()) - rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence) + rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { return faults, sm, xerrors.Errorf("declare faults wait error: %w", err) } @@ -405,12 +441,18 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, return faults, sm, nil } -func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *types.TipSet) ([]miner.SubmitWindowedPoStParams, error) { - ctx, span := trace.StartSpan(ctx, "storage.runPost") +// runPoStCycle runs a full cycle of the PoSt process: +// +// 1. performs recovery declarations for the next deadline. +// 2. performs fault declarations for the next deadline. +// 3. computes and submits proofs, batching partitions and making sure they +// don't exceed message capacity. +func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, ts *types.TipSet) ([]miner.SubmitWindowedPoStParams, error) { + ctx, span := trace.StartSpan(ctx, "storage.runPoStCycle") defer span.End() go func() { - // TODO: extract from runPost, run on fault cutoff boundaries + // TODO: extract from runPoStCycle, run on fault cutoff boundaries // check faults / recoveries for the *next* deadline. 
It's already too // late to declare them for this deadline @@ -438,7 +480,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty } ) - if recoveries, sigmsg, err = s.checkNextRecoveries(context.TODO(), declDeadline, partitions); err != nil { + if recoveries, sigmsg, err = s.declareRecoveries(context.TODO(), declDeadline, partitions, ts.Key()); err != nil { // TODO: This is potentially quite bad, but not even trying to post when this fails is objectively worse log.Errorf("checking sector recoveries: %v", err) } @@ -457,7 +499,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty return // FORK: declaring faults after ignition upgrade makes no sense } - if faults, sigmsg, err = s.checkNextFaults(context.TODO(), declDeadline, partitions); err != nil { + if faults, sigmsg, err = s.declareFaults(context.TODO(), declDeadline, partitions, ts.Key()); err != nil { // TODO: This is also potentially really bad, but we try to post anyways log.Errorf("checking sector faults: %v", err) } @@ -476,7 +518,12 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty return nil, xerrors.Errorf("failed to marshal address to cbor: %w", err) } - rand, err := s.api.ChainGetRandomnessFromBeacon(ctx, ts.Key(), crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, buf.Bytes()) + headTs, err := s.api.ChainHead(ctx) + if err != nil { + return nil, xerrors.Errorf("getting current head: %w", err) + } + + rand, err := s.api.ChainGetRandomnessFromBeacon(ctx, headTs.Key(), crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, buf.Bytes()) if err != nil { return nil, xerrors.Errorf("failed to get chain randomness from beacon for window post (ts=%d; deadline=%d): %w", ts.Height(), di, err) } @@ -487,9 +534,14 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty return nil, xerrors.Errorf("getting partitions: %w", err) } + nv, err := 
s.api.StateNetworkVersion(ctx, ts.Key()) + if err != nil { + return nil, xerrors.Errorf("getting network version: %w", err) + } + // Split partitions into batches, so as not to exceed the number of sectors // allowed in a single message - partitionBatches, err := s.batchPartitions(partitions) + partitionBatches, err := s.batchPartitions(partitions, nv) if err != nil { return nil, err } @@ -510,10 +562,10 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty skipCount := uint64(0) postSkipped := bitfield.New() - var postOut []proof2.PoStProof - somethingToProve := true + somethingToProve := false - for retries := 0; retries < 5; retries++ { + // Retry until we run out of sectors to prove. + for retries := 0; ; retries++ { var partitions []miner.PoStPartition var sinfos []proof2.SectorInfo for partIdx, partition := range batch { @@ -527,7 +579,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty return nil, xerrors.Errorf("adding recoveries to set of sectors to prove: %w", err) } - good, err := s.checkSectors(ctx, toProve) + good, err := s.checkSectors(ctx, toProve, ts.Key()) if err != nil { return nil, xerrors.Errorf("checking sectors to skip: %w", err) } @@ -567,7 +619,6 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty if len(sinfos) == 0 { // nothing to prove for this batch - somethingToProve = false break } @@ -585,27 +636,75 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty return nil, err } - var ps []abi.SectorID - postOut, ps, err = s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), sinfos, abi.PoStRandomness(rand)) + postOut, ps, err := s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), sinfos, append(abi.PoStRandomness{}, rand...)) elapsed := time.Since(tsStart) log.Infow("computing window post", "batch", batchIdx, "elapsed", elapsed) if err == nil { - // Proof generation successful, stop retrying - params.Partitions = 
append(params.Partitions, partitions...) + // If we proved nothing, something is very wrong. + if len(postOut) == 0 { + return nil, xerrors.Errorf("received no proofs back from generate window post") + } + + headTs, err := s.api.ChainHead(ctx) + if err != nil { + return nil, xerrors.Errorf("getting current head: %w", err) + } + + checkRand, err := s.api.ChainGetRandomnessFromBeacon(ctx, headTs.Key(), crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, buf.Bytes()) + if err != nil { + return nil, xerrors.Errorf("failed to get chain randomness from beacon for window post (ts=%d; deadline=%d): %w", ts.Height(), di, err) + } + + if !bytes.Equal(checkRand, rand) { + log.Warnw("windowpost randomness changed", "old", rand, "new", checkRand, "ts-height", ts.Height(), "challenge-height", di.Challenge, "tsk", ts.Key()) + rand = checkRand + continue + } + + // If we generated an incorrect proof, try again. + if correct, err := s.verifier.VerifyWindowPoSt(ctx, proof.WindowPoStVerifyInfo{ + Randomness: abi.PoStRandomness(checkRand), + Proofs: postOut, + ChallengedSectors: sinfos, + Prover: abi.ActorID(mid), + }); err != nil { + log.Errorw("window post verification failed", "post", postOut, "error", err) + time.Sleep(5 * time.Second) + continue + } else if !correct { + log.Errorw("generated incorrect window post proof", "post", postOut, "error", err) + continue + } + // Proof generation successful, stop retrying + somethingToProve = true + params.Partitions = partitions + params.Proofs = postOut break } // Proof generation failed, so retry if len(ps) == 0 { + // If we didn't skip any new sectors, we failed + // for some other reason and we need to abort. return nil, xerrors.Errorf("running window post failed: %w", err) } + // TODO: maybe mark these as faulty somewhere? 
log.Warnw("generate window post skipped sectors", "sectors", ps, "error", err, "try", retries) + // Explicitly make sure we haven't aborted this PoSt + // (GenerateWindowPoSt may or may not check this). + // Otherwise, we could try to continue proving a + // deadline after the deadline has ended. + if ctx.Err() != nil { + log.Warnw("aborting PoSt due to context cancellation", "error", ctx.Err(), "deadline", di.Index) + return nil, ctx.Err() + } + skipCount += uint64(len(ps)) for _, sector := range ps { postSkipped.Set(uint64(sector.Number)) @@ -617,19 +716,13 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty continue } - if len(postOut) == 0 { - return nil, xerrors.Errorf("received no proofs back from generate window post") - } - - params.Proofs = postOut - posts = append(posts, params) } return posts, nil } -func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition) ([][]api.Partition, error) { +func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv network.Version) ([][]api.Partition, error) { // We don't want to exceed the number of sectors allowed in a message. 
// So given the number of sectors in a partition, work out the number of // partitions that can be in a message without exceeding sectors per @@ -640,11 +733,16 @@ func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition) ([][]a // sectors per partition 3: ooo // partitions per message 2: oooOOO // <1><2> (3rd doesn't fit) - partitionsPerMsg, err := policy.GetMaxPoStPartitions(s.proofType) + partitionsPerMsg, err := policy.GetMaxPoStPartitions(nv, s.proofType) if err != nil { return nil, xerrors.Errorf("getting sectors per partition: %w", err) } + // Also respect the AddressedPartitionsMax (which is the same as DeclarationsMax (which is all really just MaxPartitionsPerDeadline)) + if partitionsPerMsg > policy.GetDeclarationsMax(nv) { + partitionsPerMsg = policy.GetDeclarationsMax(nv) + } + // The number of messages will be: // ceiling(number of partitions / partitions per message) batchCount := len(partitions) / partitionsPerMsg @@ -705,7 +803,10 @@ func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, return proofSectors, nil } -func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.SubmitWindowedPoStParams) (*types.SignedMessage, error) { +// submitPoStMessage builds a SubmitWindowedPoSt message and submits it to +// the mpool. It doesn't synchronously block on confirmations, but it does +// monitor in the background simply for the purposes of logging. 
+func (s *WindowPoStScheduler) submitPoStMessage(ctx context.Context, proof *miner.SubmitWindowedPoStParams) (*types.SignedMessage, error) { ctx, span := trace.StartSpan(ctx, "storage.commitPost") defer span.End() @@ -723,13 +824,11 @@ func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.Submi Value: types.NewInt(0), } spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)} - if err := s.setSender(ctx, msg, spec); err != nil { + if err := s.prepareMessage(ctx, msg, spec); err != nil { return nil, err } - // TODO: consider maybe caring about the output sm, err := s.api.MpoolPushMessage(ctx, msg, spec) - if err != nil { return nil, xerrors.Errorf("pushing message to mpool: %w", err) } @@ -737,7 +836,7 @@ func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.Submi log.Infof("Submitted window post: %s", sm.Cid()) go func() { - rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence) + rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { log.Error(err) return @@ -753,14 +852,20 @@ func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.Submi return sm, nil } -func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) error { +// prepareMessage prepares a message before sending it, setting: +// +// * the sender (from the AddressSelector, falling back to the worker address if none set) +// * the right gas parameters +func (s *WindowPoStScheduler) prepareMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) error { mi, err := s.api.StateMinerInfo(ctx, s.actor, types.EmptyTSK) if err != nil { return xerrors.Errorf("error getting miner info: %w", err) } - // use the worker as a fallback + // set the worker as a fallback msg.From = mi.Worker + // (optimal) initial estimation with some overestimation that guarantees + // 
block inclusion within the next 20 tipsets. gm, err := s.api.GasEstimateMessageGas(ctx, msg, spec, types.EmptyTSK) if err != nil { log.Errorw("estimating gas", "error", err) @@ -768,14 +873,42 @@ func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message, } *msg = *gm - minFunds := big.Add(msg.RequiredFunds(), msg.Value) + // calculate a more frugal estimation; premium is estimated to guarantee + // inclusion within 5 tipsets, and fee cap is estimated for inclusion + // within 4 tipsets. + minGasFeeMsg := *msg + + minGasFeeMsg.GasPremium, err = s.api.GasEstimateGasPremium(ctx, 5, msg.From, msg.GasLimit, types.EmptyTSK) + if err != nil { + log.Errorf("failed to estimate minimum gas premium: %+v", err) + minGasFeeMsg.GasPremium = msg.GasPremium + } + + minGasFeeMsg.GasFeeCap, err = s.api.GasEstimateFeeCap(ctx, &minGasFeeMsg, 4, types.EmptyTSK) + if err != nil { + log.Errorf("failed to estimate minimum gas fee cap: %+v", err) + minGasFeeMsg.GasFeeCap = msg.GasFeeCap + } + + // goodFunds = funds needed for optimal inclusion probability. + // minFunds = funds needed for more speculative inclusion probability. 
+ goodFunds := big.Add(msg.RequiredFunds(), msg.Value) + minFunds := big.Min(big.Add(minGasFeeMsg.RequiredFunds(), minGasFeeMsg.Value), goodFunds) - pa, err := AddressFor(ctx, s.api, mi, PoStAddr, minFunds) + pa, avail, err := s.addrSel.AddressFor(ctx, s.api, mi, api.PoStAddr, goodFunds, minFunds) if err != nil { log.Errorw("error selecting address for window post", "error", err) return nil } msg.From = pa + bestReq := big.Add(msg.RequiredFunds(), msg.Value) + if avail.LessThan(bestReq) { + mff := func() (abi.TokenAmount, error) { + return msg.RequiredFunds(), nil + } + + messagepool.CapGasFee(mff, msg, &api.MessageSendSpec{MaxFee: big.Min(big.Sub(avail, msg.Value), msg.RequiredFunds())}) + } return nil } diff --git a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go index a76483a5f09..61f2a324b08 100644 --- a/storage/wdpost_run_test.go +++ b/storage/wdpost_run_test.go @@ -5,12 +5,17 @@ import ( "context" "testing" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" "github.com/ipfs/go-cid" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -21,18 +26,21 @@ import ( miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" tutils "github.com/filecoin-project/specs-actors/v2/support/testing" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" + 
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/journal" ) type mockStorageMinerAPI struct { partitions []api.Partition pushedMessages chan *types.Message - storageMinerApi + fullNodeFilteredAPI } func newMockStorageMinerAPI() *mockStorageMinerAPI { @@ -48,6 +56,10 @@ func (m *mockStorageMinerAPI) StateMinerInfo(ctx context.Context, a address.Addr }, nil } +func (m *mockStorageMinerAPI) StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error) { + return build.NewestNetworkVersion, nil +} + func (m *mockStorageMinerAPI) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { return abi.Randomness("ticket rand"), nil } @@ -85,7 +97,7 @@ func (m *mockStorageMinerAPI) MpoolPushMessage(ctx context.Context, message *typ }, nil } -func (m *mockStorageMinerAPI) StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) { +func (m *mockStorageMinerAPI) StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { return &api.MsgLookup{ Receipt: types.MessageReceipt{ ExitCode: 0, @@ -93,8 +105,12 @@ func (m *mockStorageMinerAPI) StateWaitMsg(ctx context.Context, cid cid.Cid, con }, nil } -func (m *mockStorageMinerAPI) StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) { - return build.NewestNetworkVersion, nil +func (m *mockStorageMinerAPI) GasEstimateGasPremium(_ context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) { + return big.Zero(), nil +} + +func (m *mockStorageMinerAPI) GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) { + return big.Zero(), nil } type mockProver struct { @@ -113,12 +129,44 @@ func (m *mockProver) 
GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, si }, nil, nil } +type mockVerif struct { +} + +func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { + panic("implement me") +} + +func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { + if len(info.Proofs) != 1 { + return false, xerrors.Errorf("expected 1 proof entry") + } + + proof := info.Proofs[0] + + if !bytes.Equal(proof.ProofBytes, []byte("post-proof")) { + return false, xerrors.Errorf("bad proof") + } + return true, nil +} + +func (m mockVerif) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + panic("implement me") +} + +func (m mockVerif) VerifySeal(proof2.SealVerifyInfo) (bool, error) { + panic("implement me") +} + +func (m mockVerif) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) { + panic("implement me") +} + type mockFaultTracker struct { } -func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []abi.SectorID) ([]abi.SectorID, error) { - // Returns "bad" sectors so just return nil meaning all sectors are good - return nil, nil +func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) { + // Returns "bad" sectors so just return empty map meaning all sectors are good + return map[abi.SectorID]string{}, nil } // TestWDPostDoPost verifies that doPost will send the correct number of window @@ -133,13 +181,16 @@ func TestWDPostDoPost(t *testing.T) { mockStgMinerAPI := newMockStorageMinerAPI() // Get the number of sectors allowed in a partition for this proof type - sectorsPerPartition, err := builtin2.PoStProofWindowPoStPartitionSectors(proofType) + sectorsPerPartition, err := 
builtin5.PoStProofWindowPoStPartitionSectors(proofType) require.NoError(t, err) // Work out the number of partitions that can be included in a message // without exceeding the message sector limit + partitionsPerMsg, err := policy.GetMaxPoStPartitions(network.Version13, proofType) require.NoError(t, err) - partitionsPerMsg := int(miner2.AddressedSectorsMax / sectorsPerPartition) + if partitionsPerMsg > miner5.AddressedPartitionsMax { + partitionsPerMsg = miner5.AddressedPartitionsMax + } // Enough partitions to fill expectedMsgCount-1 messages partitionCount := (expectedMsgCount - 1) * partitionsPerMsg @@ -166,18 +217,20 @@ func TestWDPostDoPost(t *testing.T) { scheduler := &WindowPoStScheduler{ api: mockStgMinerAPI, prover: &mockProver{}, + verifier: &mockVerif{}, faultTracker: &mockFaultTracker{}, proofType: proofType, actor: postAct, journal: journal.NilJournal(), + addrSel: &AddressSelector{}, } di := &dline.Info{ - WPoStPeriodDeadlines: miner2.WPoStPeriodDeadlines, - WPoStProvingPeriod: miner2.WPoStProvingPeriod, - WPoStChallengeWindow: miner2.WPoStChallengeWindow, - WPoStChallengeLookback: miner2.WPoStChallengeLookback, - FaultDeclarationCutoff: miner2.FaultDeclarationCutoff, + WPoStPeriodDeadlines: miner5.WPoStPeriodDeadlines, + WPoStProvingPeriod: miner5.WPoStProvingPeriod, + WPoStChallengeWindow: miner5.WPoStChallengeWindow, + WPoStChallengeLookback: miner5.WPoStChallengeLookback, + FaultDeclarationCutoff: miner5.FaultDeclarationCutoff, } ts := mockTipSet(t) @@ -270,7 +323,7 @@ func (m *mockStorageMinerAPI) StateMinerInitialPledgeCollateral(ctx context.Cont panic("implement me") } -func (m *mockStorageMinerAPI) StateSearchMsg(ctx context.Context, cid cid.Cid) (*api.MsgLookup, error) { +func (m *mockStorageMinerAPI) StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { panic("implement me") } @@ -309,7 +362,7 @@ func (m *mockStorageMinerAPI) GasEstimateMessageGas(ctx 
context.Context, message } func (m *mockStorageMinerAPI) ChainHead(ctx context.Context) (*types.TipSet, error) { - panic("implement me") + return nil, nil } func (m *mockStorageMinerAPI) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { @@ -348,4 +401,4 @@ func (m *mockStorageMinerAPI) WalletHas(ctx context.Context, address address.Add return true, nil } -var _ storageMinerApi = &mockStorageMinerAPI{} +var _ fullNodeFilteredAPI = &mockStorageMinerAPI{} diff --git a/storage/wdpost_sched.go b/storage/wdpost_sched.go index 1a1422e1937..88357c5b37c 100644 --- a/storage/wdpost_sched.go +++ b/storage/wdpost_sched.go @@ -16,16 +16,25 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/node/config" "go.opencensus.io/trace" ) +// WindowPoStScheduler is the coordinator for WindowPoSt submissions, fault +// declaration, and recovery declarations. It watches the chain for reverts and +// applies, and schedules/run those processes as partition deadlines arrive. +// +// WindowPoStScheduler watches the chain though the changeHandler, which in turn +// turn calls the scheduler when the time arrives to do work. 
type WindowPoStScheduler struct { - api storageMinerApi + api fullNodeFilteredAPI feeCfg config.MinerFeeConfig + addrSel *AddressSelector prover storage.Prover + verifier ffiwrapper.Verifier faultTracker sectorstorage.FaultTracker proofType abi.RegisteredPoStProof partitionSectors uint64 @@ -40,23 +49,28 @@ type WindowPoStScheduler struct { // failLk sync.Mutex } -func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, sb storage.Prover, ft sectorstorage.FaultTracker, j journal.Journal, actor address.Address) (*WindowPoStScheduler, error) { +// NewWindowedPoStScheduler creates a new WindowPoStScheduler scheduler. +func NewWindowedPoStScheduler(api fullNodeFilteredAPI, + cfg config.MinerFeeConfig, + as *AddressSelector, + sp storage.Prover, + verif ffiwrapper.Verifier, + ft sectorstorage.FaultTracker, + j journal.Journal, + actor address.Address) (*WindowPoStScheduler, error) { mi, err := api.StateMinerInfo(context.TODO(), actor, types.EmptyTSK) if err != nil { return nil, xerrors.Errorf("getting sector size: %w", err) } - rt, err := mi.SealProofType.RegisteredWindowPoStProof() - if err != nil { - return nil, err - } - return &WindowPoStScheduler{ api: api, - feeCfg: fc, - prover: sb, + feeCfg: cfg, + addrSel: as, + prover: sp, + verifier: verif, faultTracker: ft, - proofType: rt, + proofType: mi.WindowPoStProofType, partitionSectors: mi.WindowPoStPartitionSectors, actor: actor, @@ -70,21 +84,24 @@ func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, sb }, nil } -type changeHandlerAPIImpl struct { - storageMinerApi - *WindowPoStScheduler -} - func (s *WindowPoStScheduler) Run(ctx context.Context) { - // Initialize change handler - chImpl := &changeHandlerAPIImpl{storageMinerApi: s.api, WindowPoStScheduler: s} - s.ch = newChangeHandler(chImpl, s.actor) + // Initialize change handler. + + // callbacks is a union of the fullNodeFilteredAPI and ourselves. 
+ callbacks := struct { + fullNodeFilteredAPI + *WindowPoStScheduler + }{s.api, s} + + s.ch = newChangeHandler(callbacks, s.actor) defer s.ch.shutdown() s.ch.start() - var notifs <-chan []*api.HeadChange - var err error - var gotCur bool + var ( + notifs <-chan []*api.HeadChange + err error + gotCur bool + ) // not fine to panic after this point for { diff --git a/system/resources.go b/system/resources.go new file mode 100644 index 00000000000..4c0d3894399 --- /dev/null +++ b/system/resources.go @@ -0,0 +1,63 @@ +package system + +import ( + "os" + + "github.com/dustin/go-humanize" + "github.com/elastic/gosigar" + logging "github.com/ipfs/go-log/v2" +) + +var ( + logSystem = logging.Logger("system") +) + +// EnvMaximumHeap is name of the environment variable with which the user can +// specify a maximum heap size to abide by. The value of the env variable should +// be in bytes, or in SI bytes (e.g. 32GiB). +const EnvMaximumHeap = "LOTUS_MAX_HEAP" + +// MemoryConstraints represents resource constraints that Lotus and the go +// runtime should abide by. It is a singleton object that's populated on +// initialization, and can be used by components for size calculations +// (e.g. caches). +type MemoryConstraints struct { + // MaxHeapMem is the maximum heap memory that has been set by the user + // through the LOTUS_MAX_HEAP env variable. If zero, there is no max heap + // limit set. + MaxHeapMem uint64 + + // TotalSystemMem is the total system memory as reported by go-sigar. If + // zero, it was impossible to determine the total system memory. + TotalSystemMem uint64 + + // EffectiveMemLimit is the memory limit in effect, in bytes. + // + // In order of precedence: + // 1. MaxHeapMem if non-zero. + // 2. TotalSystemMem if non-zero. + // 3. Zero (no known limit). + EffectiveMemLimit uint64 +} + +// GetMemoryConstraints returns the memory constraints for this process. 
+func GetMemoryConstraints() (ret MemoryConstraints) { + var mem gosigar.Mem + if err := mem.Get(); err != nil { + logSystem.Warnf("failed to acquire total system memory: %s", err) + } else { + ret.TotalSystemMem = mem.Total + ret.EffectiveMemLimit = mem.Total + } + + if v := os.Getenv(EnvMaximumHeap); v != "" { + bytes, err := humanize.ParseBytes(v) + if err != nil { + logSystem.Warnf("failed to parse %s env variable with value %s: %s; ignoring max heap limit", EnvMaximumHeap, v, err) + } else { + ret.MaxHeapMem = bytes + ret.EffectiveMemLimit = bytes + } + } + return ret +} diff --git a/testplans/DELVING.md b/testplans/DELVING.md new file mode 100644 index 00000000000..4c2d3639042 --- /dev/null +++ b/testplans/DELVING.md @@ -0,0 +1,193 @@ +# Delving into the unknown + +This write-up summarises how to debug what appears to be a mischievous Lotus +instance during our Testground tests. It also enumerates which assets are +useful to report suspicious behaviours upstream, in a way that they are +actionable. + +## Querying the Lotus RPC API + +The `local:docker` and `cluster:k8s` map ports that you specify in the +composition.toml, so you can access them externally. + +All our compositions should carry this fragment: + +```toml +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } +``` + +This tells Testground to expose the following ports: + +* `6060` => Go pprof. +* `1234` => Lotus full node RPC. +* `2345` => Lotus storage miner RPC. + +### `local:docker` + +1. Install the `lotus` binary on your host. +2. Find the container that you want to connect to in `docker ps`. + * Note that our _container names_ are slightly long, and they're the last + field on every line, so if your terminal is wrapping text, the port + numbers will end up ABOVE the friendly/recognizable container name (e.g. `tg-lotus-soup-deals-e2e-acfc60bc1727-miners-1`).
+ * The testground output displays the _container ID_ inside coloured angle + brackets, so if you spot something spurious in a particular node, you can + hone in on that one, e.g. `<< 54dd5ad916b2 >>`. + + ``` + ⟩ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 54dd5ad916b2 be3c18d7f0d4 "/testplan" 10 seconds ago Up 8 seconds 0.0.0.0:32788->1234/tcp, 0.0.0.0:32783->2345/tcp, 0.0.0.0:32773->6060/tcp, 0.0.0.0:32777->6060/tcp tg-lotus-soup-deals-e2e-acfc60bc1727-clients-2 + 53757489ce71 be3c18d7f0d4 "/testplan" 10 seconds ago Up 8 seconds 0.0.0.0:32792->1234/tcp, 0.0.0.0:32790->2345/tcp, 0.0.0.0:32781->6060/tcp, 0.0.0.0:32786->6060/tcp tg-lotus-soup-deals-e2e-acfc60bc1727-clients-1 + 9d3e83b71087 be3c18d7f0d4 "/testplan" 10 seconds ago Up 8 seconds 0.0.0.0:32791->1234/tcp, 0.0.0.0:32789->2345/tcp, 0.0.0.0:32779->6060/tcp, 0.0.0.0:32784->6060/tcp tg-lotus-soup-deals-e2e-acfc60bc1727-clients-0 + 7bd60e75ed0e be3c18d7f0d4 "/testplan" 10 seconds ago Up 8 seconds 0.0.0.0:32787->1234/tcp, 0.0.0.0:32782->2345/tcp, 0.0.0.0:32772->6060/tcp, 0.0.0.0:32776->6060/tcp tg-lotus-soup-deals-e2e-acfc60bc1727-miners-1 + dff229d7b342 be3c18d7f0d4 "/testplan" 10 seconds ago Up 9 seconds 0.0.0.0:32778->1234/tcp, 0.0.0.0:32774->2345/tcp, 0.0.0.0:32769->6060/tcp, 0.0.0.0:32770->6060/tcp tg-lotus-soup-deals-e2e-acfc60bc1727-miners-0 + 4cd67690e3b8 be3c18d7f0d4 "/testplan" 11 seconds ago Up 8 seconds 0.0.0.0:32785->1234/tcp, 0.0.0.0:32780->2345/tcp, 0.0.0.0:32771->6060/tcp, 0.0.0.0:32775->6060/tcp tg-lotus-soup-deals-e2e-acfc60bc1727-bootstrapper-0 + aeb334adf88d iptestground/sidecar:edge "testground sidecar …" 43 hours ago Up About an hour 0.0.0.0:32768->6060/tcp testground-sidecar + c1157500282b influxdb:1.8 "/entrypoint.sh infl…" 43 hours ago Up 25 seconds 0.0.0.0:8086->8086/tcp testground-influxdb + 99ca4c07fecc redis "docker-entrypoint.s…" 43 hours ago Up About an hour 0.0.0.0:6379->6379/tcp testground-redis + bf25c87488a5 bitnami/grafana "/run.sh" 43 hours ago Up 26 
seconds 0.0.0.0:3000->3000/tcp testground-grafana + cd1d6383eff7 goproxy/goproxy "/goproxy" 45 hours ago Up About a minute 8081/tcp testground-goproxy + ``` + +3. Take note of the port mapping. Imagine in the output above, we want to query + `54dd5ad916b2`. We'd use `localhost:32788`, as it forwards to the container's + 1234 port (Lotus Full Node RPC). +4. Run your Lotus CLI command setting the `FULLNODE_API_INFO` env variable, + which is a multiaddr: + + ```sh + $ FULLNODE_API_INFO=":/ip4/127.0.0.1/tcp/$port/http" lotus chain list + [...] + ``` + +--- + +Alternatively, you could download gawk and setup a script in you .bashrc or .zshrc similar to: + +``` +lprt() { + NAME=$1 + PORT=$2 + + docker ps --format "table {{.Names}}" | grep $NAME | xargs -I {} docker port {} $PORT | gawk --field-separator=":" '{print $2}' +} + +envs() { + NAME=$1 + + local REMOTE_PORT_1234=$(lprt $NAME 1234) + local REMOTE_PORT_2345=$(lprt $NAME 2345) + + export FULLNODE_API_INFO=":/ip4/127.0.0.1/tcp/$REMOTE_PORT_1234/http" + export STORAGE_API_INFO=":/ip4/127.0.0.1/tcp/$REMOTE_PORT_2345/http" + + echo "Setting \$FULLNODE_API_INFO to $FULLNODE_API_INFO" + echo "Setting \$STORAGE_API_INFO to $STORAGE_API_INFO" +} +``` + +Then call commands like: +``` +envs miners-0 +lotus chain list +``` + +### `cluster:k8s` + +Similar to `local:docker`, you pick a pod that you want to connect to and port-forward 1234 and 2345 to that specific pod, such as: + +``` +export PODNAME="tg-lotus-soup-ae620dfb2e19-miners-0" +kubectl port-forward pods/$PODNAME 1234:1234 2345:2345 + +export FULLNODE_API_INFO=":/ip4/127.0.0.1/tcp/1234/http" +export STORAGE_API_INFO=":/ip4/127.0.0.1/tcp/2345/http" +lotus-storage-miner storage-deals list +lotus-storage-miner storage-deals get-ask +``` + +### Useful commands / checks + +* **Making sure miners are on the same chain:** compare outputs of `lotus chain list`. +* **Checking deals:** `lotus client list-deals`. 
+* **Sector queries:** `lotus-storage-miner info` , `lotus-storage-miner proving info` +* **Sector sealing errors:** + * `STORAGE_API_INFO=":/ip4/127.0.0.1/tcp/53624/http" FULLNODE_API_INFO=":/ip4/127.0.0.1/tcp/53623/http" lotus-storage-miner sector info` + * `STORAGE_API_INFO=":/ip4/127.0.0.1/tcp/53624/http" FULLNODE_API_INFO=":/ip4/127.0.0.1/tcp/53623/http" lotus-storage-miner sector status ` + * `STORAGE_API_INFO=":/ip4/127.0.0.1/tcp/53624/http" FULLNODE_API_INFO=":/ip4/127.0.0.1/tcp/53623/http" lotus-storage-miner sector status --log ` + +## Viewing logs of a particular container `local:docker` + +This works for both started and stopped containers. Just get the container ID +(in double angle brackets in Testground output, on every log line), and do a: + +```shell script +$ docker logs $container_id +``` + +## Accessing the golang instrumentation + +Testground exposes a pprof endpoint under local port 6060, which both +`local:docker` and `cluster:k8s` map. + +For `local:docker`, see above to figure out which host port maps to the +container's 6060 port. + +## Acquiring a goroutine dump + +When things appear to be stuck, get a goroutine dump. + +```shell script +$ wget -O goroutine.out http://localhost:${pprof_port}/debug/pprof/goroutine?debug=2 +``` + +You can use whyrusleeping/stackparse to extract a summary: + +```shell script +$ go get github.com/whyrusleeping/stackparse +$ stackparse --summary goroutine.out +``` + +## Acquiring a CPU profile + +When the CPU appears to be spiking/rallying, grab a CPU profile. + +```shell script +$ wget -O profile.out http://localhost:${pprof_port}/debug/pprof/profile +``` + +Analyse it using `go tool pprof`.
Usually, generating a `png` graph is useful: + +```shell script +$ go tool pprof profile.out +File: testground +Type: cpu +Time: Jul 3, 2020 at 12:00am (WEST) +Duration: 30.07s, Total samples = 2.81s ( 9.34%) +Entering interactive mode (type "help" for commands, "o" for options) +(pprof) png +Generating report in profile003.png +``` + +## Submitting actionable reports / findings + +This is useful both internally (within the Oni team, so that peers can help) and +externally (when submitting a finding upstream). + +We don't need to play the full bug-hunting game on Lotus, but it's tremendously +useful to provide the necessary data so that any reports are actionable. + +These include: + +* test outputs (use `testground collect`). +* stack traces that appear in logs (whether panics or not). +* output of relevant Lotus CLI commands. +* if this is some kind of blockage / deadlock, goroutine dumps. +* if this is a CPU hotspot, a CPU profile would be useful. +* if this is a memory issue, a heap dump would be useful. + +**When submitting bugs upstream (Lotus), make sure to indicate:** + +* Lotus commit. +* FFI commit. 
diff --git a/testplans/Makefile b/testplans/Makefile new file mode 100644 index 00000000000..38f46baa8f8 --- /dev/null +++ b/testplans/Makefile @@ -0,0 +1,23 @@ +SHELL = /bin/bash + +.DEFAULT_GOAL := download-proofs + +download-proofs: + go run github.com/filecoin-project/go-paramfetch/paramfetch 2048 ./docker-images/proof-parameters.json + +build-images: + docker build -t "iptestground/oni-buildbase:v15-lotus" -f "docker-images/Dockerfile.oni-buildbase" "docker-images" + docker build -t "iptestground/oni-runtime:v10" -f "docker-images/Dockerfile.oni-runtime" "docker-images" + docker build -t "iptestground/oni-runtime:v10-debug" -f "docker-images/Dockerfile.oni-runtime-debug" "docker-images" + +push-images: + docker push iptestground/oni-buildbase:v15-lotus + docker push iptestground/oni-runtime:v10 + docker push iptestground/oni-runtime:v10-debug + +pull-images: + docker pull iptestground/oni-buildbase:v15-lotus + docker pull iptestground/oni-runtime:v10 + docker pull iptestground/oni-runtime:v10-debug + +.PHONY: download-proofs build-images push-images pull-images diff --git a/testplans/README-old-from-oni.md b/testplans/README-old-from-oni.md new file mode 100644 index 00000000000..1a6b5debe9a --- /dev/null +++ b/testplans/README-old-from-oni.md @@ -0,0 +1,254 @@ +# Project Oni 👹 + +Our mandate is: + +> To verify the successful end-to-end outcome of the filecoin protocol and filecoin implementations, under a variety of real-world and simulated scenarios. + +➡️ Find out more about our goals, requirements, execution plan, and team culture, in our [Project Description](https://docs.google.com/document/d/16jYL--EWYpJhxT9bakYq7ZBGLQ9SB940Wd1lTDOAbNE). 
+ +## Table of Contents + +- [Testing topics](#testing-topics) +- [Repository contents](#repository-contents) +- [Running the test cases](#running-the-test-cases) +- [Catalog](#catalog) +- [Debugging](#debugging) +- [Dependencies](#dependencies) +- [Docker images changelog](#docker-images-changelog) +- [Team](#team) + +## Testing topics + +These are the topics we are currently centering our testing efforts on. Our testing efforts include fault induction, stress tests, and end-to-end testing. + +* **slashing:** [_(view test scenarios)_](https://github.com/filecoin-project/oni/issues?q=is%3Aissue+sort%3Aupdated-desc+label%3Atopic%2Fslashing) + * We are recreating the scenarios that lead to slashing, as they are not readily seen in mono-client testnets. + * Context: slashing is the negative economic consequence of penalising a miner that has breached protocol by deducting FIL and/or removing their power from the network. +* **windowed PoSt/sector proving faults:** [_(view test scenarios)_](https://github.com/filecoin-project/oni/issues?q=is%3Aissue+sort%3Aupdated-desc+label%3Atopic%2Fsector-proving) + * We are recreating the proving fault scenarios and triggering them in an accelerated fashion (by modifying the system configuration), so that we're able to verify that the sector state transitions properly through the different milestones (temporary faults, termination, etc.), and under chain fork conditions. + * Context: every 24 hours there are 36 windows where miners need to submit their proofs of sector liveness, correctness, and validity. Failure to do so will mark a sector as faulted, and will eventually terminate the sector, triggering slashing consequences for the miner. 
+* **syncing/fork selection:** [_(view test scenarios)_](https://github.com/filecoin-project/oni/issues?q=is%3Aissue+sort%3Aupdated-desc+label%3Atopic%2Fsync-forks) + * Newly bootstrapped clients, and paused-then-resumed clients, are able to latch on to the correct chain even in the presence of a large number of forks in the network, either in the present, or throughout history. +* **present-time mining/tipset assembly:** [_(view test scenarios)_](https://github.com/filecoin-project/oni/issues?q=is%3Aissue+sort%3Aupdated-desc+label%3Atopic%2Fmining-present) + * Induce forks in the network, create network partitions, simulate chain halts, long-range forks, etc. Stage many kinds of convoluted chain shapes, and network partitions, and ensure that miners are always able to arrive to consensus when disruptions subside. +* **catch-up/rush mining:** [_(view test scenarios)_](https://github.com/filecoin-project/oni/issues?q=is%3Aissue+sort%3Aupdated-desc+label%3Atopic%2Fmining-rush) + * Induce network-wide, or partition-wide arrests, and investigate what the resulting chain is after the system is allowed to recover. + * Context: catch-up/rush mining is a dedicated pathway in the mining logic that brings the chain up to speed with present time, in order to recover from network halts. Basically it entails producing backdated blocks in a hot loop. Imagine all miners recover in unison from a network-wide disruption; miners will produce blocks for their winning rounds, and will label losing rounds as _null rounds_. In the current implementation, there is no time for block propagation, so miners will produce solo-chains, and the assumption is that when all these chains hit the network, the _fork choice rule_ will pick the heaviest one. Unfortunately this process is brittle and unbalanced, as it favours the miner that held the highest power before the disruption commenced. 
+* **storage and retrieval deals:** [_(view test scenarios)_](https://github.com/filecoin-project/oni/issues?q=is%3Aissue+sort%3Aupdated-desc+label%3Atopic%2Fdeals) + * end-to-end flows where clients store and retrieve pieces from miners, including stress testing the system. +* **payment channels:** [_(view test scenarios)_](https://github.com/filecoin-project/oni/issues?q=is%3Aissue+sort%3Aupdated-desc+label%3Atopic%2Fpaych) + * stress testing payment channels via excessive lane creation, excessive payment voucher atomisation, and redemption. +* **drand incidents and impact on the filecoin network/protocol/chain:** [_(view test scenarios)_](https://github.com/filecoin-project/oni/issues?q=is%3Aissue+sort%3Aupdated-desc+label%3Atopic%2Fdrand) + * drand total unavailabilities, drand catch-ups, drand slowness, etc. +* **mempool message selection:** [_(view test scenarios)_](https://github.com/filecoin-project/oni/issues?q=is%3Aissue+sort%3Aupdated-desc+label%3Atopic%2Fmempool) + * soundness of message selection logic; potentially targeted attacks against miners by flooding their message pools with different kinds of messages. +* **presealing:** [_(view test scenarios)_](https://github.com/filecoin-project/oni/issues?q=is%3Aissue+sort%3Aupdated-desc+label%3Atopic%2Fpresealing) + * TBD, anything related to this worth testing? + +## Repository contents + +This repository consists of [test plans](https://docs.testground.ai/concepts-and-architecture/test-structure) built to be run on [Testground](https://github.com/testground/testground). + +The source code for the various test cases can be found in the [`lotus-soup` directory](https://github.com/filecoin-project/oni/tree/master/lotus-soup). + +## Running the test cases + +If you are unfamiliar with Testground, we strongly suggest you read the Testground [Getting Started guide](https://docs.testground.ai/getting-started) in order to learn how to install Testground and how to use it. 
+ +You can find various [composition files](https://docs.testground.ai/running-test-plans#composition-runs) describing various test scenarios built as part of Project Oni at [`lotus-soup/_compositions` directory](https://github.com/filecoin-project/oni/tree/master/lotus-soup/_compositions). + +We've designed the test cases so that you can run them via the `local:exec`, `local:docker` and the `cluster:k8s` runners. Note that Lotus miners are quite resource intensive, requiring gigabytes of memory. Hence you would have to run these test cases on a beefy machine (when using `local:docker` and `local:exec`), or on a Kubernetes cluster (when using `cluster:k8s`). + +Here are the basics of how to run the baseline deals end-to-end test case: + +### Running the baseline deals end-to-end test case + +1. Compile and Install Testground from source code. + * See the [Getting Started](https://github.com/testground/testground#getting-started) section of the README for instructions. + +2. Run a Testground daemon + +``` +testground daemon +``` + +3. Download required Docker images for the `lotus-soup` test plan + +``` +make pull-images +``` + +Alternatively you can build them locally with + +``` +make build-images +``` + +4. Import the `lotus-soup` test plan into your Testground home directory + +``` +testground plan import --from ./lotus-soup +``` + +5. Init the `filecoin-ffi` Git submodule in the `extra` folder. + +``` +git submodule update --init --recursive +``` + +6. Compile the `filecoin-ffi` version locally (necessary if you use `local:exec`) + +``` +cd extra/filecoin-ffi +make +``` + +7. 
Run a composition for the baseline deals end-to-end test case + +``` +testground run composition -f ./lotus-soup/_compositions/baseline-docker-5-1.toml +``` + +## Batch-running randomised test cases + +The Oni testkit supports [range parameters](https://github.com/filecoin-project/oni/blob/master/lotus-soup/testkit/testenv_ranges.go), +which test cases can use to generate random values, either at the instance level +(each instance computes a random value within range), or at the run level (one +instance computes the values, and propagates them to all other instances via the +sync service). + +For example: + +```toml +latency_range = '["20ms", "500ms"]' +loss_range = '[0, 0.2]' +``` + +Could pick a random latency between 20ms and 500ms, and a packet loss +probability between 0 and 0.2. We could apply those values through the +`netclient.ConfigureNetwork` Testground SDK API. + +Randomized range-based parameters are specially interesting when combined with +batch runs, as it enables Monte Carlo approaches to testing. + +The Oni codebase includes a batch test run driver in package `lotus-soup/runner`. +You can point it at a composition file that uses range parameters and tell it to +run N iterations of the test: + +```shell script +$ go run ./runner -runs 5 _compositions/net-chaos/latency.toml +``` + +This will run the test as many times as instructed, and will place all outputs +in a temporary directory. You can pass a concrete output directory with +the `-output` flag. + +## Catalog + +### Test cases part of `lotus-soup` + +* `deals-e2e` - Deals end-to-end test case. Clients pick a miner at random, start a deal, wait for it to be sealed, and try to retrieve from another random miner who offers back the data. +* `drand-halting` - Test case that instructs Drand with a sequence of halt/resume/wait events, while running deals between clients and miners at the same time. +* `deals-stress` - Deals stress test case. 
Clients pick a miner and send multiple deals (concurrently or serially) in order to test how many deals miners can handle. +* `paych-stress` - A test case exercising various payment channel stress tests. + +### Compositions part of `lotus-soup` + +* `baseline-docker-5-1.toml` - Runs a `baseline` test (deals e2e test) with a network of 5 clients and 1 miner targeting `local:docker` +* `baseline-k8s-10-3.toml` - Runs a `baseline` test (deals e2e test) with a network of 10 clients and 3 miners targeting `cluster:k8s` +* `baseline-k8s-3-1.toml` - Runs a `baseline` test (deals e2e test) with a network of 3 clients and 1 miner targeting `cluster:k8s` +* `baseline-k8s-3-2.toml` - Runs a `baseline` test (deals e2e test) with a network of 3 clients and 2 miners targeting `cluster:k8s` +* `baseline.toml` - Runs a `baseline` test (deals e2e test) with a network of 3 clients and 2 miners targeting `local:exec`. You have to manually download the proof parameters and place them in `/var/tmp`. +* `deals-stress-concurrent-natural-k8s.toml` +* `deals-stress-concurrent-natural.toml` +* `deals-stress-concurrent.toml` +* `deals-stress-serial-natural.toml` +* `deals-stress-serial.toml` +* `drand-halt.toml` +* `local-drand.toml` +* `natural.toml` +* `paych-stress.toml` +* `pubsub-tracer.toml` + + +## Debugging + +Find commands and how-to guides on debugging test plans at [DELVING.md](https://github.com/filecoin-project/oni/blob/master/DELVING.md) + +1. Querying the Lotus RPC API + +2. Useful commands / checks + +* Making sure miners are on the same chain + +* Checking deals + +* Sector queries + +* Sector sealing errors + +## Dependencies + +Our current test plan `lotus-soup` is programmatically building the Lotus filecoin implementation and therefore requires all its dependencies. The build process is slightly more complicated than a normal Go project, because we are binding a bit of Rust code. 
Lotus codebase is in Go, however its `proofs` and `crypto` libraries are in Rust (BLS signatures, SNARK verification, etc.). + +Depending on the runner you want to use to run the test plan, these dependencies are included in the build process in a different way, which you should be aware of should you require to use the test plan with a newer version of Lotus: + +### Filecoin FFI libraries + +* `local:docker` + +The Rust libraries are included in the Filecoin FFI Git submodule, which is part of the `iptestground/oni-buildbase` image. If the FFI changes on Lotus, we have to rebuild this image with the `make build-images` command, where X is the next version (see [Docker images changelog](#docker-images-changelog) +below). + +* `local:exec` + +The Rust libraries are included via the `extra` directory. Make sure that the test plan reference to Lotus in `go.mod` and the `extra` directory are pointing to the same commit of the FFI git submodule. You also need to compile the `extra/filecoin-ffi` libraries with `make`. + +* `cluster:k8s` + +The same process as for `local:docker`, however you need to make sure that the respective `iptestground/oni-buildbase` image is available as a public Docker image, so that the Kubernetes cluster can download it. + +### proof parameters + +Additional to the Filecoin FFI Git submodules, we are also bundling `proof parameters` in the `iptestground/oni-runtime` image. If these change, you will need to rebuild that image with `make build-images` command, where X is the next version. + +## Docker images changelog + +### oni-buildbase + +* `v1` => initial image locking in Filecoin FFI commit ca281af0b6c00314382a75ae869e5cb22c83655b. +* `v2` => no changes; released only for aligning both images to aesthetically please @nonsense :D +* `v3` => locking in Filecoin FFI commit 5342c7c97d1a1df4650629d14f2823d52889edd9. +* `v4` => locking in Filecoin FFI commit 6a143e06f923f3a4f544c7a652e8b4df420a3d28. 
+* `v5` => locking in Filecoin FFI commit cddc56607e1d851ea6d09d49404bd7db70cb3c2e. +* `v6` => locking in Filecoin FFI commit 40569104603407c999d6c9e4c3f1228cbd4d0e5c. +* `v7` => add Filecoin-BLST repo to buildbase. +* `v8` => locking in Filecoin FFI commit f640612a1a1f7a2d. +* `v9` => locking in Filecoin FFI commit 57e38efe4943f09d3127dcf6f0edd614e6acf68e and Filecoin-BLST commit 8609119cf4595d1741139c24378fcd8bc4f1c475. + + +### oni-runtime + +* `v1` => initial image with 2048 parameters. +* `v2` => adds auxiliary tools: `net-tools netcat traceroute iputils-ping wget vim curl telnet iproute2 dnsutils`. +* `v3` => bump proof parameters from v27 to v28 + +### oni-runtime-debug + +* `v1` => initial image +* `v2` => locking in Lotus commit e21ea53 +* `v3` => locking in Lotus commit d557c40 +* `v4` => bump proof parameters from v27 to v28 +* `v5` => locking in Lotus commit 1a170e18a + + +## Team + +* [@raulk](https://github.com/raulk) (Captain + TL) +* [@nonsense](https://github.com/nonsense) (Testground TG + engineer) +* [@yusefnapora](https://github.com/yusefnapora) (engineer and technical writer) +* [@vyzo](https://github.com/vyzo) (engineer) +* [@schomatis](https://github.com/schomatis) (advisor) +* [@willscott](https://github.com/willscott) (engineer) +* [@alanshaw](https://github.com/alanshaw) (engineer) + diff --git a/testplans/README.md b/testplans/README.md new file mode 100644 index 00000000000..bab10e690a1 --- /dev/null +++ b/testplans/README.md @@ -0,0 +1,60 @@ +# Testground testplans for Lotus + +This directory consists of [testplans](https://docs.testground.ai/concepts-and-architecture/test-structure) built to be run on [Testground](https://github.com/testground/testground) that exercise Lotus on [TaaS](https://ci.testground.ipfs.team). 
+ +## Table of Contents + +- [Testing topics](#testing-topics) +- [Running the test cases](#running-the-test-cases) + +## Testing topics + +* **storage and retrieval deals:** + * end-to-end flows where clients store and retrieve pieces from miners, including stress testing the system. +* **payment channels:** + * stress testing payment channels via excessive lane creation, excessive payment voucher atomisation, and redemption. + +## Running the test cases + +If you are unfamiliar with Testground, we strongly suggest you read the Testground [Getting Started guide](https://docs.testground.ai/getting-started) in order to learn how to install Testground and how to use it. + +You can find various [composition files](https://docs.testground.ai/running-test-plans#composition-runs) describing various test scenarios built as part of Project Oni at [`lotus-soup/_compositions` directory](https://github.com/filecoin-project/oni/tree/master/lotus-soup/_compositions). + +We've designed the test cases so that you can run them via the `local:exec`, `local:docker` and the `cluster:k8s` runners. Note that Lotus miners are quite resource intensive, requiring gigabytes of memory. Hence you would have to run these test cases on a beefy machine (when using `local:docker` and `local:exec`), or on a Kubernetes cluster (when using `cluster:k8s`). + +Here are the basics of how to run the baseline deals end-to-end test case: + +### Running the baseline deals end-to-end test case + +1. Compile and Install Testground from source code. + * See the [Getting Started](https://github.com/testground/testground#getting-started) section of the README for instructions. + +2. Run a Testground daemon + +``` +testground daemon +``` + +3. Download required Docker images for the `lotus-soup` test plan + +``` +make pull-images +``` + +Alternatively you can build them locally with + +``` +make build-images +``` + +4. 
Import the `lotus-soup` test plan into your Testground home directory + +``` +testground plan import --from ./lotus-soup +``` + +5. Run a composition for the baseline deals end-to-end test case + +``` +testground run composition -f ./lotus-soup/_compositions/baseline-docker-5-1.toml +``` diff --git a/testplans/composer/Dockerfile b/testplans/composer/Dockerfile new file mode 100644 index 00000000000..6142650bed9 --- /dev/null +++ b/testplans/composer/Dockerfile @@ -0,0 +1,29 @@ +FROM golang:1.15-buster as tg-build + +ARG TESTGROUND_REF="oni" +WORKDIR /usr/src +RUN git clone https://github.com/testground/testground.git +RUN cd testground && git checkout $TESTGROUND_REF && go build . + +FROM python:3.8-buster + +WORKDIR /usr/src/app + +COPY --from=tg-build /usr/src/testground/testground /usr/bin/testground + +RUN mkdir /composer && chmod 777 /composer +RUN mkdir /testground && chmod 777 /testground + +ENV HOME /composer +ENV TESTGROUND_HOME /testground +ENV LISTEN_PORT 5006 +ENV TESTGROUND_DAEMON_HOST host.docker.internal + +VOLUME /testground/plans + + +COPY requirements.txt ./ +RUN pip install -r requirements.txt +COPY . . + +CMD panel serve --address 0.0.0.0 --port $LISTEN_PORT composer.ipynb diff --git a/testplans/composer/Makefile b/testplans/composer/Makefile new file mode 100644 index 00000000000..60f022110c0 --- /dev/null +++ b/testplans/composer/Makefile @@ -0,0 +1,4 @@ +all: docker + +docker: + docker build -t "iptestground/composer:latest" . diff --git a/testplans/composer/README.md b/testplans/composer/README.md new file mode 100644 index 00000000000..82cd130cb06 --- /dev/null +++ b/testplans/composer/README.md @@ -0,0 +1,63 @@ +# Testground Composer + +This is a work-in-progress UI for configuring and running testground compositions. + +The app code lives in [./app](./app), and there's a thin Jupyter notebook shell in [composer.ipynb](./composer.ipynb). + +## Running + +You can either run the app in docker, or in a local python virtualenv. 
Docker is recommended unless you're hacking +on the code for Composer itself. + +### Running with docker + +Run the `./composer.sh` script to build a container with the latest source and run it. The first build +will take a little while since it needs to build testground and fetch a bunch of python dependencies. + +You can skip the build if you set `SKIP_BUILD=true` when running `composer.sh`, and you can rebuild +manually with `make docker`. + +The contents of `$TESTGROUND_HOME/plans` will be sync'd to a temporary directory and read-only mounted +into the container. + +After building and starting the container, the script will open a browser to the composer UI. + +You should be able to load an existing composition or create a new one from one of the plans in +`$TESTGROUND_HOME/plans`. + +Right now docker only supports the standalone webapp UI; to run the UI in a Jupyter notebook, see below. + +### Running with local python + +To run without docker, make a python3 virtual environment somewhere and activate it: + +```shell +# make a virtualenv called "venv" in the current directory +python3 -m venv ./venv + +# activate (bash/zsh): +source ./venv/bin/activate + +# activate (fish): +source ./venv/bin/activate.fish +``` + +Then install the python dependencies: + +```shell +pip install -r requirements.txt +``` + +And start the UI: + +```shell +panel serve composer.ipynb +``` + +That will start the standalone webapp UI. If you want a Jupyter notebook instead, run: + +``` +jupyter notebook +``` + +and open `composer.ipynb` in the Jupyter file picker. 
\ No newline at end of file diff --git a/testplans/composer/app/app.py b/testplans/composer/app/app.py new file mode 100644 index 00000000000..c8d4aa3c1fb --- /dev/null +++ b/testplans/composer/app/app.py @@ -0,0 +1,94 @@ +import param +import panel as pn +import toml +from .util import get_plans, get_manifest +from .composition import Composition +from .runner import TestRunner + +STAGE_WELCOME = 'Welcome' +STAGE_CONFIG_COMPOSITION = 'Configure' +STAGE_RUN_TEST = 'Run' + + +class Welcome(param.Parameterized): + composition = param.Parameter() + composition_picker = pn.widgets.FileInput(accept='.toml') + plan_picker = param.Selector() + ready = param.Boolean() + + def __init__(self, **params): + super().__init__(**params) + self.composition_picker.param.watch(self._composition_updated, 'value') + self.param.watch(self._plan_selected, 'plan_picker') + self.param['plan_picker'].objects = ['Select a Plan'] + get_plans() + + def panel(self): + tabs = pn.Tabs( + ('New Compostion', self.param['plan_picker']), + ('Existing Composition', self.composition_picker), + ) + + return pn.Column( + "Either choose an existing composition or select a plan to create a new composition:", + tabs, + ) + + def _composition_updated(self, *args): + print('composition updated') + content = self.composition_picker.value.decode('utf8') + comp_toml = toml.loads(content) + manifest = get_manifest(comp_toml['global']['plan']) + self.composition = Composition.from_dict(comp_toml, manifest=manifest) + print('existing composition: {}'.format(self.composition)) + self.ready = True + + def _plan_selected(self, evt): + if evt.new == 'Select a Plan': + return + print('plan selected: {}'.format(evt.new)) + manifest = get_manifest(evt.new) + self.composition = Composition(manifest=manifest, add_default_group=True) + print('new composition: ', self.composition) + self.ready = True + + +class ConfigureComposition(param.Parameterized): + composition = param.Parameter() + + @param.depends('composition') + 
def panel(self): + if self.composition is None: + return pn.Pane("no composition :(") + print('composition: ', self.composition) + return self.composition.panel() + + +class WorkflowPipeline(object): + def __init__(self): + stages = [ + (STAGE_WELCOME, Welcome(), dict(ready_parameter='ready')), + (STAGE_CONFIG_COMPOSITION, ConfigureComposition()), + (STAGE_RUN_TEST, TestRunner()), + ] + + self.pipeline = pn.pipeline.Pipeline(debug=True, stages=stages) + + def panel(self): + return pn.Column( + pn.Row( + self.pipeline.title, + self.pipeline.network, + self.pipeline.prev_button, + self.pipeline.next_button, + ), + self.pipeline.stage, + sizing_mode='stretch_width', + ) + + +class App(object): + def __init__(self): + self.workflow = WorkflowPipeline() + + def ui(self): + return self.workflow.panel().servable("Testground Composer") diff --git a/testplans/composer/app/composition.py b/testplans/composer/app/composition.py new file mode 100644 index 00000000000..f12034f8c68 --- /dev/null +++ b/testplans/composer/app/composition.py @@ -0,0 +1,328 @@ +import param +import panel as pn +import toml +from .util import get_manifest, print_err + + +def value_dict(parameterized, renames=None, stringify=False): + d = dict() + if renames is None: + renames = dict() + for name, p in parameterized.param.objects().items(): + if name == 'name': + continue + if name in renames: + name = renames[name] + val = p.__get__(parameterized, type(p)) + if isinstance(val, param.Parameterized): + try: + val = val.to_dict() + except: + val = value_dict(val, renames=renames) + if stringify: + val = str(val) + d[name] = val + return d + + +def make_group_params_class(testcase): + """Returns a subclass of param.Parameterized whose params are defined by the + 'params' dict inside of the given testcase dict""" + tc_params = dict() + for name, p in testcase.get('params', {}).items(): + tc_params[name] = make_param(p) + + name = 'Test Params for testcase {}'.format(testcase.get('name', '')) + cls = 
param.parameterized_class(name, tc_params, GroupParamsBase) + return cls + + +def make_param(pdef): + """ + :param pdef: a parameter definition dict from a testground plan manifest + :return: a param.Parameter that has the type, bounds, default value, etc from the definition + """ + typ = pdef['type'].lower() + if typ == 'int': + return num_param(pdef, cls=param.Integer) + elif typ == 'float': + return num_param(pdef) + elif typ.startswith('bool'): + return bool_param(pdef) + else: + return str_param(pdef) + + +def num_param(pdef, cls=param.Number): + lo = pdef.get('min', None) + hi = pdef.get('max', None) + bounds = (lo, hi) + if lo == hi and lo is not None: + bounds = None + + default_val = pdef.get('default', None) + if default_val is not None: + if cls == param.Integer: + default_val = int(default_val) + else: + default_val = float(default_val) + return cls(default=default_val, bounds=bounds, doc=pdef.get('desc', '')) + + +def bool_param(pdef): + default_val = str(pdef.get('default', 'false')).lower() == 'true' + return param.Boolean( + doc=pdef.get('desc', ''), + default=default_val + ) + + +def str_param(pdef): + return param.String( + default=pdef.get('default', ''), + doc=pdef.get('desc', ''), + ) + + +class Base(param.Parameterized): + @classmethod + def from_dict(cls, d): + return cls(**d) + + def to_dict(self): + return value_dict(self) + + +class GroupParamsBase(Base): + def to_dict(self): + return value_dict(self, stringify=True) + + +class Metadata(Base): + composition_name = param.String() + author = param.String() + + @classmethod + def from_dict(cls, d): + d['composition_name'] = d.get('name', '') + del d['name'] + return Metadata(**d) + + def to_dict(self): + return value_dict(self, {'composition_name': 'name'}) + + +class Global(Base): + plan = param.String() + case = param.Selector() + builder = param.String() + runner = param.String() + + # TODO: link to instance counts in groups + total_instances = param.Integer() + # TODO: add ui widget for 
key/value maps instead of using Dict param type + build_config = param.Dict(default={}, allow_None=True) + run_config = param.Dict(default={}, allow_None=True) + + def set_manifest(self, manifest): + if manifest is None: + return + print('manifest:', manifest) + self.plan = manifest['name'] + cases = [tc['name'] for tc in manifest['testcases']] + self.param['case'].objects = cases + print('global config updated manifest. cases:', self.param['case'].objects) + if len(cases) != 0: + self.case = cases[0] + + if 'defaults' in manifest: + print('manifest defaults', manifest['defaults']) + if self.builder == '': + self.builder = manifest['defaults'].get('builder', '') + if self.runner == '': + self.runner = manifest['defaults'].get('runner', '') + + +class Resources(Base): + memory = param.String(allow_None=True) + cpu = param.String(allow_None=True) + + +class Instances(Base): + count = param.Integer(allow_None=True) + percentage = param.Number(allow_None=True) + + +class Dependency(Base): + module = param.String() + version = param.String() + + +class Build(Base): + selectors = param.List(class_=str, allow_None=True) + dependencies = param.List(allow_None=True) + + +class Run(Base): + artifact = param.String(allow_None=True) + test_params = param.Parameter(instantiate=True) + + def __init__(self, params_class=None, **params): + super().__init__(**params) + if params_class is not None: + self.test_params = params_class() + + @classmethod + def from_dict(cls, d, params_class=None): + return Run(artifact=d.get('artifact', None), params_class=params_class) + + def panel(self): + return pn.Column( + self.param['artifact'], + pn.Param(self.test_params) + ) + + +class Group(Base): + id = param.String() + instances = param.Parameter(Instances(), instantiate=True) + resources = param.Parameter(Resources(), allow_None=True, instantiate=True) + build = param.Parameter(Build(), instantiate=True) + run = param.Parameter(Run(), instantiate=True) + + def __init__(self, 
params_class=None, **params): + super().__init__(**params) + if params_class is not None: + self.run = Run(params_class=params_class) + self._set_name(self.id) + + @classmethod + def from_dict(cls, d, params_class=None): + return Group( + id=d['id'], + resources=Resources.from_dict(d.get('resources', {})), + instances=Instances.from_dict(d.get('instances', {})), + build=Build.from_dict(d.get('build', {})), + run=Run.from_dict(d.get('params', {}), params_class=params_class), + ) + + def panel(self): + print('rendering groups panel for ' + self.id) + return pn.Column( + "**Group: {}**".format(self.id), + self.param['id'], + self.instances, + self.resources, + self.build, + self.run.panel(), + ) + + +class Composition(param.Parameterized): + metadata = param.Parameter(Metadata(), instantiate=True) + global_config = param.Parameter(Global(), instantiate=True) + + groups = param.List(precedence=-1) + group_tabs = pn.Tabs() + groups_ui = None + + def __init__(self, manifest=None, add_default_group=False, **params): + super(Composition, self).__init__(**params) + self.manifest = manifest + self.testcase_param_classes = dict() + self._set_manifest(manifest) + if add_default_group: + self._add_group() + + @classmethod + def from_dict(cls, d, manifest=None): + if manifest is None: + try: + manifest = get_manifest(d['global']['plan']) + except FileNotFoundError: + print_err("Unable to find manifest for test plan {}. 
Please import into $TESTGROUND_HOME/plans and try again".format(d['global']['plan'])) + + c = Composition( + manifest=manifest, + metadata=Metadata.from_dict(d.get('metadata', {})), + global_config=Global.from_dict(d.get('global', {})), + ) + params_class = c._params_class_for_current_testcase() + c.groups = [Group.from_dict(g, params_class=params_class) for g in d.get('groups', [])] + + return c + + @classmethod + def from_toml_file(cls, filename, manifest=None): + with open(filename, 'rt') as f: + d = toml.load(f) + return cls.from_dict(d, manifest=manifest) + + @param.depends('groups', watch=True) + def panel(self): + add_group_button = pn.widgets.Button(name='Add Group') + add_group_button.on_click(self._add_group) + + self._refresh_tabs() + + if self.groups_ui is None: + self.groups_ui = pn.Column( + add_group_button, + self.group_tabs, + ) + + return pn.Row( + pn.Column(self.metadata, self.global_config), + self.groups_ui, + ) + + def _set_manifest(self, manifest): + if manifest is None: + return + + g = self.global_config + print('global conifg: ', g) + g.set_manifest(manifest) + for tc in manifest.get('testcases', []): + self.testcase_param_classes[tc['name']] = make_group_params_class(tc) + + def _params_class_for_current_testcase(self): + case = self.global_config.case + cls = self.testcase_param_classes.get(case, None) + if cls is None: + print_err("No testcase found in manifest named " + case) + return cls + + def _add_group(self, *args): + group_id = 'group-{}'.format(len(self.groups) + 1) + g = Group(id=group_id, params_class=self._params_class_for_current_testcase()) + g.param.watch(self._refresh_tabs, 'id') + groups = self.groups + groups.append(g) + self.groups = groups + self.group_tabs.active = len(groups)-1 + + @param.depends("global_config.case", watch=True) + def _test_case_changed(self): + print('test case changed', self.global_config.case) + cls = self._params_class_for_current_testcase() + for g in self.groups: + g.run.test_params = cls() + 
self._refresh_tabs() + + def _refresh_tabs(self, *args): + self.group_tabs[:] = [(g.id, g.panel()) for g in self.groups] + + def to_dict(self): + return { + 'metadata': value_dict(self.metadata, renames={'composition_name': 'name'}), + 'global': value_dict(self.global_config), + 'groups': [g.to_dict() for g in self.groups] + } + + def to_toml(self): + return toml.dumps(self.to_dict()) + + def write_to_file(self, filename): + with open(filename, 'wt') as f: + toml.dump(self.to_dict(), f) diff --git a/testplans/composer/app/runner.py b/testplans/composer/app/runner.py new file mode 100644 index 00000000000..6eb368795df --- /dev/null +++ b/testplans/composer/app/runner.py @@ -0,0 +1,111 @@ +import os +import panel as pn +import param +from panel.io.server import unlocked +from tornado.ioloop import IOLoop, PeriodicCallback +from tornado.process import Subprocess +from subprocess import STDOUT +from bokeh.models.widgets import Div +from ansi2html import Ansi2HTMLConverter + +from .composition import Composition + +TESTGROUND = 'testground' + + +class AnsiColorText(pn.widgets.Widget): + style = param.Dict(default=None, doc=""" + Dictionary of CSS property:value pairs to apply to this Div.""") + + value = param.Parameter(default=None) + + _format = '
{value}
' + + _rename = {'name': None, 'value': 'text'} + + # _target_transforms = {'value': 'target.text.split(": ")[0]+": "+value'} + # + # _source_transforms = {'value': 'value.split(": ")[1]'} + + _widget_type = Div + + _converter = Ansi2HTMLConverter(inline=True) + + def _process_param_change(self, msg): + msg = super(AnsiColorText, self)._process_property_change(msg) + if 'value' in msg: + text = str(msg.pop('value')) + text = self._converter.convert(text) + msg['text'] = text + return msg + + def scroll_down(self): + # TODO: figure out how to automatically scroll down as text is added + pass + + +class CommandRunner(param.Parameterized): + command_output = param.String() + + def __init__(self, **params): + super().__init__(**params) + self._output_lines = [] + self.proc = None + self._updater = PeriodicCallback(self._refresh_output, callback_time=1000) + + @pn.depends('command_output') + def panel(self): + return pn.Param(self.param, show_name=False, sizing_mode='stretch_width', widgets={ + 'command_output': dict( + type=AnsiColorText, + sizing_mode='stretch_width', + height=800) + }) + + def run(self, *cmd): + self.command_output = '' + self._output_lines = [] + self.proc = Subprocess(cmd, stdout=Subprocess.STREAM, stderr=STDOUT) + self._get_next_line() + self._updater.start() + + def _get_next_line(self): + if self.proc is None: + return + loop = IOLoop.current() + loop.add_future(self.proc.stdout.read_until(bytes('\n', encoding='utf8')), self._append_output) + + def _append_output(self, future): + self._output_lines.append(future.result().decode('utf8')) + self._get_next_line() + + def _refresh_output(self): + text = ''.join(self._output_lines) + if len(text) != len(self.command_output): + with unlocked(): + self.command_output = text + + +class TestRunner(param.Parameterized): + composition = param.ClassSelector(class_=Composition, precedence=-1) + testground_daemon_endpoint = param.String(default="{}:8042".format(os.environ.get('TESTGROUND_DAEMON_HOST', 
'localhost'))) + run_test = param.Action(lambda self: self.run()) + runner = CommandRunner() + + def __init__(self, **params): + super().__init__(**params) + + def run(self): + # TODO: temp file management - maybe we should mount a volume and save there? + filename = '/tmp/composition.toml' + self.composition.write_to_file(filename) + + self.runner.run(TESTGROUND, '--endpoint', self.testground_daemon_endpoint, 'run', 'composition', '-f', filename) + + def panel(self): + return pn.Column( + self.param['testground_daemon_endpoint'], + self.param['run_test'], + self.runner.panel(), + sizing_mode='stretch_width', + ) diff --git a/testplans/composer/app/util.py b/testplans/composer/app/util.py new file mode 100644 index 00000000000..5321a95e80a --- /dev/null +++ b/testplans/composer/app/util.py @@ -0,0 +1,26 @@ +import toml +import os +import sys + + +def parse_manifest(manifest_path): + with open(manifest_path, 'rt') as f: + return toml.load(f) + + +def tg_home(): + return os.environ.get('TESTGROUND_HOME', + os.path.join(os.environ['HOME'], 'testground')) + + +def get_plans(): + return list(os.listdir(os.path.join(tg_home(), 'plans'))) + + +def get_manifest(plan_name): + manifest_path = os.path.join(tg_home(), 'plans', plan_name, 'manifest.toml') + return parse_manifest(manifest_path) + + +def print_err(*args): + print(*args, file=sys.stderr) diff --git a/testplans/composer/chain-state.ipynb b/testplans/composer/chain-state.ipynb new file mode 100644 index 00000000000..bd833dd2105 --- /dev/null +++ b/testplans/composer/chain-state.ipynb @@ -0,0 +1,174 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "import hvplot.pandas\n", + "import panel as pn\n", + "\n", + "STATE_FILE = './chain-state.ndjson'\n", + "\n", + "MINER_STATE_COL_RENAMES = {\n", + " 'Info.MinerAddr': 'Miner',\n", + " 
'Info.MinerPower.MinerPower.RawBytePower': 'Info.MinerPowerRaw',\n", + " 'Info.MinerPower.MinerPower.QualityAdjPower': 'Info.MinerPowerQualityAdj',\n", + " 'Info.MinerPower.TotalPower.RawBytePower': 'Info.TotalPowerRaw',\n", + " 'Info.MinerPower.TotalPower.QualityAdjPower': 'Info.TotalPowerQualityAdj',\n", + "}\n", + "\n", + "MINER_NUMERIC_COLS = [\n", + " 'Info.MinerPowerRaw',\n", + " 'Info.MinerPowerQualityAdj',\n", + " 'Info.TotalPowerRaw',\n", + " 'Info.TotalPowerQualityAdj',\n", + " 'Info.Balance',\n", + " 'Info.CommittedBytes',\n", + " 'Info.ProvingBytes',\n", + " 'Info.FaultyBytes',\n", + " 'Info.FaultyPercentage',\n", + " 'Info.PreCommitDeposits',\n", + " 'Info.LockedFunds',\n", + " 'Info.AvailableFunds',\n", + " 'Info.WorkerBalance',\n", + " 'Info.MarketEscrow',\n", + " 'Info.MarketLocked',\n", + "]\n", + "\n", + "DERIVED_COLS = [\n", + " 'CommittedSectors',\n", + " 'ProvingSectors',\n", + "]\n", + "\n", + "ATTO_FIL_COLS = [\n", + " 'Info.Balance',\n", + " 'Info.PreCommitDeposits',\n", + " 'Info.LockedFunds',\n", + " 'Info.AvailableFunds',\n", + " 'Info.WorkerBalance',\n", + " 'Info.MarketEscrow',\n", + " 'Info.MarketLocked',\n", + "]\n", + "\n", + "def atto_to_fil(x):\n", + " return float(x) * pow(10, -18)\n", + "\n", + "def chain_state_to_pandas(statefile):\n", + " chain = None\n", + " \n", + " with open(statefile, 'rt') as f:\n", + " for line in f.readlines():\n", + " j = json.loads(line)\n", + " chain_height = j['Height']\n", + " \n", + " miners = j['MinerStates']\n", + " for m in miners.values():\n", + " df = pd.json_normalize(m)\n", + " df['Height'] = chain_height\n", + " df.rename(columns=MINER_STATE_COL_RENAMES, inplace=True)\n", + " if chain is None:\n", + " chain = df\n", + " else:\n", + " chain = chain.append(df, ignore_index=True)\n", + " chain.fillna(0, inplace=True)\n", + " chain.set_index('Height', inplace=True)\n", + " \n", + " for c in ATTO_FIL_COLS:\n", + " chain[c] = chain[c].apply(atto_to_fil)\n", + " \n", + " for c in 
MINER_NUMERIC_COLS:\n", + " chain[c] = chain[c].apply(pd.to_numeric)\n", + " \n", + " # the Sectors.* fields are lists of sector ids, but we want to plot counts, so\n", + " # we pull the length of each list into a new column\n", + " chain['CommittedSectors'] = chain['Sectors.Committed'].apply(lambda x: len(x))\n", + " chain['ProvingSectors'] = chain['Sectors.Proving'].apply(lambda x: len(x))\n", + " return chain\n", + " \n", + "cs = chain_state_to_pandas(STATE_FILE)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# choose which col to plot using a widget\n", + "\n", + "cols_to_plot = MINER_NUMERIC_COLS + DERIVED_COLS\n", + "\n", + "col_selector = pn.widgets.Select(name='Field', options=cols_to_plot)\n", + "cols = ['Miner'] + cols_to_plot\n", + "plot = cs[cols].hvplot(by='Miner', y=col_selector)\n", + "pn.Column(pn.WidgetBox(col_selector), plot)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "# plot all line charts in a vertical stack\n", + "\n", + "plots = []\n", + "for c in cols_to_plot:\n", + " title = c.split('.')[-1]\n", + " p = cs[['Miner', c]].hvplot(by='Miner', y=c, title=title)\n", + " plots.append(p)\n", + "pn.Column(*plots)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# miner power area chart\n", + "\n", + "mp = cs[['Miner', 'Info.MinerPowerRaw']].rename(columns={'Info.MinerPowerRaw': 'Power'})\n", + "mp = mp.pivot_table(values=['Power'], index=cs.index, columns='Miner', aggfunc='sum')\n", + "mp = mp.div(mp.sum(1), axis=0)\n", + "mp.columns = mp.columns.get_level_values(1)\n", + "mp.hvplot.area(title='Miner Power Distribution')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": 
"python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.2" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/testplans/composer/composer.ipynb b/testplans/composer/composer.ipynb new file mode 100644 index 00000000000..148d1e86105 --- /dev/null +++ b/testplans/composer/composer.ipynb @@ -0,0 +1,45 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "import param\n", + "import panel as pn\n", + "import app.app as app\n", + "import importlib\n", + "importlib.reload(app)\n", + "\n", + "pn.extension()\n", + "\n", + "a = app.App()\n", + "a.ui()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.2" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/testplans/composer/composer.sh b/testplans/composer/composer.sh new file mode 100755 index 00000000000..0d8bc8eb9a2 --- /dev/null +++ b/testplans/composer/composer.sh @@ -0,0 +1,134 @@ +#!/bin/bash + +# this script runs jupyter inside a docker container and copies +# plan manifests from the user's local filesystem into a temporary +# directory that's bind-mounted into the container. 
+ +set -o errexit +set -o pipefail + +set -e + +err_report() { + echo "Error on line $1" +} + +trap 'err_report $LINENO' ERR + + +image_name="iptestground/composer" +image_tag="latest" +image_full_name="$image_name:$image_tag" +tg_home=${TESTGROUND_HOME:-$HOME/testground} +container_plans_dir="/testground/plans" +jupyter_port=${JUPYTER_PORT:-8888} +panel_port=${PANEL_PORT:-5006} + +poll_interval=30 + +exists() { + command -v "$1" >/dev/null 2>&1 +} + +require_cmds() { + for cmd in $@; do + exists $cmd || { echo "This script requires the $cmd command. Please install it and try again." >&2; exit 1; } + done +} + +update_plans() { + local dest_dir=$1 + rsync -avzh --quiet --copy-links "${tg_home}/plans/" ${dest_dir} +} + +watch_plans() { + local plans_dest=$1 + while true; do + update_plans ${plans_dest} + sleep $poll_interval + done +} + +open_url() { + local url=$1 + if exists cmd.exe; then + cmd.exe /c start ${url} >/dev/null 2>&1 + elif exists xdg-open; then + xdg-open ${url} >/dev/null 2>&1 & + elif exists open; then + open ${url} + else + echo "unable to automatically open url. 
copy/paste this into a browser: $url" + fi +} + +# delete temp dir and stop docker container +cleanup () { + if [[ "$container_id" != "" ]]; then + docker stop ${container_id} >/dev/null + fi + + if [[ -d "$temp_plans_dir" ]]; then + rm -rf ${temp_plans_dir} + fi +} + +get_host_ip() { + # get interface of default route + local net_if=$(netstat -rn | awk '/^0.0.0.0/ {thif=substr($0,74,10); print thif;} /^default.*UG/ {thif=substr($0,65,10); print thif;}') + # use ifconfig to get addr of that interface + detected_host_ip=`ifconfig ${net_if} | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1'` + + if [ -z "$detected_host_ip" ] + then + detected_host_ip="host.docker.internal" + fi + + echo $detected_host_ip +} + +# run cleanup on exit +trap "{ cleanup; }" EXIT + +# make sure we have the commands we need +require_cmds jq docker rsync + +if [[ "$SKIP_BUILD" == "" ]]; then + echo "Building latest docker image. Set SKIP_BUILD env var to any value to bypass." 
+ require_cmds make + make docker +fi + +# make temp dir for manifests +temp_base="/tmp" +if [[ "$TEMP" != "" ]]; then + temp_base=$TEMP +fi + +temp_plans_dir="$(mktemp -d ${temp_base}/testground-composer-XXXX)" +echo "temp plans dir: $temp_plans_dir" + +# copy testplans from $TESTGROUND_HOME/plans to the temp dir +update_plans ${temp_plans_dir} + +# run the container in detached mode and grab the id +container_id=$(docker run -d \ + -e TESTGROUND_DAEMON_HOST=$(get_host_ip) \ + --user $(id -u):$(id -g) \ + -p ${panel_port}:5006 \ + -v ${temp_plans_dir}:${container_plans_dir}:ro \ + $image_full_name) + +echo "container $container_id started" +# print the log output +docker logs -f ${container_id} & + +# sleep for a couple seconds to let the server start up +sleep 2 + +# open a browser to the app url +panel_url="http://localhost:${panel_port}" +open_url $panel_url + +# poll & sync testplan changes every few seconds +watch_plans ${temp_plans_dir} diff --git a/testplans/composer/fixtures/all-both-k8s.toml b/testplans/composer/fixtures/all-both-k8s.toml new file mode 100644 index 00000000000..ab9e0864e22 --- /dev/null +++ b/testplans/composer/fixtures/all-both-k8s.toml @@ -0,0 +1,214 @@ +[metadata] + name = "all-both" + author = "adin" + +[global] + plan = "dht" + case = "all" + total_instances = 1000 + builder = "docker:go" + runner = "cluster:k8s" + [global.build_config] + push_registry = true + registry_type = "aws" + +[[groups]] + id = "balsam-undialable-provider" + [groups.instances] + count = 5 + percentage = 0.0 + [groups.build] + selectors = ["balsam"] + [groups.run] + artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92" + [groups.run.test_params] + bs_strategy = "7" + bucket_size = "10" + expect_dht = "false" + group_order = "4" + latency = "100" + record_count = "1" + timeout_secs = "600" + undialable = "true" + +[[groups]] + id = "balsam-undialable-searcher" + [groups.instances] + count = 5 + percentage = 0.0 + 
[groups.build] + selectors = ["balsam"] + [groups.run] + artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92" + [groups.run.test_params] + bs_strategy = "7" + bucket_size = "10" + expect_dht = "false" + group_order = "5" + latency = "100" + search_records = "true" + timeout_secs = "600" + undialable = "true" + +[[groups]] + id = "balsam-dialable-passive" + [groups.instances] + count = 780 + percentage = 0.0 + [groups.build] + selectors = ["balsam"] + [groups.run] + artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92" + [groups.run.test_params] + bs_strategy = "7" + bucket_size = "10" + expect_dht = "false" + group_order = "6" + latency = "100" + timeout_secs = "600" + undialable = "false" + +[[groups]] + id = "balsam-dialable-provider" + [groups.instances] + count = 5 + percentage = 0.0 + [groups.build] + selectors = ["balsam"] + [groups.run] + artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92" + [groups.run.test_params] + bs_strategy = "7" + bucket_size = "10" + expect_dht = "false" + group_order = "7" + latency = "100" + record_count = "1" + timeout_secs = "600" + undialable = "false" + +[[groups]] + id = "balsam-dialable-searcher" + [groups.instances] + count = 5 + percentage = 0.0 + [groups.build] + selectors = ["balsam"] + [groups.run] + artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92" + [groups.run.test_params] + bs_strategy = "7" + bucket_size = "10" + expect_dht = "false" + group_order = "8" + latency = "100" + search_records = "true" + timeout_secs = "600" + undialable = "false" + +[[groups]] + id = "cypress-passive" + [groups.instances] + count = 185 + percentage = 0.0 + [groups.build] + selectors = ["cypress"] + + [[groups.build.dependencies]] + module = "github.com/libp2p/go-libp2p-kad-dht" + version = "180be07b8303d536e39809bc39c58be5407fedd9" + + [[groups.build.dependencies]] 
+ module = "github.com/libp2p/go-libp2p-xor" + version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a" + [groups.run] + artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:ca78473d669d" + [groups.run.test_params] + alpha = "6" + beta = "3" + bs_strategy = "7" + bucket_size = "10" + group_order = "1" + latency = "100" + timeout_secs = "600" + +[[groups]] + id = "cypress-provider" + [groups.instances] + count = 5 + percentage = 0.0 + [groups.build] + selectors = ["cypress"] + + [[groups.build.dependencies]] + module = "github.com/libp2p/go-libp2p-kad-dht" + version = "180be07b8303d536e39809bc39c58be5407fedd9" + + [[groups.build.dependencies]] + module = "github.com/libp2p/go-libp2p-xor" + version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a" + [groups.run] + artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:ca78473d669d" + [groups.run.test_params] + alpha = "6" + beta = "3" + bs_strategy = "7" + bucket_size = "10" + group_order = "2" + latency = "100" + record_count = "1" + timeout_secs = "600" + +[[groups]] + id = "cypress-searcher" + [groups.instances] + count = 5 + percentage = 0.0 + [groups.build] + selectors = ["cypress"] + + [[groups.build.dependencies]] + module = "github.com/libp2p/go-libp2p-kad-dht" + version = "180be07b8303d536e39809bc39c58be5407fedd9" + + [[groups.build.dependencies]] + module = "github.com/libp2p/go-libp2p-xor" + version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a" + [groups.run] + artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:ca78473d669d" + [groups.run.test_params] + alpha = "6" + beta = "3" + bs_strategy = "7" + bucket_size = "10" + group_order = "3" + latency = "100" + search_records = "true" + timeout_secs = "600" + +[[groups]] + id = "cypress-bs" + [groups.instances] + count = 5 + percentage = 0.0 + [groups.build] + selectors = ["cypress"] + + [[groups.build.dependencies]] + module = "github.com/libp2p/go-libp2p-kad-dht" + version = 
"180be07b8303d536e39809bc39c58be5407fedd9" + + [[groups.build.dependencies]] + module = "github.com/libp2p/go-libp2p-xor" + version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a" + [groups.run] + artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:ca78473d669d" + [groups.run.test_params] + alpha = "6" + beta = "3" + bootstrapper = "true" + bs_strategy = "7" + bucket_size = "10" + group_order = "0" + latency = "100" + timeout_secs = "600" diff --git a/testplans/composer/fixtures/ping-pong-local.toml b/testplans/composer/fixtures/ping-pong-local.toml new file mode 100644 index 00000000000..d845daafd17 --- /dev/null +++ b/testplans/composer/fixtures/ping-pong-local.toml @@ -0,0 +1,14 @@ +[metadata] +name = "ping-pong-local" +author = "yusef" + +[global] +plan = "network" +case = "ping-pong" +total_instances = 2 +builder = "docker:go" +runner = "local:docker" + +[[groups]] +id = "nodes" +instances = { count = 2 } \ No newline at end of file diff --git a/testplans/composer/requirements.txt b/testplans/composer/requirements.txt new file mode 100644 index 00000000000..cfdfaa4634d --- /dev/null +++ b/testplans/composer/requirements.txt @@ -0,0 +1,8 @@ +param +toml +jupyter +panel +holoviews +ansi2html +matplotlib +hvplot diff --git a/testplans/dashboards/baseline.json b/testplans/dashboards/baseline.json new file mode 100644 index 00000000000..0678594ac04 --- /dev/null +++ b/testplans/dashboards/baseline.json @@ -0,0 +1,2106 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 15, + "iteration": 1595335476624, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + 
"overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 21, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "$tag_instance", + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "instance" + ], + "type": "tag" + }, + { + "params": [ + "previous" + ], + "type": "fill" + } + ], + "measurement": "message/received", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "count" + ], + "type": "field" + }, + { + "params": [], + "type": "last" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "message/received", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, 
+ "y": 0 + }, + "hiddenSeries": false, + "id": 22, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "$tag_instance", + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "instance" + ], + "type": "tag" + }, + { + "params": [ + "previous" + ], + "type": "fill" + } + ], + "measurement": "message/success", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "count" + ], + "type": "field" + }, + { + "params": [], + "type": "last" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "message/success", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "hiddenSeries": false, + "id": 18, + "legend": { + "alignAsTable": true, + "avg": false, 
+ "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "$tag_instance", + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "instance" + ], + "type": "tag" + }, + { + "params": [ + "none" + ], + "type": "fill" + } + ], + "measurement": "chain/node_height", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "last" + ], + "type": "field" + }, + { + "params": [], + "type": "last" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "chain/node_height", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "hiddenSeries": false, + "id": 19, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, 
+ "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "$tag_instance", + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "instance" + ], + "type": "tag" + }, + { + "params": [ + "none" + ], + "type": "fill" + } + ], + "measurement": "peer/count", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "last" + ], + "type": "field" + }, + { + "params": [], + "type": "last" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "peer/count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 10, + "panels": [], + "title": "Blocks", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 17 + }, + "hiddenSeries": false, + "id": 15, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + 
"min": false, + "rightSide": true, + "show": true, + "sort": "total", + "sortDesc": false, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "$tag_instance", + "groupBy": [ + { + "params": [ + "$myinterval" + ], + "type": "time" + }, + { + "params": [ + "instance" + ], + "type": "tag" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "block/received", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "count" + ], + "type": "field" + }, + { + "params": [], + "type": "last" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "block/received", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 17 + }, + "hiddenSeries": false, + "id": 16, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": 
"total", + "sortDesc": false, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "$tag_instance", + "groupBy": [ + { + "params": [ + "$myinterval" + ], + "type": "time" + }, + { + "params": [ + "instance" + ], + "type": "tag" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "block/success", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "count" + ], + "type": "field" + }, + { + "params": [], + "type": "last" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "block/success", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 25 + }, + "hiddenSeries": false, + "id": 13, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "total", + "sortDesc": false, + "total": true, + "values": true + 
}, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "$tag_miner ($tag_run)", + "groupBy": [ + { + "params": [ + "$myinterval" + ], + "type": "time" + }, + { + "params": [ + "run" + ], + "type": "tag" + }, + { + "params": [ + "miner" + ], + "type": "tag" + }, + { + "params": [ + "0" + ], + "type": "fill" + } + ], + "measurement": "diagnostics.block.mine.counter", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "count" + ], + "type": "field" + }, + { + "params": [], + "type": "sum" + } + ] + ], + "tags": [ + { + "key": "run", + "operator": "=~", + "value": "/^$runid$/" + } + ] + }, + { + "alias": "all ($tag_run)", + "groupBy": [ + { + "params": [ + "$myinterval" + ], + "type": "time" + }, + { + "params": [ + "run" + ], + "type": "tag" + }, + { + "params": [ + "0" + ], + "type": "fill" + } + ], + "measurement": "diagnostics.block.mine.counter", + "orderByTime": "ASC", + "policy": "default", + "refId": "B", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "count" + ], + "type": "field" + }, + { + "params": [], + "type": "sum" + } + ] + ], + "tags": [ + { + "key": "run", + "operator": "=~", + "value": "/^$runid$/" + } + ] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "mined blocks from testplan", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 33 + }, + "id": 8, + "panels": [], + "title": "Data", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 34 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "find-data - 95% max", + "groupBy": [ + { + "params": [ + "$myinterval" + ], + "type": "time" + }, + { + "params": [ + "run" + ], + "type": "tag" + }, + { + "params": [ + "0" + ], + "type": "fill" + } + ], + "measurement": "diagnostics.find-data.histogram", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "p95" + ], + "type": "field" + }, + { + "params": [], + "type": "max" + } + ] + ], + "tags": [ + { + "key": "run", + "operator": "=~", + "value": "/^$runid$/" + } + ] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "find data", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { 
+ "format": "ns", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 34 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "total", + "sortDesc": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "send-data-to - $tag_miner ($tag_run)", + "groupBy": [ + { + "params": [ + "$myinterval" + ], + "type": "time" + }, + { + "params": [ + "miner" + ], + "type": "tag" + }, + { + "params": [ + "run" + ], + "type": "tag" + }, + { + "params": [ + "0" + ], + "type": "fill" + } + ], + "measurement": "diagnostics.send-data-to.counter", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "count" + ], + "type": "field" + }, + { + "params": [], + "type": "sum" + } + ] + ], + "tags": [ + { + "key": "run", + "operator": "=~", + "value": "/^$runid$/" + } + ] + }, + { + "alias": "find-data.offer - $tag_miner ($tag_run)", + "groupBy": [ + { + "params": [ + "$myinterval" + ], + "type": "time" + }, + { + "params": [ + "miner" + ], + "type": "tag" + }, + { + "params": [ + "run" + ], + "type": "tag" + }, + { 
+ "params": [ + "0" + ], + "type": "fill" + } + ], + "measurement": "diagnostics.find-data.offer.counter", + "orderByTime": "ASC", + "policy": "default", + "refId": "D", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "count" + ], + "type": "field" + }, + { + "params": [], + "type": "sum" + } + ] + ], + "tags": [ + { + "key": "run", + "operator": "=~", + "value": "/^$runid$/" + } + ] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "send data to ; got retrieve offers from", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 43 + }, + "id": 6, + "panels": [], + "title": "Deals", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 44 + }, + "hiddenSeries": false, + "id": 3, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, 
+ "targets": [ + { + "alias": "deal.sealed - 95% max", + "groupBy": [ + { + "params": [ + "$myinterval" + ], + "type": "time" + }, + { + "params": [ + "run" + ], + "type": "tag" + }, + { + "params": [ + "0" + ], + "type": "fill" + } + ], + "measurement": "diagnostics.deal.sealed.histogram", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "p95" + ], + "type": "field" + }, + { + "params": [], + "type": "max" + } + ] + ], + "tags": [ + { + "key": "run", + "operator": "=~", + "value": "/^$runid$/" + } + ] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "started -> sealed", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ns", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 44 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { 
+ "alias": "deal.retrieved - 95% max ($tag_run)", + "groupBy": [ + { + "params": [ + "$myinterval" + ], + "type": "time" + }, + { + "params": [ + "run" + ], + "type": "tag" + }, + { + "params": [ + "run" + ], + "type": "tag" + }, + { + "params": [ + "0" + ], + "type": "fill" + } + ], + "measurement": "diagnostics.deal.retrieved.histogram", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "p95" + ], + "type": "field" + }, + { + "params": [], + "type": "max" + } + ] + ], + "tags": [ + { + "key": "run", + "operator": "=~", + "value": "/^$runid$/" + } + ] + }, + { + "alias": "deal.retrieved - min ($tag_run)", + "groupBy": [ + { + "params": [ + "$myinterval" + ], + "type": "time" + }, + { + "params": [ + "run" + ], + "type": "tag" + }, + { + "params": [ + "run" + ], + "type": "tag" + }, + { + "params": [ + "0" + ], + "type": "fill" + } + ], + "measurement": "diagnostics.deal.retrieved.histogram", + "orderByTime": "ASC", + "policy": "default", + "refId": "B", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "min" + ], + "type": "field" + }, + { + "params": [], + "type": "min" + } + ] + ], + "tags": [ + { + "key": "run", + "operator": "=~", + "value": "/^$runid$/" + } + ] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "started -> retrieved", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ns", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 53 + }, + "hiddenSeries": false, + "id": 23, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "deal.sealed - count", + "groupBy": [ + { + "params": [ + "$myinterval" + ], + "type": "time" + }, + { + "params": [ + "run" + ], + "type": "tag" + }, + { + "params": [ + "0" + ], + "type": "fill" + } + ], + "measurement": "diagnostics.deal.sealed.histogram", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "count" + ], + "type": "field" + }, + { + "params": [], + "type": "sum" + } + ] + ], + "tags": [ + { + "key": "run", + "operator": "=~", + "value": "/^$runid$/" + } + ] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "started -> sealed", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": 
"influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 53 + }, + "hiddenSeries": false, + "id": 24, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "deal.retrieved - count", + "groupBy": [ + { + "params": [ + "$myinterval" + ], + "type": "time" + }, + { + "params": [ + "run" + ], + "type": "tag" + }, + { + "params": [ + "0" + ], + "type": "fill" + } + ], + "measurement": "diagnostics.deal.retrieved.histogram", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "count" + ], + "type": "field" + }, + { + "params": [], + "type": "sum" + } + ] + ], + "tags": [ + { + "key": "run", + "operator": "=~", + "value": "/^$runid$/" + } + ] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "started -> retrieved", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 25, + "style": "dark", + "tags": [], + "templating": { + 
"list": [ + { + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "10s", + "value": "10s" + }, + "hide": 0, + "label": null, + "name": "myinterval", + "options": [ + { + "selected": true, + "text": "10s", + "value": "10s" + }, + { + "selected": false, + "text": "100s", + "value": "100s" + } + ], + "query": "10s,100s", + "queryValue": "", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": "influxdb", + "definition": "SHOW TAG VALUES WITH KEY = run", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "runid", + "options": [], + "query": "SHOW TAG VALUES WITH KEY = run", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Project Oni - Baseline test", + "uid": "8em8RXWMz", + "version": 1 +} diff --git a/testplans/dashboards/chain.json b/testplans/dashboards/chain.json new file mode 100644 index 00000000000..c708c61cc28 --- /dev/null +++ b/testplans/dashboards/chain.json @@ -0,0 +1,2748 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 15, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "decimals": 2, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 3, + 
"fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "hideTimeOverride": false, + "id": 38, + "interval": "", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "alias": "$tag_miner", + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "miner" + ], + "type": "tag" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.election", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT count(\"value\") FROM \"chain.election\" WHERE $timeFilter -10m GROUP BY time($__interval), \"miner\" fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "count" + }, + { + "params": [ + "20" + ], + "type": "moving_average" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Blocks Won", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 2, + "format": "none", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + 
"alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 4, + "w": 8, + "x": 0, + "y": 9 + }, + "hiddenSeries": false, + "id": 22, + "interval": "", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/.*/", + "color": "rgb(31, 120, 193)" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "groupBy": [], + "measurement": "chain.power", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": "4h", + "timeRegions": [], + "timeShift": null, + "title": "Total Power", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] 
+ }, + "format": "s", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 8, + "y": 9 + }, + "id": 12, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": 0 + }, + "tableColumn": "", + "targets": [ + { + "groupBy": [], + "measurement": "chain.blocktime", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT difference(mean(\"value\")) FROM \"chain.blocktime\" WHERE $timeFilter GROUP BY time($__interval) fill(null)", + "rawQuery": false, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "difference" + } + ] + ], + "tags": [] + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Avg Blocktime", + "type": "singlestat", + "valueFontSize": "100%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 12, + "y": 9 
+ }, + "id": 42, + "interval": "", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": 0 + }, + "tableColumn": "", + "targets": [ + { + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT sum(\"value\") FROM \"chain.miner_power\" WHERE $timeFilter GROUP BY time(2s)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Network Storage", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 8, + "x": 16, + "y": 9 + }, + "id": 6, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": 
"range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": 0 + }, + "tableColumn": "", + "targets": [ + { + "groupBy": [ + { + "params": [ + "$interval" + ], + "type": "time" + } + ], + "measurement": "chain.election", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "sum" + } + ] + ], + "tags": [] + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Blocks In Tipset", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorPostfix": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 0, + "y": 13 + }, + "id": 4, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + 
} + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "groupBy": [], + "measurement": "chain.height", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + } + ] + ], + "tags": [] + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Block Height", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "s", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 4, + "y": 13 + }, + "id": 14, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": 0 + }, + "tableColumn": "", + "targets": [ + { + "groupBy": [], + "measurement": "chain.blocktime", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" 
+ ], + "type": "field" + }, + { + "params": [], + "type": "difference" + } + ] + ], + "tags": [] + } + ], + "thresholds": "30,90", + "timeFrom": null, + "timeShift": null, + "title": "Last Blocktime", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 8, + "y": 13 + }, + "id": 32, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.message_gasprice", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Avg Gas Price", + "type": "singlestat", + 
"valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "decbytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 12, + "y": 13 + }, + "id": 20, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.message_size", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Avg Message Size", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + 
"#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 16, + "y": 13 + }, + "id": 8, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": 0 + }, + "tableColumn": "", + "targets": [ + { + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.blockheader_size", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + } + ], + "thresholds": "1024,2048", + "timeFrom": null, + "timeShift": null, + "title": "Avg Blockheader Size", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + 
"minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 20, + "y": 13 + }, + "id": 10, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "pluginVersion": "6.4.2", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": 0 + }, + "tableColumn": "", + "targets": [ + { + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.message_count", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT \"value\" FROM \"chain.message_count\" WHERE $timeFilter ", + "rawQuery": false, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "sum" + } + ] + ], + "tags": [] + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Avg Messages in Tipset", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "influxdb", + "decimals": 0, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "dateTimeFromNow", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + 
"thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 0, + "y": 16 + }, + "id": 16, + "interval": "", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "groupBy": [], + "measurement": "chain.blocktime", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [ + "*1000" + ], + "type": "math" + } + ] + ], + "tags": [] + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Head Updated", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 3, + "w": 16, + "x": 4, + "y": 16 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + 
"pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Null Blocks", + "yaxis": 2 + }, + { + "alias": "Block Time", + "color": "rgb(31, 120, 193)" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "Block Time", + "groupBy": [], + "measurement": "chain.blocktime", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT difference(\"value\") FROM \"chain.blocktime\" WHERE $timeFilter", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "difference" + } + ] + ], + "tags": [] + }, + { + "alias": "Null Blocks", + "groupBy": [], + "measurement": "chain.height", + "orderByTime": "ASC", + "policy": "default", + "refId": "B", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "difference" + }, + { + "params": [ + "-1" + ], + "type": "math" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Tipsets", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": "Time between tipsets", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "decimals": 0, + "format": "short", + "label": "Number of Null blocks", + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + 
"format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 20, + "y": 16 + }, + "id": 30, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "FIL", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "groupBy": [], + "measurement": "chain.pledge_collateral", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + } + ] + ], + "tags": [] + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Pledge Collateral", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "columns": [], + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fontSize": "100%", + "gridPos": { + "h": 21, + "w": 4, + "x": 0, + "y": 19 + }, + "id": 28, + "pageSize": null, + "showHeader": true, + "sort": { + "col": 1, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "align": "auto", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD 
HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "power", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "number", + "unit": "short" + } + ], + "targets": [ + { + "groupBy": [], + "measurement": "chain.miner_power", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT top(\"value\", \"miner\", 20) as \"power\" FROM \"chain.miner_power\" WHERE $timeFilter", + "rawQuery": true, + "refId": "A", + "resultFormat": "table", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + } + ] + ], + "tags": [] + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Top Power Table", + "transform": "table", + "type": "table-old" + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 5, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 4, + "y": 19 + }, + "hiddenSeries": false, + "id": 40, + "interval": "", + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": true, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "alias": "$tag_miner", + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "miner" + ], + "type": "tag" 
+ }, + { + "params": [ + "previous" + ], + "type": "fill" + } + ], + "limit": "", + "measurement": "chain.miner_power", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT mean(\"value\") FROM \"chain.miner_power\" WHERE $timeFilter GROUP BY time($__interval), \"miner\" fill(previous)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [ + "\"miner\",20" + ], + "type": "top" + } + ] + ], + "slimit": "", + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Top Miner Power", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 2, + "format": "bytes", + "label": "Power", + "logBase": 1, + "max": "100", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "columns": [], + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fontSize": "100%", + "gridPos": { + "h": 21, + "w": 8, + "x": 16, + "y": 19 + }, + "id": 18, + "pageSize": null, + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "styles": [ + { + "alias": "Height", + "align": "auto", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "link": false, + "mappingType": 1, + "pattern": "chain.height", + "preserveFormat": false, + "sanitize": false, + "type": "string" + }, + { + "alias": "Tipset", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "chain.height.tipset", + "preserveFormat": false, + "sanitize": false, + 
"thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "mappingType": 1, + "pattern": "Time", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "groupBy": [], + "measurement": "chain.height", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT \"value\", \"tipset\" FROM \"chain.height\" WHERE $timeFilter", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + } + ] + ], + "tags": [] + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Chain Table", + "transform": "timeseries_to_columns", + "type": "table-old" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 4, + "y": 27 + }, + "hiddenSeries": false, + "id": 46, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "$tag_miner", + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT 
top(\"value\", \"miner\",20) FROM \"chain.miner_power\" WHERE $timeFilter GROUP BY time($__interval), \"miner\"", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Chain / Miner Power", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 12, + "x": 4, + "y": 35 + }, + "hiddenSeries": false, + "id": 24, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/.*/", + "color": "rgb(31, 120, 193)" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "previous" + ], + "type": "fill" + } + ], + "measurement": 
"chain.pledge_collateral", + "orderByTime": "ASC", + "policy": "default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Pledge Collateral", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "FIL", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 4, + "y": 41 + }, + "hiddenSeries": false, + "id": 44, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "previous" + ], + "type": "fill" + } + ], + "measurement": "chain.miner_power", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT count(\"value\") FROM \"chain.miner_power\" WHERE $timeFilter GROUP BY time($__interval)", 
+ "rawQuery": false, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "count" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Miners on Chain", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 48 + }, + "hiddenSeries": false, + "id": 34, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "alias": "Adr $tag_actor | Md $tag_method", + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "actor" + ], + "type": "tag" + }, + { + "params": [ + "method" + ], + "type": "tag" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.message_count", + "orderByTime": "ASC", + "policy": 
"default", + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "sum" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Actor Messages Method", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "influxdb", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 48 + }, + "hiddenSeries": false, + "id": 36, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "avg", + "sortDesc": false, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "alias": "Adr $tag_actor | Md $tag_method | Ex $tag_exitcode", + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "method" + ], + "type": "tag" + }, + { + "params": [ + "exitcode" + ], + "type": "tag" + }, + { + "params": [ + "actor" + ], + "type": "tag" + }, + { 
+ "params": [ + "null" + ], + "type": "fill" + } + ], + "measurement": "chain.message_count", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT sum(\"value\") FROM \"chain.message_count\" WHERE $timeFilter GROUP BY time($__interval), \"method\", \"exitcode\", \"actor\" fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "count" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Actor Messages Method With Exitcode", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 25, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "45s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Chain", + "uid": "z6FtI92Zz", + "version": 3 +} diff --git a/testplans/docker-images/Dockerfile.oni-buildbase b/testplans/docker-images/Dockerfile.oni-buildbase new file mode 100644 index 00000000000..265066537f3 --- /dev/null +++ b/testplans/docker-images/Dockerfile.oni-buildbase @@ -0,0 +1,16 @@ +ARG GO_VERSION=1.16.3 + +FROM golang:${GO_VERSION}-buster + +RUN apt-get update && apt-get install -y ca-certificates llvm clang mesa-opencl-icd ocl-icd-opencl-dev jq gcc git pkg-config bzr libhwloc-dev + +ARG 
FILECOIN_FFI_COMMIT=8b97bd8230b77bd32f4f27e4766a6d8a03b4e801 +ARG FFI_DIR=/extern/filecoin-ffi + +RUN mkdir -p ${FFI_DIR} \ + && git clone https://github.com/filecoin-project/filecoin-ffi.git ${FFI_DIR} \ + && cd ${FFI_DIR} \ + && git checkout ${FILECOIN_FFI_COMMIT} \ + && make + +RUN ldconfig diff --git a/testplans/docker-images/Dockerfile.oni-runtime b/testplans/docker-images/Dockerfile.oni-runtime new file mode 100644 index 00000000000..27144069a4a --- /dev/null +++ b/testplans/docker-images/Dockerfile.oni-runtime @@ -0,0 +1,23 @@ +ARG GO_VERSION=1.16.3 + +FROM golang:${GO_VERSION}-buster as downloader + +## Fetch the proof parameters. +## 1. Install the paramfetch binary first, so it can be cached over builds. +## 2. Then copy over the parameters (which could change). +## 3. Trigger the download. +## Output will be in /var/tmp/filecoin-proof-parameters. + +RUN go get github.com/filecoin-project/go-paramfetch/paramfetch@master +COPY /proof-parameters.json / +RUN paramfetch 8388608 /proof-parameters.json + +FROM ubuntu:18.04 + +RUN apt-get update && apt-get install -y ca-certificates llvm clang mesa-opencl-icd ocl-icd-opencl-dev gcc pkg-config libhwloc-dev + +RUN apt-get install -y jq net-tools netcat traceroute iputils-ping wget vim curl telnet iproute2 dnsutils + +COPY --from=downloader /var/tmp/filecoin-proof-parameters /var/tmp/filecoin-proof-parameters + +RUN ldconfig diff --git a/testplans/docker-images/Dockerfile.oni-runtime-debug b/testplans/docker-images/Dockerfile.oni-runtime-debug new file mode 100644 index 00000000000..856fcc1fc27 --- /dev/null +++ b/testplans/docker-images/Dockerfile.oni-runtime-debug @@ -0,0 +1,30 @@ +ARG GO_VERSION=1.16.3 + +FROM golang:${GO_VERSION}-buster as downloader + +## Fetch the proof parameters. +## 1. Install the paramfetch binary first, so it can be cached over builds. +## 2. Then copy over the parameters (which could change). +## 3. Trigger the download. +## Output will be in /var/tmp/filecoin-proof-parameters. 
+ +RUN go get github.com/filecoin-project/go-paramfetch/paramfetch@master +COPY /proof-parameters.json / +RUN paramfetch 8388608 /proof-parameters.json + +ARG LOTUS_COMMIT=b8deee048eaf850113e8626a73f64b17ba69a9f6 + +## for debug purposes +RUN apt update && apt install -y mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config libhwloc-dev curl && git clone https://github.com/filecoin-project/lotus.git && cd lotus/ && git checkout ${LOTUS_COMMIT} && make clean && make all && make install + +FROM ubuntu:18.04 + +RUN apt-get update && apt-get install -y ca-certificates llvm clang mesa-opencl-icd ocl-icd-opencl-dev jq gcc pkg-config libhwloc-dev net-tools netcat traceroute iputils-ping wget vim curl telnet iproute2 dnsutils +COPY --from=downloader /var/tmp/filecoin-proof-parameters /var/tmp/filecoin-proof-parameters + +## for debug purposes +COPY --from=downloader /usr/local/bin/lotus /usr/local/bin/lll +COPY --from=downloader /usr/local/bin/lotus-miner /usr/local/bin/lm + +ENV FULLNODE_API_INFO="/ip4/127.0.0.1/tcp/1234/http" +ENV MINER_API_INFO="/ip4/127.0.0.1/tcp/2345/http" diff --git a/testplans/docker-images/proof-parameters.json b/testplans/docker-images/proof-parameters.json new file mode 100644 index 00000000000..1d458445458 --- /dev/null +++ b/testplans/docker-images/proof-parameters.json @@ -0,0 +1,152 @@ +{ + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { + "cid": "QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR", + "digest": "7610b9f82bfc88405b7a832b651ce2f6", + "sector_size": 2048 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": { + "cid": "QmcS5JZs8X3TdtkEBpHAdUYjdNDqcL7fWQFtQz69mpnu2X", + "digest": "0e0958009936b9d5e515ec97b8cb792d", + "sector_size": 2048 + }, + 
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": { + "cid": "QmUiRx71uxfmUE8V3H9sWAsAXoM88KR4eo1ByvvcFNeTLR", + "digest": "1a7d4a9c8a502a497ed92a54366af33f", + "sector_size": 536870912 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": { + "cid": "QmfCeddjFpWtavzfEzZpJfzSajGNwfL4RjFXWAvA9TSnTV", + "digest": "4dae975de4f011f101f5a2f86d1daaba", + "sector_size": 536870912 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": { + "cid": "QmcSTqDcFVLGGVYz1njhUZ7B6fkKtBumsLUwx4nkh22TzS", + "digest": "82c88066be968bb550a05e30ff6c2413", + "sector_size": 2048 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": { + "cid": "QmSTCXF2ipGA3f6muVo6kHc2URSx6PzZxGUqu7uykaH5KU", + "digest": "ffd79788d614d27919ae5bd2d94eacb6", + "sector_size": 2048 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": { + "cid": "QmU9SBzJNrcjRFDiFc4GcApqdApN6z9X7MpUr66mJ2kAJP", + "digest": "700171ecf7334e3199437c930676af82", + "sector_size": 8388608 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": { + "cid": "QmbmUMa3TbbW3X5kFhExs6WgC4KeWT18YivaVmXDkB6ANG", + "digest": "79ebb55f56fda427743e35053edad8fc", + "sector_size": 8388608 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": { + "cid": "QmdNEL2RtqL52GQNuj8uz6mVj5Z34NVnbaJ1yMyh1oXtBx", + "digest": "c49499bb76a0762884896f9683403f55", + "sector_size": 8388608 + }, + 
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": { + "cid": "QmUiVYCQUgr6Y13pZFr8acWpSM4xvTXUdcvGmxyuHbKhsc", + "digest": "34d4feeacd9abf788d69ef1bb4d8fd00", + "sector_size": 8388608 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": { + "cid": "QmVgCsJFRXKLuuUhT3aMYwKVGNA9rDeR6DCrs7cAe8riBT", + "digest": "827359440349fe8f5a016e7598993b79", + "sector_size": 536870912 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": { + "cid": "QmfA31fbCWojSmhSGvvfxmxaYCpMoXP95zEQ9sLvBGHNaN", + "digest": "bd2cd62f65c1ab84f19ca27e97b7c731", + "sector_size": 536870912 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": { + "cid": "QmaUmfcJt6pozn8ndq1JVBzLRjRJdHMTPd4foa8iw5sjBZ", + "digest": "2cf49eb26f1fee94c85781a390ddb4c8", + "sector_size": 34359738368 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": { + "cid": "QmR9i9KL3vhhAqTBGj1bPPC7LvkptxrH9RvxJxLN1vvsBE", + "digest": "0f8ec542485568fa3468c066e9fed82b", + "sector_size": 34359738368 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": { + "cid": "Qmdtczp7p4wrbDofmHdGhiixn9irAcN77mV9AEHZBaTt1i", + "digest": "d84f79a16fe40e9e25a36e2107bb1ba0", + "sector_size": 34359738368 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": { + "cid": "QmZCvxKcKP97vDAk8Nxs9R1fWtqpjQrAhhfXPoCi1nkDoF", + "digest": "fc02943678dd119e69e7fab8420e8819", + "sector_size": 34359738368 + }, + 
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": { + "cid": "QmeAN4vuANhXsF8xP2Lx5j2L6yMSdogLzpcvqCJThRGK1V", + "digest": "3810b7780ac0e299b22ae70f1f94c9bc", + "sector_size": 68719476736 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": { + "cid": "QmWV8rqZLxs1oQN9jxNWmnT1YdgLwCcscv94VARrhHf1T7", + "digest": "59d2bf1857adc59a4f08fcf2afaa916b", + "sector_size": 68719476736 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": { + "cid": "QmVkrXc1SLcpgcudK5J25HH93QvR9tNsVhVTYHm5UymXAz", + "digest": "2170a91ad5bae22ea61f2ea766630322", + "sector_size": 68719476736 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": { + "cid": "QmbfQjPD7EpzjhWGmvWAsyN2mAZ4PcYhsf3ujuhU9CSuBm", + "digest": "6d3789148fb6466d07ee1e24d6292fd6", + "sector_size": 68719476736 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": { + "cid": "QmWceMgnWYLopMuM4AoGMvGEau7tNe5UK83XFjH5V9B17h", + "digest": "434fb1338ecfaf0f59256f30dde4968f", + "sector_size": 2048 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": { + "cid": "QmamahpFCstMUqHi2qGtVoDnRrsXhid86qsfvoyCTKJqHr", + "digest": "dc1ade9929ade1708238f155343044ac", + "sector_size": 2048 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": { + "cid": "QmYBpTt7LWNAWr1JXThV5VxX7wsQFLd1PHrGYVbrU1EZjC", + "digest": "6c77597eb91ab936c1cef4cf19eba1b3", + "sector_size": 
536870912 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": { + "cid": "QmWionkqH2B6TXivzBSQeSyBxojaiAFbzhjtwYRrfwd8nH", + "digest": "065179da19fbe515507267677f02823e", + "sector_size": 536870912 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": { + "cid": "QmPXAPPuQtuQz7Zz3MHMAMEtsYwqM1o9H1csPLeiMUQwZH", + "digest": "09e612e4eeb7a0eb95679a88404f960c", + "sector_size": 8388608 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": { + "cid": "QmYCuipFyvVW1GojdMrjK1JnMobXtT4zRCZs1CGxjizs99", + "digest": "b687beb9adbd9dabe265a7e3620813e4", + "sector_size": 8388608 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": { + "cid": "QmengpM684XLQfG8754ToonszgEg2bQeAGUan5uXTHUQzJ", + "digest": "6a388072a518cf46ebd661f5cc46900a", + "sector_size": 34359738368 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": { + "cid": "Qmf93EMrADXAK6CyiSfE8xx45fkMfR3uzKEPCvZC1n2kzb", + "digest": "0c7b4aac1c40fdb7eb82bc355b41addf", + "sector_size": 34359738368 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": { + "cid": "QmS7ye6Ri2MfFzCkcUJ7FQ6zxDKuJ6J6B8k5PN7wzSR9sX", + "digest": "1801f8a6e1b00bceb00cc27314bb5ce3", + "sector_size": 68719476736 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": { + "cid": 
"QmehSmC6BhrgRZakPDta2ewoH9nosNzdjCqQRXsNFNUkLN", + "digest": "a89884252c04c298d0b3c81bfd884164", + "sector_size": 68719476736 + } +} diff --git a/testplans/graphsync/_compositions/stress-k8s.toml b/testplans/graphsync/_compositions/stress-k8s.toml new file mode 100644 index 00000000000..bfc854bcc1d --- /dev/null +++ b/testplans/graphsync/_compositions/stress-k8s.toml @@ -0,0 +1,35 @@ +[metadata] + name = "stress" + +[global] + plan = "graphsync" + case = "stress" + total_instances = 2 + builder = "docker:go" + runner = "cluster:k8s" + +[global.build_config] + push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] +size = "10MB" +latencies = '["50ms", "100ms", "200ms"]' +bandwidths = '["32MiB", "16MiB", "8MiB", "4MiB", "1MiB"]' +concurrency = "10" + +[[groups]] + id = "providers" + instances = { count = 1 } + [groups.resources] + memory = "4096Mi" + cpu = "1000m" + +[[groups]] + id = "requestors" + instances = { count = 1 } + [groups.resources] + memory = "4096Mi" + cpu = "1000m" diff --git a/testplans/graphsync/_compositions/stress.toml b/testplans/graphsync/_compositions/stress.toml new file mode 100644 index 00000000000..4920f6ff31c --- /dev/null +++ b/testplans/graphsync/_compositions/stress.toml @@ -0,0 +1,23 @@ +[metadata] + name = "stress" + +[global] + plan = "graphsync" + case = "stress" + total_instances = 2 + builder = "docker:go" + runner = "local:docker" + +[global.run.test_params] +size = "10MB" +latencies = '["50ms", "100ms", "200ms"]' +bandwidths = '["32MiB", "16MiB", "8MiB", "4MiB", "1MiB"]' +concurrency = "10" + +[[groups]] + id = "providers" + instances = { count = 1 } + +[[groups]] + id = "requestors" + instances = { count = 1 } diff --git a/testplans/graphsync/_compositions/version_compat.toml b/testplans/graphsync/_compositions/version_compat.toml new file mode 100644 index 00000000000..b7e89a97f0b --- /dev/null +++ 
b/testplans/graphsync/_compositions/version_compat.toml @@ -0,0 +1,34 @@ +[metadata] + name = "version_compat" + +[global] + plan = "graphsync" + case = "stress" + total_instances = 2 + builder = "docker:go" + runner = "local:docker" + +[global.run.test_params] +size = "10MB" +latencies = '["50ms"]' +bandwidths = '["4MiB"]' +concurrency = "1" + +[[groups]] + id = "providers" + instances = { count = 1 } + [groups.build] + [[groups.build.dependencies]] + module = "github.com/ipfs/go-graphsync" + version = "v0.2.1" + [[groups.build.dependencies]] + module = "github.com/hannahhoward/all-selector" + version = "v0.2.0" + +[[groups]] + id = "requestors" + instances = { count = 1 } + [groups.build] + [[groups.build.dependencies]] + module = "github.com/ipfs/go-graphsync" + version = "v0.1.2" \ No newline at end of file diff --git a/testplans/graphsync/go.mod b/testplans/graphsync/go.mod new file mode 100644 index 00000000000..ffd131f833a --- /dev/null +++ b/testplans/graphsync/go.mod @@ -0,0 +1,33 @@ +module github.com/filecoin-project/lotus/testplans/graphsync + +go 1.14 + +require ( + github.com/dustin/go-humanize v1.0.0 + github.com/hannahhoward/all-selector v0.1.0 + github.com/ipfs/go-blockservice v0.1.3 + github.com/ipfs/go-cid v0.0.6 + github.com/ipfs/go-datastore v0.4.4 + github.com/ipfs/go-graphsync v0.1.2 + github.com/ipfs/go-ipfs-blockstore v0.1.4 + github.com/ipfs/go-ipfs-chunker v0.0.5 + github.com/ipfs/go-ipfs-exchange-offline v0.0.1 + github.com/ipfs/go-ipfs-files v0.0.8 + github.com/ipfs/go-ipld-format v0.2.0 + github.com/ipfs/go-merkledag v0.3.1 + github.com/ipfs/go-unixfs v0.2.4 + github.com/ipld/go-ipld-prime v0.4.0 + github.com/kr/text v0.2.0 // indirect + github.com/libp2p/go-libp2p v0.10.0 + github.com/libp2p/go-libp2p-core v0.6.0 + github.com/libp2p/go-libp2p-noise v0.1.1 + github.com/libp2p/go-libp2p-secio v0.2.2 + github.com/libp2p/go-libp2p-tls v0.1.3 + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect + 
github.com/testground/sdk-go v0.2.7-0.20201112151952-8ee00c80c3ec + golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 + golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect + google.golang.org/protobuf v1.25.0 // indirect + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect + gopkg.in/yaml.v2 v2.2.8 // indirect +) diff --git a/testplans/graphsync/go.sum b/testplans/graphsync/go.sum new file mode 100644 index 00000000000..e80038c38f0 --- /dev/null +++ b/testplans/graphsync/go.sum @@ -0,0 +1,972 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= +github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/avast/retry-go v2.6.0+incompatible h1:FelcMrm7Bxacr1/RM8+/eqkDkmVN7tjlsy51dOzB3LI= +github.com/avast/retry-go v2.6.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod 
h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= 
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod 
h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f h1:BOaYiTvg8p9vBUXpklC22XSK/mifLF7lG9jtmYYi3Tc= +github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.1 h1:w9pSFNSdq/JPM1N12Fz/F/bzo993Is1W+Q7HjPzi7yg= +github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= +github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as= +github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= +github.com/francoispqt/gojay v1.2.13 
h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4= +github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= 
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.17 h1:rMrlX2ZY2UbvT+sdz3+6J+pp2z+msCq9MxTU6ymxbBY= +github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= +github.com/gopherjs/gopherjs 
v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hannahhoward/all-selector v0.1.0 h1:B+hMG/8Vb0+XB3eHK2Cz6hYpSZWVZuSz401ebRvfGtk= +github.com/hannahhoward/all-selector v0.1.0/go.mod h1:2wbwlpJCyAaTfpSYqKqqA5Xe0YPvJmyjylxKs6+PIvA= +github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= +github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod 
h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client v0.0.0-20200515024757-02f0bf5dbca3 h1:k3/6a1Shi7GGCp9QpyYuXsMM6ncTOjCzOE9Fd6CDA+Q= +github.com/influxdata/influxdb1-client v0.0.0-20200515024757-02f0bf5dbca3/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= +github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= +github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= +github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= +github.com/ipfs/go-bitswap v0.1.8 h1:38X1mKXkiU6Nzw4TOSWD8eTVY5eX3slQunv3QEWfXKg= +github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= +github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= +github.com/ipfs/go-blockservice v0.1.3 h1:9XgsPMwwWJSC9uVr2pMDsW2qFTBSkxpGMhmna8mIjPM= +github.com/ipfs/go-blockservice v0.1.3/go.mod 
h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8= +github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= +github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= +github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= +github.com/ipfs/go-ds-badger v0.2.3 
h1:J27YvAcpuA5IvZUbeBxOcQgqnYHUPxoygc6QxxkodZ4= +github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= +github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= +github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-graphsync v0.1.2 h1:25Ll9kIXCE+DY0dicvfS3KMw+U5sd01b/FJbA7KAbhg= +github.com/ipfs/go-graphsync v0.1.2/go.mod h1:sLXVXm1OxtE2XYPw62MuXCdAuNwkAdsbnfrmos5odbA= +github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= +github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= +github.com/ipfs/go-ipfs-blockstore v0.1.4 h1:2SGI6U1B44aODevza8Rde3+dY30Pb+lbcObe1LETxOQ= +github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= +github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= +github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= +github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= +github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= +github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= +github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= +github.com/ipfs/go-ipfs-ds-help v0.1.1 h1:IW/bXGeaAZV2VH0Kuok+Ohva/zHkHmeLFBxC1k7mNPc= +github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod 
h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg= +github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= +github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= +github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= +github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= +github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= +github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= +github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= +github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= 
+github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= +github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= +github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= +github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= +github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY= +github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= +github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.5 h1:fL4YI+1g5V/b1Yxr1qAiXTMg1H8z9vx/VmJxBuQMHvU= +github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.3.1 h1:3UqWINBEr3/N+r6OwgFXAddDP/8zpQX/8J7IGVOCqRQ= +github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= +github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.2.0 h1:2cSr7exUGKYyDeUyQ7P/nHPs9P7Ht/B+ROrpN1EJOjc= +github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= 
+github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo= +github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= +github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= +github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= +github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= +github.com/ipld/go-ipld-prime v0.0.4-0.20200828224805-5ff8c8b0b6ef h1:/yPelt/0CuzZsmRkYzBBnJ499JnAOGaIaAXHujx96ic= +github.com/ipld/go-ipld-prime v0.0.4-0.20200828224805-5ff8c8b0b6ef/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= +github.com/ipld/go-ipld-prime v0.4.0 h1:ySDtWeWl+TDMokXlwGANSMeD5TN618cZp9NnxqZ452M= +github.com/ipld/go-ipld-prime v0.4.0/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200828231332-ae0aea07222b h1:ZtlW6pubN17TDaStlxgrwEXXwwUfJaXu9RobwczXato= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200828231332-ae0aea07222b/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= +github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= +github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod 
h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod 
h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= +github.com/libp2p/go-addr-util v0.0.2 h1:7cWK5cdA5x72jX0g8iLrQWm5TRJZ6CzGdPEhWj7plWU= +github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= 
+github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= +github.com/libp2p/go-conn-security-multistream v0.2.0 h1:uNiDjS58vrvJTg9jO6bySd1rMKejieG7v45ekqHbZ1M= +github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= +github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= +github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= +github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= +github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= +github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= +github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= +github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg= +github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= +github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= +github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= +github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= +github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= +github.com/libp2p/go-libp2p v0.10.0 h1:7ooOvK1wi8eLpyTppy8TeH43UHy5uI75GAHGJxenUi0= 
+github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= +github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= +github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= +github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= +github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= +github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= +github.com/libp2p/go-libp2p-autonat v0.2.3 h1:w46bKK3KTOUWDe5mDYMRjJu1uryqBp8HCNDp/TWMqKw= +github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= +github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= +github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= +github.com/libp2p/go-libp2p-blankhost v0.1.6 h1:CkPp1/zaCrCnBo0AdsQA0O1VkUYoUOtyHOnoa8gKIcE= +github.com/libp2p/go-libp2p-blankhost v0.1.6/go.mod h1:jONCAJqEP+Z8T6EQviGL4JsQcLx1LgTGtVqFNY8EMfQ= +github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= +github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= +github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= +github.com/libp2p/go-libp2p-circuit v0.2.3 h1:3Uw1fPHWrp1tgIhBz0vSOxRUmnKL8L/NGUyEd5WfSGM= +github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= +github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= +github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-core 
v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= +github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= +github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= +github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= +github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= +github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= +github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= +github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.2/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.3/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM= +github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.6.0 h1:u03qofNYTBN+yVg08PuAKylZogVf0xcTEeM8skGf+ak= +github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= +github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= +github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= 
+github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= +github.com/libp2p/go-libp2p-discovery v0.4.0 h1:dK78UhopBk48mlHtRCzbdLm3q/81g77FahEBTjcqQT8= +github.com/libp2p/go-libp2p-discovery v0.4.0/go.mod h1:bZ0aJSrFc/eX2llP0ryhb1kpgkPyTo23SJ5b7UQCMh4= +github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= +github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= +github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= +github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= +github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= +github.com/libp2p/go-libp2p-mplex v0.2.3 h1:2zijwaJvpdesST2MXpI5w9wWFRgYtMcpRX7rrw0jmOo= +github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= +github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= +github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= +github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGSfvTkU= +github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= +github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= +github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= +github.com/libp2p/go-libp2p-noise v0.1.1 h1:vqYQWvnIcHpIoWJKC7Al4D6Hgj0H012TuXRhPwSMGpQ= +github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= +github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= +github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= +github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod 
h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= +github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= +github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= +github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= +github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= +github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.6 h1:2ACefBX23iMdJU9Ke+dcXt3w86MIryes9v7In4+Qq3U= +github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= +github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= +github.com/libp2p/go-libp2p-quic-transport v0.5.0 h1:BUN1lgYNUrtv4WLLQ5rQmC9MCJ6uEXusezGvYRNoJXE= +github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= +github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= +github.com/libp2p/go-libp2p-record v0.1.1 h1:ZJK2bHXYUBqObHX+rHLSNrM3M8fmJUlUHrodDPPATmY= +github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= +github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= +github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= +github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= +github.com/libp2p/go-libp2p-secio v0.2.2 h1:rLLPvShPQAcY6eNurKNZq3eZjPWfU9kXF2eI9jIYdrg= +github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY= +github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= 
+github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= +github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= +github.com/libp2p/go-libp2p-swarm v0.2.7 h1:4lV/sf7f0NuVqunOpt1I11+Z54+xp+m0eeAvxj/LyRc= +github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= +github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-testing v0.1.1 h1:U03z3HnGI7Ni8Xx6ONVZvUFOAzWYmolWf5W5jAOPNmU= +github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM= +github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= +github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= +github.com/libp2p/go-libp2p-transport-upgrader v0.3.0 h1:q3ULhsknEQ34eVDhv4YwKS8iet69ffs9+Fir6a7weN4= +github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= +github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= +github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= +github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= +github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= 
+github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= +github.com/libp2p/go-libp2p-yamux v0.2.8 h1:0s3ELSLu2O7hWKfX1YjzudBKCP0kZ+m9e2+0veXzkn4= +github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= +github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= +github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= +github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-mplex v0.1.2 h1:qOg1s+WdGLlpkrczDqmhYzyk3vCfsQ8+RxRTQjOZWwI= +github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.4 h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA= +github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= +github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= +github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= +github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= +github.com/libp2p/go-netroute v0.1.2 h1:UHhB35chwgvcRI392znJA3RCBtZ3MpE3ahNCN5MR4Xg= +github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= +github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= 
+github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.5 h1:pQkejVhF0xp08D4CQUcw8t+BFJeXowja6RVcb5p++EA= +github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw= +github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= +github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= +github.com/libp2p/go-reuseport-transport v0.0.3 h1:zzOeXnTooCkRvoH+bSXEfXhn76+LAiwoneM0gnXjF2M= +github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= +github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-sockaddr v0.1.0 h1:Y4s3/jNoryVRKEBrkJ576F17CPOaMIzUeCsg7dlTDj0= +github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= +github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= +github.com/libp2p/go-stream-muxer-multistream v0.3.0 h1:TqnSHPJEIqDEO7h1wZZ0p3DXdvDSiLHQidKKUGZtiOY= +github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= +github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= +github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= +github.com/libp2p/go-tcp-transport v0.2.0 h1:YoThc549fzmNJIh7XjHVtMIFaEDRtIrtWciG5LyYAPo= +github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= +github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= +github.com/libp2p/go-ws-transport v0.1.0/go.mod 
h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= +github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= +github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= +github.com/libp2p/go-ws-transport v0.3.1 h1:ZX5rWB8nhRRJVaPO6tmkGI/Xx8XNboYX20PW5hXIscw= +github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= +github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.7 h1:v40A1eSPJDIZwz2AvrV3cxpTZEGDP11QJbukmEhYyQI= +github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/lucas-clemente/quic-go v0.16.0 h1:jJw36wfzGJhmOhAOaOC2lS36WgeqXQszH47A7spo1LI= +github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= +github.com/marten-seemann/qtls v0.9.1 h1:O0YKQxNVPaiFgMng0suWEOY2Sb4LT2sRn9Qimq3Z1IQ= +github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= +github.com/mattn/go-colorable v0.0.9/go.mod 
h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= 
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= 
+github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= +github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= +github.com/multiformats/go-multiaddr v0.2.2 h1:XZLDTszBIJe6m0zF6ITBrEcZR73OPUhCBBS9rYAuUzI= +github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= +github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= +github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= +github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= +github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= +github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.1.5 h1:QoRKvu0xHN1FCFJcMQLbG/yQE2z441L5urvG3+qyz7g= +github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multibase v0.0.1/go.mod 
h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.1.1 h1:JlAdpIFhBhGRLxe9W6Om0w++Gd6KMWoFPZL/dEnm9nI= +github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= +github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= 
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pelletier/go-toml v1.2.0/go.mod 
h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod 
h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= 
+github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= +github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= 
+github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= 
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/testground/sdk-go v0.2.7-0.20201112151952-8ee00c80c3ec h1:ZigYjS91IfPRStWGEZuI8/QDes9vPKpwnmLmc3AVQns= +github.com/testground/sdk-go v0.2.7-0.20201112151952-8ee00c80c3ec/go.mod h1:Q4dnWsUBH+dZ1u7aEGDBHWGUaLfhitjUq3UJQqxeTmk= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105 h1:Sh6UG5dW5xW8Ek2CtRGq4ipdEvvx9hOyBJjEGyTYDl0= +github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod 
h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= +github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 
h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= +go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU= +golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 h1:VvQyQJN0tSuecqgcIxMWnnfG5kSmgy9KZR9sW3W5QeA= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 h1:r3P/5xOq/dK1991B65Oy6E1fRF/2d/fSYZJ/fXGVfJc= +golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine 
v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= 
+gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod 
h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/testplans/graphsync/main.go b/testplans/graphsync/main.go new file mode 100644 index 00000000000..1567d029053 --- /dev/null +++ b/testplans/graphsync/main.go @@ -0,0 +1,388 @@ +package main + +import ( + "context" + "crypto/rand" + "fmt" + "io" + goruntime "runtime" + "strings" + "time" + + "github.com/dustin/go-humanize" + allselector "github.com/hannahhoward/all-selector" + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dss "github.com/ipfs/go-datastore/sync" + "github.com/ipfs/go-graphsync/storeutil" + blockstore "github.com/ipfs/go-ipfs-blockstore" + chunk "github.com/ipfs/go-ipfs-chunker" + offline "github.com/ipfs/go-ipfs-exchange-offline" + files "github.com/ipfs/go-ipfs-files" + format "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-merkledag" + "github.com/ipfs/go-unixfs/importer/balanced" + ihelper "github.com/ipfs/go-unixfs/importer/helpers" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/libp2p/go-libp2p-core/metrics" + "github.com/testground/sdk-go/network" + "golang.org/x/sync/errgroup" + + gs "github.com/ipfs/go-graphsync" + gsi "github.com/ipfs/go-graphsync/impl" + gsnet "github.com/ipfs/go-graphsync/network" + + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/peer" + noise "github.com/libp2p/go-libp2p-noise" + secio "github.com/libp2p/go-libp2p-secio" + tls "github.com/libp2p/go-libp2p-tls" + + "github.com/testground/sdk-go/run" + "github.com/testground/sdk-go/runtime" + "github.com/testground/sdk-go/sync" +) + +var testcases = map[string]interface{}{ + "stress": run.InitializedTestCaseFn(runStress), +} + +func main() { + run.InvokeMap(testcases) +} + +type networkParams struct { + latency time.Duration + bandwidth uint64 +} + +func (p 
networkParams) String() string { + return fmt.Sprintf("", p.latency, p.bandwidth) +} + +func runStress(runenv *runtime.RunEnv, initCtx *run.InitContext) error { + var ( + size = runenv.SizeParam("size") + concurrency = runenv.IntParam("concurrency") + + networkParams = parseNetworkConfig(runenv) + ) + runenv.RecordMessage("started test instance") + runenv.RecordMessage("network params: %v", networkParams) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer cancel() + + initCtx.MustWaitAllInstancesInitialized(ctx) + + host, peers, _ := makeHost(ctx, runenv, initCtx) + defer host.Close() + + var ( + // make datastore, blockstore, dag service, graphsync + bs = blockstore.NewBlockstore(dss.MutexWrap(ds.NewMapDatastore())) + dagsrv = merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) + gsync = gsi.New(ctx, + gsnet.NewFromLibp2pHost(host), + storeutil.LoaderForBlockstore(bs), + storeutil.StorerForBlockstore(bs), + ) + ) + + defer initCtx.SyncClient.MustSignalAndWait(ctx, "done", runenv.TestInstanceCount) + + switch runenv.TestGroupID { + case "providers": + if runenv.TestGroupInstanceCount > 1 { + panic("test case only supports one provider") + } + + runenv.RecordMessage("we are the provider") + defer runenv.RecordMessage("done provider") + + gsync.RegisterIncomingRequestHook(func(p peer.ID, request gs.RequestData, hookActions gs.IncomingRequestHookActions) { + hookActions.ValidateRequest() + }) + + return runProvider(ctx, runenv, initCtx, dagsrv, size, networkParams, concurrency) + + case "requestors": + runenv.RecordMessage("we are the requestor") + defer runenv.RecordMessage("done requestor") + + p := *peers[0] + if err := host.Connect(ctx, p); err != nil { + return err + } + runenv.RecordMessage("done dialling provider") + return runRequestor(ctx, runenv, initCtx, gsync, p, dagsrv, networkParams, concurrency, size) + + default: + panic("unsupported group ID") + } +} + +func parseNetworkConfig(runenv *runtime.RunEnv) 
[]networkParams { + var ( + bandwidths = runenv.SizeArrayParam("bandwidths") + latencies []time.Duration + ) + + lats := runenv.StringArrayParam("latencies") + for _, l := range lats { + d, err := time.ParseDuration(l) + if err != nil { + panic(err) + } + latencies = append(latencies, d) + } + + // prepend bandwidth=0 and latency=0 zero values; the first iteration will + // be a control iteration. The sidecar interprets zero values as no + // limitation on that attribute. + bandwidths = append([]uint64{0}, bandwidths...) + latencies = append([]time.Duration{0}, latencies...) + + var ret []networkParams + for _, bandwidth := range bandwidths { + for _, latency := range latencies { + ret = append(ret, networkParams{ + latency: latency, + bandwidth: bandwidth, + }) + } + } + return ret +} + +func runRequestor(ctx context.Context, runenv *runtime.RunEnv, initCtx *run.InitContext, gsync gs.GraphExchange, p peer.AddrInfo, dagsrv format.DAGService, networkParams []networkParams, concurrency int, size uint64) error { + var ( + cids []cid.Cid + // create a selector for the whole UnixFS dag + sel = allselector.AllSelector + ) + + for round, np := range networkParams { + var ( + topicCid = sync.NewTopic(fmt.Sprintf("cid-%d", round), []cid.Cid{}) + stateNext = sync.State(fmt.Sprintf("next-%d", round)) + stateNet = sync.State(fmt.Sprintf("network-configured-%d", round)) + ) + + // wait for all instances to be ready for the next state. + initCtx.SyncClient.MustSignalAndWait(ctx, stateNext, runenv.TestInstanceCount) + + // clean up previous CIDs to attempt to free memory + // TODO does this work? + _ = dagsrv.RemoveMany(ctx, cids) + + runenv.RecordMessage("===== ROUND %d: latency=%s, bandwidth=%d =====", round, np.latency, np.bandwidth) + + sctx, scancel := context.WithCancel(ctx) + cidCh := make(chan []cid.Cid, 1) + initCtx.SyncClient.MustSubscribe(sctx, topicCid, cidCh) + cids = <-cidCh + scancel() + + // run GC to get accurate-ish stats. 
+ goruntime.GC() + goruntime.GC() + + <-initCtx.SyncClient.MustBarrier(ctx, stateNet, 1).C + + errgrp, grpctx := errgroup.WithContext(ctx) + for _, c := range cids { + c := c // capture + np := np // capture + + errgrp.Go(func() error { + // make a go-ipld-prime link for the root UnixFS node + clink := cidlink.Link{Cid: c} + + // execute the traversal. + runenv.RecordMessage("\t>>> requesting CID %s", c) + + start := time.Now() + _, errCh := gsync.Request(grpctx, p.ID, clink, sel) + for err := range errCh { + return err + } + dur := time.Since(start) + + runenv.RecordMessage("\t<<< request complete with no errors") + runenv.RecordMessage("***** ROUND %d observed duration (lat=%s,bw=%d): %s", round, np.latency, np.bandwidth, dur) + + measurement := fmt.Sprintf("duration.sec,lat=%s,bw=%s,concurrency=%d,size=%s", np.latency, humanize.IBytes(np.bandwidth), concurrency, humanize.Bytes(size)) + measurement = strings.Replace(measurement, " ", "", -1) + runenv.R().RecordPoint(measurement, float64(dur)/float64(time.Second)) + + // verify that we have the CID now. + if node, err := dagsrv.Get(grpctx, c); err != nil { + return err + } else if node == nil { + return fmt.Errorf("finished graphsync request, but CID not in store") + } + + return nil + }) + } + + if err := errgrp.Wait(); err != nil { + return err + } + } + + return nil +} + +func runProvider(ctx context.Context, runenv *runtime.RunEnv, initCtx *run.InitContext, dagsrv format.DAGService, size uint64, networkParams []networkParams, concurrency int) error { + var ( + cids []cid.Cid + bufferedDS = format.NewBufferedDAG(ctx, dagsrv) + ) + + for round, np := range networkParams { + var ( + topicCid = sync.NewTopic(fmt.Sprintf("cid-%d", round), []cid.Cid{}) + stateNext = sync.State(fmt.Sprintf("next-%d", round)) + stateNet = sync.State(fmt.Sprintf("network-configured-%d", round)) + ) + + // wait for all instances to be ready for the next state. 
+ initCtx.SyncClient.MustSignalAndWait(ctx, stateNext, runenv.TestInstanceCount) + + // remove the previous CIDs from the dag service; hopefully this + // will delete them from the store and free up memory. + for _, c := range cids { + _ = dagsrv.Remove(ctx, c) + } + cids = cids[:0] + + runenv.RecordMessage("===== ROUND %d: latency=%s, bandwidth=%d =====", round, np.latency, np.bandwidth) + + // generate as many random files as the concurrency level. + for i := 0; i < concurrency; i++ { + // file with random data + file := files.NewReaderFile(io.LimitReader(rand.Reader, int64(size))) + + const unixfsChunkSize uint64 = 1 << 20 + const unixfsLinksPerLevel = 1024 + + params := ihelper.DagBuilderParams{ + Maxlinks: unixfsLinksPerLevel, + RawLeaves: true, + CidBuilder: nil, + Dagserv: bufferedDS, + } + + db, err := params.New(chunk.NewSizeSplitter(file, int64(unixfsChunkSize))) + if err != nil { + return fmt.Errorf("unable to setup dag builder: %w", err) + } + + node, err := balanced.Layout(db) + if err != nil { + return fmt.Errorf("unable to create unix fs node: %w", err) + } + + cids = append(cids, node.Cid()) + } + + if err := bufferedDS.Commit(); err != nil { + return fmt.Errorf("unable to commit unix fs node: %w", err) + } + + // run GC to get accurate-ish stats. 
+ goruntime.GC() + goruntime.GC() + + runenv.RecordMessage("\tCIDs are: %v", cids) + initCtx.SyncClient.MustPublish(ctx, topicCid, cids) + + runenv.RecordMessage("\tconfiguring network for round %d", round) + initCtx.NetClient.MustConfigureNetwork(ctx, &network.Config{ + Network: "default", + Enable: true, + Default: network.LinkShape{ + Latency: np.latency, + Bandwidth: np.bandwidth * 8, // bps + }, + CallbackState: stateNet, + CallbackTarget: 1, + }) + runenv.RecordMessage("\tnetwork configured for round %d", round) + } + + return nil +} + +func makeHost(ctx context.Context, runenv *runtime.RunEnv, initCtx *run.InitContext) (host.Host, []*peer.AddrInfo, *metrics.BandwidthCounter) { + secureChannel := runenv.StringParam("secure_channel") + + var security libp2p.Option + switch secureChannel { + case "noise": + security = libp2p.Security(noise.ID, noise.New) + case "secio": + security = libp2p.Security(secio.ID, secio.New) + case "tls": + security = libp2p.Security(tls.ID, tls.New) + } + + // ☎️ Let's construct the libp2p node. + ip := initCtx.NetClient.MustGetDataNetworkIP() + listenAddr := fmt.Sprintf("/ip4/%s/tcp/0", ip) + bwcounter := metrics.NewBandwidthCounter() + host, err := libp2p.New(ctx, + security, + libp2p.ListenAddrStrings(listenAddr), + libp2p.BandwidthReporter(bwcounter), + ) + if err != nil { + panic(fmt.Sprintf("failed to instantiate libp2p instance: %s", err)) + } + + // Record our listen addrs. + runenv.RecordMessage("my listen addrs: %v", host.Addrs()) + + // Obtain our own address info, and use the sync service to publish it to a + // 'peersTopic' topic, where others will read from. + var ( + id = host.ID() + ai = &peer.AddrInfo{ID: id, Addrs: host.Addrs()} + + // the peers topic where all instances will advertise their AddrInfo. + peersTopic = sync.NewTopic("peers", new(peer.AddrInfo)) + + // initialize a slice to store the AddrInfos of all other peers in the run. 
+ peers = make([]*peer.AddrInfo, 0, runenv.TestInstanceCount-1) + ) + + // Publish our own. + initCtx.SyncClient.MustPublish(ctx, peersTopic, ai) + + // Now subscribe to the peers topic and consume all addresses, storing them + // in the peers slice. + peersCh := make(chan *peer.AddrInfo) + sctx, scancel := context.WithCancel(ctx) + defer scancel() + + sub := initCtx.SyncClient.MustSubscribe(sctx, peersTopic, peersCh) + + // Receive the expected number of AddrInfos. + for len(peers) < cap(peers) { + select { + case ai := <-peersCh: + if ai.ID == id { + continue // skip over ourselves. + } + peers = append(peers, ai) + case err := <-sub.Done(): + panic(err) + } + } + + return host, peers, bwcounter +} diff --git a/testplans/graphsync/manifest.toml b/testplans/graphsync/manifest.toml new file mode 100644 index 00000000000..87803474dc2 --- /dev/null +++ b/testplans/graphsync/manifest.toml @@ -0,0 +1,24 @@ +name = "graphsync" + +[builders] +"docker:go" = { enabled = true, enable_go_build_cache = true } +"exec:go" = { enabled = true } + +[runners] +"local:docker" = { enabled = true } +"local:exec" = { enabled = true } +"cluster:k8s" = { enabled = true } + +[global.build_config] + enable_go_build_cache = true + +[[testcases]] +name = "stress" +instances = { min = 2, max = 10000, default = 2 } + + [testcases.params] + size = { type = "int", desc = "size of file to transfer, in human-friendly form", default = "1MiB" } + secure_channel = { type = "enum", desc = "secure channel used", values = ["secio", "noise", "tls"], default = "noise" } + latencies = { type = "string", desc = "latencies to try with; comma-separated list of durations", default = '["100ms", "200ms", "300ms"]' } + bandwidths = { type = "string", desc = "bandwidths (egress bytes/s) to try with; comma-separated list of humanized sizes", default = '["10M", "1M", "512kb"]' } + concurrency = { type = "int", desc = "concurrency level", default = "1" } \ No newline at end of file diff --git 
a/testplans/lotus-soup/.gitignore b/testplans/lotus-soup/.gitignore new file mode 100644 index 00000000000..001a5837b83 --- /dev/null +++ b/testplans/lotus-soup/.gitignore @@ -0,0 +1 @@ +lotus-soup diff --git a/testplans/lotus-soup/_compositions/baseline-docker-1-1-with-restarts.toml b/testplans/lotus-soup/_compositions/baseline-docker-1-1-with-restarts.toml new file mode 100644 index 00000000000..28865a03bb8 --- /dev/null +++ b/testplans/lotus-soup/_compositions/baseline-docker-1-1-with-restarts.toml @@ -0,0 +1,59 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 3 + builder = "docker:go" + runner = "local:docker" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "1" + miners = "1" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "3" + random_beacon_type = "mock" + mining_mode = "natural" + bandwidth = "4MB" + + +[[groups]] + id = "bootstrapper" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" + # Bounce the connection during push and pull requests + bounce_conn_data_transfers = "true" diff --git a/testplans/lotus-soup/_compositions/baseline-docker-1-1.toml b/testplans/lotus-soup/_compositions/baseline-docker-1-1.toml new file mode 100644 index 00000000000..25a31f9ec47 --- /dev/null +++ b/testplans/lotus-soup/_compositions/baseline-docker-1-1.toml @@ -0,0 +1,55 @@ 
+[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 3 + builder = "docker:go" + runner = "local:docker" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "1" + miners = "1" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "3" + random_beacon_type = "mock" + mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/_compositions/baseline-docker-5-1.toml b/testplans/lotus-soup/_compositions/baseline-docker-5-1.toml new file mode 100644 index 00000000000..001a02434c5 --- /dev/null +++ b/testplans/lotus-soup/_compositions/baseline-docker-5-1.toml @@ -0,0 +1,55 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 7 + builder = "docker:go" + runner = "local:docker" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "5" + miners = "1" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "5" + random_beacon_type = "mock" + mining_mode = "natural" + 
+[[groups]] + id = "bootstrapper" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.instances] + count = 5 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/_compositions/baseline-k8s-1-1-versions.toml b/testplans/lotus-soup/_compositions/baseline-k8s-1-1-versions.toml new file mode 100644 index 00000000000..051d8e0c643 --- /dev/null +++ b/testplans/lotus-soup/_compositions/baseline-k8s-1-1-versions.toml @@ -0,0 +1,74 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 3 + builder = "docker:go" + runner = "cluster:k8s" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] + clients = "1" + miners = "1" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "10" + random_beacon_type = "mock" + mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.resources] + memory = "512Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.resources] + memory = "4096Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + [groups.build] + dependencies = [ + { module = "github.com/filecoin-project/lotus", version = "{{.Env.LOTUS_VERSION_MINER}}"}, + ] 
+[[groups]] + id = "clients" + [groups.resources] + memory = "1024Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" + [groups.build] + dependencies = [ + { module = "github.com/filecoin-project/lotus", version = "{{.Env.LOTUS_VERSION_CLIENT}}"}, + ] diff --git a/testplans/lotus-soup/_compositions/baseline-k8s-1-1.toml b/testplans/lotus-soup/_compositions/baseline-k8s-1-1.toml new file mode 100644 index 00000000000..754d1279774 --- /dev/null +++ b/testplans/lotus-soup/_compositions/baseline-k8s-1-1.toml @@ -0,0 +1,67 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 3 + builder = "docker:go" + runner = "cluster:k8s" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] + clients = "1" + miners = "1" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "10" + random_beacon_type = "mock" + mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.resources] + memory = "512Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.resources] + memory = "4096Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.resources] + memory = "1024Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/_compositions/baseline-k8s-10-3.toml 
b/testplans/lotus-soup/_compositions/baseline-k8s-10-3.toml new file mode 100644 index 00000000000..4afd3375eb0 --- /dev/null +++ b/testplans/lotus-soup/_compositions/baseline-k8s-10-3.toml @@ -0,0 +1,80 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 14 + builder = "docker:go" + runner = "cluster:k8s" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] + clients = "10" + miners = "3" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + random_beacon_type = "mock" + mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.resources] + memory = "512Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners-weak" + [groups.resources] + memory = "8192Mi" + cpu = "1000m" + [groups.instances] + count = 2 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + sectors = "8" + +[[groups]] + id = "miners-strong" + [groups.resources] + memory = "8192Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + sectors = "24" + +[[groups]] + id = "clients" + [groups.resources] + memory = "1024Mi" + cpu = "1000m" + [groups.instances] + count = 10 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/_compositions/baseline-k8s-2-1.toml b/testplans/lotus-soup/_compositions/baseline-k8s-2-1.toml new file mode 100644 index 00000000000..6258adf400e --- /dev/null +++ 
b/testplans/lotus-soup/_compositions/baseline-k8s-2-1.toml @@ -0,0 +1,67 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 4 + builder = "docker:go" + runner = "cluster:k8s" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] + clients = "2" + miners = "1" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "10" + random_beacon_type = "mock" + mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.resources] + memory = "512Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.resources] + memory = "4096Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.resources] + memory = "1024Mi" + cpu = "1000m" + [groups.instances] + count = 2 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml b/testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml new file mode 100644 index 00000000000..dc6519656d6 --- /dev/null +++ b/testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml @@ -0,0 +1,67 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 5 + builder = "docker:go" + runner = "cluster:k8s" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + 
+[global.build_config] + push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] + clients = "3" + miners = "1" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "10" + random_beacon_type = "mock" + mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.resources] + memory = "512Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.resources] + memory = "8192Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.resources] + memory = "1024Mi" + cpu = "1000m" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/_compositions/baseline-k8s-3-2.toml b/testplans/lotus-soup/_compositions/baseline-k8s-3-2.toml new file mode 100644 index 00000000000..a1c130c3132 --- /dev/null +++ b/testplans/lotus-soup/_compositions/baseline-k8s-3-2.toml @@ -0,0 +1,67 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 6 + builder = "docker:go" + runner = "cluster:k8s" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] + clients = "3" + miners = "2" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "4" + random_beacon_type = "mock" + mining_mode = "natural" + +[[groups]] 
+ id = "bootstrapper" + [groups.resources] + memory = "512Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.resources] + memory = "4096Mi" + cpu = "1000m" + [groups.instances] + count = 2 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.resources] + memory = "1024Mi" + cpu = "1000m" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/_compositions/baseline-k8s-3-3.toml b/testplans/lotus-soup/_compositions/baseline-k8s-3-3.toml new file mode 100644 index 00000000000..d8ab629db3c --- /dev/null +++ b/testplans/lotus-soup/_compositions/baseline-k8s-3-3.toml @@ -0,0 +1,67 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 7 + builder = "docker:go" + runner = "cluster:k8s" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] + clients = "3" + miners = "3" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "10" + random_beacon_type = "mock" + mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.resources] + memory = "512Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.resources] + memory = "4096Mi" + cpu = "1000m" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + 
+[[groups]] + id = "clients" + [groups.resources] + memory = "1024Mi" + cpu = "1000m" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/_compositions/baseline.toml b/testplans/lotus-soup/_compositions/baseline.toml new file mode 100644 index 00000000000..be035b6ebbc --- /dev/null +++ b/testplans/lotus-soup/_compositions/baseline.toml @@ -0,0 +1,55 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 6 + builder = "exec:go" + runner = "local:exec" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "3" + miners = "2" + genesis_timestamp_offset = "0" + balance = "20000000.5" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "10" + random_beacon_type = "mock" + mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.instances] + count = 2 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/_compositions/deals-stress-concurrent-natural-k8s.toml b/testplans/lotus-soup/_compositions/deals-stress-concurrent-natural-k8s.toml new file mode 100644 index 00000000000..18f14407f43 --- /dev/null +++ b/testplans/lotus-soup/_compositions/deals-stress-concurrent-natural-k8s.toml @@ -0,0 +1,69 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-stress" + total_instances = 6 + builder = 
"docker:go" + runner = "cluster:k8s" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] + clients = "3" + miners = "2" + genesis_timestamp_offset = "0" + balance = "90000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "10" + random_beacon_type = "mock" + mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.resources] + memory = "512Mi" + cpu = "100m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.resources] + memory = "14000Mi" + cpu = "1000m" + [groups.instances] + count = 2 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.resources] + memory = "2048Mi" + cpu = "100m" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" + deals = "3" + deal_mode = "concurrent" diff --git a/testplans/lotus-soup/_compositions/deals-stress-concurrent-natural.toml b/testplans/lotus-soup/_compositions/deals-stress-concurrent-natural.toml new file mode 100644 index 00000000000..cc3ab45fc70 --- /dev/null +++ b/testplans/lotus-soup/_compositions/deals-stress-concurrent-natural.toml @@ -0,0 +1,57 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-stress" + total_instances = 6 + builder = "docker:go" + runner = "local:docker" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "3" + miners = "2" + 
genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "1000" + random_beacon_type = "mock" + +[[groups]] + id = "bootstrapper" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.instances] + count = 2 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + mining_mode = "natural" + +[[groups]] + id = "clients" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" + deals = "300" + deal_mode = "concurrent" diff --git a/testplans/lotus-soup/_compositions/deals-stress-concurrent.toml b/testplans/lotus-soup/_compositions/deals-stress-concurrent.toml new file mode 100644 index 00000000000..bec530c42ce --- /dev/null +++ b/testplans/lotus-soup/_compositions/deals-stress-concurrent.toml @@ -0,0 +1,56 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-stress" + total_instances = 6 + builder = "docker:go" + runner = "local:docker" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "3" + miners = "2" + genesis_timestamp_offset = "100000" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "1000" + random_beacon_type = "mock" + +[[groups]] + id = "bootstrapper" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.instances] + count = 2 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + 
[groups.run.test_params] + role = "client" + deals = "300" + deal_mode = "concurrent" diff --git a/testplans/lotus-soup/_compositions/deals-stress-serial-natural.toml b/testplans/lotus-soup/_compositions/deals-stress-serial-natural.toml new file mode 100644 index 00000000000..c5611a3e838 --- /dev/null +++ b/testplans/lotus-soup/_compositions/deals-stress-serial-natural.toml @@ -0,0 +1,57 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-stress" + total_instances = 6 + builder = "docker:go" + runner = "local:docker" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "3" + miners = "2" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "1000" + random_beacon_type = "mock" + +[[groups]] + id = "bootstrapper" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.instances] + count = 2 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + mining_mode = "natural" + +[[groups]] + id = "clients" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" + deals = "300" + deal_mode = "serial" diff --git a/testplans/lotus-soup/_compositions/deals-stress-serial.toml b/testplans/lotus-soup/_compositions/deals-stress-serial.toml new file mode 100644 index 00000000000..5317fecb96d --- /dev/null +++ b/testplans/lotus-soup/_compositions/deals-stress-serial.toml @@ -0,0 +1,56 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-stress" + total_instances = 6 + builder = "docker:go" + runner = "local:docker" + +[global.build] + 
selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "3" + miners = "2" + genesis_timestamp_offset = "100000" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "1000" + random_beacon_type = "mock" + +[[groups]] + id = "bootstrapper" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.instances] + count = 2 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" + deals = "300" + deal_mode = "serial" diff --git a/testplans/lotus-soup/_compositions/drand-halt.toml b/testplans/lotus-soup/_compositions/drand-halt.toml new file mode 100644 index 00000000000..ce2610dc1c5 --- /dev/null +++ b/testplans/lotus-soup/_compositions/drand-halt.toml @@ -0,0 +1,79 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "drand-halting" + total_instances = 6 + builder = "docker:go" + runner = "local:docker" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "1" + miners = "1" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "10" + random_beacon_type = "local-drand" + genesis_timestamp_offset = "0" +# mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.resources] + memory = "120Mi" + cpu = "10m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role 
= "bootstrapper" + + +[[groups]] + id = "miners" + [groups.resources] + memory = "120Mi" + cpu = "10m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + + +[[groups]] + id = "clients" + [groups.resources] + memory = "120Mi" + cpu = "10m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" + + +[[groups]] + id = "drand" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "drand" + drand_period = "1s" + drand_log_level = "none" + suspend_events = "wait 20s -> halt -> wait 1m -> resume -> wait 2s -> halt -> wait 1m -> resume" diff --git a/testplans/lotus-soup/_compositions/drand-outage-k8s.toml b/testplans/lotus-soup/_compositions/drand-outage-k8s.toml new file mode 100644 index 00000000000..0588adb0b8b --- /dev/null +++ b/testplans/lotus-soup/_compositions/drand-outage-k8s.toml @@ -0,0 +1,71 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "drand-outage" + total_instances = 7 + builder = "docker:go" + runner = "cluster:k8s" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] + clients = "0" + miners = "3" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "100" + random_beacon_type = "local-drand" + genesis_timestamp_offset = "0" + mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.resources] + memory = "1024Mi" + cpu = "10m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.resources] + memory = "1024Mi" + cpu = "10m" + 
[groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "drand" + [groups.resources] + memory = "1024Mi" + cpu = "10m" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "drand" + drand_period = "30s" + drand_catchup_period = "10s" + drand_log_level = "debug" + suspend_events = "wait 5m -> halt -> wait 45m -> resume -> wait 15m -> halt -> wait 5m -> resume" diff --git a/testplans/lotus-soup/_compositions/drand-outage-local.toml b/testplans/lotus-soup/_compositions/drand-outage-local.toml new file mode 100644 index 00000000000..dad81a20344 --- /dev/null +++ b/testplans/lotus-soup/_compositions/drand-outage-local.toml @@ -0,0 +1,59 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "drand-outage" + total_instances = 7 + builder = "docker:go" + runner = "local:docker" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "0" + miners = "3" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "10" + random_beacon_type = "local-drand" + genesis_timestamp_offset = "0" + mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "drand" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "drand" + drand_period = "30s" + drand_catchup_period = "10s" + drand_log_level = "debug" + suspend_events = "wait 3m -> halt -> wait 3m -> resume -> wait 3m 
-> halt -> wait 3m -> resume" diff --git a/testplans/lotus-soup/_compositions/fast-k8s-3-1.toml b/testplans/lotus-soup/_compositions/fast-k8s-3-1.toml new file mode 100644 index 00000000000..d77bfbc3e82 --- /dev/null +++ b/testplans/lotus-soup/_compositions/fast-k8s-3-1.toml @@ -0,0 +1,68 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 5 + builder = "docker:go" + runner = "cluster:k8s" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] + clients = "3" + miners = "1" + fast_retrieval = "true" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "10" + random_beacon_type = "mock" + mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.resources] + memory = "512Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.resources] + memory = "4096Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.resources] + memory = "1024Mi" + cpu = "1000m" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/_compositions/local-drand.toml b/testplans/lotus-soup/_compositions/local-drand.toml new file mode 100644 index 00000000000..e942ed072cd --- /dev/null +++ b/testplans/lotus-soup/_compositions/local-drand.toml @@ -0,0 +1,72 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = 
"deals-e2e" + total_instances = 6 + builder = "docker:go" + runner = "local:docker" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "1" + miners = "1" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "10" + random_beacon_type = "local-drand" + genesis_timestamp_offset = "0" + +[[groups]] + id = "bootstrapper" + [groups.resources] + memory = "120Mi" + cpu = "10m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.resources] + memory = "120Mi" + cpu = "10m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.resources] + memory = "120Mi" + cpu = "10m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" + +[[groups]] + id = "drand" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "drand" diff --git a/testplans/lotus-soup/_compositions/natural.toml b/testplans/lotus-soup/_compositions/natural.toml new file mode 100644 index 00000000000..bfef6b844df --- /dev/null +++ b/testplans/lotus-soup/_compositions/natural.toml @@ -0,0 +1,55 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 6 + builder = "docker:go" + runner = "local:docker" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "3" + miners = "2" + genesis_timestamp_offset = "100000" + balance 
= "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "10" + random_beacon_type = "mock" + +[[groups]] + id = "bootstrapper" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.instances] + count = 2 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + mining_mode = "natural" + +[[groups]] + id = "clients" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/_compositions/net-chaos/latency.toml b/testplans/lotus-soup/_compositions/net-chaos/latency.toml new file mode 100644 index 00000000000..fb5f5f54483 --- /dev/null +++ b/testplans/lotus-soup/_compositions/net-chaos/latency.toml @@ -0,0 +1,57 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 7 + builder = "docker:go" + runner = "local:docker" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "5" + miners = "1" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "5" + random_beacon_type = "mock" + mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + latency_range = '["20ms", "300ms"]' + +[[groups]] + id = "clients" + [groups.instances] + count = 5 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" + 
latency_range = '["100ms", "1500ms"]' diff --git a/testplans/lotus-soup/_compositions/paych-stress-k8s.toml b/testplans/lotus-soup/_compositions/paych-stress-k8s.toml new file mode 100644 index 00000000000..b5d7f9bd4a2 --- /dev/null +++ b/testplans/lotus-soup/_compositions/paych-stress-k8s.toml @@ -0,0 +1,62 @@ +[metadata] + name = "lotus-soup" + author = "raulk" + +[global] + plan = "lotus-soup" + case = "paych-stress" + total_instances = 4 # 2 clients + 1 miners + 1 bootstrapper + builder = "docker:go" + runner = "cluster:k8s" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] + clients = "2" + miners = "1" + genesis_timestamp_offset = "0" + balance = "100" ## be careful, this is in FIL. + sectors = "10" + random_beacon_type = "mock" + mining_mode = "natural" + # number of lanes to send vouchers on + lane_count = "8" + # number of vouchers on each lane + vouchers_per_lane = "3" + # amount to increase voucher by each time (per lane) + increments = "3" ## in FIL + +[[groups]] + id = "bootstrapper" + instances = { count = 1 } + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + instances = { count = 1 } + [groups.run.test_params] + role = "miner" + [groups.resources] + memory = "2048Mi" + cpu = "100m" + +[[groups]] + id = "clients" + # the first client will be on the receiving end; all others will be on the sending end. 
+ instances = { count = 2 } + [groups.run.test_params] + role = "client" + [groups.resources] + memory = "1024Mi" + cpu = "100m" diff --git a/testplans/lotus-soup/_compositions/paych-stress.toml b/testplans/lotus-soup/_compositions/paych-stress.toml new file mode 100644 index 00000000000..b42721a7b2e --- /dev/null +++ b/testplans/lotus-soup/_compositions/paych-stress.toml @@ -0,0 +1,53 @@ +[metadata] + name = "lotus-soup" + author = "raulk" + +[global] + plan = "lotus-soup" + case = "paych-stress" + total_instances = 5 # 2 clients + 2 miners + 1 bootstrapper + builder = "exec:go" + runner = "local:exec" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "2" + miners = "2" + genesis_timestamp_offset = "0" + balance = "100" ## be careful, this is in FIL. + sectors = "10" + random_beacon_type = "mock" + mining_mode = "natural" + # number of lanes to send vouchers on + lane_count = "8" + # number of vouchers on each lane + vouchers_per_lane = "3" + # amount to increase voucher by each time (per lane) + increments = "3" ## in FIL + +[[groups]] + id = "bootstrapper" + instances = { count = 1 } + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + instances = { count = 2 } + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + # the first client will be on the receiving end; all others will be on the sending end. 
+ instances = { count = 2 } + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/_compositions/pubsub-tracer.toml b/testplans/lotus-soup/_compositions/pubsub-tracer.toml new file mode 100644 index 00000000000..db9f111d168 --- /dev/null +++ b/testplans/lotus-soup/_compositions/pubsub-tracer.toml @@ -0,0 +1,64 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 7 + builder = "docker:go" + runner = "local:docker" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "3" + miners = "2" + genesis_timestamp_offset = "100000" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "10" + random_beacon_type = "mock" + enable_pubsub_tracer = "true" + +[[groups]] + id = "pubsub-tracer" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "pubsub-tracer" + +[[groups]] + id = "bootstrapper" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.instances] + count = 2 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/_compositions/recovery-exec.toml b/testplans/lotus-soup/_compositions/recovery-exec.toml new file mode 100644 index 00000000000..8e9ef9d6e65 --- /dev/null +++ b/testplans/lotus-soup/_compositions/recovery-exec.toml @@ -0,0 +1,80 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "recovery-failed-windowed-post" + total_instances = 7 + 
builder = "exec:go" + runner = "local:exec" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] + clients = "3" + miners = "3" + genesis_timestamp_offset = "0" + balance = "20000000" + +[[groups]] + id = "bootstrapper" + [groups.resources] + memory = "512Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.resources] + memory = "4096Mi" + cpu = "1000m" + [groups.instances] + count = 2 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + sectors = "10" + mining_mode = "natural" + +[[groups]] + id = "miners-biserk" + [groups.resources] + memory = "4096Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner-biserk" + sectors = "5" + mining_mode = "natural" + +[[groups]] + id = "clients" + [groups.resources] + memory = "1024Mi" + cpu = "1000m" + [groups.instances] + count = 3 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/_compositions/recovery-k8s.toml b/testplans/lotus-soup/_compositions/recovery-k8s.toml new file mode 100644 index 00000000000..5b7037e01f7 --- /dev/null +++ b/testplans/lotus-soup/_compositions/recovery-k8s.toml @@ -0,0 +1,95 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "recovery-failed-windowed-post" + total_instances = 9 + builder = "docker:go" + runner = "cluster:k8s" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + keep_service=true + +[global.build_config] + 
push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] + clients = "4" + miners = "4" + genesis_timestamp_offset = "0" + balance = "20000000" + +[[groups]] + id = "bootstrapper" + [groups.resources] + memory = "512Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.resources] + memory = "4096Mi" + cpu = "1000m" + [groups.instances] + count = 2 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + sectors = "10" + mining_mode = "natural" + +[[groups]] + id = "miners-full-slash" + [groups.resources] + memory = "4096Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner-full-slash" + sectors = "10" + mining_mode = "natural" + +[[groups]] + id = "miners-partial-slash" + [groups.resources] + memory = "4096Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner-partial-slash" + sectors = "10" + mining_mode = "natural" + +[[groups]] + id = "clients" + [groups.resources] + memory = "1024Mi" + cpu = "1000m" + [groups.instances] + count = 4 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" diff --git a/testplans/lotus-soup/deals_e2e.go b/testplans/lotus-soup/deals_e2e.go new file mode 100644 index 00000000000..6737bdae226 --- /dev/null +++ b/testplans/lotus-soup/deals_e2e.go @@ -0,0 +1,241 @@ +package main + +import ( + "context" + "fmt" + "io/ioutil" + mbig "math/big" + "math/rand" + "os" + "time" + + "github.com/libp2p/go-libp2p-core/peer" + "github.com/testground/sdk-go/sync" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" 
+ "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/testplans/lotus-soup/testkit" +) + +// This is the baseline test; Filecoin 101. +// +// A network with a bootstrapper, a number of miners, and a number of clients/full nodes +// is constructed and connected through the bootstrapper. +// Some funds are allocated to each node and a number of sectors are presealed in the genesis block. +// +// The test plan: +// One or more clients store content to one or more miners, testing storage deals. +// The plan ensures that the storage deals hit the blockchain and measure the time it took. +// Verification: one or more clients retrieve and verify the hashes of stored content. +// The plan ensures that all (previously) published content can be correctly retrieved +// and measures the time it took. +// +// Preparation of the genesis block: this is the responsibility of the bootstrapper. +// In order to compute the genesis block, we need to collect identities and presealed +// sectors from each node. +// Then we create a genesis block that allocates some funds to each node and collects +// the presealed sectors. +func dealsE2E(t *testkit.TestEnvironment) error { + t.RecordMessage("running node with role '%s'", t.Role) + + // Dispatch/forward non-client roles to defaults. 
+ if t.Role != "client" { + return testkit.HandleDefaultRole(t) + } + + // This is a client role + fastRetrieval := t.BooleanParam("fast_retrieval") + t.RecordMessage("running client, with fast retrieval set to: %v", fastRetrieval) + + cl, err := testkit.PrepareClient(t) + if err != nil { + return err + } + + ctx := context.Background() + client := cl.FullApi + + // select a random miner + minerAddr := cl.MinerAddrs[rand.Intn(len(cl.MinerAddrs))] + if err := client.NetConnect(ctx, minerAddr.MinerNetAddrs); err != nil { + return err + } + t.D().Counter(fmt.Sprintf("send-data-to,miner=%s", minerAddr.MinerActorAddr)).Inc(1) + + t.RecordMessage("selected %s as the miner", minerAddr.MinerActorAddr) + + if fastRetrieval { + err = initPaymentChannel(t, ctx, cl, minerAddr) + if err != nil { + return err + } + } + + // give some time to the miner, otherwise, we get errors like: + // deal errored deal failed: (State=26) error calling node: publishing deal: GasEstimateMessageGas + // error: estimating gas used: message execution failed: exit 19, reason: failed to lock balance: failed to lock client funds: not enough balance to lock for addr t0102: escrow balance 0 < locked 0 + required 640297000 (RetCode=19) + time.Sleep(40 * time.Second) + + time.Sleep(time.Duration(t.GlobalSeq) * 5 * time.Second) + + // generate 5000000 bytes of random data + data := make([]byte, 5000000) + rand.New(rand.NewSource(time.Now().UnixNano())).Read(data) + + file, err := ioutil.TempFile("/tmp", "data") + if err != nil { + return err + } + defer os.Remove(file.Name()) + + _, err = file.Write(data) + if err != nil { + return err + } + + fcid, err := client.ClientImport(ctx, api.FileRef{Path: file.Name(), IsCAR: false}) + if err != nil { + return err + } + t.RecordMessage("file cid: %s", fcid) + + // Check if we should bounce the connection during data transfers + if t.BooleanParam("bounce_conn_data_transfers") { + t.RecordMessage("Will bounce connection during push and pull data-transfers") + err = 
bounceConnInTransfers(ctx, t, client, minerAddr.MinerNetAddrs.ID) + if err != nil { + return err + } + } + + // start deal + t1 := time.Now() + deal := testkit.StartDeal(ctx, minerAddr.MinerActorAddr, client, fcid.Root, fastRetrieval) + t.RecordMessage("started deal: %s", deal) + + // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this + time.Sleep(2 * time.Second) + + t.RecordMessage("waiting for deal to be sealed") + testkit.WaitDealSealed(t, ctx, client, deal) + t.D().ResettingHistogram("deal.sealed").Update(int64(time.Since(t1))) + + // wait for all client deals to be sealed before trying to retrieve + t.SyncClient.MustSignalAndWait(ctx, sync.State("done-sealing"), t.IntParam("clients")) + + carExport := true + + t.RecordMessage("trying to retrieve %s", fcid) + t1 = time.Now() + _ = testkit.RetrieveData(t, ctx, client, fcid.Root, nil, carExport, data) + t.D().ResettingHistogram("deal.retrieved").Update(int64(time.Since(t1))) + + t.SyncClient.MustSignalEntry(ctx, testkit.StateStopMining) + + time.Sleep(10 * time.Second) // wait for metrics to be emitted + + // TODO broadcast published content CIDs to other clients + // TODO select a random piece of content published by some other client and retrieve it + + t.SyncClient.MustSignalAndWait(ctx, testkit.StateDone, t.TestInstanceCount) + return nil +} + +func bounceConnInTransfers(ctx context.Context, t *testkit.TestEnvironment, client api.FullNode, minerPeerID peer.ID) error { + storageConnBroken := false + retrievalConnBroken := false + upds, err := client.ClientDataTransferUpdates(ctx) + if err != nil { + return err + } + + go func() { + for upd := range upds { + dir := "push" + if !upd.IsSender { + dir = "pull" + } + + t.RecordMessage("%s data transfer status: %s, transferred: %d", dir, datatransfer.Statuses[upd.Status], upd.Transferred) + + // Bounce the connection after the first block is sent for the storage deal + if upd.IsSender && 
upd.Transferred > 0 && !storageConnBroken { + storageConnBroken = true + bounceConnection(ctx, t, client, minerPeerID) + } + + // Bounce the connection after the first block is received for the retrieval deal + if !upd.IsSender && upd.Transferred > 0 && !retrievalConnBroken { + retrievalConnBroken = true + bounceConnection(ctx, t, client, minerPeerID) + } + } + }() + + return nil +} + +func bounceConnection(ctx context.Context, t *testkit.TestEnvironment, client api.FullNode, minerPeerID peer.ID) { + t.RecordMessage("disconnecting peer %s", minerPeerID) + client.NetBlockAdd(ctx, api.NetBlockList{ + Peers: []peer.ID{minerPeerID}, + }) + + go func() { + time.Sleep(3 * time.Second) + t.RecordMessage("reconnecting to peer %s", minerPeerID) + client.NetBlockRemove(ctx, api.NetBlockList{ + Peers: []peer.ID{minerPeerID}, + }) + }() +} + +// filToAttoFil converts a fractional filecoin value into AttoFIL, rounding if necessary +func filToAttoFil(f float64) big.Int { + a := mbig.NewFloat(f) + a.Mul(a, mbig.NewFloat(float64(build.FilecoinPrecision))) + i, _ := a.Int(nil) + return big.Int{Int: i} +} + +func initPaymentChannel(t *testkit.TestEnvironment, ctx context.Context, cl *testkit.LotusClient, minerAddr testkit.MinerAddressesMsg) error { + recv := minerAddr + balance := filToAttoFil(10) + t.RecordMessage("my balance: %d", balance) + t.RecordMessage("creating payment channel; from=%s, to=%s, funds=%d", cl.Wallet.Address, recv.WalletAddr, balance) + + channel, err := cl.FullApi.PaychGet(ctx, cl.Wallet.Address, recv.WalletAddr, balance) + if err != nil { + return fmt.Errorf("failed to create payment channel: %w", err) + } + + if addr := channel.Channel; addr != address.Undef { + return fmt.Errorf("expected an Undef channel address, got: %s", addr) + } + + t.RecordMessage("payment channel created; msg_cid=%s", channel.WaitSentinel) + t.RecordMessage("waiting for payment channel message to appear on chain") + + // wait for the channel creation message to appear on chain. 
+ _, err = cl.FullApi.StateWaitMsg(ctx, channel.WaitSentinel, 2, api.LookbackNoLimit, true) + if err != nil { + return fmt.Errorf("failed while waiting for payment channel creation msg to appear on chain: %w", err) + } + + // need to wait so that the channel is tracked. + // the full API waits for build.MessageConfidence (=1 in tests) before tracking the channel. + // we wait for 2 confirmations, so we have the assurance the channel is tracked. + + t.RecordMessage("reloading paych; now it should have an address") + channel, err = cl.FullApi.PaychGet(ctx, cl.Wallet.Address, recv.WalletAddr, big.Zero()) + if err != nil { + return fmt.Errorf("failed to reload payment channel: %w", err) + } + + t.RecordMessage("channel address: %s", channel.Channel) + + return nil +} diff --git a/testplans/lotus-soup/deals_stress.go b/testplans/lotus-soup/deals_stress.go new file mode 100644 index 00000000000..68347ce33b6 --- /dev/null +++ b/testplans/lotus-soup/deals_stress.go @@ -0,0 +1,147 @@ +package main + +import ( + "context" + "fmt" + "io/ioutil" + "math/rand" + "os" + "sync" + "time" + + "github.com/filecoin-project/lotus/api" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/testplans/lotus-soup/testkit" +) + +func dealsStress(t *testkit.TestEnvironment) error { + // Dispatch/forward non-client roles to defaults. 
+ if t.Role != "client" { + return testkit.HandleDefaultRole(t) + } + + t.RecordMessage("running client") + + cl, err := testkit.PrepareClient(t) + if err != nil { + return err + } + + ctx := context.Background() + client := cl.FullApi + + // select a random miner + minerAddr := cl.MinerAddrs[rand.Intn(len(cl.MinerAddrs))] + if err := client.NetConnect(ctx, minerAddr.MinerNetAddrs); err != nil { + return err + } + + t.RecordMessage("selected %s as the miner", minerAddr.MinerActorAddr) + + time.Sleep(12 * time.Second) + + // prepare a number of concurrent data points + deals := t.IntParam("deals") + data := make([][]byte, 0, deals) + files := make([]*os.File, 0, deals) + cids := make([]cid.Cid, 0, deals) + rng := rand.NewSource(time.Now().UnixNano()) + + for i := 0; i < deals; i++ { + dealData := make([]byte, 1600) + rand.New(rng).Read(dealData) + + dealFile, err := ioutil.TempFile("/tmp", "data") + if err != nil { + return err + } + defer os.Remove(dealFile.Name()) + + _, err = dealFile.Write(dealData) + if err != nil { + return err + } + + dealCid, err := client.ClientImport(ctx, api.FileRef{Path: dealFile.Name(), IsCAR: false}) + if err != nil { + return err + } + + t.RecordMessage("deal %d file cid: %s", i, dealCid) + + data = append(data, dealData) + files = append(files, dealFile) + cids = append(cids, dealCid.Root) + } + + concurrentDeals := true + if t.StringParam("deal_mode") == "serial" { + concurrentDeals = false + } + + // this to avoid failure to get block + time.Sleep(2 * time.Second) + + t.RecordMessage("starting storage deals") + if concurrentDeals { + + var wg1 sync.WaitGroup + for i := 0; i < deals; i++ { + wg1.Add(1) + go func(i int) { + defer wg1.Done() + t1 := time.Now() + deal := testkit.StartDeal(ctx, minerAddr.MinerActorAddr, client, cids[i], false) + t.RecordMessage("started storage deal %d -> %s", i, deal) + time.Sleep(2 * time.Second) + t.RecordMessage("waiting for deal %d to be sealed", i) + testkit.WaitDealSealed(t, ctx, client, deal) + 
t.D().ResettingHistogram(fmt.Sprintf("deal.sealed,miner=%s", minerAddr.MinerActorAddr)).Update(int64(time.Since(t1))) + }(i) + } + t.RecordMessage("waiting for all deals to be sealed") + wg1.Wait() + t.RecordMessage("all deals sealed; starting retrieval") + + var wg2 sync.WaitGroup + for i := 0; i < deals; i++ { + wg2.Add(1) + go func(i int) { + defer wg2.Done() + t.RecordMessage("retrieving data for deal %d", i) + t1 := time.Now() + _ = testkit.RetrieveData(t, ctx, client, cids[i], nil, true, data[i]) + + t.RecordMessage("retrieved data for deal %d", i) + t.D().ResettingHistogram("deal.retrieved").Update(int64(time.Since(t1))) + }(i) + } + t.RecordMessage("waiting for all retrieval deals to complete") + wg2.Wait() + t.RecordMessage("all retrieval deals successful") + + } else { + + for i := 0; i < deals; i++ { + deal := testkit.StartDeal(ctx, minerAddr.MinerActorAddr, client, cids[i], false) + t.RecordMessage("started storage deal %d -> %s", i, deal) + time.Sleep(2 * time.Second) + t.RecordMessage("waiting for deal %d to be sealed", i) + testkit.WaitDealSealed(t, ctx, client, deal) + } + + for i := 0; i < deals; i++ { + t.RecordMessage("retrieving data for deal %d", i) + _ = testkit.RetrieveData(t, ctx, client, cids[i], nil, true, data[i]) + t.RecordMessage("retrieved data for deal %d", i) + } + } + + t.SyncClient.MustSignalEntry(ctx, testkit.StateStopMining) + t.SyncClient.MustSignalAndWait(ctx, testkit.StateDone, t.TestInstanceCount) + + time.Sleep(15 * time.Second) // wait for metrics to be emitted + + return nil +} diff --git a/testplans/lotus-soup/env-ci.toml b/testplans/lotus-soup/env-ci.toml new file mode 100644 index 00000000000..bd651c465ea --- /dev/null +++ b/testplans/lotus-soup/env-ci.toml @@ -0,0 +1 @@ +[client] diff --git a/testplans/lotus-soup/go.mod b/testplans/lotus-soup/go.mod new file mode 100644 index 00000000000..55da298db37 --- /dev/null +++ b/testplans/lotus-soup/go.mod @@ -0,0 +1,45 @@ +module 
github.com/filecoin-project/lotus/testplans/lotus-soup + +go 1.16 + +require ( + contrib.go.opencensus.io/exporter/prometheus v0.1.0 + github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe + github.com/davecgh/go-spew v1.1.1 + github.com/drand/drand v1.2.1 + github.com/filecoin-project/go-address v0.0.5 + github.com/filecoin-project/go-data-transfer v1.6.0 + github.com/filecoin-project/go-fil-markets v1.5.0 + github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec + github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 + github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b + github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4 + github.com/filecoin-project/specs-actors v0.9.14 + github.com/google/uuid v1.1.2 + github.com/gorilla/mux v1.7.4 + github.com/hashicorp/go-multierror v1.1.0 + github.com/influxdata/influxdb v1.8.3 // indirect + github.com/ipfs/go-cid v0.0.7 + github.com/ipfs/go-datastore v0.4.5 + github.com/ipfs/go-graphsync v0.6.2-0.20210428121800-88edb5462e17 // indirect + github.com/ipfs/go-ipfs-files v0.0.8 + github.com/ipfs/go-ipld-format v0.2.0 + github.com/ipfs/go-log/v2 v2.1.3 + github.com/ipfs/go-merkledag v0.3.2 + github.com/ipfs/go-unixfs v0.2.4 + github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d + github.com/kpacha/opencensus-influxdb v0.0.0-20181102202715-663e2683a27c + github.com/libp2p/go-libp2p v0.14.2 + github.com/libp2p/go-libp2p-core v0.8.5 + github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6 + github.com/multiformats/go-multiaddr v0.3.1 + github.com/multiformats/go-multiaddr-net v0.2.0 + github.com/testground/sdk-go v0.2.6 + go.opencensus.io v0.23.0 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c +) + +// This will work in all build modes: docker:go, exec:go, and local go build. 
+// On docker:go and exec:go, it maps to /extra/filecoin-ffi, as it's picked up +// as an "extra source" in the manifest. +replace github.com/filecoin-project/filecoin-ffi => ../../extern/filecoin-ffi diff --git a/testplans/lotus-soup/go.sum b/testplans/lotus-soup/go.sum new file mode 100644 index 00000000000..9969c51824d --- /dev/null +++ b/testplans/lotus-soup/go.sum @@ -0,0 +1,2173 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod 
h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +contrib.go.opencensus.io/exporter/jaeger v0.1.0/go.mod h1:VYianECmuFPwU37O699Vc1GOcy+y8kOsfaxHRImmjbA= +contrib.go.opencensus.io/exporter/prometheus v0.1.0 h1:SByaIoWwNgMdPSgl5sMqM2KDE5H/ukPWBRo314xiDvg= +contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= 
+github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= +github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg= +github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= +github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ= +github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= +github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee h1:8doiS7ib3zi6/K172oDhSKU0dJ/miJramo9NITOMyZQ= +github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa h1:1PPxEyGdIGVkX/kqMvLJ95a1dGS1Sz7tpNEgehEYYt0= +github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa/go.mod h1:WUmMvh9wMtqj1Xhf1hf3kp9RvL+y6odtdYxpyZjb90U= +github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= 
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/Stebalien/go-bitfield v0.0.0-20180330043415-076a62f9ce6e/go.mod h1:3oM7gXIttpYDAJXpVNnSCiUMYBLIZ6cb1t+Ip982MRo= +github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= +github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw= +github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921/go.mod h1:/n6+1/DWPltRLWL/VKyUxg6tzsl5kHUCcraimt4vr60= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/avast/retry-go v2.6.0+incompatible h1:FelcMrm7Bxacr1/RM8+/eqkDkmVN7tjlsy51dOzB3LI= +github.com/avast/retry-go v2.6.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod 
h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.32.11/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/benbjohnson/clock v1.0.1/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bep/debounce v1.2.0 h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo= +github.com/bep/debounce v1.2.0/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd 
v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= +github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 h1:gfAMKE626QEuKG3si0pdTRcr/YEbBoxY+3GOH3gWvl4= +github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129/go.mod h1:u9UyCz2eTrSGy6fbupqJ54eY5c4IC8gREQ1053dK12U= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod 
h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= +github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= +github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe h1:69JI97HlzP+PH5Mi1thcGlDoBr6PS2Oe+l3mNmAkbs4= +github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 h1:7grrpcfCtbZLsjtB0DgMuzs1umsJmpzaHMZ6cO6iAWw= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/coreos/etcd v3.3.10+incompatible/go.mod 
h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.2.1-0.20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 
h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/daaku/go.zipexe v1.0.0 h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY= +github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU= +github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk= +github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod 
h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.1 h1:w9pSFNSdq/JPM1N12Fz/F/bzo993Is1W+Q7HjPzi7yg= +github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= +github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= +github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= +github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= +github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= 
+github.com/drand/drand v1.2.1 h1:KB7z+69YbnQ5z22AH/LMi0ObDR8DzYmrkS6vZXTR9jI= +github.com/drand/drand v1.2.1/go.mod h1:j0P7RGmVaY7E/OuO2yQOcQj7OgeZCuhgu2gdv0JAm+g= +github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= +github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= +github.com/drand/kyber v1.1.4 h1:YvKM03QWGvLrdTnYmxxP5iURAX+Gdb6qRDUOgg8i60Q= +github.com/drand/kyber v1.1.4/go.mod h1:9+IgTq7kadePhZg7eRwSD7+bA+bmvqRK+8DtmoV5a3U= +github.com/drand/kyber-bls12381 v0.2.0/go.mod h1:zQip/bHdeEB6HFZSU3v+d3cQE0GaBVQw9aR2E7AdoeI= +github.com/drand/kyber-bls12381 v0.2.1 h1:/d5/YAdaCmHpYjF1NZevOEcKGaq6LBbyvkCTIdGqDjs= +github.com/drand/kyber-bls12381 v0.2.1/go.mod h1:JwWn4nHO9Mp4F5qCie5sVIPQZ0X6cw8XAeMRvc/GXBE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE= +github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= +github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/elastic/gosigar 
v0.12.0 h1:AsdhYCJlTudhfOYQyFNgx+fIVTfrDO0V1ST0vHgiapU= +github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/etclabscore/go-jsonschema-walk v0.0.6/go.mod h1:VdfDY72AFAiUhy0ZXEaWSpveGjMT5JcDIm903NGqFwQ= +github.com/etclabscore/go-openrpc-reflect v0.0.36/go.mod h1:0404Ky3igAasAOpyj1eESjstTyneBAIk5PgJFbK4s5E= +github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A= +github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= +github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E= +github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.5 h1:SSaFT/5aLfPXycUlFyemoHYhRgdyXClXCyDdNJKPlDM= +github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= 
+github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= +github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo= +github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= +github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= +github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= +github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 h1:U9Z+76pHCKBmtdxFV7JFZJj7OVm12I6dEKwtMVbq5p0= +github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= +github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= +github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= 
+github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= +github.com/filecoin-project/go-data-transfer v1.6.0 h1:DHIzEc23ydRCCBwtFet3MfgO8gMpZEnw60Y+s71oX6o= +github.com/filecoin-project/go-data-transfer v1.6.0/go.mod h1:E3WW4mCEYwU2y65swPEajSZoFWFmfXt7uwGduoACZQc= +github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= +github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= +github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg= +github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= +github.com/filecoin-project/go-fil-markets v1.5.0 h1:3KEs01L8XFCEgujZ6ggFjr1XWjpjTQcmSSeo3I99I0k= +github.com/filecoin-project/go-fil-markets v1.5.0/go.mod h1:7be6zzFwaN8kxVeYZf/UUj/JilHC0ogPvWqE1TW8Ptk= +github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= +github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= +github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= +github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= 
+github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec h1:rGI5I7fdU4viManxmDdbk5deZO7afe6L1Wc04dAmlOM= +github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= +github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI= +github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= +github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg= +github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498 h1:G10ezOvpH1CLXQ19EA9VWNwyL0mg536ujSayjV0yg0k= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= +github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= +github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= +github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 h1:Jc4OprDp3bRDxbsrXNHPwJabZJM3iDy+ri8/1e0ZnX4= +github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe 
h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw= +github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= +github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= +github.com/filecoin-project/go-statestore v0.1.1 h1:ufMFq00VqnT2CAuDpcGnwLnCX1I/c3OROw/kXVNSTZk= +github.com/filecoin-project/go-statestore v0.1.1/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= +github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= +github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= +github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4 h1:u5/uky+PdeaGuEGsExtVP8UUB8No/e873xjqcb7h3CM= +github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4/go.mod h1:8ooe5Rzw80rJL0br81A8NNiwZ4BUVzPRwAnDxUG4E7g= +github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= +github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY= +github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= +github.com/filecoin-project/specs-actors/v2 v2.3.2/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y= +github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= +github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc= 
+github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= +github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= +github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= +github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= +github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= +github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg= +github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= +github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= +github.com/filecoin-project/specs-actors/v5 v5.0.1 h1:PrYm5AKdMlJ/55eRW5laWcnaX66gyyDYBWvH38kNAMo= +github.com/filecoin-project/specs-actors/v5 v5.0.1/go.mod h1:74euMDIXorusOBs/QL/LNkYsXZdDpLJwojWw6T03pdE= +github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= +github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= +github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/francoispqt/gojay 
v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 h1:EzDjxMg43q1tA2c0MV3tNbaontnHLplHyFF6M5KiVP0= +github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= +github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell/v2 v2.2.0 h1:vSyEgKwraXPSOkvCk7IwOSyX+Pv3V2cV9CikJMXg4U4= +github.com/gdamore/tcell/v2 v2.2.0/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-errors/errors v1.0.1/go.mod 
h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= +github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.4/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/spec v0.19.7/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.11/go.mod h1:vqK/dIdLGCosfvYsQV3WfC7N3TiZSnGY2RZKoFK7X28= +github.com/go-openapi/swag v0.19.2/go.mod 
h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.8/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.11/go.mod h1:Uc0gKkdR+ojzsEpjh39QChyu92vPgIr72POcgHMAgSY= +github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4= +github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968 h1:s+PDl6lozQ+dEUtUtQnO7+A2iPG3sK1pI4liU+jxn90= +github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= +github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws= +github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.18/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod 
h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= 
+github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 h1:0IKlLyQ3Hs9nDaiK5cSHAGmcQEIC8l2Ts1u6x5Dfrqg= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.14.6 h1:8ERzHx8aj1Sc47mu9n/AksaKCSWrMchFtkdrS4BIj5o= 
+github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/gxed/go-shellwords v1.0.3/go.mod h1:N7paucT91ByIjmVJHhvoarjoQnmsi3Jd3vH7VqgtMxQ= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/gxed/pubsub v0.0.0-20180201040156-26ebdf44f824/go.mod h1:OiEWyHgK+CWrmOlVquHaIK1vhpUJydC9m0Je6mhaiNE= +github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 h1:BpJ2o0OR5FV7vrkDYfXYVJQeMNWa8RhklZOpW2ITAIQ= +github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 h1:F9k+7wv5OIk1zcq23QpdiL0hfDuXPjuOmMNaC6fgQ0Q= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8= +github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= +github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod 
h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hodgesds/perf-utils v0.0.8/go.mod 
h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag= +github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= +github.com/iancoleman/orderedmap v0.1.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= +github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8= +github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxdb1-client v0.0.0-20200515024757-02f0bf5dbca3 h1:k3/6a1Shi7GGCp9QpyYuXsMM6ncTOjCzOE9Fd6CDA+Q= +github.com/influxdata/influxdb1-client v0.0.0-20200515024757-02f0bf5dbca3/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= +github.com/influxdata/line-protocol 
v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= +github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= +github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= +github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= +github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= +github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= +github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-bitswap v0.0.3/go.mod h1:jadAZYsP/tcRMl47ZhFxhaNuDQoXawT8iHMg+iFoQbg= +github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= +github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= +github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= +github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= +github.com/ipfs/go-bitswap v0.3.2 h1:TdKx7lpidYe2dMAKfdeNS26y6Pc/AZX/i8doI1GV210= +github.com/ipfs/go-bitswap v0.3.2/go.mod h1:AyWWfN3moBzQX0banEtfKOfbXb3ZeoOeXnZGNPV9S6w= +github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= +github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= +github.com/ipfs/go-blockservice v0.0.3/go.mod h1:/NNihwTi6V2Yr6g8wBI+BSwPuURpBRMtYNGrlxZ8KuI= +github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= 
+github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= +github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-blockservice v0.1.4 h1:Vq+MlsH8000KbbUciRyYMEw/NNP8UAGmcqKi4uWmFGA= +github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10/go.mod h1:/BYOuUoxkE+0f6tGzlzMvycuN+5l35VOR4Bpg2sCmds= +github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo= +github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s= +github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.3.0/go.mod 
h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.5 h1:cwOUcGMLdLPWgu3SlrCckCMznaGADbPqE0r8h768/Dg= +github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= +github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= +github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= +github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= +github.com/ipfs/go-ds-badger v0.2.3 h1:J27YvAcpuA5IvZUbeBxOcQgqnYHUPxoygc6QxxkodZ4= +github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= +github.com/ipfs/go-ds-badger2 v0.1.0/go.mod h1:pbR1p817OZbdId9EvLOhKBgUVTM3BMCSTan78lDDVaw= +github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e h1:Xi1nil8K2lBOorBS6Ys7+hmUCzH8fr3U9ipdL/IrcEI= +github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e/go.mod h1:lJnws7amT9Ehqzta0gwMrRsURU04caT0iRPr1W8AsOU= +github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= +github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= +github.com/ipfs/go-ds-leveldb v0.4.1/go.mod 
h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9iaw= +github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ds-measure v0.1.0 h1:vE4TyY4aeLeVgnnPBC5QzKIjKrqzha0NCujTfgvVbVQ= +github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY= +github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459/go.mod h1:oh4liWHulKcDKVhCska5NLelE3MatWl+1FwSz3tY91g= +github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0= +github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM= +github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= +github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= +github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= +github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= +github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= +github.com/ipfs/go-graphsync v0.6.1/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= +github.com/ipfs/go-graphsync v0.6.2-0.20210428121800-88edb5462e17 h1:rOoF88dVuDGbIx7idSdimN7JvXriyOIT96WD3eX9sHA= +github.com/ipfs/go-graphsync v0.6.2-0.20210428121800-88edb5462e17/go.mod h1:5WyaeigpNdpiYQuW2vwpuecOoEfB4h747ZGEOKmAGTg= +github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= +github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= +github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= +github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= +github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= 
+github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= +github.com/ipfs/go-ipfs-blockstore v1.0.3 h1:RDhK6fdg5YsonkpMuMpdvk/pRtOQlrIRIybuQfkvB2M= +github.com/ipfs/go-ipfs-blockstore v1.0.3/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= +github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= +github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= +github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= +github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= +github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= +github.com/ipfs/go-ipfs-cmds v0.1.0 h1:0CEde9EcxByej8+L6d1PST57J4ambRPyCTjLG5Ymou8= +github.com/ipfs/go-ipfs-cmds v0.1.0/go.mod h1:TiK4e7/V31tuEb8YWDF8lN3qrnDH+BS7ZqWIeYJlAs8= +github.com/ipfs/go-ipfs-config v0.0.11 h1:5/4nas2CQXiKr2/MLxU24GDGTBvtstQIQezuk7ltOQQ= +github.com/ipfs/go-ipfs-config v0.0.11/go.mod h1:wveA8UT5ywN26oKStByzmz1CO6cXwLKKM6Jn/Hfw08I= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= +github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= +github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= +github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= +github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= 
+github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-files v0.0.2/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg= +github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= +github.com/ipfs/go-ipfs-flags v0.0.1/go.mod h1:RnXBb9WV53GSfTrSDVK61NLTFKvWc60n+K9EgCDh+rA= +github.com/ipfs/go-ipfs-http-client v0.0.5 h1:niW5M0qqa0O/VRCAzr3f5Y7i3MjTpf0lhpkisjRtHR8= +github.com/ipfs/go-ipfs-http-client v0.0.5/go.mod h1:8EKP9RGUrUex4Ff86WhnKU7seEBOtjdgXlY9XHYvYMw= +github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= +github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= +github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= +github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= +github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= +github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= +github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= +github.com/ipfs/go-ipld-cbor v0.0.1/go.mod 
h1:RXHr8s4k0NE0TKhnrxqZC9M888QfsBN9rhS5NjfKzY8= +github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.5 h1:ovz4CHKogtG2KB/h1zUp5U0c/IzZrL435rCh5+K/5G8= +github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= +github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= +github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= +github.com/ipfs/go-ipns v0.0.2 h1:oq4ErrV4hNQ2Eim257RTYRgfOSV/s8BDaf9iIl4NwFs= +github.com/ipfs/go-ipns v0.0.2/go.mod h1:WChil4e0/m9cIINWLxZe1Jtf77oz5L05rO2ei/uKJ5U= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= +github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I= +github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= +github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= +github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY= +github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= +github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.3/go.mod 
h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.3 h1:1iS3IU7aXRlbgUpN8yTTpJ53NXYjAe37vcI5+5nYrzk= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= +github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA= +github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= +github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY= +github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= +github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/ipfs/go-metrics-prometheus v0.0.2/go.mod h1:ELLU99AQQNi+zX6GCGm2lAgnzdSH3u5UVlCdqSXnEks= +github.com/ipfs/go-path v0.0.3/go.mod h1:zIRQUez3LuQIU25zFjC2hpBTHimWx7VK5bjZgRLbbdo= +github.com/ipfs/go-path v0.0.7 h1:H06hKMquQ0aYtHiHryOMLpQC1qC3QwXwkahcEVD51Ho= +github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= +github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= 
+github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.2.0 h1:2cSr7exUGKYyDeUyQ7P/nHPs9P7Ht/B+ROrpN1EJOjc= +github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= +github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= +github.com/ipfs/go-unixfs v0.0.4/go.mod h1:eIo/p9ADu/MFOuyxzwU+Th8D6xoxU//r590vUpWyfz8= +github.com/ipfs/go-unixfs v0.2.1/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= +github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= +github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo= +github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= +github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= +github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= +github.com/ipfs/interface-go-ipfs-core v0.2.3 h1:E6uQ+1fJjkxJWlL9lAE72a5FWeyeeNL3GitLy8+jq3Y= +github.com/ipfs/interface-go-ipfs-core v0.2.3/go.mod h1:Tihp8zxGpUeE3Tokr94L6zWZZdkRQvG5TL6i9MuNE+s= +github.com/ipfs/iptb v1.4.0 h1:YFYTrCkLMRwk/35IMyC6+yjoQSHTEcNcefBStLJzgvo= +github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdmg= +github.com/ipfs/iptb-plugins v0.2.1 h1:au4HWn9/pRPbkxA08pDx2oRAs4cnbgQWgV0teYXuuGA= +github.com/ipfs/iptb-plugins v0.2.1/go.mod h1:QXMbtIWZ+jRsW8a4h13qAKU7jcM7qaittO8wOsTP0Rs= +github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= +github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= +github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d h1:iphSzTuPqyDgH7WUVZsdqUnQNzYgIblsVr1zhVNA33U= +github.com/ipld/go-car 
v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ= +github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= +github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= +github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= +github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018 h1:RbRHv8epkmvBYA5cGfz68GUSbOgx5j/7ObLIl4Rsif0= +github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= +github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= +github.com/ipld/go-ipld-prime-proto v0.1.0 h1:j7gjqrfwbT4+gXpHwEx5iMssma3mnctC7YaCimsFP70= +github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= +github.com/jackpal/gateway v1.0.4/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv 
v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= +github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror 
v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1 h1:qBCV/RLV02TSfQa7tFmxTihnG+u+7JXByOkhlkR5rmQ= +github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= +github.com/jtolds/gls v4.2.1+incompatible/go.mod 
h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= +github.com/kabukky/httpscerts v0.0.0-20150320125433-617593d7dcb3 h1:Iy7Ifq2ysilWU4QlCx/97OoI4xT1IV7i8byT/EyIT/M= +github.com/kabukky/httpscerts v0.0.0-20150320125433-617593d7dcb3/go.mod h1:BYpt4ufZiIGv2nXn4gMxnfKV306n3mWXgNu/d2TqdTU= +github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= +github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= +github.com/kilic/bls12-381 v0.0.0-20200731194930-64c428e1bff5/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= +github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391 h1:51kHw7l/dUDdOdW06AlUGT5jnpj6nqQSILebcsikSjA= +github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream 
v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= +github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/kpacha/opencensus-influxdb v0.0.0-20181102202715-663e2683a27c h1:3pM6OrLfkfe0rKZjE6MHdcTaI0ohcHbRUZJeJqkvPb4= +github.com/kpacha/opencensus-influxdb v0.0.0-20181102202715-663e2683a27c/go.mod h1:ESXZSm2iaF+1P5o6VFEWpeARTQpcil4e1DwumnTopdg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 
h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= +github.com/libp2p/go-addr-util v0.0.2 h1:7cWK5cdA5x72jX0g8iLrQWm5TRJZ6CzGdPEhWj7plWU= +github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= +github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= +github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40JQWnayTvNMgD/vyk= +github.com/libp2p/go-conn-security-multistream v0.0.1/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= +github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= +github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= +github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= +github.com/libp2p/go-conn-security-multistream v0.2.1 h1:ft6/POSK7F+vl/2qzegnHDaXFU0iWB4yVTYrioC6Zy0= +github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= +github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= +github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= +github.com/libp2p/go-eventbus 
v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= +github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= +github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-flow-metrics v0.0.2/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= +github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-libp2p v0.0.2/go.mod h1:Qu8bWqFXiocPloabFGUcVG4kk94fLvfC8mWTDdFC9wE= +github.com/libp2p/go-libp2p v0.0.30/go.mod h1:XWT8FGHlhptAv1+3V/+J5mEpzyui/5bvFsNuWYs611A= +github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= +github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= +github.com/libp2p/go-libp2p v0.3.1/go.mod h1:e6bwxbdYH1HqWTz8faTChKGR0BjPc8p+6SyP8GTTR7Y= +github.com/libp2p/go-libp2p v0.4.0/go.mod h1:9EsEIf9p2UDuwtPd0DwJsAl0qXVxgAnuDGRvHbfATfI= +github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg= +github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= +github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= +github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= +github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= +github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= +github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= +github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= +github.com/libp2p/go-libp2p v0.12.0/go.mod h1:FpHZrfC1q7nA8jitvdjKBDF31hguaC676g/nT9PgQM0= +github.com/libp2p/go-libp2p v0.14.2 h1:qs0ABtjjNjS+RIXT1uM7sMJEvIc0pq2nKR0VQxFXhHI= 
+github.com/libp2p/go-libp2p v0.14.2/go.mod h1:0PQMADQEjCM2l8cSMYDpTgsb8gr6Zq7i4LUgq1mlW2E= +github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U= +github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= +github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4= +github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= +github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= +github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= +github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= +github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= +github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= +github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= +github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.4.2 h1:YMp7StMi2dof+baaxkbxaizXjY1RPvU71CXfxExzcUU= +github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= +github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= +github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= +github.com/libp2p/go-libp2p-blankhost v0.1.3/go.mod h1:KML1//wiKR8vuuJO0y3LUd1uLv+tlkGTAr3jC0S5cLg= +github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= +github.com/libp2p/go-libp2p-blankhost v0.1.6/go.mod 
h1:jONCAJqEP+Z8T6EQviGL4JsQcLx1LgTGtVqFNY8EMfQ= +github.com/libp2p/go-libp2p-blankhost v0.2.0 h1:3EsGAi0CBGcZ33GwRuXEYJLLPoVWyXJ1bcJzAJjINkk= +github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= +github.com/libp2p/go-libp2p-circuit v0.0.1/go.mod h1:Dqm0s/BiV63j8EEAs8hr1H5HudqvCAeXxDyic59lCwE= +github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= +github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-circuit v0.1.1/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-circuit v0.1.3/go.mod h1:Xqh2TjSy8DD5iV2cCOMzdynd6h8OTBGoV1AWbWor3qM= +github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= +github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= +github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= +github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= +github.com/libp2p/go-libp2p-circuit v0.4.0 h1:eqQ3sEYkGTtybWgr6JLqJY6QLtPWRErvFjFDfAOO1wc= +github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= +github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= +github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4= +github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= +github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= +github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= +github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-core v0.0.3/go.mod 
h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= +github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= +github.com/libp2p/go-libp2p-core v0.0.6/go.mod h1:0d9xmaYAVY5qmbp/fcgxHT3ZJsLjYeYPMJAUKpaCHrE= +github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= +github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= +github.com/libp2p/go-libp2p-core v0.2.3/go.mod h1:GqhyQqyIAPsxFYXHMjfXgMv03lxsvM0mFzuYA9Ib42A= +github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= +github.com/libp2p/go-libp2p-core v0.2.5/go.mod h1:6+5zJmKhsf7yHn1RbmYDu08qDUpIUxGdqHuEZckmZOA= +github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= +github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= +github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.2/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.3/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM= +github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= 
+github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.5 h1:aEgbIcPGsKy6zYcC+5AJivYFedhYa4sW7mIpWpUaLKw= +github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= +github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= +github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= +github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= +github.com/libp2p/go-libp2p-daemon v0.2.2/go.mod h1:kyrpsLB2JeNYR2rvXSVWyY0iZuRIMhqzWR3im9BV6NQ= +github.com/libp2p/go-libp2p-discovery v0.0.1/go.mod h1:ZkkF9xIFRLA1xCc7bstYFkd80gBGK8Fc1JqGoU2i+zI= +github.com/libp2p/go-libp2p-discovery v0.0.5/go.mod h1:YtF20GUxjgoKZ4zmXj8j3Nb2TUSBHFlOCetzYdbZL5I= +github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= +github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= +github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= +github.com/libp2p/go-libp2p-discovery v0.4.0/go.mod h1:bZ0aJSrFc/eX2llP0ryhb1kpgkPyTo23SJ5b7UQCMh4= +github.com/libp2p/go-libp2p-discovery v0.5.0 h1:Qfl+e5+lfDgwdrXdu4YNCWyEo3fWuP+WgN9mN0iWviQ= +github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= +github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= +github.com/libp2p/go-libp2p-host v0.0.3/go.mod 
h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= +github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= +github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= +github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= +github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= +github.com/libp2p/go-libp2p-kad-dht v0.2.1/go.mod h1:k7ONOlup7HKzQ68dE6lSnp07cdxdkmnRa+6B4Fh9/w0= +github.com/libp2p/go-libp2p-kad-dht v0.11.0 h1:ZLhlmDKsFiOkPhTzfEqBrMy/1Tqx+Dk6UgbHM5//IQM= +github.com/libp2p/go-libp2p-kad-dht v0.11.0/go.mod h1:5ojtR2acDPqh/jXf5orWy8YGb8bHQDS+qeDcoscL/PI= +github.com/libp2p/go-libp2p-kbucket v0.2.1/go.mod h1:/Rtu8tqbJ4WQ2KTCOMJhggMukOLNLNPY1EtEWWLxUvc= +github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= +github.com/libp2p/go-libp2p-kbucket v0.4.7/go.mod h1:XyVo99AfQH0foSf176k4jY1xUJ2+jUJIZCSDm7r2YKk= +github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg= +github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= +github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= +github.com/libp2p/go-libp2p-metrics v0.0.1/go.mod h1:jQJ95SXXA/K1VZi13h52WZMa9ja78zjyy5rspMsC/08= +github.com/libp2p/go-libp2p-mplex v0.1.1/go.mod h1:KUQWpGkCzfV7UIpi8SKsAVxyBgz1c9R5EvxgnwLsb/I= +github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= +github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= +github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= +github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= +github.com/libp2p/go-libp2p-mplex v0.3.0/go.mod 
h1:l9QWxRbbb5/hQMECEb908GbS9Sm2UAR2KFZKUJEynEs= +github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= +github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aDN3Fx1sc= +github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= +github.com/libp2p/go-libp2p-nat v0.0.2/go.mod h1:QrjXQSD5Dj4IJOdEcjHRkWTSomyxRo6HnUkf/TfQpLQ= +github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= +github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= +github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGSfvTkU= +github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= +github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= +github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= +github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= +github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= +github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= +github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= +github.com/libp2p/go-libp2p-noise v0.2.0 h1:wmk5nhB9a2w2RxMOyvsoKjizgJOEaJdfAakr0jN8gds= +github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= +github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= +github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= +github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= +github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= +github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod 
h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= +github.com/libp2p/go-libp2p-peerstore v0.0.6/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= +github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= +github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= +github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= +github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= +github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= +github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= +github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= +github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.7 h1:83JoLxyR9OYTnNfB5vvFqvMUv/xDNa6NoPHnENhBsGw= +github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= +github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= +github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= +github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= +github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= +github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= +github.com/libp2p/go-libp2p-pubsub v0.3.2/go.mod h1:Uss7/Cfz872KggNb+doCVPHeCDmXB7z500m/R8DaAUk= +github.com/libp2p/go-libp2p-pubsub 
v0.4.2-0.20210212194758-6c1addf493eb h1:HExLcdXn8fgtXPciUw97O5NNhBn31dt6d9fVUD4cngo= +github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb/go.mod h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ= +github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6 h1:2lH7rMlvDPSvXeOR+g7FE6aqiEwxtpxWKQL8uigk5fQ= +github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6/go.mod h1:8ZodgKS4qRLayfw9FDKDd9DX4C16/GMofDxSldG8QPI= +github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= +github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= +github.com/libp2p/go-libp2p-quic-transport v0.10.0 h1:koDCbWD9CCHwcHZL3/WEvP2A+e/o5/W5L3QS/2SPMA0= +github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= +github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= +github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= +github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= +github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk= +github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0= +github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4= +github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= +github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE= +github.com/libp2p/go-libp2p-routing-helpers v0.2.3 h1:xY61alxJ6PurSi+MXbywZpelvuU4U4p/gPTxjqCqTzY= +github.com/libp2p/go-libp2p-routing-helpers v0.2.3/go.mod h1:795bh+9YeoFl99rMASoiVgHdi5bjack0N1+AFAdbvBw= +github.com/libp2p/go-libp2p-secio v0.0.1/go.mod h1:IdG6iQybdcYmbTzxp4J5dwtUEDTOvZrT0opIDVNPrJs= 
+github.com/libp2p/go-libp2p-secio v0.0.3/go.mod h1:hS7HQ00MgLhRO/Wyu1bTX6ctJKhVpm+j2/S2A5UqYb0= +github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= +github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= +github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= +github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY= +github.com/libp2p/go-libp2p-swarm v0.0.1/go.mod h1:mh+KZxkbd3lQnveQ3j2q60BM1Cw2mX36XXQqwfPOShs= +github.com/libp2p/go-libp2p-swarm v0.0.6/go.mod h1:s5GZvzg9xXe8sbeESuFpjt8CJPTCa8mhEusweJqyFy8= +github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= +github.com/libp2p/go-libp2p-swarm v0.2.1/go.mod h1:x07b4zkMFo2EvgPV2bMTlNmdQc8i+74Jjio7xGvsTgU= +github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= +github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= +github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h/GGZes8Wku/M5Y= +github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= +github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= +github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.3.1/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.5.0 h1:HIK0z3Eqoo8ugmN8YqWAhD2RORgR+3iNXYG4U2PFd1E= +github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= +github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.3/go.mod 
h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= +github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= +github.com/libp2p/go-libp2p-testing v0.4.0 h1:PrwHRi0IGqOwVQWR3xzgigSlhlLfxgfXgkHxr77EghQ= +github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM= +github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= +github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= +github.com/libp2p/go-libp2p-transport v0.0.4/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= +github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= +github.com/libp2p/go-libp2p-transport-upgrader v0.0.1/go.mod h1:NJpUAgQab/8K6K0m+JmZCe5RUXG10UMEx4kWe9Ipj5c= +github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= +github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= +github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.2 h1:4JsnbfJzgZeRS9AWN7B9dPqn/LY/HoQTlO9gtdJTIYM= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod 
h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= +github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= +github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= +github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= +github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= +github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= +github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= +github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= +github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= +github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= +github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= +github.com/libp2p/go-libp2p-yamux v0.5.4 h1:/UOPtT/6DHPtr3TtKXBHa6g0Le0szYuI33Xc/Xpd7fQ= +github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= +github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= +github.com/libp2p/go-maddr-filter v0.1.0 h1:4ACqZKw8AqiuJfwFGq1CYDFugfXTOos+qQ3DETkhtCE= +github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= +github.com/libp2p/go-mplex v0.0.1/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.1.0/go.mod 
h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= +github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= +github.com/libp2p/go-mplex v0.3.0 h1:U1T+vmCYJaEoDJPV1aq31N56hS+lJgb397GsylNSgrU= +github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= +github.com/libp2p/go-msgio v0.0.1/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.6 h1:lQ7Uc0kS1wb1EfRxO2Eir/RJoHkHn7t6o+EiwsYIKJA= +github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= +github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= +github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= +github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= +github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.6 h1:ruPJStbYyXVYGQ81uzEDzuvbYRLKRrLvTYd33yomC38= +github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= +github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= +github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.4/go.mod 
h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw= +github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= +github.com/libp2p/go-reuseport v0.0.2 h1:XSG94b1FJfGA01BUrT82imejHQyTxO4jEWqheyCXYvU= +github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= +github.com/libp2p/go-reuseport-transport v0.0.1/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= +github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= +github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= +github.com/libp2p/go-reuseport-transport v0.0.4 h1:OZGz0RB620QDGpv300n1zaOcKGGAoGVf8h9txtt/1uM= +github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= +github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ= +github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= +github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= +github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw= +github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= +github.com/libp2p/go-stream-muxer-multistream v0.3.0 h1:TqnSHPJEIqDEO7h1wZZ0p3DXdvDSiLHQidKKUGZtiOY= 
+github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= +github.com/libp2p/go-tcp-transport v0.0.1/go.mod h1:mnjg0o0O5TmXUaUIanYPUqkW4+u6mK0en8rlpA6BBTs= +github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19K427vCzQ+xHKH/o= +github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= +github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= +github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= +github.com/libp2p/go-tcp-transport v0.2.1 h1:ExZiVQV+h+qL16fzCWtd1HSzPsqWottJ8KXwWaVi8Ns= +github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= +github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= +github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= +github.com/libp2p/go-ws-transport v0.0.1/go.mod h1:p3bKjDWHEgtuKKj+2OdPYs5dAPIjtpQGHF2tJfGz7Ww= +github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= +github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= +github.com/libp2p/go-ws-transport v0.1.2/go.mod h1:dsh2Ld8F+XNmzpkaAijmg5Is+e9l6/1tK/6VFOdN69Y= +github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= +github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= +github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= +github.com/libp2p/go-ws-transport v0.4.0 h1:9tvtQ9xbws6cA5LvqdE6Ne3vcmGB4f1z9SByggk4s0k= +github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= +github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.2/go.mod 
h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.6/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= +github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux/v2 v2.2.0 h1:RwtpYZ2/wVviZ5+3pjC8qdQ4TKnrak0/E01N1UWoAFU= +github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= +github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= +github.com/lucas-clemente/quic-go v0.19.3 h1:eCDQqvGBB+kCTkA0XrAFtNe81FMa0/fn4QSoeAbmiF4= +github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac= +github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= +github.com/lunixbochs/vtclean v1.0.0/go.mod 
h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= +github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= +github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc= +github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= +github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ= +github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod 
h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= +github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod 
h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee/go.mod h1:Evt/EIne46u9PtQbeTx2NTcqURpr5K4SvKtGmBuDPN8= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1 
h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= 
+github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= +github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= +github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= +github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= +github.com/multiformats/go-multiaddr v0.3.1 h1:1bxa+W7j9wZKTZREySx1vPMs2TqrYWjVZ7zE6/XLG1I= +github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= +github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.1.0/go.mod h1:01k2RAqtoXIuPa3DCavAE9/6jc6nM0H3EgZyfUhN2oY= 
+github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= +github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= +github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= +github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= +github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= +github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.2.0 h1:MSXRGN0mFymt6B1yo/6BPnIRpLPEnKgQNvVfCX5VDJk= +github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multihash v0.0.1/go.mod 
h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.7/go.mod h1:XuKXPp8VHcTygube3OWZC+aZrA+H1IhmjoCDtJc7PXM= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I= +github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= +github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.2 h1:TCYu1BHTDr1F/Qm75qwYISQdzGcRdC21nFgQW7l7GBo= +github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= +github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6 
h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= +github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy8ALwOebjekYExl9HTT9urdawqC95tA= +github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28= +github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg= +github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod 
h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333/go.mod h1:Ag6rSXkHIckQmjFBCweJEEt1mrTPBv8b9W4aU/NQWfI= +github.com/opencontainers/runtime-spec 
v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= +github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df h1:vdYtBU6zvL7v+Tr+0xFM/qhahw/EvY8DMMunZHKH6eE= +github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= +github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod 
h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= 
+github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs 
v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= +github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= +github.com/raulk/go-watchdog v1.0.1 h1:qgm3DIJAeb+2byneLrQJ7kvmDLGxN2vy3apXyGaDKN4= +github.com/raulk/go-watchdog v1.0.1/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rogpeppe/fastuuid 
v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI= +github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/sercand/kuberesolver v2.1.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= +github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= +github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shirou/gopsutil v2.18.12+incompatible h1:1eaJvGomDnH74/5cF4CTmTbLHAriGFsTZppLXDX93OM= +github.com/shirou/gopsutil 
v2.18.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod 
h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= +github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= 
+github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify 
v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/testground/sdk-go v0.2.6 h1:sMwv0/caNNODKfdPigNqmSSIZLcse7pZX6fgrjCGBIs= +github.com/testground/sdk-go v0.2.6/go.mod h1:Q4dnWsUBH+dZ1u7aEGDBHWGUaLfhitjUq3UJQqxeTmk= +github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= +github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.23.1+incompatible h1:uArBYHQR0HqLFFAypI7RsWTzPSj/bDpmZZuQjMLSg1A= +github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= 
+github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw= +github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/urfave/cli/v2 v2.2.0 h1:JTTnM6wKzdA0Jqodd966MVj4vWbbquZykeX1sKbe2C4= +github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod 
h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/weaveworks/common v0.0.0-20200512154658-384f10054ec5 h1:EYxr08r8x6r/5fLEAMMkida1BVgxVXE4LfZv/XV+znU= +github.com/weaveworks/common v0.0.0-20200512154658-384f10054ec5/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY= +github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= +github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= +github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba h1:X4n8JG2e2biEZZXdBKt9HX7DN3bYGFUqljqqy0DqgnY= +github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba/go.mod h1:CHQnYnQUEPydYCwuy8lmTHfGmdw9TKrhWV0xLx8l0oM= +github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= +github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen 
v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2 h1:bsUlNhdmbtlfdLVXAVfuvKQ01RnWAM09TVrJkI7NZs4= +github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= +github.com/whyrusleeping/go-ctrlnet v0.0.0-20180313164037-f564fbbdaa95/go.mod h1:SJqKCCPXRfBFCwXjfNT/skfsceF7+MBFLI2OrvuRA7g= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= +github.com/whyrusleeping/go-smux-multiplex v3.0.16+incompatible/go.mod h1:34LEDbeKFZInPUrAG+bjuJmUXONGdEFW7XL0SpTY1y4= +github.com/whyrusleeping/go-smux-multistream v2.0.2+incompatible/go.mod h1:dRWHHvc4HDQSHh9gbKEBbUZ+f2Q8iZTPG3UOGYODxSQ= +github.com/whyrusleeping/go-smux-yamux v2.0.8+incompatible/go.mod 
h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI= +github.com/whyrusleeping/go-smux-yamux v2.0.9+incompatible/go.mod h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI= +github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4 h1:NwiwjQDB3CzQ5XH0rdMh1oQqzJH7O2PSLWxif/w3zsY= +github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4/go.mod h1:K+EVq8d5QcQ2At5VECsA+SNZvWefyBXh8TnIsxo1OvQ= +github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325 h1:++Zf4xQ7YrkE81gNHIjVqx5JZsn0nbMeHOkY1ILAIME= +github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325/go.mod h1:g7ckxrjiFh8mi1AY7ox23PZD0g6QU/TxW3U3unX7I3A= +github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= +github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= +github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8= +github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/c-for-go v0.0.0-20201112171043-ea6dce5809cb 
h1:/7/dQyiKnxAOj9L69FhST7uMe17U015XPzX7cy+5ykM= +github.com/xlab/c-for-go v0.0.0-20201112171043-ea6dce5809cb/go.mod h1:pbNsDSxn1ICiNn9Ct4ZGNrwzfkkwYbx/lw8VuyutFIg= +github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245 h1:Sw125DKxZhPUI4JLlWugkzsrlB50jR9v2khiD9FxuSo= +github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245/go.mod h1:C+diUUz7pxhNY6KAoLgrTYARGWnt82zWTylZlxT92vk= +github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= +github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZDpfMfWg7xk29yEOZiXmo/wZl+utTI8= +github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8= +github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU= +github.com/zondax/ledger-go v0.12.1/go.mod h1:KatxXrVDzgWwbssUWsF5+cOJHXPvzQ09YSlzGNuhOEo= +go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= +go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= +go.dedis.ch/kyber/v3 v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= +go.dedis.ch/kyber/v3 v3.0.9 h1:i0ZbOQocHUjfFasBiUql5zVeC7u/vahFd96DFA8UOWk= +go.dedis.ch/kyber/v3 v3.0.9/go.mod h1:rhNjUUg6ahf8HEg5HUvVBYoWY4boAafX8tYxX+PS+qg= +go.dedis.ch/protobuf v1.0.5/go.mod 
h1:eIV4wicvi6JK0q/QnfIEGeSFNG0ZeB24kzut5+HaRLo= +go.dedis.ch/protobuf v1.0.7/go.mod h1:pv5ysfkDX/EawiPqcW3ikOxsL5t+BqnV6xHSmE79KI4= +go.dedis.ch/protobuf v1.0.11 h1:FTYVIEzY/bfl37lu3pR4lIj+F9Vp1jE8oh91VmxKgLo= +go.dedis.ch/protobuf v1.0.11/go.mod h1:97QR256dnkimeNdfmURz0wAMNVbd1VmLXhG1CrTYrJ4= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 
h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/dig v1.10.0 h1:yLmDDj9/zuDjv3gz8GQGviXMs9TfysIUMUilCpgzUJY= +go.uber.org/dig v1.10.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= +go.uber.org/fx v1.9.0 h1:7OAz8ucp35AU8eydejpYG7QrbE8rLKzGhHbZlJi5LYY= +go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw= +go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= +go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= +go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod 
h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod 
h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc= +golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod 
v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201022231255-08b38378de70/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 h1:0PC75Fz/kyMGhL0e1QnypqK2kQMqKt9csD1GnMJR+Zk= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 
v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191025090151-53bf42e6b339/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83 h1:kHSDPqCtsHZOg0nVylfTo20DDhE9gG4Y0jn7hKQ0QAM= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191030062658-86caa796c7ab/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod 
h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod 
h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482 h1:i+Aiej6cta/Frzp13/swvwz5O00kYcSe0A/C5Wd7zX8= +google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2 
h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= 
+gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= +gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= +gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/golex v1.0.1 h1:EYKY1a3wStt0RzHaH8mdSRNg78Ub0OHxYfCRWw35YtM= +modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254= +modernc.org/lex v1.0.0/go.mod h1:G6rxMTy3cH2iA0iXL/HRRv4Znu8MK4higxph/lE7ypk= +modernc.org/lexer v1.0.0/go.mod h1:F/Dld0YKYdZCLQ7bD0USbWL4YKCyTDRDHiDTOs0q0vk= +modernc.org/mathutil 
v1.1.1 h1:FeylZSVX8S+58VsyJlkEj2bcpdytmp9MmDKZkKx8OIE= +modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc= +modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0 h1:7ccXrupWZIS3twbUGrtKmHS2DXY6xegFua+6O3xgAFU= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/testplans/lotus-soup/init.go b/testplans/lotus-soup/init.go new file mode 100644 index 00000000000..c20f5f2b8e2 --- /dev/null +++ b/testplans/lotus-soup/init.go @@ -0,0 +1,63 @@ +package main + +import ( + "os" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/policy" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/ipfs/go-log/v2" +) + +func init() { + build.BlockDelaySecs = 3 + build.PropagationDelaySecs = 1 + + _ = log.SetLogLevel("*", "DEBUG") + _ = log.SetLogLevel("dht", "WARN") + _ = log.SetLogLevel("swarm2", "WARN") + _ = log.SetLogLevel("addrutil", "WARN") + _ = log.SetLogLevel("stats", "WARN") + _ = log.SetLogLevel("dht/RtRefreshManager", "ERROR") // noisy + _ = log.SetLogLevel("bitswap", "ERROR") // noisy + _ = 
log.SetLogLevel("badgerbs", "ERROR") // noisy + _ = log.SetLogLevel("sub", "ERROR") // noisy + _ = log.SetLogLevel("pubsub", "ERROR") // noisy + _ = log.SetLogLevel("chain", "ERROR") // noisy + _ = log.SetLogLevel("chainstore", "ERROR") // noisy + _ = log.SetLogLevel("basichost", "ERROR") // noisy + + _ = os.Setenv("BELLMAN_NO_GPU", "1") + + build.InsecurePoStValidation = true + build.DisableBuiltinAssets = true + + // MessageConfidence is the amount of tipsets we wait after a message is + // mined, e.g. payment channel creation, to be considered committed. + build.MessageConfidence = 1 + + // The duration of a deadline's challenge window, the period before a + // deadline when the challenge is available. + // + // This will auto-scale the proving period. + // policy.SetWPoStChallengeWindow(abi.ChainEpoch(5)) // commented-out until we enable PoSt faults tests + + // Number of epochs between publishing the precommit and when the challenge for interactive PoRep is drawn + // used to ensure it is not predictable by miner. + policy.SetPreCommitChallengeDelay(abi.ChainEpoch(10)) + + policy.SetConsensusMinerMinPower(abi.NewTokenAmount(2048)) + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg8MiBV1) + + policy.SetMinVerifiedDealSize(abi.NewTokenAmount(256)) + + // Disable upgrades. + build.UpgradeSmokeHeight = -1 + build.UpgradeIgnitionHeight = -2 + build.UpgradeLiftoffHeight = -3 + // We need to _run_ this upgrade because genesis doesn't support v2, so + // we run it at height 0. 
+ build.UpgradeAssemblyHeight = 0 +} diff --git a/testplans/lotus-soup/main.go b/testplans/lotus-soup/main.go new file mode 100644 index 00000000000..ec8b17aef8d --- /dev/null +++ b/testplans/lotus-soup/main.go @@ -0,0 +1,24 @@ +package main + +import ( + "github.com/filecoin-project/lotus/testplans/lotus-soup/paych" + "github.com/filecoin-project/lotus/testplans/lotus-soup/rfwp" + "github.com/filecoin-project/lotus/testplans/lotus-soup/testkit" + + "github.com/testground/sdk-go/run" +) + +var cases = map[string]interface{}{ + "deals-e2e": testkit.WrapTestEnvironment(dealsE2E), + "recovery-failed-windowed-post": testkit.WrapTestEnvironment(rfwp.RecoveryFromFailedWindowedPoStE2E), + "deals-stress": testkit.WrapTestEnvironment(dealsStress), + "drand-halting": testkit.WrapTestEnvironment(dealsE2E), + "drand-outage": testkit.WrapTestEnvironment(dealsE2E), + "paych-stress": testkit.WrapTestEnvironment(paych.Stress), +} + +func main() { + sanityCheck() + + run.InvokeMap(cases) +} diff --git a/testplans/lotus-soup/manifest.toml b/testplans/lotus-soup/manifest.toml new file mode 100644 index 00000000000..9f5a574440b --- /dev/null +++ b/testplans/lotus-soup/manifest.toml @@ -0,0 +1,218 @@ +name = "lotus-soup" + +[defaults] +builder = "docker:go" +runner = "local:docker" + +[builders."exec:go"] +enabled = true + +[builders."docker:go"] +enabled = true +build_base_image = "iptestground/oni-buildbase:v15-lotus" +runtime_image = "iptestground/oni-runtime:v10-debug" + +[runners."local:exec"] +enabled = true + +[runners."local:docker"] +enabled = true + +[runners."cluster:k8s"] +enabled = true + +###################### +## +## Testcases +## +###################### + +[[testcases]] +name = "deals-e2e" +instances = { min = 1, max = 100, default = 5 } + + [testcases.params] + clients = { type = "int", default = 1 } + miners = { type = "int", default = 1 } + balance = { type = "float", default = 1 } + sectors = { type = "int", default = 1 } + role = { type = "string" } + + 
genesis_timestamp_offset = { type = "int", default = 0 } + + random_beacon_type = { type = "enum", default = "mock", options = ["mock", "local-drand", "external-drand"] } + + # Params relevant to drand nodes. drand nodes should have role="drand", and must all be + # in the same composition group. There must be at least threshold drand nodes. + # To get lotus nodes to actually use the drand nodes, you must set random_beacon_type="local-drand" + # for the lotus node groups. + drand_period = { type = "duration", default="10s" } + drand_threshold = { type = "int", default = 2 } + drand_gossip_relay = { type = "bool", default = true } + drand_log_level = { type = "string", default="info" } + + # Params relevant to pubsub tracing + enable_pubsub_tracer = { type = "bool", default = false } + mining_mode = { type = "enum", default = "synchronized", options = ["synchronized", "natural"] } + + # Fast retrieval + fast_retrieval = { type = "bool", default = false } + + # Bounce connection during push and pull data transfers + bounce_conn_data_transfers = { type = "bool", default = false } + + +[[testcases]] +name = "drand-halting" +instances = { min = 1, max = 100, default = 5 } + + [testcases.params] + clients = { type = "int", default = 1 } + miners = { type = "int", default = 1 } + balance = { type = "float", default = 1 } + sectors = { type = "int", default = 1 } + role = { type = "string" } + genesis_timestamp_offset = { type = "int", default = 0 } + + + random_beacon_type = { type = "enum", default = "local-drand", options = ["mock", "local-drand", "external-drand"] } + + # Params relevant to drand nodes. drand nodes should have role="drand", and must all be + # in the same composition group. There must be at least threshold drand nodes. + # To get lotus nodes to actually use the drand nodes, you must set random_beacon_type="local-drand" + # for the lotus node groups. 
+ drand_period = { type = "duration", default="10s" } + drand_threshold = { type = "int", default = 2 } + drand_gossip_relay = { type = "bool", default = true } + drand_log_level = { type = "string", default="info" } + suspend_events = { type = "string", default="", desc = "a sequence of halt/resume/wait events separated by '->'" } + + # Params relevant to pubsub tracing + enable_pubsub_tracer = { type = "bool", default = false } # Mining Mode: synchronized -vs- natural time + mining_mode = { type = "enum", default = "synchronized", options = ["synchronized", "natural"] } + + +[[testcases]] +name = "drand-outage" +instances = { min = 1, max = 100, default = 5 } + + [testcases.params] + clients = { type = "int", default = 0 } + miners = { type = "int", default = 3 } + balance = { type = "float", default = 1 } + sectors = { type = "int", default = 1 } + role = { type = "string" } + genesis_timestamp_offset = { type = "int", default = 0 } + + + random_beacon_type = { type = "enum", default = "local-drand", options = ["mock", "local-drand", "external-drand"] } + + # Params relevant to drand nodes. drand nodes should have role="drand", and must all be + # in the same composition group. There must be at least threshold drand nodes. + # To get lotus nodes to actually use the drand nodes, you must set random_beacon_type="local-drand" + # for the lotus node groups. 
+ drand_period = { type = "duration", default="30s" } + drand_catchup_period = { type = "duration", default="10s" } + drand_threshold = { type = "int", default = 2 } + drand_gossip_relay = { type = "bool", default = true } + drand_log_level = { type = "string", default="info" } + suspend_events = { type = "string", default="", desc = "a sequence of halt/resume/wait events separated by '->'" } + + # Params relevant to pubsub tracing + enable_pubsub_tracer = { type = "bool", default = false } # Mining Mode: synchronized -vs- natural time + mining_mode = { type = "enum", default = "synchronized", options = ["synchronized", "natural"] } + + +[[testcases]] +name = "deals-stress" +instances = { min = 1, max = 100, default = 5 } + + [testcases.params] + clients = { type = "int", default = 1 } + miners = { type = "int", default = 1 } + balance = { type = "float", default = 1 } + sectors = { type = "int", default = 1 } + role = { type = "string" } + + genesis_timestamp_offset = { type = "int", default = 0 } + + random_beacon_type = { type = "enum", default = "mock", options = ["mock", "local-drand", "external-drand"] } + + # Params relevant to drand nodes. drand nodes should have role="drand", and must all be + # in the same composition group. There must be at least threshold drand nodes. + # To get lotus nodes to actually use the drand nodes, you must set random_beacon_type="local-drand" + # for the lotus node groups. 
+ drand_period = { type = "duration", default="10s" } + drand_threshold = { type = "int", default = 2 } + drand_gossip_relay = { type = "bool", default = true } + + # Params relevant to pubsub tracing + enable_pubsub_tracer = { type = "bool", default = false } + + # Mining Mode: synchronized -vs- natural time + mining_mode = { type = "enum", default = "synchronized", options = ["synchronized", "natural"] } + + deals = { type = "int", default = 1 } + deal_mode = { type = "enum", default = "serial", options = ["serial", "concurrent"] } + + +[[testcases]] +name = "paych-stress" +instances = { min = 1, max = 100, default = 5 } + + [testcases.params] + clients = { type = "int", default = 1 } + miners = { type = "int", default = 1 } + balance = { type = "float", default = 1 } + sectors = { type = "int", default = 1 } + role = { type = "string" } + genesis_timestamp_offset = { type = "int", default = 0 } + + random_beacon_type = { type = "enum", default = "local-drand", options = ["mock", "local-drand", "external-drand"] } + + # Params relevant to drand nodes. drand nodes should have role="drand", and must all be + # in the same composition group. There must be at least threshold drand nodes. + # To get lotus nodes to actually use the drand nodes, you must set random_beacon_type="local-drand" + # for the lotus node groups. 
+ drand_period = { type = "duration", default="10s" } + drand_threshold = { type = "int", default = 2 } + drand_gossip_relay = { type = "bool", default = true } + drand_log_level = { type = "string", default="info" } + suspend_events = { type = "string", default="", desc = "a sequence of halt/resume/wait events separated by '->'" } + + # Params relevant to pubsub tracing + enable_pubsub_tracer = { type = "bool", default = false } # Mining Mode: synchronized -vs- natural time + mining_mode = { type = "enum", default = "synchronized", options = ["synchronized", "natural"] } + + # ********** Test-case specific ********** + increments = { type = "int", default = "100", desc = "increments in which to send payment vouchers" } + lane_count = { type = "int", default = "256", desc = "lanes to open; vouchers will be distributed across these lanes in round-robin fashion" } + + +[[testcases]] +name = "recovery-failed-windowed-post" +instances = { min = 1, max = 100, default = 5 } + + [testcases.params] + clients = { type = "int", default = 1 } + miners = { type = "int", default = 1 } + balance = { type = "int", default = 1 } + sectors = { type = "int", default = 1 } + role = { type = "string" } + + genesis_timestamp_offset = { type = "int", default = 0 } + + random_beacon_type = { type = "enum", default = "mock", options = ["mock", "local-drand", "external-drand"] } + + # Params relevant to drand nodes. drand nodes should have role="drand", and must all be + # in the same composition group. There must be at least threshold drand nodes. + # To get lotus nodes to actually use the drand nodes, you must set random_beacon_type="local-drand" + # for the lotus node groups. 
+ drand_period = { type = "duration", default="10s" } + drand_threshold = { type = "int", default = 2 } + drand_gossip_relay = { type = "bool", default = true } + drand_log_level = { type = "string", default="info" } + + # Params relevant to pubsub tracing + enable_pubsub_tracer = { type = "bool", default = false } + mining_mode = { type = "enum", default = "synchronized", options = ["synchronized", "natural"] } diff --git a/testplans/lotus-soup/paych/README.md b/testplans/lotus-soup/paych/README.md new file mode 100644 index 00000000000..dbd5879edb4 --- /dev/null +++ b/testplans/lotus-soup/paych/README.md @@ -0,0 +1,32 @@ +# Payment channels end-to-end tests + +This package contains the following test cases, each of which is described +further below. + +- Payment channels stress test case (`stress.go`). + +## Payment channels stress test case (`stress.go`) + +***WIP | blocked due to https://github.com/filecoin-project/lotus/issues/2297*** + +This test case turns all clients into payment receivers and senders. +The first member to start in the group becomes the _receiver_. +All other members become _senders_. + +The _senders_ will open a single payment channel to the _receiver_, and will +wait for the message to be posted on-chain. We are setting +`build.MessageConfidence=1`, in order to accelerate the test. So we'll only wait +for a single tipset confirmation once we witness the message. + +Once the message is posted, we load the payment channel actor address and create +as many lanes as the `lane_count` test parameter dictates. + +When then fetch our total balance, and start sending it on the payment channel, +round-robinning across all lanes, until our balance is extinguished. + +**TODO:** + +- [ ] Assertions, metrics, etc. Actually gather statistics. Right now this is + just a smoke test, and it fails. +- [ ] Implement the _receiver_ logic. +- [ ] Model test lifetime by signalling end. 
\ No newline at end of file diff --git a/testplans/lotus-soup/paych/stress.go b/testplans/lotus-soup/paych/stress.go new file mode 100644 index 00000000000..85246603f0f --- /dev/null +++ b/testplans/lotus-soup/paych/stress.go @@ -0,0 +1,314 @@ +package paych + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/specs-actors/actors/builtin/paych" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/testground/sdk-go/sync" + + "github.com/filecoin-project/lotus/testplans/lotus-soup/testkit" +) + +var SendersDoneState = sync.State("senders-done") +var ReceiverReadyState = sync.State("receiver-ready") +var ReceiverAddedVouchersState = sync.State("receiver-added-vouchers") + +var VoucherTopic = sync.NewTopic("voucher", &paych.SignedVoucher{}) +var SettleTopic = sync.NewTopic("settle", cid.Cid{}) + +type ClientMode uint64 + +const ( + ModeSender ClientMode = iota + ModeReceiver +) + +func (cm ClientMode) String() string { + return [...]string{"Sender", "Receiver"}[cm] +} + +func getClientMode(groupSeq int64) ClientMode { + if groupSeq == 1 { + return ModeReceiver + } + return ModeSender +} + +// TODO Stress is currently WIP. We found blockers in Lotus that prevent us from +// making progress. See https://github.com/filecoin-project/lotus/issues/2297. +func Stress(t *testkit.TestEnvironment) error { + // Dispatch/forward non-client roles to defaults. + if t.Role != "client" { + return testkit.HandleDefaultRole(t) + } + + // This is a client role. + t.RecordMessage("running payments client") + + ctx := context.Background() + cl, err := testkit.PrepareClient(t) + if err != nil { + return err + } + + // are we the receiver or a sender? 
+ mode := getClientMode(t.GroupSeq) + t.RecordMessage("acting as %s", mode) + + var clients []*testkit.ClientAddressesMsg + sctx, cancel := context.WithCancel(ctx) + clientsCh := make(chan *testkit.ClientAddressesMsg) + t.SyncClient.MustSubscribe(sctx, testkit.ClientsAddrsTopic, clientsCh) + for i := 0; i < t.TestGroupInstanceCount; i++ { + clients = append(clients, <-clientsCh) + } + cancel() + + switch mode { + case ModeReceiver: + err := runReceiver(t, ctx, cl) + if err != nil { + return err + } + + case ModeSender: + err := runSender(ctx, t, clients, cl) + if err != nil { + return err + } + } + + // Signal that the client is done + t.SyncClient.MustSignalEntry(ctx, testkit.StateDone) + + // Signal to the miners to stop mining + t.SyncClient.MustSignalEntry(ctx, testkit.StateStopMining) + + return nil +} + +func runSender(ctx context.Context, t *testkit.TestEnvironment, clients []*testkit.ClientAddressesMsg, cl *testkit.LotusClient) error { + var ( + // lanes to open; vouchers will be distributed across these lanes in round-robin fashion + laneCount = t.IntParam("lane_count") + // number of vouchers to send on each lane + vouchersPerLane = t.IntParam("vouchers_per_lane") + // increments in which to send payment vouchers + increments = big.Mul(big.NewInt(int64(t.IntParam("increments"))), big.NewInt(int64(build.FilecoinPrecision))) + // channel amount should be enough to cover all vouchers + channelAmt = big.Mul(big.NewInt(int64(laneCount*vouchersPerLane)), increments) + ) + + // Lock up funds in the payment channel. 
+ recv := findReceiver(clients) + balance, err := cl.FullApi.WalletBalance(ctx, cl.Wallet.Address) + if err != nil { + return fmt.Errorf("failed to acquire wallet balance: %w", err) + } + + t.RecordMessage("my balance: %d", balance) + t.RecordMessage("creating payment channel; from=%s, to=%s, funds=%d", cl.Wallet.Address, recv.WalletAddr, channelAmt) + + pid := os.Getpid() + t.RecordMessage("sender pid: %d", pid) + + time.Sleep(20 * time.Second) + + channel, err := cl.FullApi.PaychGet(ctx, cl.Wallet.Address, recv.WalletAddr, channelAmt) + if err != nil { + return fmt.Errorf("failed to create payment channel: %w", err) + } + + if addr := channel.Channel; addr != address.Undef { + return fmt.Errorf("expected an Undef channel address, got: %s", addr) + } + + t.RecordMessage("payment channel created; msg_cid=%s", channel.WaitSentinel) + t.RecordMessage("waiting for payment channel message to appear on chain") + + // wait for the channel creation message to appear on chain. + _, err = cl.FullApi.StateWaitMsg(ctx, channel.WaitSentinel, 2, api.LookbackNoLimit, true) + if err != nil { + return fmt.Errorf("failed while waiting for payment channel creation msg to appear on chain: %w", err) + } + + // need to wait so that the channel is tracked. + // the full API waits for build.MessageConfidence (=1 in tests) before tracking the channel. + // we wait for 2 confirmations, so we have the assurance the channel is tracked. 
+ + t.RecordMessage("get payment channel address") + channelAddr, err := cl.FullApi.PaychGetWaitReady(ctx, channel.WaitSentinel) + if err != nil { + return fmt.Errorf("failed to get payment channel address: %w", err) + } + + t.RecordMessage("channel address: %s", channelAddr) + t.RecordMessage("allocating lanes; count=%d", laneCount) + + // allocate as many lanes as required + var lanes []uint64 + for i := 0; i < laneCount; i++ { + lane, err := cl.FullApi.PaychAllocateLane(ctx, channelAddr) + if err != nil { + return fmt.Errorf("failed to allocate lane: %w", err) + } + lanes = append(lanes, lane) + } + + t.RecordMessage("lanes allocated; count=%d", laneCount) + + <-t.SyncClient.MustBarrier(ctx, ReceiverReadyState, 1).C + + t.RecordMessage("sending payments in round-robin fashion across lanes; increments=%d", increments) + + // create vouchers + remaining := channelAmt + for i := 0; i < vouchersPerLane; i++ { + for _, lane := range lanes { + voucherAmt := big.Mul(big.NewInt(int64(i+1)), increments) + voucher, err := cl.FullApi.PaychVoucherCreate(ctx, channelAddr, voucherAmt, lane) + if err != nil { + return fmt.Errorf("failed to create voucher: %w", err) + } + t.RecordMessage("payment voucher created; lane=%d, nonce=%d, amount=%d", voucher.Voucher.Lane, voucher.Voucher.Nonce, voucher.Voucher.Amount) + + _, err = t.SyncClient.Publish(ctx, VoucherTopic, voucher.Voucher) + if err != nil { + return fmt.Errorf("failed to publish voucher: %w", err) + } + + remaining = big.Sub(remaining, increments) + t.RecordMessage("remaining balance: %d", remaining) + } + } + + t.RecordMessage("finished sending all payment vouchers") + + // Inform the receiver that all vouchers have been created + t.SyncClient.MustSignalEntry(ctx, SendersDoneState) + + // Wait for the receiver to add all vouchers + <-t.SyncClient.MustBarrier(ctx, ReceiverAddedVouchersState, 1).C + + t.RecordMessage("settle channel") + + // Settle the channel. 
When the receiver sees the settle message, they + // should automatically submit all vouchers. + settleMsgCid, err := cl.FullApi.PaychSettle(ctx, channelAddr) + if err != nil { + return fmt.Errorf("failed to settle payment channel: %w", err) + } + + t.SyncClient.Publish(ctx, SettleTopic, settleMsgCid) + if err != nil { + return fmt.Errorf("failed to publish settle message cid: %w", err) + } + + return nil +} + +func findReceiver(clients []*testkit.ClientAddressesMsg) *testkit.ClientAddressesMsg { + for _, c := range clients { + if getClientMode(c.GroupSeq) == ModeReceiver { + return c + } + } + return nil +} + +func runReceiver(t *testkit.TestEnvironment, ctx context.Context, cl *testkit.LotusClient) error { + // lanes to open; vouchers will be distributed across these lanes in round-robin fashion + laneCount := t.IntParam("lane_count") + // number of vouchers to send on each lane + vouchersPerLane := t.IntParam("vouchers_per_lane") + totalVouchers := laneCount * vouchersPerLane + + vouchers := make(chan *paych.SignedVoucher) + vouchersSub, err := t.SyncClient.Subscribe(ctx, VoucherTopic, vouchers) + if err != nil { + return fmt.Errorf("failed to subscribe to voucher topic: %w", err) + } + + settleMsgChan := make(chan cid.Cid) + settleSub, err := t.SyncClient.Subscribe(ctx, SettleTopic, settleMsgChan) + if err != nil { + return fmt.Errorf("failed to subscribe to settle topic: %w", err) + } + + // inform the clients that the receiver is ready for incoming vouchers + t.SyncClient.MustSignalEntry(ctx, ReceiverReadyState) + + t.RecordMessage("adding %d payment vouchers", totalVouchers) + + // Add each of the vouchers + var addedVouchers []*paych.SignedVoucher + for i := 0; i < totalVouchers; i++ { + v := <-vouchers + addedVouchers = append(addedVouchers, v) + + _, err := cl.FullApi.PaychVoucherAdd(ctx, v.ChannelAddr, v, nil, big.NewInt(0)) + if err != nil { + return fmt.Errorf("failed to add voucher: %w", err) + } + spendable, err := 
cl.FullApi.PaychVoucherCheckSpendable(ctx, v.ChannelAddr, v, nil, nil) + if err != nil { + return fmt.Errorf("failed to check voucher spendable: %w", err) + } + if !spendable { + return fmt.Errorf("expected voucher %d to be spendable", i) + } + + t.RecordMessage("payment voucher added; lane=%d, nonce=%d, amount=%d", v.Lane, v.Nonce, v.Amount) + } + + vouchersSub.Done() + + t.RecordMessage("finished adding all payment vouchers") + + // Inform the clients that the receiver has added all vouchers + t.SyncClient.MustSignalEntry(ctx, ReceiverAddedVouchersState) + + // Wait for the settle message (put on chain by the sender) + t.RecordMessage("waiting for client to put settle message on chain") + settleMsgCid := <-settleMsgChan + settleSub.Done() + + time.Sleep(5 * time.Second) + + t.RecordMessage("waiting for confirmation of settle message on chain: %s", settleMsgCid) + _, err = cl.FullApi.StateWaitMsg(ctx, settleMsgCid, 10, api.LookbackNoLimit, true) + if err != nil { + return fmt.Errorf("failed to wait for settle message: %w", err) + } + + // Note: Once the receiver sees the settle message on chain, it will + // automatically call submit voucher with the best vouchers + + // TODO: Uncomment this section once this PR is merged: + // https://github.com/filecoin-project/lotus/pull/3197 + //t.RecordMessage("checking that all %d vouchers are no longer spendable", len(addedVouchers)) + //for i, v := range addedVouchers { + // spendable, err := cl.FullApi.PaychVoucherCheckSpendable(ctx, v.ChannelAddr, v, nil, nil) + // if err != nil { + // return fmt.Errorf("failed to check voucher spendable: %w", err) + // } + // // Should no longer be spendable because the best voucher has been submitted + // if spendable { + // return fmt.Errorf("expected voucher %d to no longer be spendable", i) + // } + //} + + t.RecordMessage("all vouchers were submitted successfully") + + return nil +} diff --git a/testplans/lotus-soup/rfwp/chain_state.go b/testplans/lotus-soup/rfwp/chain_state.go new 
file mode 100644 index 00000000000..d91acdff9f3 --- /dev/null +++ b/testplans/lotus-soup/rfwp/chain_state.go @@ -0,0 +1,840 @@ +package rfwp + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "math" + corebig "math/big" + "os" + "sort" + "text/tabwriter" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + + "github.com/filecoin-project/lotus/testplans/lotus-soup/testkit" + + "github.com/filecoin-project/go-state-types/abi" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + tstats "github.com/filecoin-project/lotus/tools/stats" +) + +func UpdateChainState(t *testkit.TestEnvironment, m *testkit.LotusMiner) error { + height := 0 + headlag := 3 + + ctx := context.Background() + + tipsetsCh, err := tstats.GetTips(ctx, &v0api.WrapperV1Full{FullNode: m.FullApi}, abi.ChainEpoch(height), headlag) + if err != nil { + return err + } + + jsonFilename := fmt.Sprintf("%s%cchain-state.ndjson", t.TestOutputsPath, os.PathSeparator) + jsonFile, err := os.Create(jsonFilename) + if err != nil { + return err + } + defer jsonFile.Close() + jsonEncoder := json.NewEncoder(jsonFile) + + for tipset := range tipsetsCh { + maddrs, err := m.FullApi.StateListMiners(ctx, tipset.Key()) + if err != nil { + return err + } + + snapshot := ChainSnapshot{ + Height: tipset.Height(), + MinerStates: make(map[string]*MinerStateSnapshot), + } + + err = func() error { + cs.Lock() + defer cs.Unlock() + + for _, maddr := range maddrs { + err := func() error { + filename := fmt.Sprintf("%s%cstate-%s-%d", 
t.TestOutputsPath, os.PathSeparator, maddr, tipset.Height()) + + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + w := bufio.NewWriter(f) + defer w.Flush() + + minerInfo, err := info(t, m, maddr, w, tipset.Height()) + if err != nil { + return err + } + writeText(w, minerInfo) + + if tipset.Height()%100 == 0 { + printDiff(t, minerInfo, tipset.Height()) + } + + faultState, err := provingFaults(t, m, maddr, tipset.Height()) + if err != nil { + return err + } + writeText(w, faultState) + + provState, err := provingInfo(t, m, maddr, tipset.Height()) + if err != nil { + return err + } + writeText(w, provState) + + // record diff + recordDiff(minerInfo, provState, tipset.Height()) + + deadlines, err := provingDeadlines(t, m, maddr, tipset.Height()) + if err != nil { + return err + } + writeText(w, deadlines) + + sectorInfo, err := sectorsList(t, m, maddr, w, tipset.Height()) + if err != nil { + return err + } + writeText(w, sectorInfo) + + snapshot.MinerStates[maddr.String()] = &MinerStateSnapshot{ + Info: minerInfo, + Faults: faultState, + ProvingInfo: provState, + Deadlines: deadlines, + Sectors: sectorInfo, + } + + return jsonEncoder.Encode(snapshot) + }() + if err != nil { + return err + } + } + + cs.PrevHeight = tipset.Height() + + return nil + }() + if err != nil { + return err + } + } + + return nil +} + +type ChainSnapshot struct { + Height abi.ChainEpoch + + MinerStates map[string]*MinerStateSnapshot +} + +type MinerStateSnapshot struct { + Info *MinerInfo + Faults *ProvingFaultState + ProvingInfo *ProvingInfoState + Deadlines *ProvingDeadlines + Sectors *SectorInfo +} + +// writeText marshals m to text and writes to w, swallowing any errors along the way. 
+func writeText(w io.Writer, m plainTextMarshaler) { + b, err := m.MarshalPlainText() + if err != nil { + return + } + _, _ = w.Write(b) +} + +// if we make our structs `encoding.TextMarshaler`s, they all get stringified when marshaling to JSON +// instead of just using the default struct marshaler. +// so here's encoding.TextMarshaler with a different name, so that doesn't happen. +type plainTextMarshaler interface { + MarshalPlainText() ([]byte, error) +} + +type ProvingFaultState struct { + // FaultedSectors is a slice per-deadline faulty sectors. If the miner + // has no faulty sectors, this will be nil. + FaultedSectors [][]uint64 +} + +func (s *ProvingFaultState) MarshalPlainText() ([]byte, error) { + w := &bytes.Buffer{} + + if len(s.FaultedSectors) == 0 { + fmt.Fprintf(w, "no faulty sectors\n") + return w.Bytes(), nil + } + + tw := tabwriter.NewWriter(w, 2, 4, 2, ' ', 0) + _, _ = fmt.Fprintf(tw, "deadline\tsectors") + for deadline, sectors := range s.FaultedSectors { + for _, num := range sectors { + _, _ = fmt.Fprintf(tw, "%d\t%d\n", deadline, num) + } + } + + return w.Bytes(), nil +} + +func provingFaults(t *testkit.TestEnvironment, m *testkit.LotusMiner, maddr address.Address, height abi.ChainEpoch) (*ProvingFaultState, error) { + api := m.FullApi + ctx := context.Background() + + head, err := api.ChainHead(ctx) + if err != nil { + return nil, err + } + deadlines, err := api.StateMinerDeadlines(ctx, maddr, head.Key()) + if err != nil { + return nil, err + } + faultedSectors := make([][]uint64, len(deadlines)) + hasFaults := false + for dlIdx := range deadlines { + partitions, err := api.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK) + if err != nil { + return nil, err + } + + for _, partition := range partitions { + faulty, err := partition.FaultySectors.All(10000000) + if err != nil { + return nil, err + } + + if len(faulty) > 0 { + hasFaults = true + } + + faultedSectors[dlIdx] = append(faultedSectors[dlIdx], faulty...) 
+ } + } + result := new(ProvingFaultState) + if hasFaults { + result.FaultedSectors = faultedSectors + } + + return result, nil +} + +type ProvingInfoState struct { + CurrentEpoch abi.ChainEpoch + + ProvingPeriodStart abi.ChainEpoch + + Faults uint64 + ProvenSectors uint64 + FaultPercent float64 + Recoveries uint64 + + DeadlineIndex uint64 + DeadlineSectors uint64 + DeadlineOpen abi.ChainEpoch + DeadlineClose abi.ChainEpoch + DeadlineChallenge abi.ChainEpoch + DeadlineFaultCutoff abi.ChainEpoch + + WPoStProvingPeriod abi.ChainEpoch +} + +func (s *ProvingInfoState) MarshalPlainText() ([]byte, error) { + w := &bytes.Buffer{} + fmt.Fprintf(w, "Current Epoch: %d\n", s.CurrentEpoch) + fmt.Fprintf(w, "Chain Period: %d\n", s.CurrentEpoch/s.WPoStProvingPeriod) + fmt.Fprintf(w, "Chain Period Start: %s\n", epochTime(s.CurrentEpoch, (s.CurrentEpoch/s.WPoStProvingPeriod)*s.WPoStProvingPeriod)) + fmt.Fprintf(w, "Chain Period End: %s\n\n", epochTime(s.CurrentEpoch, (s.CurrentEpoch/s.WPoStProvingPeriod+1)*s.WPoStProvingPeriod)) + + fmt.Fprintf(w, "Proving Period Boundary: %d\n", s.ProvingPeriodStart%s.WPoStProvingPeriod) + fmt.Fprintf(w, "Proving Period Start: %s\n", epochTime(s.CurrentEpoch, s.ProvingPeriodStart)) + fmt.Fprintf(w, "Next Period Start: %s\n\n", epochTime(s.CurrentEpoch, s.ProvingPeriodStart+s.WPoStProvingPeriod)) + + fmt.Fprintf(w, "Faults: %d (%.2f%%)\n", s.Faults, s.FaultPercent) + fmt.Fprintf(w, "Recovering: %d\n", s.Recoveries) + //fmt.Fprintf(w, "New Sectors: %d\n\n", s.NewSectors) + + fmt.Fprintf(w, "Deadline Index: %d\n", s.DeadlineIndex) + fmt.Fprintf(w, "Deadline Sectors: %d\n", s.DeadlineSectors) + + fmt.Fprintf(w, "Deadline Open: %s\n", epochTime(s.CurrentEpoch, s.DeadlineOpen)) + fmt.Fprintf(w, "Deadline Close: %s\n", epochTime(s.CurrentEpoch, s.DeadlineClose)) + fmt.Fprintf(w, "Deadline Challenge: %s\n", epochTime(s.CurrentEpoch, s.DeadlineChallenge)) + fmt.Fprintf(w, "Deadline FaultCutoff: %s\n", epochTime(s.CurrentEpoch, s.DeadlineFaultCutoff)) + + 
return w.Bytes(), nil +} + +func provingInfo(t *testkit.TestEnvironment, m *testkit.LotusMiner, maddr address.Address, height abi.ChainEpoch) (*ProvingInfoState, error) { + lapi := m.FullApi + ctx := context.Background() + + head, err := lapi.ChainHead(ctx) + if err != nil { + return nil, err + } + + cd, err := lapi.StateMinerProvingDeadline(ctx, maddr, head.Key()) + if err != nil { + return nil, err + } + + deadlines, err := lapi.StateMinerDeadlines(ctx, maddr, head.Key()) + if err != nil { + return nil, err + } + + parts := map[uint64][]api.Partition{} + for dlIdx := range deadlines { + part, err := lapi.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK) + if err != nil { + return nil, err + } + + parts[uint64(dlIdx)] = part + } + + proving := uint64(0) + faults := uint64(0) + recovering := uint64(0) + + for _, partitions := range parts { + for _, partition := range partitions { + sc, err := partition.LiveSectors.Count() + if err != nil { + return nil, err + } + proving += sc + + fc, err := partition.FaultySectors.Count() + if err != nil { + return nil, err + } + faults += fc + + rc, err := partition.RecoveringSectors.Count() + if err != nil { + return nil, err + } + recovering += rc + } + } + + var faultPerc float64 + if proving > 0 { + faultPerc = float64(faults*10000/proving) / 100 + } + + s := ProvingInfoState{ + CurrentEpoch: cd.CurrentEpoch, + ProvingPeriodStart: cd.PeriodStart, + Faults: faults, + ProvenSectors: proving, + FaultPercent: faultPerc, + Recoveries: recovering, + DeadlineIndex: cd.Index, + DeadlineOpen: cd.Open, + DeadlineClose: cd.Close, + DeadlineChallenge: cd.Challenge, + DeadlineFaultCutoff: cd.FaultCutoff, + WPoStProvingPeriod: cd.WPoStProvingPeriod, + } + + if cd.Index < cd.WPoStPeriodDeadlines { + for _, partition := range parts[cd.Index] { + sc, err := partition.LiveSectors.Count() + if err != nil { + return nil, err + } + s.DeadlineSectors += sc + } + } + + return &s, nil +} + +func epochTime(curr, e abi.ChainEpoch) string 
{ + switch { + case curr > e: + return fmt.Sprintf("%d (%s ago)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e))) + case curr == e: + return fmt.Sprintf("%d (now)", e) + case curr < e: + return fmt.Sprintf("%d (in %s)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr))) + } + + panic("math broke") +} + +type ProvingDeadlines struct { + Deadlines []DeadlineInfo +} + +type DeadlineInfo struct { + Sectors uint64 + Partitions int + Proven uint64 + Current bool +} + +func (d *ProvingDeadlines) MarshalPlainText() ([]byte, error) { + w := new(bytes.Buffer) + tw := tabwriter.NewWriter(w, 2, 4, 2, ' ', 0) + _, _ = fmt.Fprintln(tw, "deadline\tsectors\tpartitions\tproven") + + for i, di := range d.Deadlines { + var cur string + if di.Current { + cur += "\t(current)" + } + _, _ = fmt.Fprintf(tw, "%d\t%d\t%d\t%d%s\n", i, di.Sectors, di.Partitions, di.Proven, cur) + } + tw.Flush() + return w.Bytes(), nil +} + +func provingDeadlines(t *testkit.TestEnvironment, m *testkit.LotusMiner, maddr address.Address, height abi.ChainEpoch) (*ProvingDeadlines, error) { + lapi := m.FullApi + ctx := context.Background() + + deadlines, err := lapi.StateMinerDeadlines(ctx, maddr, types.EmptyTSK) + if err != nil { + return nil, err + } + + di, err := lapi.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + if err != nil { + return nil, err + } + + infos := make([]DeadlineInfo, 0, len(deadlines)) + for dlIdx, deadline := range deadlines { + partitions, err := lapi.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK) + if err != nil { + return nil, err + } + + provenPartitions, err := deadline.PostSubmissions.Count() + if err != nil { + return nil, err + } + + var cur string + if di.Index == uint64(dlIdx) { + cur += "\t(current)" + } + + outInfo := DeadlineInfo{ + //Sectors: c, + Partitions: len(partitions), + Proven: provenPartitions, + Current: di.Index == uint64(dlIdx), + } + infos = append(infos, outInfo) + //_, _ = fmt.Fprintf(tw, 
"%d\t%d\t%d%s\n", dlIdx, len(partitions), provenPartitions, cur) + } + + return &ProvingDeadlines{Deadlines: infos}, nil +} + +type SectorInfo struct { + Sectors []abi.SectorNumber + SectorStates map[abi.SectorNumber]api.SectorInfo + Committed []abi.SectorNumber + Proving []abi.SectorNumber +} + +func (i *SectorInfo) MarshalPlainText() ([]byte, error) { + provingIDs := make(map[abi.SectorNumber]struct{}, len(i.Proving)) + for _, id := range i.Proving { + provingIDs[id] = struct{}{} + } + commitedIDs := make(map[abi.SectorNumber]struct{}, len(i.Committed)) + for _, id := range i.Committed { + commitedIDs[id] = struct{}{} + } + + w := new(bytes.Buffer) + tw := tabwriter.NewWriter(w, 8, 4, 1, ' ', 0) + + for _, s := range i.Sectors { + _, inSSet := commitedIDs[s] + _, inPSet := provingIDs[s] + + st, ok := i.SectorStates[s] + if !ok { + continue + } + + fmt.Fprintf(tw, "%d: %s\tsSet: %s\tpSet: %s\ttktH: %d\tseedH: %d\tdeals: %v\n", + s, + st.State, + yesno(inSSet), + yesno(inPSet), + st.Ticket.Epoch, + st.Seed.Epoch, + st.Deals, + ) + } + + if err := tw.Flush(); err != nil { + return nil, err + } + return w.Bytes(), nil +} + +func sectorsList(t *testkit.TestEnvironment, m *testkit.LotusMiner, maddr address.Address, w io.Writer, height abi.ChainEpoch) (*SectorInfo, error) { + node := m.FullApi + ctx := context.Background() + + list, err := m.MinerApi.SectorsList(ctx) + if err != nil { + return nil, err + } + + activeSet, err := node.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK) + if err != nil { + return nil, err + } + activeIDs := make(map[abi.SectorNumber]struct{}, len(activeSet)) + for _, info := range activeSet { + activeIDs[info.SectorNumber] = struct{}{} + } + + sset, err := node.StateMinerSectors(ctx, maddr, nil, types.EmptyTSK) + if err != nil { + return nil, err + } + commitedIDs := make(map[abi.SectorNumber]struct{}, len(activeSet)) + for _, info := range sset { + commitedIDs[info.SectorNumber] = struct{}{} + } + + sort.Slice(list, func(i, j int) bool { 
+ return list[i] < list[j] + }) + + i := SectorInfo{Sectors: list, SectorStates: make(map[abi.SectorNumber]api.SectorInfo, len(list))} + + for _, s := range list { + st, err := m.MinerApi.SectorsStatus(ctx, s, true) + if err != nil { + fmt.Fprintf(w, "%d:\tError: %s\n", s, err) + continue + } + i.SectorStates[s] = st + } + return &i, nil +} + +func yesno(b bool) string { + if b { + return "YES" + } + return "NO" +} + +type MinerInfo struct { + MinerAddr address.Address + SectorSize string + + MinerPower *api.MinerPower + + CommittedBytes big.Int + ProvingBytes big.Int + FaultyBytes big.Int + FaultyPercentage float64 + + Balance big.Int + PreCommitDeposits big.Int + LockedFunds big.Int + AvailableFunds big.Int + WorkerBalance big.Int + MarketEscrow big.Int + MarketLocked big.Int + + SectorStateCounts map[sealing.SectorState]int +} + +func (i *MinerInfo) MarshalPlainText() ([]byte, error) { + w := new(bytes.Buffer) + fmt.Fprintf(w, "Miner: %s\n", i.MinerAddr) + fmt.Fprintf(w, "Sector Size: %s\n", i.SectorSize) + + pow := i.MinerPower + + fmt.Fprintf(w, "Byte Power: %s / %s (%0.4f%%)\n", + types.SizeStr(pow.MinerPower.RawBytePower), + types.SizeStr(pow.TotalPower.RawBytePower), + types.BigDivFloat( + types.BigMul(pow.MinerPower.RawBytePower, big.NewInt(100)), + pow.TotalPower.RawBytePower, + ), + ) + + fmt.Fprintf(w, "Actual Power: %s / %s (%0.4f%%)\n", + types.DeciStr(pow.MinerPower.QualityAdjPower), + types.DeciStr(pow.TotalPower.QualityAdjPower), + types.BigDivFloat( + types.BigMul(pow.MinerPower.QualityAdjPower, big.NewInt(100)), + pow.TotalPower.QualityAdjPower, + ), + ) + + fmt.Fprintf(w, "\tCommitted: %s\n", types.SizeStr(i.CommittedBytes)) + + if i.FaultyBytes.Int == nil || i.FaultyBytes.IsZero() { + fmt.Fprintf(w, "\tProving: %s\n", types.SizeStr(i.ProvingBytes)) + } else { + fmt.Fprintf(w, "\tProving: %s (%s Faulty, %.2f%%)\n", + types.SizeStr(i.ProvingBytes), + types.SizeStr(i.FaultyBytes), + i.FaultyPercentage) + } + + if !i.MinerPower.HasMinPower { + 
fmt.Fprintf(w, "Below minimum power threshold, no blocks will be won\n") + } else { + + winRatio := new(corebig.Rat).SetFrac( + types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(build.BlocksPerEpoch)).Int, + pow.TotalPower.QualityAdjPower.Int, + ) + + if winRatioFloat, _ := winRatio.Float64(); winRatioFloat > 0 { + + // if the corresponding poisson distribution isn't infinitely small then + // throw it into the mix as well, accounting for multi-wins + winRationWithPoissonFloat := -math.Expm1(-winRatioFloat) + winRationWithPoisson := new(corebig.Rat).SetFloat64(winRationWithPoissonFloat) + if winRationWithPoisson != nil { + winRatio = winRationWithPoisson + winRatioFloat = winRationWithPoissonFloat + } + + weekly, _ := new(corebig.Rat).Mul( + winRatio, + new(corebig.Rat).SetInt64(7*builtin.EpochsInDay), + ).Float64() + + avgDuration, _ := new(corebig.Rat).Mul( + new(corebig.Rat).SetInt64(builtin.EpochDurationSeconds), + new(corebig.Rat).Inv(winRatio), + ).Float64() + + fmt.Fprintf(w, "Projected average block win rate: %.02f/week (every %s)\n", + weekly, + (time.Second * time.Duration(avgDuration)).Truncate(time.Second).String(), + ) + + // Geometric distribution of P(Y < k) calculated as described in https://en.wikipedia.org/wiki/Geometric_distribution#Probability_Outcomes_Examples + // https://www.wolframalpha.com/input/?i=t+%3E+0%3B+p+%3E+0%3B+p+%3C+1%3B+c+%3E+0%3B+c+%3C1%3B+1-%281-p%29%5E%28t%29%3Dc%3B+solve+t + // t == how many dice-rolls (epochs) before win + // p == winRate == ( minerPower / netPower ) + // c == target probability of win ( 99.9% in this case ) + fmt.Fprintf(w, "Projected block win with 99.9%% probability every %s\n", + (time.Second * time.Duration( + builtin.EpochDurationSeconds*math.Log(1-0.999)/ + math.Log(1-winRatioFloat), + )).Truncate(time.Second).String(), + ) + fmt.Fprintln(w, "(projections DO NOT account for future network and miner growth)") + } + } + + fmt.Fprintf(w, "Miner Balance: %s\n", types.FIL(i.Balance)) + 
fmt.Fprintf(w, "\tPreCommit: %s\n", types.FIL(i.PreCommitDeposits)) + fmt.Fprintf(w, "\tLocked: %s\n", types.FIL(i.LockedFunds)) + fmt.Fprintf(w, "\tAvailable: %s\n", types.FIL(i.AvailableFunds)) + fmt.Fprintf(w, "Worker Balance: %s\n", types.FIL(i.WorkerBalance)) + fmt.Fprintf(w, "Market (Escrow): %s\n", types.FIL(i.MarketEscrow)) + fmt.Fprintf(w, "Market (Locked): %s\n\n", types.FIL(i.MarketLocked)) + + buckets := i.SectorStateCounts + + var sorted []stateMeta + for state, i := range buckets { + sorted = append(sorted, stateMeta{i: i, state: state}) + } + + sort.Slice(sorted, func(i, j int) bool { + return stateOrder[sorted[i].state].i < stateOrder[sorted[j].state].i + }) + + for _, s := range sorted { + _, _ = fmt.Fprintf(w, "\t%s: %d\n", s.state, s.i) + } + + return w.Bytes(), nil +} + +func info(t *testkit.TestEnvironment, m *testkit.LotusMiner, maddr address.Address, w io.Writer, height abi.ChainEpoch) (*MinerInfo, error) { + api := m.FullApi + ctx := context.Background() + + ts, err := api.ChainHead(ctx) + if err != nil { + return nil, err + } + + mact, err := api.StateGetActor(ctx, maddr, ts.Key()) + if err != nil { + return nil, err + } + + i := MinerInfo{MinerAddr: maddr} + + // Sector size + mi, err := api.StateMinerInfo(ctx, maddr, ts.Key()) + if err != nil { + return nil, err + } + + i.SectorSize = types.SizeStr(types.NewInt(uint64(mi.SectorSize))) + + i.MinerPower, err = api.StateMinerPower(ctx, maddr, ts.Key()) + if err != nil { + return nil, err + } + + secCounts, err := api.StateMinerSectorCount(ctx, maddr, ts.Key()) + if err != nil { + return nil, err + } + faults, err := api.StateMinerFaults(ctx, maddr, ts.Key()) + if err != nil { + return nil, err + } + + nfaults, err := faults.Count() + if err != nil { + return nil, err + } + + i.CommittedBytes = types.BigMul(types.NewInt(secCounts.Live), types.NewInt(uint64(mi.SectorSize))) + i.ProvingBytes = types.BigMul(types.NewInt(secCounts.Active), types.NewInt(uint64(mi.SectorSize))) + + if nfaults != 0 
{ + if secCounts.Live != 0 { + i.FaultyPercentage = float64(10000*nfaults/secCounts.Live) / 100. + } + i.FaultyBytes = types.BigMul(types.NewInt(nfaults), types.NewInt(uint64(mi.SectorSize))) + } + + stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(api)) + mas, err := miner.Load(stor, mact) + if err != nil { + return nil, err + } + + funds, err := mas.LockedFunds() + if err != nil { + return nil, err + } + + i.Balance = mact.Balance + i.PreCommitDeposits = funds.PreCommitDeposits + i.LockedFunds = funds.VestingFunds + i.AvailableFunds, err = mas.AvailableBalance(mact.Balance) + if err != nil { + return nil, err + } + + wb, err := api.WalletBalance(ctx, mi.Worker) + if err != nil { + return nil, err + } + i.WorkerBalance = wb + + mb, err := api.StateMarketBalance(ctx, maddr, types.EmptyTSK) + if err != nil { + return nil, err + } + i.MarketEscrow = mb.Escrow + i.MarketLocked = mb.Locked + + sectors, err := m.MinerApi.SectorsList(ctx) + if err != nil { + return nil, err + } + + buckets := map[sealing.SectorState]int{ + "Total": len(sectors), + } + for _, s := range sectors { + st, err := m.MinerApi.SectorsStatus(ctx, s, true) + if err != nil { + return nil, err + } + + buckets[sealing.SectorState(st.State)]++ + } + i.SectorStateCounts = buckets + + return &i, nil +} + +type stateMeta struct { + i int + state sealing.SectorState +} + +var stateOrder = map[sealing.SectorState]stateMeta{} +var stateList = []stateMeta{ + {state: "Total"}, + {state: sealing.Proving}, + + {state: sealing.UndefinedSectorState}, + {state: sealing.Empty}, + {state: sealing.Packing}, + {state: sealing.PreCommit1}, + {state: sealing.PreCommit2}, + {state: sealing.PreCommitting}, + {state: sealing.PreCommitWait}, + {state: sealing.WaitSeed}, + {state: sealing.Committing}, + {state: sealing.CommitWait}, + {state: sealing.FinalizeSector}, + + {state: sealing.FailedUnrecoverable}, + {state: sealing.SealPreCommit1Failed}, + {state: sealing.SealPreCommit2Failed}, + {state: 
sealing.PreCommitFailed}, + {state: sealing.ComputeProofFailed}, + {state: sealing.CommitFailed}, + {state: sealing.PackingFailed}, + {state: sealing.FinalizeFailed}, + {state: sealing.Faulty}, + {state: sealing.FaultReported}, + {state: sealing.FaultedFinal}, +} + +func init() { + for i, state := range stateList { + stateOrder[state.state] = stateMeta{ + i: i, + } + } +} diff --git a/testplans/lotus-soup/rfwp/diffs.go b/testplans/lotus-soup/rfwp/diffs.go new file mode 100644 index 00000000000..0f6e347255d --- /dev/null +++ b/testplans/lotus-soup/rfwp/diffs.go @@ -0,0 +1,295 @@ +package rfwp + +import ( + "bufio" + "fmt" + "os" + "sort" + "sync" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/testplans/lotus-soup/testkit" +) + +type ChainState struct { + sync.Mutex + + PrevHeight abi.ChainEpoch + DiffHeight map[string]map[string]map[abi.ChainEpoch]big.Int // height -> value + DiffValue map[string]map[string]map[string][]abi.ChainEpoch // value -> []height + DiffCmp map[string]map[string]map[string][]abi.ChainEpoch // difference (height, height-1) -> []height + valueTypes []string +} + +func NewChainState() *ChainState { + cs := &ChainState{} + cs.PrevHeight = abi.ChainEpoch(-1) + cs.DiffHeight = make(map[string]map[string]map[abi.ChainEpoch]big.Int) // height -> value + cs.DiffValue = make(map[string]map[string]map[string][]abi.ChainEpoch) // value -> []height + cs.DiffCmp = make(map[string]map[string]map[string][]abi.ChainEpoch) // difference (height, height-1) -> []height + cs.valueTypes = []string{"MinerPower", "CommittedBytes", "ProvingBytes", "Balance", "PreCommitDeposits", "LockedFunds", "AvailableFunds", "WorkerBalance", "MarketEscrow", "MarketLocked", "Faults", "ProvenSectors", "Recoveries"} + return cs +} + +var ( + cs *ChainState +) + +func init() { + cs = NewChainState() +} + +func printDiff(t *testkit.TestEnvironment, mi *MinerInfo, height abi.ChainEpoch) { + 
maddr := mi.MinerAddr.String() + filename := fmt.Sprintf("%s%cdiff-%s-%d", t.TestOutputsPath, os.PathSeparator, maddr, height) + + f, err := os.Create(filename) + if err != nil { + panic(err) + } + defer f.Close() + + w := bufio.NewWriter(f) + defer w.Flush() + + keys := make([]string, 0, len(cs.DiffCmp[maddr])) + for k := range cs.DiffCmp[maddr] { + keys = append(keys, k) + } + sort.Strings(keys) + + fmt.Fprintln(w, "=====", maddr, "=====") + for i, valueName := range keys { + fmt.Fprintln(w, toCharStr(i), "=====", valueName, "=====") + if len(cs.DiffCmp[maddr][valueName]) > 0 { + fmt.Fprintf(w, "%s diff of |\n", toCharStr(i)) + } + + for difference, heights := range cs.DiffCmp[maddr][valueName] { + fmt.Fprintf(w, "%s diff of %30v at heights %v\n", toCharStr(i), difference, heights) + } + } +} + +func recordDiff(mi *MinerInfo, ps *ProvingInfoState, height abi.ChainEpoch) { + maddr := mi.MinerAddr.String() + if _, ok := cs.DiffHeight[maddr]; !ok { + cs.DiffHeight[maddr] = make(map[string]map[abi.ChainEpoch]big.Int) + cs.DiffValue[maddr] = make(map[string]map[string][]abi.ChainEpoch) + cs.DiffCmp[maddr] = make(map[string]map[string][]abi.ChainEpoch) + + for _, v := range cs.valueTypes { + cs.DiffHeight[maddr][v] = make(map[abi.ChainEpoch]big.Int) + cs.DiffValue[maddr][v] = make(map[string][]abi.ChainEpoch) + cs.DiffCmp[maddr][v] = make(map[string][]abi.ChainEpoch) + } + } + + { + value := big.Int(mi.MinerPower.MinerPower.RawBytePower) + cs.DiffHeight[maddr]["MinerPower"][height] = value + cs.DiffValue[maddr]["MinerPower"][value.String()] = append(cs.DiffValue[maddr]["MinerPower"][value.String()], height) + + if cs.PrevHeight != -1 { + prevValue := cs.DiffHeight[maddr]["MinerPower"][cs.PrevHeight] + cmp := big.Zero() + cmp.Sub(value.Int, prevValue.Int) // value - prevValue + if big.Cmp(cmp, big.Zero()) != 0 { + cs.DiffCmp[maddr]["MinerPower"][cmp.String()] = append(cs.DiffCmp[maddr]["MinerPower"][cmp.String()], height) + } + } + } + + { + value := 
big.Int(mi.CommittedBytes) + cs.DiffHeight[maddr]["CommittedBytes"][height] = value + cs.DiffValue[maddr]["CommittedBytes"][value.String()] = append(cs.DiffValue[maddr]["CommittedBytes"][value.String()], height) + + if cs.PrevHeight != -1 { + prevValue := cs.DiffHeight[maddr]["CommittedBytes"][cs.PrevHeight] + cmp := big.Zero() + cmp.Sub(value.Int, prevValue.Int) // value - prevValue + if big.Cmp(cmp, big.Zero()) != 0 { + cs.DiffCmp[maddr]["CommittedBytes"][cmp.String()] = append(cs.DiffCmp[maddr]["CommittedBytes"][cmp.String()], height) + } + } + } + + { + value := big.Int(mi.ProvingBytes) + cs.DiffHeight[maddr]["ProvingBytes"][height] = value + cs.DiffValue[maddr]["ProvingBytes"][value.String()] = append(cs.DiffValue[maddr]["ProvingBytes"][value.String()], height) + + if cs.PrevHeight != -1 { + prevValue := cs.DiffHeight[maddr]["ProvingBytes"][cs.PrevHeight] + cmp := big.Zero() + cmp.Sub(value.Int, prevValue.Int) // value - prevValue + if big.Cmp(cmp, big.Zero()) != 0 { + cs.DiffCmp[maddr]["ProvingBytes"][cmp.String()] = append(cs.DiffCmp[maddr]["ProvingBytes"][cmp.String()], height) + } + } + } + + { + value := big.Int(mi.Balance) + roundBalance(&value) + cs.DiffHeight[maddr]["Balance"][height] = value + cs.DiffValue[maddr]["Balance"][value.String()] = append(cs.DiffValue[maddr]["Balance"][value.String()], height) + + if cs.PrevHeight != -1 { + prevValue := cs.DiffHeight[maddr]["Balance"][cs.PrevHeight] + cmp := big.Zero() + cmp.Sub(value.Int, prevValue.Int) // value - prevValue + if big.Cmp(cmp, big.Zero()) != 0 { + cs.DiffCmp[maddr]["Balance"][cmp.String()] = append(cs.DiffCmp[maddr]["Balance"][cmp.String()], height) + } + } + } + + { + value := big.Int(mi.PreCommitDeposits) + cs.DiffHeight[maddr]["PreCommitDeposits"][height] = value + cs.DiffValue[maddr]["PreCommitDeposits"][value.String()] = append(cs.DiffValue[maddr]["PreCommitDeposits"][value.String()], height) + + if cs.PrevHeight != -1 { + prevValue := 
cs.DiffHeight[maddr]["PreCommitDeposits"][cs.PrevHeight] + cmp := big.Zero() + cmp.Sub(value.Int, prevValue.Int) // value - prevValue + if big.Cmp(cmp, big.Zero()) != 0 { + cs.DiffCmp[maddr]["PreCommitDeposits"][cmp.String()] = append(cs.DiffCmp[maddr]["PreCommitDeposits"][cmp.String()], height) + } + } + } + + { + value := big.Int(mi.LockedFunds) + roundBalance(&value) + cs.DiffHeight[maddr]["LockedFunds"][height] = value + cs.DiffValue[maddr]["LockedFunds"][value.String()] = append(cs.DiffValue[maddr]["LockedFunds"][value.String()], height) + + if cs.PrevHeight != -1 { + prevValue := cs.DiffHeight[maddr]["LockedFunds"][cs.PrevHeight] + cmp := big.Zero() + cmp.Sub(value.Int, prevValue.Int) // value - prevValue + if big.Cmp(cmp, big.Zero()) != 0 { + cs.DiffCmp[maddr]["LockedFunds"][cmp.String()] = append(cs.DiffCmp[maddr]["LockedFunds"][cmp.String()], height) + } + } + } + + { + value := big.Int(mi.AvailableFunds) + roundBalance(&value) + cs.DiffHeight[maddr]["AvailableFunds"][height] = value + cs.DiffValue[maddr]["AvailableFunds"][value.String()] = append(cs.DiffValue[maddr]["AvailableFunds"][value.String()], height) + + if cs.PrevHeight != -1 { + prevValue := cs.DiffHeight[maddr]["AvailableFunds"][cs.PrevHeight] + cmp := big.Zero() + cmp.Sub(value.Int, prevValue.Int) // value - prevValue + if big.Cmp(cmp, big.Zero()) != 0 { + cs.DiffCmp[maddr]["AvailableFunds"][cmp.String()] = append(cs.DiffCmp[maddr]["AvailableFunds"][cmp.String()], height) + } + } + } + + { + value := big.Int(mi.WorkerBalance) + cs.DiffHeight[maddr]["WorkerBalance"][height] = value + cs.DiffValue[maddr]["WorkerBalance"][value.String()] = append(cs.DiffValue[maddr]["WorkerBalance"][value.String()], height) + + if cs.PrevHeight != -1 { + prevValue := cs.DiffHeight[maddr]["WorkerBalance"][cs.PrevHeight] + cmp := big.Zero() + cmp.Sub(value.Int, prevValue.Int) // value - prevValue + if big.Cmp(cmp, big.Zero()) != 0 { + cs.DiffCmp[maddr]["WorkerBalance"][cmp.String()] = 
append(cs.DiffCmp[maddr]["WorkerBalance"][cmp.String()], height) + } + } + } + + { + value := big.Int(mi.MarketEscrow) + cs.DiffHeight[maddr]["MarketEscrow"][height] = value + cs.DiffValue[maddr]["MarketEscrow"][value.String()] = append(cs.DiffValue[maddr]["MarketEscrow"][value.String()], height) + + if cs.PrevHeight != -1 { + prevValue := cs.DiffHeight[maddr]["MarketEscrow"][cs.PrevHeight] + cmp := big.Zero() + cmp.Sub(value.Int, prevValue.Int) // value - prevValue + if big.Cmp(cmp, big.Zero()) != 0 { + cs.DiffCmp[maddr]["MarketEscrow"][cmp.String()] = append(cs.DiffCmp[maddr]["MarketEscrow"][cmp.String()], height) + } + } + } + + { + value := big.Int(mi.MarketLocked) + cs.DiffHeight[maddr]["MarketLocked"][height] = value + cs.DiffValue[maddr]["MarketLocked"][value.String()] = append(cs.DiffValue[maddr]["MarketLocked"][value.String()], height) + + if cs.PrevHeight != -1 { + prevValue := cs.DiffHeight[maddr]["MarketLocked"][cs.PrevHeight] + cmp := big.Zero() + cmp.Sub(value.Int, prevValue.Int) // value - prevValue + if big.Cmp(cmp, big.Zero()) != 0 { + cs.DiffCmp[maddr]["MarketLocked"][cmp.String()] = append(cs.DiffCmp[maddr]["MarketLocked"][cmp.String()], height) + } + } + } + + { + value := big.NewInt(int64(ps.Faults)) + cs.DiffHeight[maddr]["Faults"][height] = value + cs.DiffValue[maddr]["Faults"][value.String()] = append(cs.DiffValue[maddr]["Faults"][value.String()], height) + + if cs.PrevHeight != -1 { + prevValue := cs.DiffHeight[maddr]["Faults"][cs.PrevHeight] + cmp := big.Zero() + cmp.Sub(value.Int, prevValue.Int) // value - prevValue + if big.Cmp(cmp, big.Zero()) != 0 { + cs.DiffCmp[maddr]["Faults"][cmp.String()] = append(cs.DiffCmp[maddr]["Faults"][cmp.String()], height) + } + } + } + + { + value := big.NewInt(int64(ps.ProvenSectors)) + cs.DiffHeight[maddr]["ProvenSectors"][height] = value + cs.DiffValue[maddr]["ProvenSectors"][value.String()] = append(cs.DiffValue[maddr]["ProvenSectors"][value.String()], height) + + if cs.PrevHeight != -1 { + prevValue 
:= cs.DiffHeight[maddr]["ProvenSectors"][cs.PrevHeight] + cmp := big.Zero() + cmp.Sub(value.Int, prevValue.Int) // value - prevValue + if big.Cmp(cmp, big.Zero()) != 0 { + cs.DiffCmp[maddr]["ProvenSectors"][cmp.String()] = append(cs.DiffCmp[maddr]["ProvenSectors"][cmp.String()], height) + } + } + } + + { + value := big.NewInt(int64(ps.Recoveries)) + cs.DiffHeight[maddr]["Recoveries"][height] = value + cs.DiffValue[maddr]["Recoveries"][value.String()] = append(cs.DiffValue[maddr]["Recoveries"][value.String()], height) + + if cs.PrevHeight != -1 { + prevValue := cs.DiffHeight[maddr]["Recoveries"][cs.PrevHeight] + cmp := big.Zero() + cmp.Sub(value.Int, prevValue.Int) // value - prevValue + if big.Cmp(cmp, big.Zero()) != 0 { + cs.DiffCmp[maddr]["Recoveries"][cmp.String()] = append(cs.DiffCmp[maddr]["Recoveries"][cmp.String()], height) + } + } + } +} + +func roundBalance(i *big.Int) { + *i = big.Div(*i, big.NewInt(1000000000000000)) + *i = big.Mul(*i, big.NewInt(1000000000000000)) +} + +func toCharStr(i int) string { + return string('a' + i) +} diff --git a/testplans/lotus-soup/rfwp/e2e.go b/testplans/lotus-soup/rfwp/e2e.go new file mode 100644 index 00000000000..c4c37b11111 --- /dev/null +++ b/testplans/lotus-soup/rfwp/e2e.go @@ -0,0 +1,347 @@ +package rfwp + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "os" + "sort" + "strings" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/testplans/lotus-soup/testkit" + "golang.org/x/sync/errgroup" +) + +func RecoveryFromFailedWindowedPoStE2E(t *testkit.TestEnvironment) error { + switch t.Role { + case "bootstrapper": + return testkit.HandleDefaultRole(t) + case "client": + return handleClient(t) + case "miner": + return handleMiner(t) + case "miner-full-slash": + return handleMinerFullSlash(t) + case "miner-partial-slash": + return handleMinerPartialSlash(t) + } + 
+ return fmt.Errorf("unknown role: %s", t.Role) +} + +func handleMiner(t *testkit.TestEnvironment) error { + m, err := testkit.PrepareMiner(t) + if err != nil { + return err + } + + ctx := context.Background() + myActorAddr, err := m.MinerApi.ActorAddress(ctx) + if err != nil { + return err + } + + t.RecordMessage("running miner: %s", myActorAddr) + + if t.GroupSeq == 1 { + go FetchChainState(t, m) + } + + go UpdateChainState(t, m) + + minersToBeSlashed := 2 + ch := make(chan testkit.SlashedMinerMsg) + sub := t.SyncClient.MustSubscribe(ctx, testkit.SlashedMinerTopic, ch) + var eg errgroup.Group + + for i := 0; i < minersToBeSlashed; i++ { + select { + case slashedMiner := <-ch: + // wait for slash + eg.Go(func() error { + select { + case <-waitForSlash(t, slashedMiner): + case err = <-t.SyncClient.MustBarrier(ctx, testkit.StateAbortTest, 1).C: + if err != nil { + return err + } + return errors.New("got abort signal, exitting") + } + return nil + }) + case err := <-sub.Done(): + return fmt.Errorf("got error while waiting for slashed miners: %w", err) + case err := <-t.SyncClient.MustBarrier(ctx, testkit.StateAbortTest, 1).C: + if err != nil { + return err + } + return errors.New("got abort signal, exitting") + } + } + + errc := make(chan error) + go func() { + errc <- eg.Wait() + }() + + select { + case err := <-errc: + if err != nil { + return err + } + case err := <-t.SyncClient.MustBarrier(ctx, testkit.StateAbortTest, 1).C: + if err != nil { + return err + } + return errors.New("got abort signal, exitting") + } + + t.SyncClient.MustSignalAndWait(ctx, testkit.StateDone, t.TestInstanceCount) + return nil +} + +func waitForSlash(t *testkit.TestEnvironment, msg testkit.SlashedMinerMsg) chan error { + // assert that balance got reduced with that much 5 times (sector fee) + // assert that balance got reduced with that much 2 times (termination fee) + // assert that balance got increased with that much 10 times (block reward) + // assert that power got increased with 
that much 1 times (after sector is sealed) + // assert that power got reduced with that much 1 times (after sector is announced faulty) + slashedMiner := msg.MinerActorAddr + + errc := make(chan error) + go func() { + foundSlashConditions := false + for range time.Tick(10 * time.Second) { + if foundSlashConditions { + close(errc) + return + } + t.RecordMessage("wait for slashing, tick") + func() { + cs.Lock() + defer cs.Unlock() + + negativeAmounts := []big.Int{} + negativeDiffs := make(map[big.Int][]abi.ChainEpoch) + + for am, heights := range cs.DiffCmp[slashedMiner.String()]["LockedFunds"] { + amount, err := big.FromString(am) + if err != nil { + errc <- fmt.Errorf("cannot parse LockedFunds amount: %w:", err) + return + } + + // amount is negative => slash condition + if big.Cmp(amount, big.Zero()) < 0 { + negativeDiffs[amount] = heights + negativeAmounts = append(negativeAmounts, amount) + } + } + + t.RecordMessage("negative diffs: %d", len(negativeDiffs)) + if len(negativeDiffs) < 3 { + return + } + + sort.Slice(negativeAmounts, func(i, j int) bool { return big.Cmp(negativeAmounts[i], negativeAmounts[j]) > 0 }) + + // TODO: confirm the largest is > 18 filecoin + // TODO: confirm the next largest is > 9 filecoin + foundSlashConditions = true + }() + } + }() + + return errc +} + +func handleMinerFullSlash(t *testkit.TestEnvironment) error { + m, err := testkit.PrepareMiner(t) + if err != nil { + return err + } + + ctx := context.Background() + myActorAddr, err := m.MinerApi.ActorAddress(ctx) + if err != nil { + return err + } + + t.RecordMessage("running miner, full slash: %s", myActorAddr) + + // TODO: wait until we have sealed a deal for a client + time.Sleep(240 * time.Second) + + t.RecordMessage("shutting down miner, full slash: %s", myActorAddr) + + ctxt, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + err = m.StopFn(ctxt) + if err != nil { + //return err + t.RecordMessage("err from StopFn: %s", err.Error()) // TODO: expect this to be 
fixed on Lotus + } + + t.RecordMessage("shutdown miner, full slash: %s", myActorAddr) + + t.SyncClient.MustPublish(ctx, testkit.SlashedMinerTopic, testkit.SlashedMinerMsg{ + MinerActorAddr: myActorAddr, + }) + + t.SyncClient.MustSignalAndWait(ctx, testkit.StateDone, t.TestInstanceCount) + return nil +} + +func handleMinerPartialSlash(t *testkit.TestEnvironment) error { + m, err := testkit.PrepareMiner(t) + if err != nil { + return err + } + + ctx := context.Background() + myActorAddr, err := m.MinerApi.ActorAddress(ctx) + if err != nil { + return err + } + + t.RecordMessage("running miner, partial slash: %s", myActorAddr) + + // TODO: wait until we have sealed a deal for a client + time.Sleep(185 * time.Second) + + t.RecordMessage("shutting down miner, partial slash: %s", myActorAddr) + + ctxt, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + err = m.StopFn(ctxt) + if err != nil { + //return err + t.RecordMessage("err from StopFn: %s", err.Error()) // TODO: expect this to be fixed on Lotus + } + + t.RecordMessage("shutdown miner, partial slash: %s", myActorAddr) + + t.SyncClient.MustPublish(ctx, testkit.SlashedMinerTopic, testkit.SlashedMinerMsg{ + MinerActorAddr: myActorAddr, + }) + + time.Sleep(300 * time.Second) + + rm, err := testkit.RestoreMiner(t, m) + if err != nil { + t.RecordMessage("got err: %s", err.Error()) + return err + } + + myActorAddr, err = rm.MinerApi.ActorAddress(ctx) + if err != nil { + t.RecordMessage("got err: %s", err.Error()) + return err + } + + t.RecordMessage("running miner again, partial slash: %s", myActorAddr) + + time.Sleep(3600 * time.Second) + + //t.SyncClient.MustSignalAndWait(ctx, testkit.StateDone, t.TestInstanceCount) + return nil +} + +func handleClient(t *testkit.TestEnvironment) error { + cl, err := testkit.PrepareClient(t) + if err != nil { + return err + } + + // This is a client role + t.RecordMessage("running client") + + ctx := context.Background() + client := cl.FullApi + + time.Sleep(10 * 
time.Second) + + // select a miner based on our GroupSeq (client 1 -> miner 1 ; client 2 -> miner 2) + // this assumes that all miner instances receive the same sorted MinerAddrs slice + minerAddr := cl.MinerAddrs[t.InitContext.GroupSeq-1] + if err := client.NetConnect(ctx, minerAddr.MinerNetAddrs); err != nil { + return err + } + t.D().Counter(fmt.Sprintf("send-data-to,miner=%s", minerAddr.MinerActorAddr)).Inc(1) + + t.RecordMessage("selected %s as the miner", minerAddr.MinerActorAddr) + + time.Sleep(2 * time.Second) + + // generate 1800 bytes of random data + data := make([]byte, 1800) + rand.New(rand.NewSource(time.Now().UnixNano())).Read(data) + + file, err := ioutil.TempFile("/tmp", "data") + if err != nil { + return err + } + defer os.Remove(file.Name()) + + _, err = file.Write(data) + if err != nil { + return err + } + + fcid, err := client.ClientImport(ctx, api.FileRef{Path: file.Name(), IsCAR: false}) + if err != nil { + return err + } + t.RecordMessage("file cid: %s", fcid) + + // start deal + t1 := time.Now() + fastRetrieval := false + deal := testkit.StartDeal(ctx, minerAddr.MinerActorAddr, client, fcid.Root, fastRetrieval) + t.RecordMessage("started deal: %s", deal) + + // this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this + time.Sleep(2 * time.Second) + + t.RecordMessage("waiting for deal to be sealed") + testkit.WaitDealSealed(t, ctx, client, deal) + t.D().ResettingHistogram("deal.sealed").Update(int64(time.Since(t1))) + + // TODO: wait to stop miner (ideally get a signal, rather than sleep) + time.Sleep(180 * time.Second) + + t.RecordMessage("trying to retrieve %s", fcid) + info, err := client.ClientGetDealInfo(ctx, *deal) + if err != nil { + return err + } + + carExport := true + err = testkit.RetrieveData(t, ctx, client, fcid.Root, &info.PieceCID, carExport, data) + if err != nil && strings.Contains(err.Error(), "cannot make retrieval deal for zero bytes") { + 
t.D().Counter("deal.expect-slashing").Inc(1) + } else if err != nil { + // unknown error => fail test + t.RecordFailure(err) + + // send signal to abort test + t.SyncClient.MustSignalEntry(ctx, testkit.StateAbortTest) + + t.D().ResettingHistogram("deal.retrieved.err").Update(int64(time.Since(t1))) + time.Sleep(10 * time.Second) // wait for metrics to be emitted + + return nil + } + + t.D().ResettingHistogram("deal.retrieved").Update(int64(time.Since(t1))) + time.Sleep(10 * time.Second) // wait for metrics to be emitted + + t.SyncClient.MustSignalAndWait(ctx, testkit.StateDone, t.TestInstanceCount) // TODO: not sure about this + return nil +} diff --git a/testplans/lotus-soup/rfwp/html_chain_state.go b/testplans/lotus-soup/rfwp/html_chain_state.go new file mode 100644 index 00000000000..7a3d56be4b7 --- /dev/null +++ b/testplans/lotus-soup/rfwp/html_chain_state.go @@ -0,0 +1,67 @@ +package rfwp + +import ( + "context" + "fmt" + "os" + + "github.com/filecoin-project/lotus/testplans/lotus-soup/testkit" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/cli" + tstats "github.com/filecoin-project/lotus/tools/stats" + "github.com/ipfs/go-cid" +) + +func FetchChainState(t *testkit.TestEnvironment, m *testkit.LotusMiner) error { + height := 0 + headlag := 3 + + ctx := context.Background() + api := m.FullApi + + tipsetsCh, err := tstats.GetTips(ctx, &v0api.WrapperV1Full{FullNode: m.FullApi}, abi.ChainEpoch(height), headlag) + if err != nil { + return err + } + + for tipset := range tipsetsCh { + err := func() error { + filename := fmt.Sprintf("%s%cchain-state-%d.html", t.TestOutputsPath, os.PathSeparator, tipset.Height()) + file, err := os.Create(filename) + defer file.Close() + if err != nil { + return err + } + + stout, err := api.StateCompute(ctx, tipset.Height(), nil, tipset.Key()) + if err != nil { + return err + } + + codeCache := 
map[address.Address]cid.Cid{} + getCode := func(addr address.Address) (cid.Cid, error) { + if c, found := codeCache[addr]; found { + return c, nil + } + + c, err := api.StateGetActor(ctx, addr, tipset.Key()) + if err != nil { + return cid.Cid{}, err + } + + codeCache[addr] = c.Code + return c.Code, nil + } + + return cli.ComputeStateHTMLTempl(file, tipset, stout, true, getCode) + }() + if err != nil { + return err + } + } + + return nil +} diff --git a/testplans/lotus-soup/runner/main.go b/testplans/lotus-soup/runner/main.go new file mode 100644 index 00000000000..e867533f09f --- /dev/null +++ b/testplans/lotus-soup/runner/main.go @@ -0,0 +1,120 @@ +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path" + + "github.com/codeskyblue/go-sh" +) + +type jobDefinition struct { + runNumber int + compositionPath string + outputDir string + skipStdout bool +} + +type jobResult struct { + job jobDefinition + runError error +} + +func runComposition(job jobDefinition) jobResult { + outputArchive := path.Join(job.outputDir, "test-outputs.tgz") + cmd := sh.Command("testground", "run", "composition", "-f", job.compositionPath, "--collect", "-o", outputArchive) + if err := os.MkdirAll(job.outputDir, os.ModePerm); err != nil { + return jobResult{runError: fmt.Errorf("unable to make output directory: %w", err)} + } + + outPath := path.Join(job.outputDir, "run.out") + outFile, err := os.Create(outPath) + if err != nil { + return jobResult{runError: fmt.Errorf("unable to create output file %s: %w", outPath, err)} + } + if job.skipStdout { + cmd.Stdout = outFile + } else { + cmd.Stdout = io.MultiWriter(os.Stdout, outFile) + } + log.Printf("starting test run %d. 
writing testground client output to %s\n", job.runNumber, outPath) + if err = cmd.Run(); err != nil { + return jobResult{job: job, runError: err} + } + return jobResult{job: job} +} + +func worker(id int, jobs <-chan jobDefinition, results chan<- jobResult) { + log.Printf("started worker %d\n", id) + for j := range jobs { + log.Printf("worker %d started test run %d\n", id, j.runNumber) + results <- runComposition(j) + } +} + +func buildComposition(compositionPath string, outputDir string) (string, error) { + outComp := path.Join(outputDir, "composition.toml") + err := sh.Command("cp", compositionPath, outComp).Run() + if err != nil { + return "", err + } + + return outComp, sh.Command("testground", "build", "composition", "-w", "-f", outComp).Run() +} + +func main() { + runs := flag.Int("runs", 1, "number of times to run composition") + parallelism := flag.Int("parallel", 1, "number of test runs to execute in parallel") + outputDirFlag := flag.String("output", "", "path to output directory (will use temp dir if unset)") + flag.Parse() + + if len(flag.Args()) != 1 { + log.Fatal("must provide a single composition file path argument") + } + + outdir := *outputDirFlag + if outdir == "" { + var err error + outdir, err = ioutil.TempDir(os.TempDir(), "oni-batch-run-") + if err != nil { + log.Fatal(err) + } + } + if err := os.MkdirAll(outdir, os.ModePerm); err != nil { + log.Fatal(err) + } + + compositionPath := flag.Args()[0] + + // first build the composition and write out the artifacts. 
+ // we copy to a temp file first to avoid modifying the original + log.Printf("building composition %s\n", compositionPath) + compositionPath, err := buildComposition(compositionPath, outdir) + if err != nil { + log.Fatal(err) + } + + jobs := make(chan jobDefinition, *runs) + results := make(chan jobResult, *runs) + for w := 1; w <= *parallelism; w++ { + go worker(w, jobs, results) + } + + for j := 1; j <= *runs; j++ { + dir := path.Join(outdir, fmt.Sprintf("run-%d", j)) + skipStdout := *parallelism != 1 + jobs <- jobDefinition{runNumber: j, compositionPath: compositionPath, outputDir: dir, skipStdout: skipStdout} + } + close(jobs) + + for i := 0; i < *runs; i++ { + r := <-results + if r.runError != nil { + log.Printf("error running job %d: %s\n", r.job.runNumber, r.runError) + } + } +} diff --git a/testplans/lotus-soup/sanity.go b/testplans/lotus-soup/sanity.go new file mode 100644 index 00000000000..b06a653c563 --- /dev/null +++ b/testplans/lotus-soup/sanity.go @@ -0,0 +1,35 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" +) + +func sanityCheck() { + enhanceMsg := func(msg string, a ...interface{}) string { + return fmt.Sprintf("sanity check: "+msg+"; if running on local:exec, make sure to run `make` from the root of the oni repo", a...) 
Many thanks to the author, Venil Noronha
+type Action interface { + Execute(eventCtx EventContext) EventType +} + +// Events represents a mapping of events and states. +type Events map[EventType]StateType + +// State binds a state with an action and a set of events it can handle. +type State struct { + Action Action + Events Events +} + +// States represents a mapping of states and their implementations. +type States map[StateType]State + +// StateMachine represents the state machine. +type StateMachine struct { + // Previous represents the previous state. + Previous StateType + + // Current represents the current state. + Current StateType + + // States holds the configuration of states and events handled by the state machine. + States States + + // mutex ensures that only 1 event is processed by the state machine at any given time. + mutex sync.Mutex +} + +// getNextState returns the next state for the event given the machine's current +// state, or an error if the event can't be handled in the given state. +func (s *StateMachine) getNextState(event EventType) (StateType, error) { + if state, ok := s.States[s.Current]; ok { + if state.Events != nil { + if next, ok := state.Events[event]; ok { + return next, nil + } + } + } + return Default, ErrEventRejected +} + +// SendEvent sends an event to the state machine. +func (s *StateMachine) SendEvent(event EventType, eventCtx EventContext) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + for { + // Determine the next state for the event given the machine's current state. + nextState, err := s.getNextState(event) + if err != nil { + return ErrEventRejected + } + + // Identify the state definition for the next state. + state, ok := s.States[nextState] + if !ok || state.Action == nil { + // configuration error + } + + // Transition over to the next state. + s.Previous = s.Current + s.Current = nextState + + // Execute the next state's action and loop over again if the event returned + // is not a no-op. 
+ nextEvent := state.Action.Execute(eventCtx) + if nextEvent == NoOp { + return nil + } + event = nextEvent + } +} diff --git a/testplans/lotus-soup/statemachine/suspend.go b/testplans/lotus-soup/statemachine/suspend.go new file mode 100644 index 00000000000..11bade7c197 --- /dev/null +++ b/testplans/lotus-soup/statemachine/suspend.go @@ -0,0 +1,128 @@ +package statemachine + +import ( + "fmt" + "strings" + "time" +) + +const ( + Running StateType = "running" + Suspended StateType = "suspended" + + Halt EventType = "halt" + Resume EventType = "resume" +) + +type Suspendable interface { + Halt() + Resume() +} + +type HaltAction struct{} + +func (a *HaltAction) Execute(ctx EventContext) EventType { + s, ok := ctx.(*Suspender) + if !ok { + fmt.Println("unable to halt, event context is not Suspendable") + return NoOp + } + s.target.Halt() + return NoOp +} + +type ResumeAction struct{} + +func (a *ResumeAction) Execute(ctx EventContext) EventType { + s, ok := ctx.(*Suspender) + if !ok { + fmt.Println("unable to resume, event context is not Suspendable") + return NoOp + } + s.target.Resume() + return NoOp +} + +type Suspender struct { + StateMachine + target Suspendable + log LogFn +} + +type LogFn func(fmt string, args ...interface{}) + +func NewSuspender(target Suspendable, log LogFn) *Suspender { + return &Suspender{ + target: target, + log: log, + StateMachine: StateMachine{ + Current: Running, + States: States{ + Running: State{ + Action: &ResumeAction{}, + Events: Events{ + Halt: Suspended, + }, + }, + + Suspended: State{ + Action: &HaltAction{}, + Events: Events{ + Resume: Running, + }, + }, + }, + }, + } +} + +func (s *Suspender) RunEvents(eventSpec string) { + s.log("running event spec: %s", eventSpec) + for _, et := range parseEventSpec(eventSpec, s.log) { + if et.delay != 0 { + //s.log("waiting %s", et.delay.String()) + time.Sleep(et.delay) + continue + } + if et.event == "" { + s.log("ignoring empty event") + continue + } + s.log("sending event %s", et.event) 
+ err := s.SendEvent(et.event, s) + if err != nil { + s.log("error sending event %s: %s", et.event, err) + } + } +} + +type eventTiming struct { + delay time.Duration + event EventType +} + +func parseEventSpec(spec string, log LogFn) []eventTiming { + fields := strings.Split(spec, "->") + out := make([]eventTiming, 0, len(fields)) + for _, f := range fields { + f = strings.TrimSpace(f) + words := strings.Split(f, " ") + + // TODO: try to implement a "waiting" state instead of special casing like this + if words[0] == "wait" { + if len(words) != 2 { + log("expected 'wait' to be followed by duration, e.g. 'wait 30s'. ignoring.") + continue + } + d, err := time.ParseDuration(words[1]) + if err != nil { + log("bad argument for 'wait': %s", err) + continue + } + out = append(out, eventTiming{delay: d}) + } else { + out = append(out, eventTiming{event: EventType(words[0])}) + } + } + return out +} diff --git a/testplans/lotus-soup/testkit/deals.go b/testplans/lotus-soup/testkit/deals.go new file mode 100644 index 00000000000..f0910537da1 --- /dev/null +++ b/testplans/lotus-soup/testkit/deals.go @@ -0,0 +1,75 @@ +package testkit + +import ( + "context" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-cid" + + tstats "github.com/filecoin-project/lotus/tools/stats" +) + +func StartDeal(ctx context.Context, minerActorAddr address.Address, client api.FullNode, fcid cid.Cid, fastRetrieval bool) *cid.Cid { + addr, err := client.WalletDefaultAddress(ctx) + if err != nil { + panic(err) + } + + deal, err := client.ClientStartDeal(ctx, &api.StartDealParams{ + Data: &storagemarket.DataRef{ + TransferType: storagemarket.TTGraphsync, + Root: fcid, + }, + Wallet: addr, + Miner: minerActorAddr, + EpochPrice: 
types.NewInt(4000000), + MinBlocksDuration: 640000, + DealStartEpoch: 200, + FastRetrieval: fastRetrieval, + }) + if err != nil { + panic(err) + } + return deal +} + +func WaitDealSealed(t *TestEnvironment, ctx context.Context, client api.FullNode, deal *cid.Cid) { + height := 0 + headlag := 3 + + cctx, cancel := context.WithCancel(ctx) + defer cancel() + + tipsetsCh, err := tstats.GetTips(cctx, &v0api.WrapperV1Full{FullNode: client}, abi.ChainEpoch(height), headlag) + if err != nil { + panic(err) + } + + for tipset := range tipsetsCh { + t.RecordMessage("got tipset: height %d", tipset.Height()) + + di, err := client.ClientGetDealInfo(ctx, *deal) + if err != nil { + panic(err) + } + switch di.State { + case storagemarket.StorageDealProposalRejected: + panic("deal rejected") + case storagemarket.StorageDealFailing: + panic("deal failed") + case storagemarket.StorageDealError: + panic(fmt.Sprintf("deal errored %s", di.Message)) + case storagemarket.StorageDealActive: + t.RecordMessage("completed deal: %s", di) + return + } + + t.RecordMessage("deal state: %s", storagemarket.DealStates[di.State]) + } +} diff --git a/testplans/lotus-soup/testkit/defaults.go b/testplans/lotus-soup/testkit/defaults.go new file mode 100644 index 00000000000..a0681f37cad --- /dev/null +++ b/testplans/lotus-soup/testkit/defaults.go @@ -0,0 +1,55 @@ +package testkit + +import "fmt" + +type RoleName = string + +var DefaultRoles = map[RoleName]func(*TestEnvironment) error{ + "bootstrapper": func(t *TestEnvironment) error { + b, err := PrepareBootstrapper(t) + if err != nil { + return err + } + return b.RunDefault() + }, + "miner": func(t *TestEnvironment) error { + m, err := PrepareMiner(t) + if err != nil { + return err + } + return m.RunDefault() + }, + "client": func(t *TestEnvironment) error { + c, err := PrepareClient(t) + if err != nil { + return err + } + return c.RunDefault() + }, + "drand": func(t *TestEnvironment) error { + d, err := PrepareDrandInstance(t) + if err != nil { + return 
err + } + return d.RunDefault() + }, + "pubsub-tracer": func(t *TestEnvironment) error { + tr, err := PreparePubsubTracer(t) + if err != nil { + return err + } + return tr.RunDefault() + }, +} + +// HandleDefaultRole handles a role by running its default behaviour. +// +// This function is suitable to forward to when a test case doesn't need to +// explicitly handle/alter a role. +func HandleDefaultRole(t *TestEnvironment) error { + f, ok := DefaultRoles[t.Role] + if !ok { + panic(fmt.Sprintf("unrecognized role: %s", t.Role)) + } + return f(t) +} diff --git a/testplans/lotus-soup/testkit/lotus_opts.go b/testplans/lotus-soup/testkit/lotus_opts.go new file mode 100644 index 00000000000..46df9078d4c --- /dev/null +++ b/testplans/lotus-soup/testkit/lotus_opts.go @@ -0,0 +1,67 @@ +package testkit + +import ( + "fmt" + + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/lp2p" + "github.com/filecoin-project/lotus/node/repo" + + "github.com/libp2p/go-libp2p-core/peer" + ma "github.com/multiformats/go-multiaddr" +) + +func withGenesis(gb []byte) node.Option { + return node.Override(new(modules.Genesis), modules.LoadGenesis(gb)) +} + +func withBootstrapper(ab []byte) node.Option { + return node.Override(new(dtypes.BootstrapPeers), + func() (dtypes.BootstrapPeers, error) { + if ab == nil { + return dtypes.BootstrapPeers{}, nil + } + + a, err := ma.NewMultiaddrBytes(ab) + if err != nil { + return nil, err + } + ai, err := peer.AddrInfoFromP2pAddr(a) + if err != nil { + return nil, err + } + return dtypes.BootstrapPeers{*ai}, nil + }) +} + +func withPubsubConfig(bootstrapper bool, pubsubTracer string) node.Option { + return node.Override(new(*config.Pubsub), func() *config.Pubsub { + return &config.Pubsub{ + Bootstrapper: bootstrapper, + RemoteTracer: pubsubTracer, + } + }) +} + 
+func withListenAddress(ip string) node.Option { + addrs := []string{fmt.Sprintf("/ip4/%s/tcp/0", ip)} + return node.Override(node.StartListeningKey, lp2p.StartListening(addrs)) +} + +func withMinerListenAddress(ip string) node.Option { + addrs := []string{fmt.Sprintf("/ip4/%s/tcp/0", ip)} + return node.Override(node.StartListeningKey, lp2p.StartListening(addrs)) +} + +func withApiEndpoint(addr string) node.Option { + return node.Override(node.SetApiEndpointKey, func(lr repo.LockedRepo) error { + apima, err := ma.NewMultiaddr(addr) + if err != nil { + return err + } + return lr.SetAPIEndpoint(apima) + }) +} diff --git a/testplans/lotus-soup/testkit/net.go b/testplans/lotus-soup/testkit/net.go new file mode 100644 index 00000000000..d2dbc2ae635 --- /dev/null +++ b/testplans/lotus-soup/testkit/net.go @@ -0,0 +1,92 @@ +package testkit + +import ( + "context" + "fmt" + "time" + + "github.com/testground/sdk-go/network" + "github.com/testground/sdk-go/sync" +) + +func ApplyNetworkParameters(t *TestEnvironment) { + if !t.TestSidecar { + t.RecordMessage("no test sidecar, skipping network config") + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + ls := network.LinkShape{} + + if t.IsParamSet("latency_range") { + r := t.DurationRangeParam("latency_range") + ls.Latency = r.ChooseRandom() + t.D().RecordPoint("latency_ms", float64(ls.Latency.Milliseconds())) + } + + if t.IsParamSet("jitter_range") { + r := t.DurationRangeParam("jitter_range") + ls.Jitter = r.ChooseRandom() + t.D().RecordPoint("jitter_ms", float64(ls.Jitter.Milliseconds())) + } + + if t.IsParamSet("loss_range") { + r := t.FloatRangeParam("loss_range") + ls.Loss = r.ChooseRandom() + t.D().RecordPoint("packet_loss", float64(ls.Loss)) + } + + if t.IsParamSet("corrupt_range") { + r := t.FloatRangeParam("corrupt_range") + ls.Corrupt = r.ChooseRandom() + t.D().RecordPoint("corrupt_packet_probability", float64(ls.Corrupt)) + } + + if 
t.IsParamSet("corrupt_corr_range") { + r := t.FloatRangeParam("corrupt_corr_range") + ls.CorruptCorr = r.ChooseRandom() + t.D().RecordPoint("corrupt_packet_correlation", float64(ls.CorruptCorr)) + } + + if t.IsParamSet("reorder_range") { + r := t.FloatRangeParam("reorder_range") + ls.Reorder = r.ChooseRandom() + t.D().RecordPoint("reordered_packet_probability", float64(ls.Reorder)) + } + + if t.IsParamSet("reorder_corr_range") { + r := t.FloatRangeParam("reorder_corr_range") + ls.ReorderCorr = r.ChooseRandom() + t.D().RecordPoint("reordered_packet_correlation", float64(ls.ReorderCorr)) + } + + if t.IsParamSet("duplicate_range") { + r := t.FloatRangeParam("duplicate_range") + ls.Duplicate = r.ChooseRandom() + t.D().RecordPoint("duplicate_packet_probability", float64(ls.Duplicate)) + } + + if t.IsParamSet("duplicate_corr_range") { + r := t.FloatRangeParam("duplicate_corr_range") + ls.DuplicateCorr = r.ChooseRandom() + t.D().RecordPoint("duplicate_packet_correlation", float64(ls.DuplicateCorr)) + } + + if t.IsParamSet("bandwidth") { + ls.Bandwidth = t.SizeParam("bandwidth") + t.D().RecordPoint("bandwidth_bytes", float64(ls.Bandwidth)) + } + + t.NetClient.MustConfigureNetwork(ctx, &network.Config{ + Network: "default", + Enable: true, + Default: ls, + CallbackState: sync.State(fmt.Sprintf("latency-configured-%s", t.TestGroupID)), + CallbackTarget: t.TestGroupInstanceCount, + RoutingPolicy: network.AllowAll, + }) + + t.DumpJSON("network-link-shape.json", ls) +} diff --git a/testplans/lotus-soup/testkit/node.go b/testplans/lotus-soup/testkit/node.go new file mode 100644 index 00000000000..915f2a1ac4e --- /dev/null +++ b/testplans/lotus-soup/testkit/node.go @@ -0,0 +1,253 @@ +package testkit + +import ( + "context" + "fmt" + "net/http" + "os" + "sort" + "time" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/chain/beacon" + "github.com/filecoin-project/lotus/chain/wallet" + 
"github.com/filecoin-project/lotus/metrics" + "github.com/filecoin-project/lotus/miner" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/modules/dtypes" + modtest "github.com/filecoin-project/lotus/node/modules/testing" + tstats "github.com/filecoin-project/lotus/tools/stats" + + influxdb "github.com/kpacha/opencensus-influxdb" + ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr-net" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" +) + +var PrepareNodeTimeout = 3 * time.Minute + +type LotusNode struct { + FullApi api.FullNode + MinerApi api.StorageMiner + StopFn node.StopFunc + Wallet *wallet.Key + MineOne func(context.Context, miner.MineReq) error +} + +func (n *LotusNode) setWallet(ctx context.Context, walletKey *wallet.Key) error { + _, err := n.FullApi.WalletImport(ctx, &walletKey.KeyInfo) + if err != nil { + return err + } + + err = n.FullApi.WalletSetDefault(ctx, walletKey.Address) + if err != nil { + return err + } + + n.Wallet = walletKey + + return nil +} + +func WaitForBalances(t *TestEnvironment, ctx context.Context, nodes int) ([]*InitialBalanceMsg, error) { + ch := make(chan *InitialBalanceMsg) + sub := t.SyncClient.MustSubscribe(ctx, BalanceTopic, ch) + + balances := make([]*InitialBalanceMsg, 0, nodes) + for i := 0; i < nodes; i++ { + select { + case m := <-ch: + balances = append(balances, m) + case err := <-sub.Done(): + return nil, fmt.Errorf("got error while waiting for balances: %w", err) + } + } + + return balances, nil +} + +func CollectPreseals(t *TestEnvironment, ctx context.Context, miners int) ([]*PresealMsg, error) { + ch := make(chan *PresealMsg) + sub := t.SyncClient.MustSubscribe(ctx, PresealTopic, ch) + + preseals := make([]*PresealMsg, 0, miners) + for i := 0; i < miners; i++ { + select { + case m := <-ch: + preseals = append(preseals, m) + case err := <-sub.Done(): + return nil, fmt.Errorf("got error while waiting for preseals: %w", err) + } + } + + 
sort.Slice(preseals, func(i, j int) bool { + return preseals[i].Seqno < preseals[j].Seqno + }) + + return preseals, nil +} + +func WaitForGenesis(t *TestEnvironment, ctx context.Context) (*GenesisMsg, error) { + genesisCh := make(chan *GenesisMsg) + sub := t.SyncClient.MustSubscribe(ctx, GenesisTopic, genesisCh) + + select { + case genesisMsg := <-genesisCh: + return genesisMsg, nil + case err := <-sub.Done(): + return nil, fmt.Errorf("error while waiting for genesis msg: %w", err) + } +} + +func CollectMinerAddrs(t *TestEnvironment, ctx context.Context, miners int) ([]MinerAddressesMsg, error) { + ch := make(chan MinerAddressesMsg) + sub := t.SyncClient.MustSubscribe(ctx, MinersAddrsTopic, ch) + + addrs := make([]MinerAddressesMsg, 0, miners) + for i := 0; i < miners; i++ { + select { + case a := <-ch: + addrs = append(addrs, a) + case err := <-sub.Done(): + return nil, fmt.Errorf("got error while waiting for miners addrs: %w", err) + } + } + + return addrs, nil +} + +func CollectClientAddrs(t *TestEnvironment, ctx context.Context, clients int) ([]*ClientAddressesMsg, error) { + ch := make(chan *ClientAddressesMsg) + sub := t.SyncClient.MustSubscribe(ctx, ClientsAddrsTopic, ch) + + addrs := make([]*ClientAddressesMsg, 0, clients) + for i := 0; i < clients; i++ { + select { + case a := <-ch: + addrs = append(addrs, a) + case err := <-sub.Done(): + return nil, fmt.Errorf("got error while waiting for clients addrs: %w", err) + } + } + + return addrs, nil +} + +func GetPubsubTracerMaddr(ctx context.Context, t *TestEnvironment) (string, error) { + if !t.BooleanParam("enable_pubsub_tracer") { + return "", nil + } + + ch := make(chan *PubsubTracerMsg) + sub := t.SyncClient.MustSubscribe(ctx, PubsubTracerTopic, ch) + + select { + case m := <-ch: + return m.Multiaddr, nil + case err := <-sub.Done(): + return "", fmt.Errorf("got error while waiting for pubsub tracer config: %w", err) + } +} + +func GetRandomBeaconOpts(ctx context.Context, t *TestEnvironment) (node.Option, 
error) { + beaconType := t.StringParam("random_beacon_type") + switch beaconType { + case "external-drand": + noop := func(settings *node.Settings) error { + return nil + } + return noop, nil + + case "local-drand": + cfg, err := waitForDrandConfig(ctx, t.SyncClient) + if err != nil { + t.RecordMessage("error getting drand config: %w", err) + return nil, err + + } + t.RecordMessage("setting drand config: %v", cfg) + return node.Options( + node.Override(new(dtypes.DrandConfig), cfg.Config), + node.Override(new(dtypes.DrandBootstrap), cfg.GossipBootstrap), + ), nil + + case "mock": + return node.Options( + node.Override(new(beacon.RandomBeacon), modtest.RandomBeacon), + node.Override(new(dtypes.DrandConfig), dtypes.DrandConfig{ + ChainInfoJSON: "{\"Hash\":\"wtf\"}", + }), + node.Override(new(dtypes.DrandBootstrap), dtypes.DrandBootstrap{}), + ), nil + + default: + return nil, fmt.Errorf("unknown random_beacon_type: %s", beaconType) + } +} + +func startServer(endpoint ma.Multiaddr, srv *http.Server) (listenAddr string, err error) { + lst, err := manet.Listen(endpoint) + if err != nil { + return "", fmt.Errorf("could not listen: %w", err) + } + + go func() { + _ = srv.Serve(manet.NetListener(lst)) + }() + + return lst.Addr().String(), nil +} + +func registerAndExportMetrics(instanceName string) { + // Register all Lotus metric views + err := view.Register(metrics.DefaultViews...) 
+ if err != nil { + panic(err) + } + + // Set the metric to one so it is published to the exporter + stats.Record(context.Background(), metrics.LotusInfo.M(1)) + + // Register our custom exporter to opencensus + e, err := influxdb.NewExporter(context.Background(), influxdb.Options{ + Database: "testground", + Address: os.Getenv("INFLUXDB_URL"), + Username: "", + Password: "", + InstanceName: instanceName, + }) + if err != nil { + panic(err) + } + view.RegisterExporter(e) + view.SetReportingPeriod(5 * time.Second) +} + +func collectStats(t *TestEnvironment, ctx context.Context, api api.FullNode) error { + t.RecordMessage("collecting blockchain stats") + + influxAddr := os.Getenv("INFLUXDB_URL") + influxUser := "" + influxPass := "" + influxDb := "testground" + + influx, err := tstats.InfluxClient(influxAddr, influxUser, influxPass) + if err != nil { + t.RecordMessage(err.Error()) + return err + } + + height := int64(0) + headlag := 1 + + go func() { + time.Sleep(15 * time.Second) + t.RecordMessage("calling tstats.Collect") + tstats.Collect(context.Background(), &v0api.WrapperV1Full{FullNode: api}, influx, influxDb, height, headlag) + }() + + return nil +} diff --git a/testplans/lotus-soup/testkit/retrieval.go b/testplans/lotus-soup/testkit/retrieval.go new file mode 100644 index 00000000000..de3dee6be89 --- /dev/null +++ b/testplans/lotus-soup/testkit/retrieval.go @@ -0,0 +1,106 @@ +package testkit + +import ( + "bytes" + "context" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/filecoin-project/lotus/api" + "github.com/ipfs/go-cid" + files "github.com/ipfs/go-ipfs-files" + ipld "github.com/ipfs/go-ipld-format" + dag "github.com/ipfs/go-merkledag" + dstest "github.com/ipfs/go-merkledag/test" + unixfile "github.com/ipfs/go-unixfs/file" + "github.com/ipld/go-car" +) + +func RetrieveData(t *TestEnvironment, ctx context.Context, client api.FullNode, fcid cid.Cid, _ *cid.Cid, carExport bool, data []byte) error { + t1 := time.Now() + 
offers, err := client.ClientFindData(ctx, fcid, nil) + if err != nil { + panic(err) + } + for _, o := range offers { + t.D().Counter(fmt.Sprintf("find-data.offer,miner=%s", o.Miner)).Inc(1) + } + t.D().ResettingHistogram("find-data").Update(int64(time.Since(t1))) + + if len(offers) < 1 { + panic("no offers") + } + + rpath, err := ioutil.TempDir("", "lotus-retrieve-test-") + if err != nil { + panic(err) + } + defer os.RemoveAll(rpath) + + caddr, err := client.WalletDefaultAddress(ctx) + if err != nil { + return err + } + + ref := &api.FileRef{ + Path: filepath.Join(rpath, "ret"), + IsCAR: carExport, + } + t1 = time.Now() + err = client.ClientRetrieve(ctx, offers[0].Order(caddr), ref) + if err != nil { + return err + } + t.D().ResettingHistogram("retrieve-data").Update(int64(time.Since(t1))) + + rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret")) + if err != nil { + return err + } + + if carExport { + rdata = ExtractCarData(ctx, rdata, rpath) + } + + if !bytes.Equal(rdata, data) { + return errors.New("wrong data retrieved") + } + + t.RecordMessage("retrieved successfully") + + return nil +} + +func ExtractCarData(ctx context.Context, rdata []byte, rpath string) []byte { + bserv := dstest.Bserv() + ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata)) + if err != nil { + panic(err) + } + b, err := bserv.GetBlock(ctx, ch.Roots[0]) + if err != nil { + panic(err) + } + nd, err := ipld.Decode(b) + if err != nil { + panic(err) + } + dserv := dag.NewDAGService(bserv) + fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd) + if err != nil { + panic(err) + } + outPath := filepath.Join(rpath, "retLoadedCAR") + if err := files.WriteTo(fil, outPath); err != nil { + panic(err) + } + rdata, err = ioutil.ReadFile(outPath) + if err != nil { + panic(err) + } + return rdata +} diff --git a/testplans/lotus-soup/testkit/role_bootstrapper.go b/testplans/lotus-soup/testkit/role_bootstrapper.go new file mode 100644 index 00000000000..4a6ac56c9c0 --- /dev/null +++ 
// the first duty of the bootstrapper is to construct the genesis block
totalBalanceFil.GreaterThanEqual(max) { + panic(fmt.Sprintf("total sum of balances is greater than max Filecoin ever; sum=%s, max=%s", totalBalance, max)) + } + + // then collect all preseals from miners + preseals, err := CollectPreseals(t, ctx, miners) + if err != nil { + return nil, err + } + + // now construct the genesis block + var genesisActors []genesis.Actor + var genesisMiners []genesis.Miner + + for _, bm := range balances { + balance := filToAttoFil(bm.Balance) + t.RecordMessage("balance assigned to actor %s: %s AttoFIL", bm.Addr, balance) + genesisActors = append(genesisActors, + genesis.Actor{ + Type: genesis.TAccount, + Balance: balance, + Meta: (&genesis.AccountMeta{Owner: bm.Addr}).ActorMeta(), + }) + } + + for _, pm := range preseals { + genesisMiners = append(genesisMiners, pm.Miner) + } + + genesisTemplate := genesis.Template{ + Accounts: genesisActors, + Miners: genesisMiners, + Timestamp: uint64(time.Now().Unix()) - uint64(t.IntParam("genesis_timestamp_offset")), + VerifregRootKey: gen.DefaultVerifregRootkeyActor, + RemainderAccount: gen.DefaultRemainderAccountActor, + NetworkName: "testground-local-" + uuid.New().String(), + } + + // dump the genesis block + // var jsonBuf bytes.Buffer + // jsonEnc := json.NewEncoder(&jsonBuf) + // err := jsonEnc.Encode(genesisTemplate) + // if err != nil { + // panic(err) + // } + // runenv.RecordMessage(fmt.Sprintf("Genesis template: %s", string(jsonBuf.Bytes()))) + + // this is horrendously disgusting, we use this contraption to side effect the construction + // of the genesis block in the buffer -- yes, a side effect of dependency injection. + // I remember when software was straightforward... 
+ var genesisBuffer bytes.Buffer + + bootstrapperIP := t.NetClient.MustGetDataNetworkIP().String() + + n := &LotusNode{} + r := repo.NewMemory(nil) + stop, err := node.New(context.Background(), + node.FullAPI(&n.FullApi), + node.Base(), + node.Repo(r), + node.Override(new(modules.Genesis), modtest.MakeGenesisMem(&genesisBuffer, genesisTemplate)), + withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))), + withListenAddress(bootstrapperIP), + withBootstrapper(nil), + withPubsubConfig(true, pubsubTracerMaddr), + randomBeaconOpt, + ) + if err != nil { + return nil, err + } + n.StopFn = stop + + var bootstrapperAddr ma.Multiaddr + + bootstrapperAddrs, err := n.FullApi.NetAddrsListen(ctx) + if err != nil { + stop(context.TODO()) + return nil, err + } + for _, a := range bootstrapperAddrs.Addrs { + ip, err := a.ValueForProtocol(ma.P_IP4) + if err != nil { + continue + } + if ip != bootstrapperIP { + continue + } + addrs, err := peer.AddrInfoToP2pAddrs(&peer.AddrInfo{ + ID: bootstrapperAddrs.ID, + Addrs: []ma.Multiaddr{a}, + }) + if err != nil { + panic(err) + } + bootstrapperAddr = addrs[0] + break + } + + if bootstrapperAddr == nil { + panic("failed to determine bootstrapper address") + } + + genesisMsg := &GenesisMsg{ + Genesis: genesisBuffer.Bytes(), + Bootstrapper: bootstrapperAddr.Bytes(), + } + t.SyncClient.MustPublish(ctx, GenesisTopic, genesisMsg) + + t.RecordMessage("waiting for all nodes to be ready") + t.SyncClient.MustSignalAndWait(ctx, StateReady, t.TestInstanceCount) + + return &Bootstrapper{n, t}, nil +} + +// RunDefault runs a default bootstrapper. 
+func (b *Bootstrapper) RunDefault() error { + b.t.RecordMessage("running bootstrapper") + ctx := context.Background() + b.t.SyncClient.MustSignalAndWait(ctx, StateDone, b.t.TestInstanceCount) + return nil +} + +// filToAttoFil converts a fractional filecoin value into AttoFIL, rounding if necessary +func filToAttoFil(f float64) big.Int { + a := mbig.NewFloat(f) + a.Mul(a, mbig.NewFloat(float64(build.FilecoinPrecision))) + i, _ := a.Int(nil) + return big.Int{Int: i} +} + +func attoFilToFil(atto big.Int) big.Int { + i := big.NewInt(0) + i.Add(i.Int, atto.Int) + i.Div(i.Int, big.NewIntUnsigned(build.FilecoinPrecision).Int) + return i +} diff --git a/testplans/lotus-soup/testkit/role_client.go b/testplans/lotus-soup/testkit/role_client.go new file mode 100644 index 00000000000..d18a835d2c4 --- /dev/null +++ b/testplans/lotus-soup/testkit/role_client.go @@ -0,0 +1,197 @@ +package testkit + +import ( + "context" + "fmt" + "net/http" + "time" + + "contrib.go.opencensus.io/exporter/prometheus" + "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/repo" + "github.com/gorilla/mux" + "github.com/hashicorp/go-multierror" +) + +type LotusClient struct { + *LotusNode + + t *TestEnvironment + MinerAddrs []MinerAddressesMsg +} + +func PrepareClient(t *TestEnvironment) (*LotusClient, error) { + ctx, cancel := context.WithTimeout(context.Background(), PrepareNodeTimeout) + defer cancel() + + ApplyNetworkParameters(t) + + pubsubTracer, err := GetPubsubTracerMaddr(ctx, t) + if err != nil { + return nil, err + } + + drandOpt, err := GetRandomBeaconOpts(ctx, t) + if err != nil { + return nil, err + } + + // first create a wallet + walletKey, err := wallet.GenerateKey(types.KTBLS) + if err != nil { + return nil, err + } + + // 
publish the account ID/balance + balance := t.FloatParam("balance") + balanceMsg := &InitialBalanceMsg{Addr: walletKey.Address, Balance: balance} + t.SyncClient.Publish(ctx, BalanceTopic, balanceMsg) + + // then collect the genesis block and bootstrapper address + genesisMsg, err := WaitForGenesis(t, ctx) + if err != nil { + return nil, err + } + + clientIP := t.NetClient.MustGetDataNetworkIP().String() + + nodeRepo := repo.NewMemory(nil) + + // create the node + n := &LotusNode{} + stop, err := node.New(context.Background(), + node.FullAPI(&n.FullApi), + node.Base(), + node.Repo(nodeRepo), + withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))), + withGenesis(genesisMsg.Genesis), + withListenAddress(clientIP), + withBootstrapper(genesisMsg.Bootstrapper), + withPubsubConfig(false, pubsubTracer), + drandOpt, + ) + if err != nil { + return nil, err + } + + // set the wallet + err = n.setWallet(ctx, walletKey) + if err != nil { + _ = stop(context.TODO()) + return nil, err + } + + fullSrv, err := startFullNodeAPIServer(t, nodeRepo, n.FullApi) + if err != nil { + return nil, err + } + + n.StopFn = func(ctx context.Context) error { + var err *multierror.Error + err = multierror.Append(fullSrv.Shutdown(ctx)) + err = multierror.Append(stop(ctx)) + return err.ErrorOrNil() + } + + registerAndExportMetrics(fmt.Sprintf("client_%d", t.GroupSeq)) + + t.RecordMessage("publish our address to the clients addr topic") + addrinfo, err := n.FullApi.NetAddrsListen(ctx) + if err != nil { + return nil, err + } + t.SyncClient.MustPublish(ctx, ClientsAddrsTopic, &ClientAddressesMsg{ + PeerNetAddr: addrinfo, + WalletAddr: walletKey.Address, + GroupSeq: t.GroupSeq, + }) + + t.RecordMessage("waiting for all nodes to be ready") + t.SyncClient.MustSignalAndWait(ctx, StateReady, t.TestInstanceCount) + + // collect miner addresses. 
+ addrs, err := CollectMinerAddrs(t, ctx, t.IntParam("miners")) + if err != nil { + return nil, err + } + t.RecordMessage("got %v miner addrs", len(addrs)) + + // densely connect the client to the full node and the miners themselves. + for _, miner := range addrs { + if err := n.FullApi.NetConnect(ctx, miner.FullNetAddrs); err != nil { + return nil, fmt.Errorf("client failed to connect to full node of miner: %w", err) + } + if err := n.FullApi.NetConnect(ctx, miner.MinerNetAddrs); err != nil { + return nil, fmt.Errorf("client failed to connect to storage miner node node of miner: %w", err) + } + } + + // wait for all clients to have completed identify, pubsub negotiation with miners. + time.Sleep(1 * time.Second) + + peers, err := n.FullApi.NetPeers(ctx) + if err != nil { + return nil, fmt.Errorf("failed to query connected peers: %w", err) + } + + t.RecordMessage("connected peers: %d", len(peers)) + + cl := &LotusClient{ + t: t, + LotusNode: n, + MinerAddrs: addrs, + } + return cl, nil +} + +func (c *LotusClient) RunDefault() error { + // run forever + c.t.RecordMessage("running default client forever") + c.t.WaitUntilAllDone() + return nil +} + +func startFullNodeAPIServer(t *TestEnvironment, repo repo.Repo, napi api.FullNode) (*http.Server, error) { + mux := mux.NewRouter() + + rpcServer := jsonrpc.NewServer() + rpcServer.Register("Filecoin", napi) + + mux.Handle("/rpc/v0", rpcServer) + + exporter, err := prometheus.NewExporter(prometheus.Options{ + Namespace: "lotus", + }) + if err != nil { + return nil, err + } + + mux.Handle("/debug/metrics", exporter) + + ah := &auth.Handler{ + Verify: func(ctx context.Context, token string) ([]auth.Permission, error) { + return api.AllPermissions, nil + }, + Next: mux.ServeHTTP, + } + + srv := &http.Server{Handler: ah} + + endpoint, err := repo.APIEndpoint() + if err != nil { + return nil, fmt.Errorf("no API endpoint in repo: %w", err) + } + + listenAddr, err := startServer(endpoint, srv) + if err != nil { + return nil, 
fmt.Errorf("failed to start client API endpoint: %w", err) + } + + t.RecordMessage("started node API server at %s", listenAddr) + return srv, nil +} diff --git a/testplans/lotus-soup/testkit/role_drand.go b/testplans/lotus-soup/testkit/role_drand.go new file mode 100644 index 00000000000..afe310dcd82 --- /dev/null +++ b/testplans/lotus-soup/testkit/role_drand.go @@ -0,0 +1,391 @@ +package testkit + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "time" + + "github.com/drand/drand/chain" + "github.com/drand/drand/client" + hclient "github.com/drand/drand/client/http" + "github.com/drand/drand/core" + "github.com/drand/drand/key" + "github.com/drand/drand/log" + "github.com/drand/drand/lp2p" + dnet "github.com/drand/drand/net" + "github.com/drand/drand/protobuf/drand" + dtest "github.com/drand/drand/test" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/libp2p/go-libp2p-core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/testground/sdk-go/sync" + + "github.com/filecoin-project/lotus/testplans/lotus-soup/statemachine" +) + +var ( + PrepareDrandTimeout = 3 * time.Minute + secretDKG = "dkgsecret" +) + +type DrandInstance struct { + daemon *core.Drand + httpClient client.Client + ctrlClient *dnet.ControlClient + gossipRelay *lp2p.GossipRelayNode + + t *TestEnvironment + stateDir string + priv *key.Pair + pubAddr string + privAddr string + ctrlAddr string +} + +func (dr *DrandInstance) Start() error { + opts := []core.ConfigOption{ + core.WithLogLevel(getLogLevel(dr.t)), + core.WithConfigFolder(dr.stateDir), + core.WithPublicListenAddress(dr.pubAddr), + core.WithPrivateListenAddress(dr.privAddr), + core.WithControlPort(dr.ctrlAddr), + core.WithInsecure(), + } + conf := core.NewConfig(opts...) 
+ fs := key.NewFileStore(conf.ConfigFolder()) + fs.SaveKeyPair(dr.priv) + key.Save(path.Join(dr.stateDir, "public.toml"), dr.priv.Public, false) + if dr.daemon == nil { + drand, err := core.NewDrand(fs, conf) + if err != nil { + return err + } + dr.daemon = drand + } else { + drand, err := core.LoadDrand(fs, conf) + if err != nil { + return err + } + drand.StartBeacon(true) + dr.daemon = drand + } + return nil +} + +func (dr *DrandInstance) Ping() bool { + cl := dr.ctrl() + if err := cl.Ping(); err != nil { + return false + } + return true +} + +func (dr *DrandInstance) Close() error { + dr.gossipRelay.Shutdown() + dr.daemon.Stop(context.Background()) + return os.RemoveAll(dr.stateDir) +} + +func (dr *DrandInstance) ctrl() *dnet.ControlClient { + if dr.ctrlClient != nil { + return dr.ctrlClient + } + cl, err := dnet.NewControlClient(dr.ctrlAddr) + if err != nil { + dr.t.RecordMessage("drand can't instantiate control client: %w", err) + return nil + } + dr.ctrlClient = cl + return cl +} + +func (dr *DrandInstance) RunDKG(nodes, thr int, timeout string, leader bool, leaderAddr string, beaconOffset int) *key.Group { + cl := dr.ctrl() + p := dr.t.DurationParam("drand_period") + catchupPeriod := dr.t.DurationParam("drand_catchup_period") + t, _ := time.ParseDuration(timeout) + var grp *drand.GroupPacket + var err error + if leader { + grp, err = cl.InitDKGLeader(nodes, thr, p, catchupPeriod, t, nil, secretDKG, beaconOffset) + } else { + leader := dnet.CreatePeer(leaderAddr, false) + grp, err = cl.InitDKG(leader, nil, secretDKG) + } + if err != nil { + dr.t.RecordMessage("drand dkg run failed: %w", err) + return nil + } + kg, _ := key.GroupFromProto(grp) + return kg +} + +func (dr *DrandInstance) Halt() { + dr.t.RecordMessage("drand node #%d halting", dr.t.GroupSeq) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + dr.daemon.Stop(ctx) +} + +func (dr *DrandInstance) Resume() { + dr.t.RecordMessage("drand node #%d resuming", 
dr.t.GroupSeq) + dr.Start() + // block until we can fetch the round corresponding to the current time + startTime := time.Now() + round := dr.httpClient.RoundAt(startTime) + timeout := 120 * time.Second + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + done := make(chan struct{}, 1) + go func() { + for { + res, err := dr.httpClient.Get(ctx, round) + if err == nil { + dr.t.RecordMessage("drand chain caught up to round %d", res.Round()) + done <- struct{}{} + return + } + time.Sleep(2 * time.Second) + } + }() + + select { + case <-ctx.Done(): + dr.t.RecordMessage("drand chain failed to catch up after %s", timeout.String()) + case <-done: + dr.t.RecordMessage("drand chain resumed after %s catchup time", time.Since(startTime)) + } +} + +func (dr *DrandInstance) RunDefault() error { + dr.t.RecordMessage("running drand node") + + if dr.t.IsParamSet("suspend_events") { + suspender := statemachine.NewSuspender(dr, dr.t.RecordMessage) + suspender.RunEvents(dr.t.StringParam("suspend_events")) + } + + dr.t.WaitUntilAllDone() + return nil +} + +// prepareDrandNode starts a drand instance and runs a DKG with the other members of the composition group. 
+// Once the chain is running, the leader publishes the chain info needed by lotus nodes on +// drandConfigTopic +func PrepareDrandInstance(t *TestEnvironment) (*DrandInstance, error) { + ctx, cancel := context.WithTimeout(context.Background(), PrepareDrandTimeout) + defer cancel() + + ApplyNetworkParameters(t) + + startTime := time.Now() + + seq := t.GroupSeq + isLeader := seq == 1 + nNodes := t.TestGroupInstanceCount + + myAddr := t.NetClient.MustGetDataNetworkIP() + threshold := t.IntParam("drand_threshold") + runGossipRelay := t.BooleanParam("drand_gossip_relay") + + beaconOffset := 3 + + stateDir, err := ioutil.TempDir("/tmp", fmt.Sprintf("drand-%d", t.GroupSeq)) + if err != nil { + return nil, err + } + + dr := DrandInstance{ + t: t, + stateDir: stateDir, + pubAddr: dtest.FreeBind(myAddr.String()), + privAddr: dtest.FreeBind(myAddr.String()), + ctrlAddr: dtest.FreeBind("localhost"), + } + dr.priv = key.NewKeyPair(dr.privAddr) + + // share the node addresses with other nodes + // TODO: if we implement TLS, this is where we'd share public TLS keys + type NodeAddr struct { + PrivateAddr string + PublicAddr string + IsLeader bool + } + addrTopic := sync.NewTopic("drand-addrs", &NodeAddr{}) + var publicAddrs []string + var leaderAddr string + ch := make(chan *NodeAddr) + _, sub := t.SyncClient.MustPublishSubscribe(ctx, addrTopic, &NodeAddr{ + PrivateAddr: dr.privAddr, + PublicAddr: dr.pubAddr, + IsLeader: isLeader, + }, ch) + for i := 0; i < nNodes; i++ { + select { + case msg := <-ch: + publicAddrs = append(publicAddrs, fmt.Sprintf("http://%s", msg.PublicAddr)) + if msg.IsLeader { + leaderAddr = msg.PrivateAddr + } + case err := <-sub.Done(): + return nil, fmt.Errorf("unable to read drand addrs from sync service: %w", err) + } + } + if leaderAddr == "" { + return nil, fmt.Errorf("got %d drand addrs, but no leader", len(publicAddrs)) + } + + t.SyncClient.MustSignalAndWait(ctx, "drand-start", nNodes) + t.RecordMessage("Starting drand sharing ceremony") + if err := 
dr.Start(); err != nil { + return nil, err + } + + alive := false + waitSecs := 10 + for i := 0; i < waitSecs; i++ { + if !dr.Ping() { + time.Sleep(time.Second) + continue + } + t.R().RecordPoint("drand_first_ping", time.Now().Sub(startTime).Seconds()) + alive = true + break + } + if !alive { + return nil, fmt.Errorf("drand node %d failed to start after %d seconds", t.GroupSeq, waitSecs) + } + + // run DKG + t.SyncClient.MustSignalAndWait(ctx, "drand-dkg-start", nNodes) + if !isLeader { + time.Sleep(3 * time.Second) + } + grp := dr.RunDKG(nNodes, threshold, "10s", isLeader, leaderAddr, beaconOffset) + if grp == nil { + return nil, fmt.Errorf("drand dkg failed") + } + t.R().RecordPoint("drand_dkg_complete", time.Now().Sub(startTime).Seconds()) + + t.RecordMessage("drand dkg complete, waiting for chain start: %v", time.Until(time.Unix(grp.GenesisTime, 0).Add(grp.Period))) + + // wait for chain to begin + to := time.Until(time.Unix(grp.GenesisTime, 0).Add(5 * time.Second).Add(grp.Period)) + time.Sleep(to) + + t.RecordMessage("drand beacon chain started, fetching initial round via http") + // verify that we can get a round of randomness from the chain using an http client + info := chain.NewChainInfo(grp) + myPublicAddr := fmt.Sprintf("http://%s", dr.pubAddr) + dr.httpClient, err = hclient.NewWithInfo(myPublicAddr, info, nil) + if err != nil { + return nil, fmt.Errorf("unable to create drand http client: %w", err) + } + + _, err = dr.httpClient.Get(ctx, 1) + if err != nil { + return nil, fmt.Errorf("unable to get initial drand round: %w", err) + } + + // start gossip relay (unless disabled via testplan parameter) + var relayAddrs []peer.AddrInfo + + if runGossipRelay { + gossipDir := path.Join(stateDir, "gossip-relay") + listenAddr := fmt.Sprintf("/ip4/%s/tcp/7777", myAddr.String()) + relayCfg := lp2p.GossipRelayConfig{ + ChainHash: hex.EncodeToString(info.Hash()), + Addr: listenAddr, + DataDir: gossipDir, + IdentityPath: path.Join(gossipDir, "identity.key"), + 
Insecure: true, + Client: dr.httpClient, + } + t.RecordMessage("starting drand gossip relay") + dr.gossipRelay, err = lp2p.NewGossipRelayNode(log.NewLogger(nil, getLogLevel(t)), &relayCfg) + if err != nil { + return nil, fmt.Errorf("failed to construct drand gossip relay: %w", err) + } + + t.RecordMessage("sharing gossip relay addrs") + // share the gossip relay addrs so we can publish them in DrandRuntimeInfo + relayInfo, err := relayAddrInfo(dr.gossipRelay.Multiaddrs(), myAddr) + if err != nil { + return nil, err + } + infoCh := make(chan *peer.AddrInfo, nNodes) + infoTopic := sync.NewTopic("drand-gossip-addrs", &peer.AddrInfo{}) + + _, sub := t.SyncClient.MustPublishSubscribe(ctx, infoTopic, relayInfo, infoCh) + for i := 0; i < nNodes; i++ { + select { + case ai := <-infoCh: + relayAddrs = append(relayAddrs, *ai) + case err := <-sub.Done(): + return nil, fmt.Errorf("unable to get drand relay addr from sync service: %w", err) + } + } + } + + // if we're the leader, publish the config to the sync service + if isLeader { + buf := bytes.Buffer{} + if err := info.ToJSON(&buf); err != nil { + return nil, fmt.Errorf("error marshaling chain info: %w", err) + } + cfg := DrandRuntimeInfo{ + Config: dtypes.DrandConfig{ + Servers: publicAddrs, + ChainInfoJSON: buf.String(), + }, + GossipBootstrap: relayAddrs, + } + t.DebugSpew("publishing drand config on sync topic: %v", cfg) + t.SyncClient.MustPublish(ctx, DrandConfigTopic, &cfg) + } + + // signal ready state + t.SyncClient.MustSignalAndWait(ctx, StateReady, t.TestInstanceCount) + return &dr, nil +} + +// waitForDrandConfig should be called by filecoin instances before constructing the lotus Node +// you can use the returned dtypes.DrandConfig to override the default production config. 
+func waitForDrandConfig(ctx context.Context, client sync.Client) (*DrandRuntimeInfo, error) { + ch := make(chan *DrandRuntimeInfo, 1) + sub := client.MustSubscribe(ctx, DrandConfigTopic, ch) + select { + case cfg := <-ch: + return cfg, nil + case err := <-sub.Done(): + return nil, err + } +} + +func relayAddrInfo(addrs []ma.Multiaddr, dataIP net.IP) (*peer.AddrInfo, error) { + for _, a := range addrs { + if ip, _ := a.ValueForProtocol(ma.P_IP4); ip != dataIP.String() { + continue + } + return peer.AddrInfoFromP2pAddr(a) + } + return nil, fmt.Errorf("no addr found with data ip %s in addrs: %v", dataIP, addrs) +} + +func getLogLevel(t *TestEnvironment) int { + switch t.StringParam("drand_log_level") { + case "info": + return log.LogInfo + case "debug": + return log.LogDebug + default: + return log.LogNone + } +} diff --git a/testplans/lotus-soup/testkit/role_miner.go b/testplans/lotus-soup/testkit/role_miner.go new file mode 100644 index 00000000000..52bcfc98b74 --- /dev/null +++ b/testplans/lotus-soup/testkit/role_miner.go @@ -0,0 +1,648 @@ +package testkit + +import ( + "context" + "crypto/rand" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "path/filepath" + "time" + + "contrib.go.opencensus.io/exporter/prometheus" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-storedcounter" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + genesis_chain "github.com/filecoin-project/lotus/chain/gen/genesis" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/markets/storageadapter" + 
"github.com/filecoin-project/lotus/miner" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/impl" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/specs-actors/actors/builtin" + saminer "github.com/filecoin-project/specs-actors/actors/builtin/miner" + "github.com/google/uuid" + "github.com/gorilla/mux" + "github.com/hashicorp/go-multierror" + "github.com/ipfs/go-datastore" + libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/testground/sdk-go/sync" +) + +const ( + sealDelay = 30 * time.Second +) + +type LotusMiner struct { + *LotusNode + + MinerRepo repo.Repo + NodeRepo repo.Repo + FullNetAddrs []peer.AddrInfo + GenesisMsg *GenesisMsg + Subsystems config.MinerSubsystemConfig + + t *TestEnvironment +} + +func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) { + ctx, cancel := context.WithTimeout(context.Background(), PrepareNodeTimeout) + defer cancel() + + ApplyNetworkParameters(t) + + pubsubTracer, err := GetPubsubTracerMaddr(ctx, t) + if err != nil { + return nil, err + } + + drandOpt, err := GetRandomBeaconOpts(ctx, t) + if err != nil { + return nil, err + } + + // first create a wallet + walletKey, err := wallet.GenerateKey(types.KTBLS) + if err != nil { + return nil, err + } + + // publish the account ID/balance + balance := t.FloatParam("balance") + balanceMsg := &InitialBalanceMsg{Addr: walletKey.Address, Balance: balance} + t.SyncClient.Publish(ctx, BalanceTopic, balanceMsg) + + // create and publish the preseal commitment + priv, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader) + if err != nil { + return nil, err + } + + minerID, err := peer.IDFromPrivateKey(priv) + if err != nil { + return nil, err + } + + // pick unique sequence number for each miner, no matter in which group they are + seq := 
t.SyncClient.MustSignalAndWait(ctx, StateMinerPickSeqNum, t.IntParam("miners")) + + minerAddr, err := address.NewIDAddress(genesis_chain.MinerStart + uint64(seq-1)) + if err != nil { + return nil, err + } + + presealDir, err := ioutil.TempDir("", "preseal") + if err != nil { + return nil, err + } + + sectors := t.IntParam("sectors") + genMiner, _, err := seed.PreSeal(minerAddr, abi.RegisteredSealProof_StackedDrg8MiBV1, 0, sectors, presealDir, []byte("TODO: randomize this"), &walletKey.KeyInfo, false) + if err != nil { + return nil, err + } + genMiner.PeerId = minerID + + t.RecordMessage("Miner Info: Owner: %s Worker: %s", genMiner.Owner, genMiner.Worker) + + presealMsg := &PresealMsg{Miner: *genMiner, Seqno: seq} + t.SyncClient.Publish(ctx, PresealTopic, presealMsg) + + // then collect the genesis block and bootstrapper address + genesisMsg, err := WaitForGenesis(t, ctx) + if err != nil { + return nil, err + } + + // prepare the repo + minerRepoDir, err := ioutil.TempDir("", "miner-repo-dir") + if err != nil { + return nil, err + } + + minerRepo, err := repo.NewFS(minerRepoDir) + if err != nil { + return nil, err + } + + err = minerRepo.Init(repo.StorageMiner) + if err != nil { + return nil, err + } + + var subsystems config.MinerSubsystemConfig + + { + lr, err := minerRepo.Lock(repo.StorageMiner) + if err != nil { + return nil, err + } + + c, err := lr.Config() + if err != nil { + return nil, err + } + + cfg := c.(*config.StorageMiner) + subsystems = cfg.Subsystems + + ks, err := lr.KeyStore() + if err != nil { + return nil, err + } + + kbytes, err := priv.Bytes() + if err != nil { + return nil, err + } + + err = ks.Put("libp2p-host", types.KeyInfo{ + Type: "libp2p-host", + PrivateKey: kbytes, + }) + if err != nil { + return nil, err + } + + ds, err := lr.Datastore(context.Background(), "/metadata") + if err != nil { + return nil, err + } + + err = ds.Put(datastore.NewKey("miner-address"), minerAddr.Bytes()) + if err != nil { + return nil, err + } + + nic := 
storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix)) + for i := 0; i < (sectors + 1); i++ { + _, err = nic.Next() + if err != nil { + return nil, err + } + } + + var localPaths []stores.LocalPath + + b, err := json.MarshalIndent(&stores.LocalStorageMeta{ + ID: stores.ID(uuid.New().String()), + Weight: 10, + CanSeal: true, + CanStore: true, + }, "", " ") + if err != nil { + return nil, fmt.Errorf("marshaling storage config: %w", err) + } + + if err := ioutil.WriteFile(filepath.Join(lr.Path(), "sectorstore.json"), b, 0644); err != nil { + return nil, fmt.Errorf("persisting storage metadata (%s): %w", filepath.Join(lr.Path(), "sectorstore.json"), err) + } + + localPaths = append(localPaths, stores.LocalPath{ + Path: lr.Path(), + }) + + if err := lr.SetStorage(func(sc *stores.StorageConfig) { + sc.StoragePaths = append(sc.StoragePaths, localPaths...) + }); err != nil { + return nil, err + } + + err = lr.Close() + if err != nil { + return nil, err + } + } + + minerIP := t.NetClient.MustGetDataNetworkIP().String() + + // create the node + // we need both a full node _and_ and storage miner node + n := &LotusNode{} + + // prepare the repo + nodeRepoDir, err := ioutil.TempDir("", "node-repo-dir") + if err != nil { + return nil, err + } + + nodeRepo, err := repo.NewFS(nodeRepoDir) + if err != nil { + return nil, err + } + + err = nodeRepo.Init(repo.FullNode) + if err != nil { + return nil, err + } + + stop1, err := node.New(context.Background(), + node.FullAPI(&n.FullApi), + node.Base(), + node.Repo(nodeRepo), + withGenesis(genesisMsg.Genesis), + withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))), + withListenAddress(minerIP), + withBootstrapper(genesisMsg.Bootstrapper), + withPubsubConfig(false, pubsubTracer), + drandOpt, + ) + if err != nil { + return nil, fmt.Errorf("node node.new error: %w", err) + } + + // set the wallet + err = n.setWallet(ctx, walletKey) + if err != nil { + stop1(context.TODO()) + return nil, err + } 
+ + minerOpts := []node.Option{ + node.StorageMiner(&n.MinerApi, subsystems), + node.Base(), + node.Repo(minerRepo), + node.Override(new(api.FullNode), n.FullApi), + node.Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ + Period: 15 * time.Second, + MaxDealsPerMsg: 1, + })), + withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("miner_rpc", "0"))), + withMinerListenAddress(minerIP), + } + + if t.StringParam("mining_mode") != "natural" { + mineBlock := make(chan miner.MineReq) + + minerOpts = append(minerOpts, + node.Override(new(*miner.Miner), miner.NewTestMiner(mineBlock, minerAddr))) + + n.MineOne = func(ctx context.Context, cb miner.MineReq) error { + select { + case mineBlock <- cb: + return nil + case <-ctx.Done(): + return ctx.Err() + } + } + } + + stop2, err := node.New(context.Background(), minerOpts...) + if err != nil { + stop1(context.TODO()) + return nil, fmt.Errorf("miner node.new error: %w", err) + } + + registerAndExportMetrics(minerAddr.String()) + + // collect stats based on blockchain from first instance of `miner` role + if t.InitContext.GroupSeq == 1 && t.Role == "miner" { + go collectStats(t, ctx, n.FullApi) + } + + // Start listening on the full node. 
+ fullNodeNetAddrs, err := n.FullApi.NetAddrsListen(ctx) + if err != nil { + panic(err) + } + + // set seal delay to lower value than 1 hour + err = n.MinerApi.SectorSetSealDelay(ctx, sealDelay) + if err != nil { + return nil, err + } + + // set expected seal duration to 1 minute + err = n.MinerApi.SectorSetExpectedSealDuration(ctx, 1*time.Minute) + if err != nil { + return nil, err + } + + // print out the admin auth token + token, err := n.MinerApi.AuthNew(ctx, api.AllPermissions) + if err != nil { + return nil, err + } + + t.RecordMessage("Auth token: %s", string(token)) + + // add local storage for presealed sectors + err = n.MinerApi.StorageAddLocal(ctx, presealDir) + if err != nil { + return nil, err + } + + // set the miner PeerID + minerIDEncoded, err := actors.SerializeParams(&saminer.ChangePeerIDParams{NewID: abi.PeerID(minerID)}) + if err != nil { + return nil, err + } + + changeMinerID := &types.Message{ + To: minerAddr, + From: genMiner.Worker, + Method: builtin.MethodsMiner.ChangePeerID, + Params: minerIDEncoded, + Value: types.NewInt(0), + } + + _, err = n.FullApi.MpoolPushMessage(ctx, changeMinerID, nil) + if err != nil { + return nil, err + } + + t.RecordMessage("publish our address to the miners addr topic") + minerActor, err := n.MinerApi.ActorAddress(ctx) + if err != nil { + return nil, err + } + + minerNetAddrs, err := n.MinerApi.NetAddrsListen(ctx) + if err != nil { + return nil, err + } + + t.SyncClient.MustPublish(ctx, MinersAddrsTopic, MinerAddressesMsg{ + FullNetAddrs: fullNodeNetAddrs, + MinerNetAddrs: minerNetAddrs, + MinerActorAddr: minerActor, + WalletAddr: walletKey.Address, + }) + + t.RecordMessage("connecting to all other miners") + + // densely connect the miner's full nodes. 
+ minerCh := make(chan *MinerAddressesMsg, 16) + sctx, cancel := context.WithCancel(ctx) + defer cancel() + t.SyncClient.MustSubscribe(sctx, MinersAddrsTopic, minerCh) + var fullNetAddrs []peer.AddrInfo + for i := 0; i < t.IntParam("miners"); i++ { + m := <-minerCh + if m.MinerActorAddr == minerActor { + // once I find myself, I stop connecting to others, to avoid a simopen problem. + break + } + err := n.FullApi.NetConnect(ctx, m.FullNetAddrs) + if err != nil { + return nil, fmt.Errorf("failed to connect to miner %s on: %v", m.MinerActorAddr, m.FullNetAddrs) + } + t.RecordMessage("connected to full node of miner %s on %v", m.MinerActorAddr, m.FullNetAddrs) + + fullNetAddrs = append(fullNetAddrs, m.FullNetAddrs) + } + + t.RecordMessage("waiting for all nodes to be ready") + t.SyncClient.MustSignalAndWait(ctx, StateReady, t.TestInstanceCount) + + fullSrv, err := startFullNodeAPIServer(t, nodeRepo, n.FullApi) + if err != nil { + return nil, err + } + + minerSrv, err := startStorageMinerAPIServer(t, minerRepo, n.MinerApi) + if err != nil { + return nil, err + } + + n.StopFn = func(ctx context.Context) error { + var err *multierror.Error + err = multierror.Append(fullSrv.Shutdown(ctx)) + err = multierror.Append(minerSrv.Shutdown(ctx)) + err = multierror.Append(stop2(ctx)) + err = multierror.Append(stop2(ctx)) + err = multierror.Append(stop1(ctx)) + return err.ErrorOrNil() + } + + m := &LotusMiner{n, minerRepo, nodeRepo, fullNetAddrs, genesisMsg, subsystems, t} + + return m, nil +} + +func RestoreMiner(t *TestEnvironment, m *LotusMiner) (*LotusMiner, error) { + ctx, cancel := context.WithTimeout(context.Background(), PrepareNodeTimeout) + defer cancel() + + minerRepo := m.MinerRepo + nodeRepo := m.NodeRepo + fullNetAddrs := m.FullNetAddrs + genesisMsg := m.GenesisMsg + + minerIP := t.NetClient.MustGetDataNetworkIP().String() + + drandOpt, err := GetRandomBeaconOpts(ctx, t) + if err != nil { + return nil, err + } + + // create the node + // we need both a full node _and_ 
and storage miner node + n := &LotusNode{} + + stop1, err := node.New(context.Background(), + node.FullAPI(&n.FullApi), + node.Base(), + node.Repo(nodeRepo), + //withGenesis(genesisMsg.Genesis), + withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))), + withListenAddress(minerIP), + withBootstrapper(genesisMsg.Bootstrapper), + //withPubsubConfig(false, pubsubTracer), + drandOpt, + ) + if err != nil { + return nil, err + } + + minerOpts := []node.Option{ + node.StorageMiner(&n.MinerApi, m.Subsystems), + node.Base(), + node.Repo(minerRepo), + node.Override(new(api.FullNode), n.FullApi), + withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("miner_rpc", "0"))), + withMinerListenAddress(minerIP), + } + + stop2, err := node.New(context.Background(), minerOpts...) + if err != nil { + stop1(context.TODO()) + return nil, err + } + + fullSrv, err := startFullNodeAPIServer(t, nodeRepo, n.FullApi) + if err != nil { + return nil, err + } + + minerSrv, err := startStorageMinerAPIServer(t, minerRepo, n.MinerApi) + if err != nil { + return nil, err + } + + n.StopFn = func(ctx context.Context) error { + var err *multierror.Error + err = multierror.Append(fullSrv.Shutdown(ctx)) + err = multierror.Append(minerSrv.Shutdown(ctx)) + err = multierror.Append(stop2(ctx)) + err = multierror.Append(stop2(ctx)) + err = multierror.Append(stop1(ctx)) + return err.ErrorOrNil() + } + + for i := 0; i < len(fullNetAddrs); i++ { + err := n.FullApi.NetConnect(ctx, fullNetAddrs[i]) + if err != nil { + // we expect a failure since we also shutdown another miner + t.RecordMessage("failed to connect to miner %d on: %v", i, fullNetAddrs[i]) + continue + } + t.RecordMessage("connected to full node of miner %d on %v", i, fullNetAddrs[i]) + } + + pm := &LotusMiner{n, minerRepo, nodeRepo, fullNetAddrs, genesisMsg, m.Subsystems, t} + + return pm, err +} + +func (m *LotusMiner) RunDefault() error { + var ( + t = m.t + clients = t.IntParam("clients") + miners = 
t.IntParam("miners") + ) + + t.RecordMessage("running miner") + t.RecordMessage("block delay: %v", build.BlockDelaySecs) + t.D().Gauge("miner.block-delay").Update(float64(build.BlockDelaySecs)) + + ctx := context.Background() + myActorAddr, err := m.MinerApi.ActorAddress(ctx) + if err != nil { + return err + } + + // mine / stop mining + mine := true + done := make(chan struct{}) + + if m.MineOne != nil { + go func() { + defer t.RecordMessage("shutting down mining") + defer close(done) + + var i int + for i = 0; mine; i++ { + // synchronize all miners to mine the next block + t.RecordMessage("synchronizing all miners to mine next block [%d]", i) + stateMineNext := sync.State(fmt.Sprintf("mine-block-%d", i)) + t.SyncClient.MustSignalAndWait(ctx, stateMineNext, miners) + + ch := make(chan error) + const maxRetries = 100 + success := false + for retries := 0; retries < maxRetries; retries++ { + f := func(mined bool, epoch abi.ChainEpoch, err error) { + if mined { + t.D().Counter(fmt.Sprintf("block.mine,miner=%s", myActorAddr)).Inc(1) + } + ch <- err + } + req := miner.MineReq{ + Done: f, + } + err := m.MineOne(ctx, req) + if err != nil { + panic(err) + } + + miningErr := <-ch + if miningErr == nil { + success = true + break + } + t.D().Counter("block.mine.err").Inc(1) + t.RecordMessage("retrying block [%d] after %d attempts due to mining error: %s", + i, retries, miningErr) + } + if !success { + panic(fmt.Errorf("failed to mine block %d after %d retries", i, maxRetries)) + } + } + + // signal the last block to make sure no miners are left stuck waiting for the next block signal + // while the others have stopped + stateMineLast := sync.State(fmt.Sprintf("mine-block-%d", i)) + t.SyncClient.MustSignalEntry(ctx, stateMineLast) + }() + } else { + close(done) + } + + // wait for a signal from all clients to stop mining + err = <-t.SyncClient.MustBarrier(ctx, StateStopMining, clients).C + if err != nil { + return err + } + + mine = false + <-done + + 
t.SyncClient.MustSignalAndWait(ctx, StateDone, t.TestInstanceCount) + return nil +} + +func startStorageMinerAPIServer(t *TestEnvironment, repo repo.Repo, minerApi api.StorageMiner) (*http.Server, error) { + mux := mux.NewRouter() + + rpcServer := jsonrpc.NewServer() + rpcServer.Register("Filecoin", minerApi) + + mux.Handle("/rpc/v0", rpcServer) + mux.PathPrefix("/remote").HandlerFunc(minerApi.(*impl.StorageMinerAPI).ServeRemote(true)) + mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof + + exporter, err := prometheus.NewExporter(prometheus.Options{ + Namespace: "lotus", + }) + if err != nil { + return nil, err + } + + mux.Handle("/debug/metrics", exporter) + + ah := &auth.Handler{ + Verify: func(ctx context.Context, token string) ([]auth.Permission, error) { + return api.AllPermissions, nil + }, + Next: mux.ServeHTTP, + } + + endpoint, err := repo.APIEndpoint() + if err != nil { + return nil, fmt.Errorf("no API endpoint in repo: %w", err) + } + + srv := &http.Server{Handler: ah} + + listenAddr, err := startServer(endpoint, srv) + if err != nil { + return nil, fmt.Errorf("failed to start storage miner API endpoint: %w", err) + } + + t.RecordMessage("started storage miner API server at %s", listenAddr) + return srv, nil +} diff --git a/testplans/lotus-soup/testkit/role_pubsub_tracer.go b/testplans/lotus-soup/testkit/role_pubsub_tracer.go new file mode 100644 index 00000000000..5b13e6b81ab --- /dev/null +++ b/testplans/lotus-soup/testkit/role_pubsub_tracer.go @@ -0,0 +1,79 @@ +package testkit + +import ( + "context" + "crypto/rand" + "fmt" + + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p-core/crypto" + "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-pubsub-tracer/traced" + + ma "github.com/multiformats/go-multiaddr" +) + +type PubsubTracer struct { + t *TestEnvironment + host host.Host + traced *traced.TraceCollector +} + +func PreparePubsubTracer(t *TestEnvironment) (*PubsubTracer, error) { + ctx := 
context.Background() + + privk, _, err := crypto.GenerateEd25519Key(rand.Reader) + if err != nil { + return nil, err + } + + tracedIP := t.NetClient.MustGetDataNetworkIP().String() + tracedAddr := fmt.Sprintf("/ip4/%s/tcp/4001", tracedIP) + + host, err := libp2p.New(ctx, + libp2p.Identity(privk), + libp2p.ListenAddrStrings(tracedAddr), + ) + if err != nil { + return nil, err + } + + tracedDir := t.TestOutputsPath + "/traced.logs" + traced, err := traced.NewTraceCollector(host, tracedDir) + if err != nil { + host.Close() + return nil, err + } + + tracedMultiaddrStr := fmt.Sprintf("%s/p2p/%s", tracedAddr, host.ID()) + t.RecordMessage("I am %s", tracedMultiaddrStr) + + _ = ma.StringCast(tracedMultiaddrStr) + tracedMsg := &PubsubTracerMsg{Multiaddr: tracedMultiaddrStr} + t.SyncClient.MustPublish(ctx, PubsubTracerTopic, tracedMsg) + + t.RecordMessage("waiting for all nodes to be ready") + t.SyncClient.MustSignalAndWait(ctx, StateReady, t.TestInstanceCount) + + tracer := &PubsubTracer{t: t, host: host, traced: traced} + return tracer, nil +} + +func (tr *PubsubTracer) RunDefault() error { + tr.t.RecordMessage("running pubsub tracer") + + defer func() { + err := tr.Stop() + if err != nil { + tr.t.RecordMessage("error stoping tracer: %s", err) + } + }() + + tr.t.WaitUntilAllDone() + return nil +} + +func (tr *PubsubTracer) Stop() error { + tr.traced.Stop() + return tr.host.Close() +} diff --git a/testplans/lotus-soup/testkit/sync.go b/testplans/lotus-soup/testkit/sync.go new file mode 100644 index 00000000000..a61e2961262 --- /dev/null +++ b/testplans/lotus-soup/testkit/sync.go @@ -0,0 +1,69 @@ +package testkit + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/genesis" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/testground/sdk-go/sync" +) + +var ( + GenesisTopic = sync.NewTopic("genesis", &GenesisMsg{}) + BalanceTopic = sync.NewTopic("balance", 
&InitialBalanceMsg{}) + PresealTopic = sync.NewTopic("preseal", &PresealMsg{}) + ClientsAddrsTopic = sync.NewTopic("clients_addrs", &ClientAddressesMsg{}) + MinersAddrsTopic = sync.NewTopic("miners_addrs", &MinerAddressesMsg{}) + SlashedMinerTopic = sync.NewTopic("slashed_miner", &SlashedMinerMsg{}) + PubsubTracerTopic = sync.NewTopic("pubsub_tracer", &PubsubTracerMsg{}) + DrandConfigTopic = sync.NewTopic("drand_config", &DrandRuntimeInfo{}) +) + +var ( + StateReady = sync.State("ready") + StateDone = sync.State("done") + StateStopMining = sync.State("stop-mining") + StateMinerPickSeqNum = sync.State("miner-pick-seq-num") + StateAbortTest = sync.State("abort-test") +) + +type InitialBalanceMsg struct { + Addr address.Address + Balance float64 +} + +type PresealMsg struct { + Miner genesis.Miner + Seqno int64 +} + +type GenesisMsg struct { + Genesis []byte + Bootstrapper []byte +} + +type ClientAddressesMsg struct { + PeerNetAddr peer.AddrInfo + WalletAddr address.Address + GroupSeq int64 +} + +type MinerAddressesMsg struct { + FullNetAddrs peer.AddrInfo + MinerNetAddrs peer.AddrInfo + MinerActorAddr address.Address + WalletAddr address.Address +} + +type SlashedMinerMsg struct { + MinerActorAddr address.Address +} + +type PubsubTracerMsg struct { + Multiaddr string +} + +type DrandRuntimeInfo struct { + Config dtypes.DrandConfig + GossipBootstrap dtypes.DrandBootstrap +} diff --git a/testplans/lotus-soup/testkit/testenv.go b/testplans/lotus-soup/testkit/testenv.go new file mode 100644 index 00000000000..63c297b032b --- /dev/null +++ b/testplans/lotus-soup/testkit/testenv.go @@ -0,0 +1,88 @@ +package testkit + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/testground/sdk-go/run" + "github.com/testground/sdk-go/runtime" +) + +type TestEnvironment struct { + *runtime.RunEnv + *run.InitContext + + Role string +} + +// workaround for default params being wrapped in quote chars +func (t 
*TestEnvironment) StringParam(name string) string { + return strings.Trim(t.RunEnv.StringParam(name), "\"") +} + +func (t *TestEnvironment) DurationParam(name string) time.Duration { + d, err := time.ParseDuration(t.StringParam(name)) + if err != nil { + panic(fmt.Errorf("invalid duration value for param '%s': %w", name, err)) + } + return d +} + +func (t *TestEnvironment) DurationRangeParam(name string) DurationRange { + var r DurationRange + t.JSONParam(name, &r) + return r +} + +func (t *TestEnvironment) FloatRangeParam(name string) FloatRange { + r := FloatRange{} + t.JSONParam(name, &r) + return r +} + +func (t *TestEnvironment) DebugSpew(format string, args ...interface{}) { + t.RecordMessage(spew.Sprintf(format, args...)) +} + +func (t *TestEnvironment) DumpJSON(filename string, v interface{}) { + b, err := json.Marshal(v) + if err != nil { + t.RecordMessage("unable to marshal object to JSON: %s", err) + return + } + f, err := t.CreateRawAsset(filename) + if err != nil { + t.RecordMessage("unable to create asset file: %s", err) + return + } + defer f.Close() + + _, err = f.Write(b) + if err != nil { + t.RecordMessage("error writing json object dump: %s", err) + } +} + +// WaitUntilAllDone waits until all instances in the test case are done. +func (t *TestEnvironment) WaitUntilAllDone() { + ctx := context.Background() + t.SyncClient.MustSignalAndWait(ctx, StateDone, t.TestInstanceCount) +} + +// WrapTestEnvironment takes a test case function that accepts a +// *TestEnvironment, and adapts it to the original unwrapped SDK style +// (run.InitializedTestCaseFn). 
+func WrapTestEnvironment(f func(t *TestEnvironment) error) run.InitializedTestCaseFn { + return func(runenv *runtime.RunEnv, initCtx *run.InitContext) error { + t := &TestEnvironment{RunEnv: runenv, InitContext: initCtx} + t.Role = t.StringParam("role") + + t.DumpJSON("test-parameters.json", t.TestInstanceParams) + + return f(t) + } +} diff --git a/testplans/lotus-soup/testkit/testenv_ranges.go b/testplans/lotus-soup/testkit/testenv_ranges.go new file mode 100644 index 00000000000..110ce60d16f --- /dev/null +++ b/testplans/lotus-soup/testkit/testenv_ranges.go @@ -0,0 +1,77 @@ +package testkit + +import ( + "encoding/json" + "fmt" + "math/rand" + "time" + + "github.com/testground/sdk-go/ptypes" +) + +// DurationRange is a Testground parameter type that represents a duration +// range, suitable use in randomized tests. This type is encoded as a JSON array +// of length 2 of element type ptypes.Duration, e.g. ["10s", "10m"]. +type DurationRange struct { + Min time.Duration + Max time.Duration +} + +func (r *DurationRange) ChooseRandom() time.Duration { + i := int64(r.Min) + rand.Int63n(int64(r.Max)-int64(r.Min)) + return time.Duration(i) +} + +func (r *DurationRange) UnmarshalJSON(b []byte) error { + var s []ptypes.Duration + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if len(s) != 2 { + return fmt.Errorf("expected two-element array of duration strings, got array of length %d", len(s)) + } + if s[0].Duration > s[1].Duration { + return fmt.Errorf("expected first element to be <= second element") + } + r.Min = s[0].Duration + r.Max = s[1].Duration + return nil +} + +func (r *DurationRange) MarshalJSON() ([]byte, error) { + s := []ptypes.Duration{{r.Min}, {r.Max}} + return json.Marshal(s) +} + +// FloatRange is a Testground parameter type that represents a float +// range, suitable use in randomized tests. This type is encoded as a JSON array +// of length 2 of element type float32, e.g. [1.45, 10.675]. 
+type FloatRange struct { + Min float32 + Max float32 +} + +func (r *FloatRange) ChooseRandom() float32 { + return r.Min + rand.Float32()*(r.Max-r.Min) +} + +func (r *FloatRange) UnmarshalJSON(b []byte) error { + var s []float32 + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if len(s) != 2 { + return fmt.Errorf("expected two-element array of floats, got array of length %d", len(s)) + } + if s[0] > s[1] { + return fmt.Errorf("expected first element to be <= second element") + } + r.Min = s[0] + r.Max = s[1] + return nil +} + +func (r *FloatRange) MarshalJSON() ([]byte, error) { + s := []float32{r.Min, r.Max} + return json.Marshal(s) +} diff --git a/testplans/notes/.empty b/testplans/notes/.empty new file mode 100644 index 00000000000..e69de29bb2d diff --git a/testplans/notes/raulk.md b/testplans/notes/raulk.md new file mode 100644 index 00000000000..88476f2a307 --- /dev/null +++ b/testplans/notes/raulk.md @@ -0,0 +1,55 @@ +# Raúl's notes + +## Storage mining + +The Storage Mining System is the part of the Filecoin Protocol that deals with +storing Client’s data, producing proof artifacts that demonstrate correct +storage behavior, and managing the work involved. + +## Preseals + +In the Filecoin consensus protocol, the miners' probability of being eligible +to mine a block in a given epoch is directly correlated with their power in the +network. This creates a chicken-and-egg problem at genesis. Since there are no +miners, there is no power in the network, therefore no miner is eligible to mine +and advance the chain. + +Preseals are sealed sectors that are blessed at genesis, thus conferring +their miners the possibility to win round elections and successfully mine a +block. Without preseals, the chain would be dead on arrival. + +Preseals work with fauxrep and faux sealing, which are special-case +implementations of PoRep and the sealing logic that do not depend on slow +sealing. 
+ +### Not implemented things + +**Sector Resealing:** Miners should be able to ’re-seal’ sectors, to allow them +to take a set of sectors with mostly expired pieces, and combine the +not-yet-expired pieces into a single (or multiple) sectors. + +**Sector Transfer:** Miners should be able to re-delegate the responsibility of +storing data to another miner. This is tricky for many reasons, and will not be +implemented in the initial release of Filecoin, but could provide interesting +capabilities down the road. + +## Catch-up/rush mining + +In catch-up or rush mining, miners make up for chain history that does not +exist. It's a recovery/healing procedure. The chain runs at a constant +25 second epoch time. When mining in the network halts for some reason +(consensus/liveness bug, drand availability issues, etc.), upon a restart miners +will go and backfill the chain history by mining backdated blocks at +the appropriate timestamps. + +There are a few things worth highlighting: + * mining runs in a hot loop, and there is no time for miners to gossip about + their blocks; therefore they end up building the chain solo, as they can't + incorporate other blocks into tipsets. + * the miner with most power will mine most blocks. + * presumably, as many forks in the network will appear as miners who mined a + block + a fork filled with null rounds only (for miners that didn't win a + round). + * at the end of the catch-up, the heaviest fork will win the race, and it may + be possible for the most powerful miner pre-disruption to affect the + outcome by choosing the messages that go in their blocks. diff --git a/tools/packer/etc/motd b/tools/packer/etc/motd new file mode 100644 index 00000000000..5966d972b4b --- /dev/null +++ b/tools/packer/etc/motd @@ -0,0 +1,64 @@ +Your lotus node is up and running! + +This image contains the two most important pieces of the lotus filecoin suite, the +daemon and the miner.
The daemon is configured to download a snapshot and start +running. In fact, by the time you read this, the daemon may already be in sync. +Go ahead and make sure everything is working correctly with the following commands. + + + +To check if the daemon is running: + + systemctl status lotus-daemon + + + +To check if the daemon is in sync: + + lotus sync status + + **note: When starting lotus for the first time, it will download a chain snapshot. + This is a large download and will take several minutes to complete. During + this time, the lotus API will not be up yet. Give it time! You can see + progress by looking at the systemd journal. + + +To check if the daemon is connecting to other lotus nodes: + + lotus net peers + + + +No wallets are created by default. You can view, create, and delete wallets with +the lotus command. On this image, lotus is running as the user `fc`. +Be careful, now. Don't delete a wallet with funds! + + sudo -E -u fc lotus wallet list + sudo -E -u fc lotus wallet new bls + + + +The lotus miner is also installed, but it's not running by default. If you have no +special disk or worker requirements, you can initialize the lotus-miner repo like this: + + sudo -E -u fc lotus-miner init -o + + + +You only need to do this once, after which, you can enable and start the miner. + + sudo systemctl enable lotus-miner + sudo systemctl start lotus-miner + + + +Do you want to access your lotus daemon remotely? Learn how to set up token authentication +and use client libraries from lotus docs. + +https://docs.filecoin.io/build/lotus/enable-remote-api-access/ + + + +For more information, see https://docs.filecoin.io/ +Found a bug? let us know! https://github.com/filecoin-project/lotus +Chat with us on slack!
https://filecoinproject.slack.com/archives/CEGN061C5 diff --git a/tools/packer/homedir/bashrc b/tools/packer/homedir/bashrc new file mode 100644 index 00000000000..db4dbd0b60e --- /dev/null +++ b/tools/packer/homedir/bashrc @@ -0,0 +1,12 @@ +PS1="[\h \w] ⨎ " + +export PROMPT_DIRTRIM=1 + +# Where to find the lotus repo +export LOTUS_PATH=/var/lib/lotus + +# The miner is not running in this image by default. +# export LOTUS_MINER_PATH=/var/lib/lotus-miner + +# To access the lotus node remotely, the following environment variable may be used. +# export FULLNODE_API_INFO=:/ip4//tcp/1234/http diff --git a/tools/packer/lotus.pkr.hcl b/tools/packer/lotus.pkr.hcl new file mode 100644 index 00000000000..8ef41613b14 --- /dev/null +++ b/tools/packer/lotus.pkr.hcl @@ -0,0 +1,120 @@ +variable "ci_workspace_bins" { + type = string + default = "./linux" +} + +variable "lotus_network" { + type = string + default = "mainnet" +} + +variable "git_tag" { + type = string + default = "" +} + +locals { + timestamp = regex_replace(timestamp(), "[- TZ:]", "") +} + +source "amazon-ebs" "lotus" { + ami_name = "lotus-${var.lotus_network}-${var.git_tag}-${local.timestamp}" + ami_regions = [ + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-north-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + ] + ami_groups = [ + # This causes the ami to be publicly-accessible.
+ "all", + ] + ami_description = "Lotus Filecoin AMI" + launch_block_device_mappings { + device_name = "/dev/sda1" + volume_size = 100 + delete_on_termination = true + } + + instance_type = "t2.micro" + source_ami_filter { + filters = { + name = "ubuntu/images/*ubuntu-focal-20.04-amd64-server-*" + root-device-type = "ebs" + virtualization-type = "hvm" + } + most_recent = true + owners = ["099720109477"] + } + ssh_username = "ubuntu" +} + +source "digitalocean" "lotus" { + droplet_name = "lotus-${var.lotus_network}" + size = "s-1vcpu-1gb" + region = "nyc3" + image = "ubuntu-20-04-x64" + snapshot_name = "lotus-${var.lotus_network}-${var.git_tag}-${local.timestamp}" + ssh_username = "root" +} + +build { + sources = [ + "source.amazon-ebs.lotus", + "source.digitalocean.lotus", + ] + + # Lotus software (from CI workspace) + provisioner "file" { + source = "${var.ci_workspace_bins}/lotus" + destination = "lotus" + } + provisioner "file" { + source = "${var.ci_workspace_bins}/lotus-miner" + destination = "lotus-miner" + } + # First run script + provisioner "file" { + source = "./tools/packer/scripts/${var.lotus_network}/lotus-init.sh" + destination = "lotus-init.sh" + } + # Systemd service units. + provisioner "file" { + source = "./tools/packer/systemd/lotus-daemon.service" + destination = "lotus-daemon.service" + } + provisioner "file" { + source = "./tools/packer/systemd/lotus-miner.service" + destination = "lotus-miner.service" + } + provisioner "file" { + source = "./tools/packer/repo/config.toml" + destination = "config.toml" + } + provisioner "file" { + source = "./tools/packer/etc/motd" + destination = "motd" + } + provisioner "file" { + source = "./tools/packer/homedir/bashrc" + destination = ".bashrc" + } + # build it. 
+ provisioner "shell" { + script = "./tools/packer/setup.sh" + } +} diff --git a/tools/packer/repo/config.toml b/tools/packer/repo/config.toml new file mode 100644 index 00000000000..b8cbf0ec3d5 --- /dev/null +++ b/tools/packer/repo/config.toml @@ -0,0 +1,36 @@ +[API] +ListenAddress = "/ip4/0.0.0.0/tcp/1234/http" +# RemoteListenAddress = "" +# Timeout = "30s" +# +[Libp2p] +ListenAddresses = ["/ip4/0.0.0.0/tcp/5678", "/ip6/::/tcp/5678"] +# AnnounceAddresses = [] +# NoAnnounceAddresses = [] +# ConnMgrLow = 150 +# ConnMgrHigh = 180 +# ConnMgrGrace = "20s" +# +[Pubsub] +# Bootstrapper = false +# RemoteTracer = "/dns4/pubsub-tracer.filecoin.io/tcp/4001/p2p/QmTd6UvR47vUidRNZ1ZKXHrAFhqTJAD27rKL9XYghEKgKX" +# +[Client] +# UseIpfs = false +# IpfsOnlineMode = false +# IpfsMAddr = "" +# IpfsUseForRetrieval = false +# SimultaneousTransfers = 20 +# +[Metrics] +# Nickname = "" +# HeadNotifs = false +# +[Wallet] +# RemoteBackend = "" +# EnableLedger = false +# DisableLocal = false +# +[Fees] +# DefaultMaxFee = "0.007 FIL" +# diff --git a/tools/packer/scripts/butterflynet/lotus-init.sh b/tools/packer/scripts/butterflynet/lotus-init.sh new file mode 100755 index 00000000000..cfbf93f786a --- /dev/null +++ b/tools/packer/scripts/butterflynet/lotus-init.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# This script sets up an initial configuration for the lotus daemon and miner +# It will only run once. + +GATE="$LOTUS_PATH"/date_initialized + +# Don't init if already initialized. +if [ -f "$GATE" ]; then + echo lotus already initialized.
+ exit 0 +fi + +# Not importing snapshot on butterflynet +# +# echo importing minimal snapshot +# lotus daemon --import-snapshot https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car --halt-after-import + +# Block future inits +date > "$GATE" diff --git a/tools/packer/scripts/calibrationnet/lotus-init.sh b/tools/packer/scripts/calibrationnet/lotus-init.sh new file mode 100755 index 00000000000..77260fa29e4 --- /dev/null +++ b/tools/packer/scripts/calibrationnet/lotus-init.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# This script sets up an initial configuration for the lotus daemon and miner +# It will only run once. + +GATE="$LOTUS_PATH"/date_initialized + +# Don't init if already initialized. +if [ -f "$GATE" ]; then + echo lotus already initialized. + exit 0 +fi + +# Not importing snapshot on calibrationnet. +# +# echo importing minimal snapshot +# lotus daemon --import-snapshot https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car --halt-after-import + +# Block future inits +date > "$GATE" diff --git a/tools/packer/scripts/mainnet/lotus-init.sh b/tools/packer/scripts/mainnet/lotus-init.sh new file mode 100755 index 00000000000..b2285336522 --- /dev/null +++ b/tools/packer/scripts/mainnet/lotus-init.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +# This script sets up an initial configuration for the lotus daemon and miner +# It will only run once. + +GATE="$LOTUS_PATH"/date_initialized + +# Don't init if already initialized. +if [ -f "$GATE" ]; then + echo lotus already initialized.
+ exit 0 +fi + +echo importing minimal snapshot +lotus daemon --import-snapshot https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car --halt-after-import + +# Block future inits +date > "$GATE" diff --git a/tools/packer/scripts/nerpanet/lotus-init.sh b/tools/packer/scripts/nerpanet/lotus-init.sh new file mode 100755 index 00000000000..a0f19ae925b --- /dev/null +++ b/tools/packer/scripts/nerpanet/lotus-init.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# This script sets up an initial configuration for the lotus daemon and miner +# It will only run once. + +GATE="$LOTUS_PATH"/date_initialized + +# Don't init if already initialized. +if [ -f "$GATE" ]; then + echo lotus already initialized. + exit 0 +fi + +# Not importing snapshot on nerpanet +# +# echo importing minimal snapshot +# lotus daemon --import-snapshot https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car --halt-after-import + +# Block future inits +date > "$GATE" diff --git a/tools/packer/setup.sh b/tools/packer/setup.sh new file mode 100644 index 00000000000..6c0742254ff --- /dev/null +++ b/tools/packer/setup.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +# This script is executed by packer to setup the image. +# When this script is run, packer will have already copied binaries into the home directory of +# whichever user it has access to. This script is executed from within the home directory of that +# user. Bear in mind that different cloud providers, and different images on the same cloud +# provider will have a different initial user account. + +set -x + +# Become root, if we aren't already. +# Docker images will already be root. AMIs will have an SSH user account.
+ +UID=$(id -u) +if [ x$UID != x0 ] +then + printf -v cmd_str '%q ' "$0" "$@" + exec sudo su -c "$cmd_str" +fi + +MANAGED_BINS=( lotus lotus-miner lotus-init.sh ) +MANAGED_FILES=( + /lib/systemd/system/lotus-daemon.service + /lib/systemd/system/lotus-miner.service + /etc/motd + /var/lib/lotus/config.toml +) + +# install libs. +export DEBIAN_FRONTEND=noninteractive +apt-get update +apt-get -y install libhwloc15 ocl-icd-libopencl1 ufw +apt-get -y upgrade -q -y -u -o Dpkg::Options::="--force-confold" +ln -s /usr/lib/x86_64-linux-gnu/libhwloc.so.15 /usr/lib/x86_64-linux-gnu/libhwloc.so.5 + +# Create lotus user +useradd -c "lotus system account" -r fc +install -o fc -g fc -d /var/lib/lotus +install -o fc -g fc -d /var/lib/lotus-miner + +# Install software +for i in "${MANAGED_BINS[@]}" +do + install -o root -g root -m 755 -t /usr/local/bin $i + rm $i +done + +# Install systemd and other files. +# Because packer doesn't copy files with root permission, +# files are in the home directory of the ssh user. Copy +# these files into the right position.
+for i in "${MANAGED_FILES[@]}" +do + fn=$(basename $i) + install -o root -g root -m 644 $fn $i + rm $fn +done + +# Enable services +systemctl daemon-reload +systemctl enable lotus-daemon + +# Setup firewall +yes | ufw enable +ufw default deny incoming +ufw default allow outgoing +ufw allow ssh +ufw allow 5678 #libp2p diff --git a/tools/packer/systemd/lotus-daemon.service b/tools/packer/systemd/lotus-daemon.service new file mode 100644 index 00000000000..edbc91151d0 --- /dev/null +++ b/tools/packer/systemd/lotus-daemon.service @@ -0,0 +1,17 @@ +[Unit] +Description=Lotus Daemon +After=network.target + +[Service] +User=fc +Group=fc +ExecStartPre=/usr/local/bin/lotus-init.sh +ExecStart=/usr/local/bin/lotus daemon +ExecStop=/usr/local/bin/lotus daemon stop +Environment=LOTUS_PATH=/var/lib/lotus +Restart=always +RestartSec=30 +TimeoutSec=infinity + +[Install] +WantedBy=multi-user.target diff --git a/tools/packer/systemd/lotus-miner.service b/tools/packer/systemd/lotus-miner.service new file mode 100644 index 00000000000..d7289c888b3 --- /dev/null +++ b/tools/packer/systemd/lotus-miner.service @@ -0,0 +1,15 @@ +[Unit] +Description=Lotus Miner +After=network.target + +[Service] +User=fc +Group=fc +ExecStart=/usr/local/bin/lotus-miner run +Environment=LOTUS_PATH=/var/lib/lotus +Environment=LOTUS_MINER_PATH=/var/lib/lotus-miner +Restart=always +RestartSec=30 + +[Install] +WantedBy=multi-user.target diff --git a/tools/stats/collect.go b/tools/stats/collect.go index 221dc37e2c9..e33ec994b66 100644 --- a/tools/stats/collect.go +++ b/tools/stats/collect.go @@ -5,11 +5,11 @@ import ( "time" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" client "github.com/influxdata/influxdb1-client/v2" ) -func Collect(ctx context.Context, api api.FullNode, influx client.Client, database string, height int64, headlag int) { +func Collect(ctx context.Context, api v0api.FullNode, influx client.Client, 
database string, height int64, headlag int) { tipsetsCh, err := GetTips(ctx, api, abi.ChainEpoch(height), headlag) if err != nil { log.Fatal(err) diff --git a/tools/stats/metrics.go b/tools/stats/metrics.go index 795203c40a2..7764c4bcabd 100644 --- a/tools/stats/metrics.go +++ b/tools/stats/metrics.go @@ -11,7 +11,7 @@ import ( "time" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/actors/builtin/reward" @@ -115,7 +115,7 @@ func NewPointFrom(p models.Point) *client.Point { return client.NewPointFrom(p) } -func RecordTipsetPoints(ctx context.Context, api api.FullNode, pl *PointList, tipset *types.TipSet) error { +func RecordTipsetPoints(ctx context.Context, api v0api.FullNode, pl *PointList, tipset *types.TipSet) error { cids := []string{} for _, cid := range tipset.Cids() { cids = append(cids, cid.String()) @@ -238,7 +238,7 @@ func (ht *ApiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) return cid.Undef, fmt.Errorf("Put is not implemented on ApiIpldStore") } -func RecordTipsetStatePoints(ctx context.Context, api api.FullNode, pl *PointList, tipset *types.TipSet) error { +func RecordTipsetStatePoints(ctx context.Context, api v0api.FullNode, pl *PointList, tipset *types.TipSet) error { attoFil := types.NewInt(build.FilecoinPrecision).Int //TODO: StatePledgeCollateral API is not implemented and is commented out - re-enable this block once the API is implemented again. 
@@ -299,7 +299,7 @@ type msgTag struct { exitcode uint8 } -func RecordTipsetMessagesPoints(ctx context.Context, api api.FullNode, pl *PointList, tipset *types.TipSet) error { +func RecordTipsetMessagesPoints(ctx context.Context, api v0api.FullNode, pl *PointList, tipset *types.TipSet) error { cids := tipset.Cids() if len(cids) == 0 { return fmt.Errorf("no cids in tipset") diff --git a/tools/stats/rpc.go b/tools/stats/rpc.go index b01c07a3579..0aa3d141ee4 100644 --- a/tools/stats/rpc.go +++ b/tools/stats/rpc.go @@ -13,6 +13,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/client" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" @@ -45,7 +46,7 @@ func getAPI(path string) (string, http.Header, error) { return "ws://" + addr + "/rpc/v0", headers, nil } -func WaitForSyncComplete(ctx context.Context, napi api.FullNode) error { +func WaitForSyncComplete(ctx context.Context, napi v0api.FullNode) error { sync_complete: for { select { @@ -120,7 +121,7 @@ sync_complete: } } -func GetTips(ctx context.Context, api api.FullNode, lastHeight abi.ChainEpoch, headlag int) (<-chan *types.TipSet, error) { +func GetTips(ctx context.Context, api v0api.FullNode, lastHeight abi.ChainEpoch, headlag int) (<-chan *types.TipSet, error) { chmain := make(chan *types.TipSet) hb := newHeadBuffer(headlag) @@ -184,7 +185,7 @@ func GetTips(ctx context.Context, api api.FullNode, lastHeight abi.ChainEpoch, h return chmain, nil } -func loadTipsets(ctx context.Context, api api.FullNode, curr *types.TipSet, lowestHeight abi.ChainEpoch) ([]*types.TipSet, error) { +func loadTipsets(ctx context.Context, api v0api.FullNode, curr *types.TipSet, lowestHeight abi.ChainEpoch) ([]*types.TipSet, error) { tipsets := []*types.TipSet{} for { if curr.Height() == 0 { @@ -214,11 +215,11 @@ func loadTipsets(ctx context.Context, api 
api.FullNode, curr *types.TipSet, lowe return tipsets, nil } -func GetFullNodeAPI(ctx context.Context, repo string) (api.FullNode, jsonrpc.ClientCloser, error) { +func GetFullNodeAPI(ctx context.Context, repo string) (v0api.FullNode, jsonrpc.ClientCloser, error) { addr, headers, err := getAPI(repo) if err != nil { return nil, nil, err } - return client.NewFullNodeRPC(ctx, addr, headers) + return client.NewFullNodeRPCV0(ctx, addr, headers) }