diff --git a/.changelog/v0.10.1/bug-fixes/797-fix-shielded-to-shielded.md b/.changelog/v0.10.1/bug-fixes/797-fix-shielded-to-shielded.md
new file mode 100644
index 0000000000..4cb3c35949
--- /dev/null
+++ b/.changelog/v0.10.1/bug-fixes/797-fix-shielded-to-shielded.md
@@ -0,0 +1,2 @@
+- Avoid reading from nonexistent storage keys in shielded-to-shielded transfers.
+  ([#797](https://github.com/anoma/namada/pull/797))
\ No newline at end of file
diff --git a/.changelog/v0.11.0/bug-fixes/754-fix-abcipp.md b/.changelog/v0.11.0/bug-fixes/754-fix-abcipp.md
new file mode 100644
index 0000000000..ca80419640
--- /dev/null
+++ b/.changelog/v0.11.0/bug-fixes/754-fix-abcipp.md
@@ -0,0 +1,2 @@
+- Fix building with the `abcipp` feature again
+  ([#754](https://github.com/anoma/namada/pull/754))
\ No newline at end of file
diff --git a/.changelog/v0.11.0/bug-fixes/763-init-validator-vp-validation.md b/.changelog/v0.11.0/bug-fixes/763-init-validator-vp-validation.md
new file mode 100644
index 0000000000..19769dbdfa
--- /dev/null
+++ b/.changelog/v0.11.0/bug-fixes/763-init-validator-vp-validation.md
@@ -0,0 +1,2 @@
+- Fixed validation of a validator initialization transaction.
+  ([#763](https://github.com/anoma/namada/pull/763))
\ No newline at end of file
diff --git a/.changelog/v0.11.0/features/582-native-token-param.md b/.changelog/v0.11.0/features/582-native-token-param.md
new file mode 100644
index 0000000000..10dbb27503
--- /dev/null
+++ b/.changelog/v0.11.0/features/582-native-token-param.md
@@ -0,0 +1,2 @@
+- Allow the native token to be set via genesis configuration.
+  ([#582](https://github.com/anoma/namada/pull/582))
\ No newline at end of file
diff --git a/.changelog/v0.11.0/features/592-implicit-vp.md b/.changelog/v0.11.0/features/592-implicit-vp.md
new file mode 100644
index 0000000000..ab93e1fc0f
--- /dev/null
+++ b/.changelog/v0.11.0/features/592-implicit-vp.md
@@ -0,0 +1,6 @@
+- Added a validity predicate for implicit accounts. This is set in
+  protocol parameters and may be changed via governance. Additionally,
+  added automatic public key reveal in the client when an implicit
+  account that hasn't revealed its PK yet is used as a source. It's also
+  possible to manually submit a reveal transaction with a client command.
+  ([#592](https://github.com/anoma/namada/pull/592))
\ No newline at end of file
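The manual escape hatch mentioned in the #592 entry above is the new `reveal-pk` client subcommand added to `apps/src/lib/cli.rs` later in this diff. A minimal sketch of an invocation — the `--public-key` flag name is an assumption, since the argument definitions are not shown in this diff:

```shell
# Manually reveal an implicit account's public key; normally the client
# detects when a reveal tx is needed and submits it automatically.
# The --public-key flag name is an illustrative assumption.
namadac reveal-pk --public-key my-key-alias
```

diff --git a/.changelog/v0.11.0/features/687-remove-staking-address.md b/.changelog/v0.11.0/features/687-remove-staking-address.md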
new file mode 100644
index 0000000000..39d4def2aa
--- /dev/null
+++ b/.changelog/v0.11.0/features/687-remove-staking-address.md
@@ -0,0 +1,2 @@
+- PoS: Removed staking reward addresses in preparation for the auto-staked
+  rewards system. ([#687](https://github.com/anoma/namada/pull/687))
\ No newline at end of file
diff --git a/.changelog/v0.11.0/features/695-validator-commission-rates.md b/.changelog/v0.11.0/features/695-validator-commission-rates.md
new file mode 100644
index 0000000000..086227b595
--- /dev/null
+++ b/.changelog/v0.11.0/features/695-validator-commission-rates.md
@@ -0,0 +1,4 @@
+- Allow a validator's commission rate to be set, along with a limit on the
+  change of commission rate per epoch. The commission rate can be changed via a
+  transaction authorized by the validator, but the limit is an immutable value,
+  set when the validator's account is initialized. ([#695](https://github.com/anoma/namada/pull/695))
\ No newline at end of file
diff --git a/.changelog/v0.11.0/features/707-refactor-voting-powers.md b/.changelog/v0.11.0/features/707-refactor-voting-powers.md
new file mode 100644
index 0000000000..76c26cab67
--- /dev/null
+++ b/.changelog/v0.11.0/features/707-refactor-voting-powers.md
@@ -0,0 +1,5 @@
+- Optimize the PoS code to depend only on bonded stake, removing
+  the VotingPower(Delta) structs. This mitigates some previous
+  information loss in PoS calculations. Instead, the notion of
+  voting power is only relevant when communicating with Tendermint.
+  ([#707](https://github.com/anoma/namada/pull/707))
\ No newline at end of file
diff --git a/.changelog/v0.11.0/features/708-update-pos-params.md b/.changelog/v0.11.0/features/708-update-pos-params.md
new file mode 100644
index 0000000000..2941c5fc4e
--- /dev/null
+++ b/.changelog/v0.11.0/features/708-update-pos-params.md
@@ -0,0 +1,4 @@
+- Update the set of parameters in the PoS system according to the
+  latest spec and standardize the use of the `rust_decimal` crate
+  for parameters and calculations that require fractional numbers.
+  ([#708](https://github.com/anoma/namada/pull/708))
\ No newline at end of file
diff --git a/.changelog/v0.11.0/features/775-rename-cli-fee-args.md b/.changelog/v0.11.0/features/775-rename-cli-fee-args.md
new file mode 100644
index 0000000000..a81f75ef41
--- /dev/null
+++ b/.changelog/v0.11.0/features/775-rename-cli-fee-args.md
@@ -0,0 +1,2 @@
+- Renamed transaction CLI arguments `--fee-amount` and `--fee-token` to
+  `--gas-amount` and `--gas-token`. ([#775](https://github.com/anoma/namada/pull/775))
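For illustration, the renamed fee flags from #775 on a token transfer — a sketch; everything other than `--gas-amount`/`--gas-token` (which come from the entry above) is an assumption about the transfer command's interface:

```shell
# Pay fees with the renamed flags (formerly --fee-amount / --fee-token).
# Source/target/token/amount values are illustrative assumptions.
namadac transfer --source alice --target bob --token NAM --amount 10 \
  --gas-amount 0 --gas-token NAM
```
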
diff --git a/.changelog/v0.11.0/improvements/436-remove-f64.md b/.changelog/v0.11.0/improvements/436-remove-f64.md
new file mode 100644
index 0000000000..e55af7ee8f
--- /dev/null
+++ b/.changelog/v0.11.0/improvements/436-remove-f64.md
@@ -0,0 +1,2 @@
+- Refactored token decimal formatting.
+  ([#436](https://github.com/anoma/namada/pull/436))
\ No newline at end of file
diff --git a/.changelog/v0.11.0/improvements/570-rpc-sub-vp-pos.md b/.changelog/v0.11.0/improvements/570-rpc-sub-vp-pos.md
new file mode 100644
index 0000000000..3abd94115b
--- /dev/null
+++ b/.changelog/v0.11.0/improvements/570-rpc-sub-vp-pos.md
@@ -0,0 +1 @@
+- Added PoS-specific queries ([#570](https://github.com/anoma/namada/pull/570))
\ No newline at end of file
diff --git a/.changelog/v0.11.0/improvements/674-event-log.md b/.changelog/v0.11.0/improvements/674-event-log.md
new file mode 100644
index 0000000000..8dc0efaa55
--- /dev/null
+++ b/.changelog/v0.11.0/improvements/674-event-log.md
@@ -0,0 +1,3 @@
+- Added a custom events store and replaced the WebSocket client for
+  transaction results with query endpoints to the events store.
+  ([#674](https://github.com/anoma/namada/pull/674))
\ No newline at end of file
diff --git a/.changelog/v0.11.0/improvements/719-refactor-governance-storage-api.md b/.changelog/v0.11.0/improvements/719-refactor-governance-storage-api.md
new file mode 100644
index 0000000000..fcbbffd213
--- /dev/null
+++ b/.changelog/v0.11.0/improvements/719-refactor-governance-storage-api.md
@@ -0,0 +1,2 @@
+- Refactored governance code to use storage_api.
+  ([#719](https://github.com/anoma/namada/pull/719))
\ No newline at end of file
diff --git a/.changelog/v0.11.0/improvements/733-core-crate-split.md b/.changelog/v0.11.0/improvements/733-core-crate-split.md
new file mode 100644
index 0000000000..6ad3737cae
--- /dev/null
+++ b/.changelog/v0.11.0/improvements/733-core-crate-split.md
@@ -0,0 +1,4 @@
+- Public parts of the shared `namada` crate have been split out into a
+  `namada_core` crate. The `namada_proof_of_stake`, `namada_vp_prelude`
+  and `namada_tx_prelude` crates now depend on this `namada_core` crate.
+  ([#733](https://github.com/anoma/namada/pull/733))
diff --git a/.changelog/v0.11.0/improvements/807-smaller-signing.md b/.changelog/v0.11.0/improvements/807-smaller-signing.md
new file mode 100644
index 0000000000..1f58798f83
--- /dev/null
+++ b/.changelog/v0.11.0/improvements/807-smaller-signing.md
@@ -0,0 +1,2 @@
+- Sign over the hash of the code rather than the code itself in transaction
+  signing. ([#807](https://github.com/anoma/namada/pull/807))
\ No newline at end of file
diff --git a/.changelog/v0.11.0/miscellaneous/650-last-block.md b/.changelog/v0.11.0/miscellaneous/650-last-block.md
new file mode 100644
index 0000000000..bb5f264c55
--- /dev/null
+++ b/.changelog/v0.11.0/miscellaneous/650-last-block.md
@@ -0,0 +1,2 @@
+- Improve some docstrings relating to block heights
+  ([#650](https://github.com/anoma/namada/pull/650))
\ No newline at end of file
diff --git a/.changelog/v0.11.0/summary.md b/.changelog/v0.11.0/summary.md
new file mode 100644
index 0000000000..e9ba3c2763
--- /dev/null
+++ b/.changelog/v0.11.0/summary.md
@@ -0,0 +1 @@
+Namada 0.11.0 is a scheduled minor release.
diff --git a/.changelog/unreleased/testing/694-dont-spawn-internal-account-vps.md b/.changelog/v0.11.0/testing/694-dont-spawn-internal-account-vps.md
similarity index 100%
rename from .changelog/unreleased/testing/694-dont-spawn-internal-account-vps.md
rename to .changelog/v0.11.0/testing/694-dont-spawn-internal-account-vps.md
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 43d9f3ea57..fa3e5b67ed 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -1,6 +1,6 @@
 ---
 name: Bug Report
-about: Create a bug report for Anoma.
+about: Create a bug report for Namada.
 labels: bug
 ---
 
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
index 0dbe6b2260..11a3a12a47 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -1,6 +1,6 @@
 ---
 name: Feature Request
-about: Request a new feature in Anoma.
+about: Request a new feature in Namada.
 labels: enhancement
 ---
 
diff --git a/.github/workflows/build-and-test-bridge.yml b/.github/workflows/build-and-test-bridge.yml
index affdf5b523..2f9929cb0a 100644
--- a/.github/workflows/build-and-test-bridge.yml
+++ b/.github/workflows/build-and-test-bridge.yml
@@ -30,7 +30,7 @@ jobs:
     timeout-minutes: 30
     runs-on: ${{ matrix.os }}
     container:
-      image: ghcr.io/anoma/namada:wasm-0.8.0
+      image: ghcr.io/anoma/namada:wasm-0.11.0
     strategy:
       fail-fast: false
       matrix:
@@ -136,7 +136,7 @@ jobs:
       BUCKET_NAME: namada-wasm-master
       AWS_REGION: eu-west-1
 
-  anoma-eth:
+  namada-eth:
     runs-on: ${{ matrix.os }}
     timeout-minutes: 80
    needs: [build-wasm]
@@ -144,7 +144,7 @@ jobs:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
-        nightly_version: [nightly-2022-05-20]
+        nightly_version: [nightly-2022-11-03]
        mold_version: [1.7.0]
        make:
          - name: ABCI
@@ -240,7 +240,7 @@ jobs:
      if: always()
      run: sccache --stop-server || true
 
-  anoma-release-eth:
+  namada-release-eth:
    runs-on: ${{ matrix.os }}
    timeout-minutes: 25
    strategy:
@@ -342,7 +342,7 @@ jobs:
      run: sccache --stop-server || true
 
 
-  anoma-e2e-eth:
+  namada-e2e-eth:
    runs-on: ${{ matrix.os }}
    timeout-minutes: 80
    strategy:
@@ -358,14 +358,14 @@ jobs:
          cache_key: anoma
          cache_version: v2
          tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377
-          wait_for: anoma-release-eth (ubuntu-latest, 1.7.0, ABCI Release build, anoma-e2e-release, v2)
+          wait_for: namada-release-eth (ubuntu-latest, 1.7.0, ABCI Release build, namada-e2e-release, v2)
        - name: e2e
          suffix: ''
          index: 1
          cache_key: anoma
          cache_version: v2
          tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377
-          wait_for: anoma-release-eth (ubuntu-latest, 1.7.0, ABCI Release build, anoma-e2e-release, v2)
+          wait_for: namada-release-eth (ubuntu-latest, 1.7.0, ABCI Release build, namada-e2e-release, v2)
 
    env:
      CARGO_INCREMENTAL: 0
@@ -466,7 +466,7 @@ jobs:
        with:
          name: wasm-${{ github.event.pull_request.head.sha|| github.sha }}
          path: ./wasm
-      - name: Download anoma binaries
+      - name: Download namada binaries
        uses: actions/download-artifact@v3
        with:
          name: binaries${{ matrix.make.suffix }}-${{ github.event.pull_request.head.sha || github.sha }}
@@ -481,13 +481,13 @@ jobs:
      - name: Run e2e test
        run: python3 .github/workflows/scripts/schedule-e2e.py
        env:
-          ANOMA_TENDERMINT_WEBSOCKET_TIMEOUT: 20
-          ANOMA_E2E_USE_PREBUILT_BINARIES: "true"
-          ANOMA_E2E_KEEP_TEMP: "true"
-          ANOMA_TM_STDOUT: "false"
-          ANOMA_LOG_COLOR: "false"
-          ANOMA_MASP_PARAMS_DIR: "/home/runner/work/masp"
-          ANOMA_LOG: "info"
+          NAMADA_TENDERMINT_WEBSOCKET_TIMEOUT: 20
+          NAMADA_E2E_USE_PREBUILT_BINARIES: "true"
+          NAMADA_E2E_KEEP_TEMP: "true"
+          NAMADA_TM_STDOUT: "false"
+          NAMADA_LOG_COLOR: "false"
+          NAMADA_MASP_PARAMS_DIR: "/home/runner/work/masp"
+          NAMADA_LOG: "info"
          RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold"
          INDEX: ${{ matrix.make.index }}
      - name: Upload e2e logs
@@ -497,11 +497,11 @@ jobs:
          name: logs-e2e-${{ matrix.make.index }}-${{ github.event.pull_request.head.sha || github.sha }}
          path: |
            /tmp/.*/logs/
-            /tmp/.*/e2e-test.*/setup/validator-*/.anoma/logs/*.log
+            /tmp/.*/e2e-test.*/setup/validator-*/.namada/logs/*.log
          retention-days: 5
      - name: Print sccache stats
        if: always()
        run: sccache --show-stats
      - name: Stop sccache server
        if: always()
-        run: sccache --stop-server || true
\ No newline at end of file
+        run: sccache --stop-server || true
diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml
index f19d842a4d..d2be12f726 100644
--- a/.github/workflows/build-and-test.yml
+++ b/.github/workflows/build-and-test.yml
@@ -31,7 +31,7 @@ jobs:
    timeout-minutes: 30
    runs-on: ${{ matrix.os }}
    container:
-      image: ghcr.io/anoma/namada:wasm-0.8.0
+      image: ghcr.io/anoma/namada:wasm-0.11.0
    strategy:
      fail-fast: false
      matrix:
@@ -138,7 +138,7 @@ jobs:
      BUCKET_NAME: namada-wasm-master
      AWS_REGION: eu-west-1
 
-  anoma:
+  namada:
    runs-on: ${{ matrix.os }}
    timeout-minutes: 80
    needs: [build-wasm]
@@ -146,7 +146,7 @@ jobs:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
-        nightly_version: [nightly-2022-05-20]
+        nightly_version: [nightly-2022-11-03]
        mold_version: [1.7.0]
        make:
          - name: ABCI
@@ -242,7 +242,7 @@ jobs:
      if: always()
      run: sccache --stop-server || true
 
-  anoma-release:
+  namada-release:
    runs-on: ${{ matrix.os }}
    timeout-minutes: 25
    strategy:
@@ -344,7 +344,7 @@ jobs:
      run: sccache --stop-server || true
 
 
-  anoma-e2e:
+  namada-e2e:
    runs-on: ${{ matrix.os }}
    timeout-minutes: 80
    strategy:
@@ -360,14 +360,14 @@ jobs:
          cache_key: anoma
          cache_version: v2
          tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377
-          wait_for: anoma-release (ubuntu-latest, 1.7.0, ABCI Release build, anoma-e2e-release, v2)
+          wait_for: namada-release (ubuntu-latest, 1.7.0, ABCI Release build, namada-e2e-release, v2)
        - name: e2e
          suffix: ''
          index: 1
          cache_key: anoma
          cache_version: v2
          tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377
-          wait_for: anoma-release (ubuntu-latest, 1.7.0, ABCI Release build, anoma-e2e-release, v2)
+          wait_for: namada-release (ubuntu-latest, 1.7.0, ABCI Release build, namada-e2e-release, v2)
 
    env:
      CARGO_INCREMENTAL: 0
@@ -468,7 +468,7 @@ jobs:
        with:
          name: wasm-${{ github.event.pull_request.head.sha|| github.sha }}
          path: ./wasm
-      - name: Download anoma binaries
+      - name: Download namada binaries
        uses: actions/download-artifact@v3
        with:
          name: binaries${{ matrix.make.suffix }}-${{ github.event.pull_request.head.sha || github.sha }}
@@ -483,13 +483,13 @@ jobs:
      - name: Run e2e test
        run: python3 .github/workflows/scripts/schedule-e2e.py
        env:
-          ANOMA_TENDERMINT_WEBSOCKET_TIMEOUT: 20
-          ANOMA_E2E_USE_PREBUILT_BINARIES: "true"
-          ANOMA_E2E_KEEP_TEMP: "true"
-          ANOMA_TM_STDOUT: "false"
-          ANOMA_LOG_COLOR: "false"
-          ANOMA_MASP_PARAMS_DIR: "/home/runner/work/masp"
-          ANOMA_LOG: "info"
+          NAMADA_TENDERMINT_WEBSOCKET_TIMEOUT: 20
+          NAMADA_E2E_USE_PREBUILT_BINARIES: "true"
+          NAMADA_E2E_KEEP_TEMP: "true"
+          NAMADA_TM_STDOUT: "false"
+          NAMADA_LOG_COLOR: "false"
+          NAMADA_MASP_PARAMS_DIR: "/home/runner/work/masp"
+          NAMADA_LOG: "info"
          RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold"
          INDEX: ${{ matrix.make.index }}
      - name: Upload e2e logs
@@ -499,11 +499,11 @@ jobs:
          name: logs-e2e-${{ matrix.make.index }}-${{ github.event.pull_request.head.sha || github.sha }}
          path: |
            /tmp/.*/logs/
-            /tmp/.*/e2e-test.*/setup/validator-*/.anoma/logs/*.log
+            /tmp/.*/e2e-test.*/setup/validator-*/.namada/logs/*.log
          retention-days: 5
      - name: Print sccache stats
        if: always()
        run: sccache --show-stats
      - name: Stop sccache server
        if: always()
-        run: sccache --stop-server || true
\ No newline at end of file
+        run: sccache --stop-server || true
diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml
index 22792c8d9b..7364a477fa 100644
--- a/.github/workflows/checks.yml
+++ b/.github/workflows/checks.yml
@@ -27,7 +27,7 @@ jobs:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
-        nightly_version: [nightly-2022-05-20]
+        nightly_version: [nightly-2022-11-03]
        make:
          - name: Clippy
            command: clippy
diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml
index 51e19208e3..260b0de4c6 100644
--- a/.github/workflows/cron.yml
+++ b/.github/workflows/cron.yml
@@ -20,7 +20,7 @@ jobs:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
-        nightly_version: [nightly-2022-05-20]
+        nightly_version: [nightly-2022-11-03]
        make:
          - name: Audit
            command: audit
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index cb8633c99a..b015bbd7cc 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -27,7 +27,7 @@ jobs:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
-        nightly_version: [nightly-2022-05-20]
+        nightly_version: [nightly-2022-11-03]
        mdbook_version: [rust-lang/mdbook@v0.4.18]
        mdbook_mermaid: [badboy/mdbook-mermaid@v0.11.1]
        mdbook_linkcheck: [Michael-F-Bryan/mdbook-linkcheck@v0.7.6]
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 5cbc1e9207..51bd8ea3e8 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -19,7 +19,7 @@ jobs:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest]
-        anoma_cache_version: [v1]
+        namada_cache_version: [v1]
        make:
          - name: Build package
            command: package
@@ -70,8 +70,8 @@ jobs:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
-          key: ${{ runner.os }}-anoma-release-${{ matrix.anoma_cache_version }}-${{ hashFiles('**/Cargo.lock') }}
-          restore-keys: ${{ runner.os }}-anoma-release-${{ matrix.anoma_cache_version }}
+          key: ${{ runner.os }}-namada-release-${{ matrix.namada_cache_version }}-${{ hashFiles('**/Cargo.lock') }}
+          restore-keys: ${{ runner.os }}-namada-release-${{ matrix.namada_cache_version }}
      - name: Start sccache server
        run: sccache --start-server
      - name: ${{ matrix.make.name }}
diff --git a/.github/workflows/scripts/e2e.json b/.github/workflows/scripts/e2e.json
index fd8939ba9a..99a41ad092 100644
--- a/.github/workflows/scripts/e2e.json
+++ b/.github/workflows/scripts/e2e.json
@@ -13,7 +13,7 @@
    "e2e::ledger_tests::proposal_submission": 35,
    "e2e::ledger_tests::run_ledger": 5,
    "e2e::ledger_tests::run_ledger_load_state_and_reset": 5,
-    "e2e::ledger_tests::test_anoma_shuts_down_if_tendermint_dies": 2,
+    "e2e::ledger_tests::test_namada_shuts_down_if_tendermint_dies": 2,
    "e2e::ledger_tests::test_genesis_validators": 9,
    "e2e::ledger_tests::test_node_connectivity_and_consensus": 20,
    "e2e::wallet_tests::wallet_address_cmds": 1,
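The `ANOMA_*` → `NAMADA_*` rename in the workflows above also applies when running one of the e2e tests listed in `e2e.json` locally. A sketch, assuming the tests are driven through `cargo test` as the workflows' prebuilt-binaries setup suggests:

```shell
# Run a single e2e test with the renamed NAMADA_* variables; the variable
# names come from this diff, but the exact cargo invocation is an assumption.
NAMADA_E2E_KEEP_TEMP=true NAMADA_TENDERMINT_WEBSOCKET_TIMEOUT=20 \
  cargo test e2e::ledger_tests::run_ledger
```
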
diff --git a/.gitignore b/.gitignore
index 4718c258b2..dec9856215 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
-# Anoma default home dir
+# Namada default home dir
+/.namada
 /.anoma
 
 # Generated by Cargo
@@ -7,8 +8,8 @@ debug/
 target/
 
 # Release packages
-/anoma-*/
-/anoma-*.tar.gz
+/namada-*/
+/namada-*.tar.gz
 
 # These are backup files generated by rustfmt
 **/*.rs.bk
@@ -26,4 +27,4 @@ target/
 wasm/*.wasm
 
 # app version string file
-/apps/version.rs
\ No newline at end of file
+/apps/version.rs
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7a9cf655a0..c11e94955a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,9 +1,80 @@
 # CHANGELOG
 
+## v0.11.0
+
+Namada 0.11.0 is a scheduled minor release.
+
+### BUG FIXES
+
+- Fix building with the `abcipp` feature again
+  ([#754](https://github.com/anoma/namada/pull/754))
+- Fixed validation of a validator initialization transaction.
+  ([#763](https://github.com/anoma/namada/pull/763))
+
+### FEATURES
+
+- Allow the native token to be set via genesis configuration.
+  ([#582](https://github.com/anoma/namada/pull/582))
+- Added a validity predicate for implicit accounts. This is set in
+  protocol parameters and may be changed via governance. Additionally,
+  added automatic public key reveal in the client when an implicit
+  account that hasn't revealed its PK yet is used as a source. It's also
+  possible to manually submit a reveal transaction with a client command.
+  ([#592](https://github.com/anoma/namada/pull/592))
+- PoS: Removed staking reward addresses in preparation for the auto-staked
+  rewards system. ([#687](https://github.com/anoma/namada/pull/687))
+- Allow a validator's commission rate to be set, along with a limit on the
+  change of commission rate per epoch. The commission rate can be changed via
+  a transaction authorized by the validator, but the limit is an immutable
+  value, set when the validator's account is initialized. ([#695](https://github.com/anoma/namada/pull/695))
+- Optimize the PoS code to depend only on bonded stake, removing
+  the VotingPower(Delta) structs. This mitigates some previous
+  information loss in PoS calculations. Instead, the notion of
+  voting power is only relevant when communicating with Tendermint.
+  ([#707](https://github.com/anoma/namada/pull/707))
+- Update the set of parameters in the PoS system according to the
+  latest spec and standardize the use of the `rust_decimal` crate
+  for parameters and calculations that require fractional numbers.
+  ([#708](https://github.com/anoma/namada/pull/708))
+- Renamed transaction CLI arguments `--fee-amount` and `--fee-token` to
+  `--gas-amount` and `--gas-token`. ([#775](https://github.com/anoma/namada/pull/775))
+
+### IMPROVEMENTS
+
+- Refactored token decimal formatting.
+  ([#436](https://github.com/anoma/namada/pull/436))
+- Added PoS-specific queries ([#570](https://github.com/anoma/namada/pull/570))
+- Added a custom events store and replaced the WebSocket client for
+  transaction results with query endpoints to the events store.
+  ([#674](https://github.com/anoma/namada/pull/674))
+- Refactored governance code to use storage_api.
+  ([#719](https://github.com/anoma/namada/pull/719))
+- Public parts of the shared `namada` crate have been split out into a
+  `namada_core` crate. The `namada_proof_of_stake`, `namada_vp_prelude`
+  and `namada_tx_prelude` crates now depend on this `namada_core` crate.
+  ([#733](https://github.com/anoma/namada/pull/733))
+- Sign over the hash of the code rather than the code itself in transaction
+  signing. ([#807](https://github.com/anoma/namada/pull/807))
+
+### MISCELLANEOUS
+
+- Improve some docstrings relating to block heights
+  ([#650](https://github.com/anoma/namada/pull/650))
+
+### TESTING
+
+- Don't fake a wasm VP for internal addresses in tx tests
+  ([#694](https://github.com/anoma/namada/pull/694))
+
 ## v0.10.1
 
 Namada 0.10.1 is a point release with fixes to shielded transactions.
 
+### BUG FIXES
+
+- Avoid reading from nonexistent storage keys in shielded-to-shielded transfers.
+  ([#797](https://github.com/anoma/namada/pull/797))
+
 ## v0.10.0
 
 Namada 0.10.0 is a scheduled minor release, focused on IBC and MASP
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c00d3f08ec..027c8dc4da 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,6 @@
-# Contributing to Anoma
+# Contributing to Namada
 
-Thank you for the interest in contributing to Anoma!
+Thank you for your interest in contributing to Namada!
 
 All contributors are expected to follow the [Code of Conduct](CODE_OF_CONDUCT.md).
 
@@ -12,7 +12,7 @@ Every pull request should start with an issue. A pull request should be as atomi
 
 ### Changelog
 
-To track changes in Anoma and provide a nicely formatted change log with the releases, we utilize the [unclog CLI tool](https://github.com/informalsystems/unclog). Please do not modify the [change log](CHANGELOG.md) in your PRs, this file will be updated by the repository maintainers.
+To track changes in Namada and provide a nicely formatted change log with the releases, we utilize the [unclog CLI tool](https://github.com/informalsystems/unclog). Please do not modify the [change log](CHANGELOG.md) in your PRs; this file will be updated by the repository maintainers.
 
 With every PR, please make a separate commit that adds a record in the `.changelog` directory with a section that this PR belongs to together with a high-level description of the change.
 
@@ -37,7 +37,7 @@ unclog add \
   --message
 ```
 
-The message should be a high-level description of the changes that should explain the scope of the change and affected components to Anoma's users (while git commit messages should target developers).
+The message should be a high-level description of the changes that should explain the scope of the change and affected components to Namada's users (while git commit messages should target developers).
 
 If none of the sections fit, new sections may be added. To find the existing section names, you can use e.g.:
diff --git a/Cargo.lock b/Cargo.lock
index a8a2609943..8315106a65 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2424,8 +2424,7 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
 [[package]]
 name = "funty"
 version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1847abb9cb65d566acd5942e94aea9c8f547ad02c98e1649326fc0e8910b8b1e"
+source = "git+https://github.com/bitvecto-rs/funty/?rev=7ef0d890fbcd8b3def1635ac1a877fc298488446#7ef0d890fbcd8b3def1635ac1a877fc298488446"
 
 [[package]]
 name = "funty"
@@ -4031,58 +4030,41 @@ dependencies = [
 [[package]]
 name = "namada"
-version = "0.10.1"
+version = "0.11.0"
 dependencies = [
- "ark-bls12-381",
- "ark-ec",
- "ark-serialize",
  "assert_matches",
  "async-trait",
- "bech32",
  "bellman",
- "bit-vec",
  "bls12_381",
  "borsh",
  "byte-unit",
- "chrono",
  "circular-queue",
  "clru",
  "data-encoding",
  "derivative",
- "ed25519-consensus",
- "ethabi",
  "eyre",
- "ferveo",
- "ferveo-common",
- "group-threshold-cryptography",
- "hex",
  "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)",
  "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)",
  "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)",
  "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)",
- "ics23",
  "itertools",
  "libsecp256k1",
  "loupe",
  "masp_primitives",
  "masp_proofs",
+ "namada_core",
  "namada_proof_of_stake",
  "num-rational 0.4.1",
- "parity-wasm",
+ "parity-wasm 0.45.0",
  "paste",
  "pretty_assertions",
  "proptest",
  "prost",
- "prost-types",
  "pwasm-utils",
- "rand 0.8.5",
- "rand_core 0.6.4",
  "rayon",
  "rust_decimal",
- "serde 1.0.147",
  "serde_json",
  "sha2 0.9.9",
- "sparse-merkle-tree",
  "tempfile",
  "tendermint 0.23.5",
  "tendermint 0.23.6",
@@ -4095,7 +4077,6 @@ dependencies = [
  "tiny-keccak",
  "tokio",
  "toml",
- "tonic-build",
  "tracing 0.1.37",
  "tracing-subscriber 0.3.16",
  "wasmer",
@@ -4110,7 +4091,7 @@ dependencies = [
 [[package]]
 name = "namada_apps"
-version = "0.10.1"
+version = "0.11.0"
 dependencies = [
[ "ark-serialize", "ark-std", @@ -4165,6 +4146,8 @@ dependencies = [ "rlimit", "rocksdb", "rpassword", + "rust_decimal", + "rust_decimal_macros", "semver 1.0.14", "serde 1.0.147", "serde_bytes", @@ -4202,9 +4185,61 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "namada_core" +version = "0.11.0" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-serialize", + "assert_matches", + "bech32", + "bellman", + "bit-vec", + "borsh", + "chrono", + "data-encoding", + "derivative", + "ed25519-consensus", + "ferveo", + "ferveo-common", + "group-threshold-cryptography", + "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", + "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", + "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", + "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", + "ics23", + "itertools", + "libsecp256k1", + "masp_primitives", + "pretty_assertions", + "proptest", + "prost", + "prost-types", + "rand 0.8.5", + "rand_core 0.6.4", + "rayon", + "rust_decimal", + "rust_decimal_macros", + "serde 1.0.147", + "serde_json", + "sha2 0.9.9", + "sparse-merkle-tree", + "tendermint 0.23.5", + "tendermint 0.23.6", + "tendermint-proto 0.23.5", + "tendermint-proto 0.23.6", + "test-log", + "thiserror", + "tonic-build", + "tracing 0.1.37", + "tracing-subscriber 0.3.16", + "zeroize", +] + [[package]] name = "namada_encoding_spec" -version = "0.10.1" +version = "0.11.0" dependencies = [ "borsh", "itertools", @@ -4215,7 +4250,7 @@ dependencies = [ [[package]] name = "namada_macros" -version = "0.10.1" +version = "0.11.0" dependencies = [ "quote", "syn", @@ -4223,17 +4258,21 @@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.10.1" +version = "0.11.0" dependencies = [ "borsh", "derivative", + "namada_core", "proptest", + "rust_decimal", + "rust_decimal_macros", "thiserror", + "tracing 0.1.37", ] [[package]] name = "namada_tests" -version = "0.10.1" +version = "0.11.0" dependencies = [ "assert_cmd", "borsh", @@ -4259,6 +4298,8 @@ dependencies = [ "proptest", "prost", "rand 0.8.5", + "rust_decimal", + "rust_decimal_macros", "serde_json", "sha2 0.9.9", "tempfile", @@ -4275,35 +4316,38 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.10.1" +version = "0.11.0" dependencies = [ "borsh", "masp_primitives", - "namada", + "namada_core", "namada_macros", + "namada_proof_of_stake", "namada_vm_env", + "rust_decimal", "sha2 0.10.6", "thiserror", ] [[package]] name = "namada_vm_env" -version = "0.10.1" +version = "0.11.0" dependencies = [ "borsh", "hex", "masp_primitives", "masp_proofs", - "namada", + "namada_core", ] [[package]] name = "namada_vp_prelude" -version = "0.10.1" +version = "0.11.0" dependencies = [ "borsh", - "namada", + "namada_core", "namada_macros", + "namada_proof_of_stake", "namada_vm_env", "sha2 0.10.6", "thiserror", @@ -4784,6 +4828,12 @@ dependencies = [ "syn", ] +[[package]] +name = "parity-wasm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" + [[package]] name = "parity-wasm" version = "0.45.0" @@ -5259,12 +5309,13 @@ dependencies = [ [[package]] name = "pwasm-utils" -version = "0.20.0" -source = "git+https://github.com/heliaxdev/wasm-utils?tag=v0.20.0#782bfa7fb5e513b602e66af492cbc4cb1b06f2ba" +version = 
"0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "880b3384fb00b8f6ecccd5d358b93bd2201900ae3daad213791d1864f6441f5c" dependencies = [ "byteorder", "log 0.4.17", - "parity-wasm", + "parity-wasm 0.42.2", ] [[package]] @@ -5812,10 +5863,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c" dependencies = [ "arrayvec 0.7.2", + "borsh", "num-traits 0.2.15", "serde 1.0.147", ] +[[package]] +name = "rust_decimal_macros" +version = "1.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4903d8db81d2321699ca8318035d6ff805c548868df435813968795a802171b2" +dependencies = [ + "quote", + "rust_decimal", +] + [[package]] name = "rustc-demangle" version = "0.1.21" diff --git a/Cargo.toml b/Cargo.toml index dea09bc6d0..42a99343fc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ resolver = "2" members = [ "apps", + "core", "proof_of_stake", "shared", "tests", @@ -51,6 +52,9 @@ ibc-relayer = {git = "https://github.com/heliaxdev/ibc-rs.git", rev = "f4703dfe2 # patched to a commit on the `eth-bridge-integration` branch of our fork tower-abci = {git = "https://github.com/heliaxdev/tower-abci.git", rev = "fcc0014d0bda707109901abfa1b2f782d242f082"} +# patched to the yanked 1.2.0 until masp updates bitvec +funty = { git = "https://github.com/bitvecto-rs/funty/", rev = "7ef0d890fbcd8b3def1635ac1a877fc298488446" } + [profile.release] lto = true opt-level = 3 diff --git a/Makefile b/Makefile index 81619c475c..58329ea22b 100644 --- a/Makefile +++ b/Makefile @@ -23,13 +23,13 @@ build-test: $(cargo) build --tests build-release: - ANOMA_DEV=false $(cargo) build --release --package namada_apps --manifest-path Cargo.toml + NAMADA_DEV=false $(cargo) build --release --package namada_apps --manifest-path Cargo.toml install-release: - ANOMA_DEV=false $(cargo) install --path ./apps --locked + NAMADA_DEV=false $(cargo) install --path ./apps --locked check-release: - ANOMA_DEV=false $(cargo) check --release --package namada_apps + NAMADA_DEV=false $(cargo) check --release --package namada_apps package: build-release scripts/make-package.sh @@ -41,16 +41,24 @@ check: make -C $(wasms_for_tests) check && \ $(foreach wasm,$(wasm_templates),$(check-wasm) && ) true +check-abcipp: + $(cargo) check \ + --workspace \ + --exclude namada_tests \ + --all-targets \ + --no-default-features \ + --features "abcipp ibc-mocks-abcipp testing" + clippy-wasm = $(cargo) +$(nightly) clippy --manifest-path $(wasm)/Cargo.toml --all-targets -- -D warnings clippy: - ANOMA_DEV=false $(cargo) +$(nightly) clippy --all-targets -- -D warnings && \ + NAMADA_DEV=false $(cargo) +$(nightly) clippy --all-targets -- -D warnings && \ make -C $(wasms) clippy && \ make -C $(wasms_for_tests) clippy && \ $(foreach wasm,$(wasm_templates),$(clippy-wasm) && ) true clippy-abcipp: - ANOMA_DEV=false $(cargo) +$(nightly) clippy --all-targets \ + NAMADA_DEV=false $(cargo) +$(nightly) clippy --all-targets \ --manifest-path ./apps/Cargo.toml \ --no-default-features \ --features "std testing abcipp" && \ @@ -60,12 +68,11 @@ clippy-abcipp: $(cargo) +$(nightly) clippy --all-targets \ --manifest-path ./shared/Cargo.toml \ --no-default-features \ - --features "testing wasm-runtime abcipp ibc-mocks-abcipp" && \ + --features "testing wasm-runtime abcipp ibc-mocks-abcipp ferveo-tpke" && \ $(cargo) +$(nightly) clippy \ --all-targets \ --manifest-path ./vm_env/Cargo.toml \ - --no-default-features \ - --features "abcipp" && 
+	--no-default-features && \
 	make -C $(wasms) clippy && \
 	$(foreach wasm,$(wasm_templates),$(clippy-wasm) && ) true
 
@@ -73,7 +80,7 @@ clippy-fix:
 	$(cargo) +$(nightly) clippy --fix -Z unstable-options --all-targets --allow-dirty --allow-staged
 
 install: tendermint
-	ANOMA_DEV=false $(cargo) install --path ./apps --locked
+	NAMADA_DEV=false $(cargo) install --path ./apps --locked
 
 tendermint:
 	./scripts/get_tendermint.sh
@@ -191,16 +198,18 @@ build-wasm-scripts-docker: build-wasm-image-docker
 	docker run --rm -v ${PWD}:/__w/namada/namada namada-wasm make build-wasm-scripts
 
 debug-wasm-scripts-docker: build-wasm-image-docker
-	docker run --rm -v ${PWD}:/usr/local/rust/wasm anoma-wasm make debug-wasm-scripts
+	docker run --rm -v ${PWD}:/usr/local/rust/wasm namada-wasm make debug-wasm-scripts
 
 # Build the validity predicate and transactions wasm
 build-wasm-scripts:
+	rm wasm/*.wasm || true
 	make -C $(wasms)
 	make opt-wasm
 	make checksum-wasm
 
-# Debug build the validity predicate, transactions, matchmaker and matchmaker filter wasm
+# Debug build the validity predicate and transactions wasm
 debug-wasm-scripts:
+	rm wasm/*.wasm || true
 	make -C $(wasms) debug
 	make opt-wasm
 	make checksum-wasm
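The new `check-abcipp` target added above gives a one-command way to type-check the workspace with the `abcipp` feature combination that #754 fixed:

```shell
# Type-check everything except namada_tests with the abcipp features,
# exactly as the new Makefile target does.
make check-abcipp
```
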
diff --git a/README.md b/README.md
index b96cb34229..06fe8b9621 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ the form of native protocol tokens. A multi-asset shielded transfer wallet
 is provided in order to facilitate safe and private user
 interaction with the protocol.
 
-* Blogpost: [Introducing Namada: Shielded transfers with any assets](https://medium.com/anomanetwork/introducing-namada-shielded-transfers-with-any-assets-dce2e579384c)
+* Blogpost: [Introducing Namada: Shielded transfers with any assets](https://medium.com/namadanetwork/introducing-namada-shielded-transfers-with-any-assets-dce2e579384c)
 
 ## 📓 Docs
 
@@ -29,13 +29,13 @@ interaction with the protocol.
 
 ## 💾 Installing
 
-There is a single command to build and install Anoma executables from source (the node, the client and the wallet). This command will also verify that a compatible version of [Tendermint](#dependencies) is available and if not, attempt to install it. Note that currently at least 16GB RAM is needed to build from source.
+There is a single command to build and install Namada executables from source (the node, the client and the wallet). This command will also verify that a compatible version of [Tendermint](#dependencies) is available and, if not, attempt to install it. Note that currently at least 16GB of RAM is needed to build from source.
 
 ```shell
 make install
 ```
 
-After installation, the main `anoma` executable will be available on path.
+After installation, the main `namada` executable will be available on your path.
 
 To find how to use it, check out the [User Guide section of the docs](https://docs.namada.net/user-guide/index.html).
 
@@ -49,9 +49,9 @@ Guide.
 # Build the provided validity predicate and transaction wasm modules
 make build-wasm-scripts-docker
 
-# Development (debug) build Anoma, which includes a validator and some default
+# Development (debug) build Namada, which includes a validator and some default
 # accounts, whose keys and addresses are available in the wallet
-ANOMA_DEV=true make
+NAMADA_DEV=true make
 ```
 
 ### Before submitting a PR, please make sure to run the following
 
@@ -66,7 +66,7 @@ make clippy
 
 ## 🧾 Logging
 
-To change the log level, set the `ANOMA_LOG` environment variable to one of:
+To change the log level, set the `NAMADA_LOG` environment variable to one of:
 
 * `error`
 * `warn`
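As with the other `ANOMA_*` → `NAMADA_*` renames, the logging variable is now `NAMADA_LOG`. For example, to start the node with info-level logs — a sketch using the `namadan` binary defined in `apps/Cargo.toml` below:

```shell
# Set the renamed log-level variable when starting the ledger node.
NAMADA_LOG=info namadan ledger run
```
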
= "pub fn namada_version() -> &'static str { \""; let post = "\" }"; match version_string { Some(version_string) => { @@ -54,10 +54,10 @@ fn main() { // Tell Cargo that if the given file changes, to rerun this build script. println!("cargo:rerun-if-changed={}", PROTO_SRC); - // Tell Cargo to build when the `ANOMA_DEV` env var changes - println!("cargo:rerun-if-env-changed=ANOMA_DEV"); - // Enable "dev" feature if `ANOMA_DEV` is trueish - if let Ok(dev) = env::var("ANOMA_DEV") { + // Tell Cargo to build when the `NAMADA_DEV` env var changes + println!("cargo:rerun-if-env-changed=NAMADA_DEV"); + // Enable "dev" feature if `NAMADA_DEV` is trueish + if let Ok(dev) = env::var("NAMADA_DEV") { if dev.to_ascii_lowercase().trim() == "true" { println!("cargo:rustc-cfg=feature=\"dev\""); } diff --git a/apps/src/bin/anoma-client/cli.rs b/apps/src/bin/namada-client/cli.rs similarity index 86% rename from apps/src/bin/anoma-client/cli.rs rename to apps/src/bin/namada-client/cli.rs index 71cd04c94c..a5cc70451b 100644 --- a/apps/src/bin/anoma-client/cli.rs +++ b/apps/src/bin/namada-client/cli.rs @@ -1,4 +1,4 @@ -//! Anoma client CLI. +//! Namada client CLI. use color_eyre::eyre::Result; use namada_apps::cli; @@ -6,10 +6,10 @@ use namada_apps::cli::cmds::*; use namada_apps::client::{eth_bridge_pool, rpc, tx, utils}; pub async fn main() -> Result<()> { - match cli::anoma_client_cli()? { - cli::AnomaClient::WithContext(cmd_box) => { + match cli::namada_client_cli()? { + cli::NamadaClient::WithContext(cmd_box) => { let (cmd, ctx) = *cmd_box; - use AnomaClientWithContext as Sub; + use NamadaClientWithContext as Sub; match cmd { // Ledger cmds Sub::TxCustom(TxCustom(args)) => { @@ -36,6 +36,9 @@ pub async fn main() -> Result<()> { Sub::TxVoteProposal(TxVoteProposal(args)) => { tx::submit_vote_proposal(ctx, args).await; } + Sub::TxRevealPk(TxRevealPk(args)) => { + tx::submit_reveal_pk(ctx, args).await; + } Sub::Bond(Bond(args)) => { tx::submit_bond(ctx, args).await; } @@ -68,8 +71,11 @@ pub async fn main() -> Result<()> { Sub::QueryBonds(QueryBonds(args)) => { rpc::query_bonds(ctx, args).await; } - Sub::QueryVotingPower(QueryVotingPower(args)) => { - rpc::query_voting_power(ctx, args).await; + Sub::QueryBondedStake(QueryBondedStake(args)) => { + rpc::query_bonded_stake(ctx, args).await; + } + Sub::QueryCommissionRate(QueryCommissionRate(args)) => { + rpc::query_commission_rate(ctx, args).await; } Sub::QuerySlashes(QuerySlashes(args)) => { rpc::query_slashes(ctx, args).await; @@ -92,7 +98,7 @@ pub async fn main() -> Result<()> { } } } - cli::AnomaClient::WithoutContext(cmd, global_args) => match cmd { + cli::NamadaClient::WithoutContext(cmd, global_args) => match cmd { // Utils cmds Utils::JoinNetwork(JoinNetwork(args)) => { utils::join_network(global_args, args).await diff --git a/apps/src/bin/anoma-client/main.rs b/apps/src/bin/namada-client/main.rs similarity index 100% rename from apps/src/bin/anoma-client/main.rs rename to apps/src/bin/namada-client/main.rs diff --git a/apps/src/bin/anoma-node/cli.rs b/apps/src/bin/namada-node/cli.rs similarity index 86% rename from apps/src/bin/anoma-node/cli.rs rename to apps/src/bin/namada-node/cli.rs index d2ce7b608a..48f67d3273 100644 --- a/apps/src/bin/anoma-node/cli.rs +++ b/apps/src/bin/namada-node/cli.rs @@ -1,26 +1,26 @@ -//! Anoma node CLI. +//! Namada node CLI. 
 
 use eyre::{Context, Result};
 use namada_apps::cli::{self, cmds};
 use namada_apps::node::ledger;
 
 pub fn main() -> Result<()> {
-    let (cmd, mut ctx) = cli::anoma_node_cli()?;
+    let (cmd, mut ctx) = cli::namada_node_cli()?;
     if let Some(mode) = ctx.global_args.mode.clone() {
         ctx.config.ledger.tendermint.tendermint_mode = mode;
     }
     match cmd {
-        cmds::AnomaNode::Ledger(sub) => match sub {
+        cmds::NamadaNode::Ledger(sub) => match sub {
             cmds::Ledger::Run(_) => {
                 let wasm_dir = ctx.wasm_dir();
                 ledger::run(ctx.config.ledger, wasm_dir);
             }
             cmds::Ledger::Reset(_) => {
                 ledger::reset(ctx.config.ledger)
-                    .wrap_err("Failed to reset Anoma node")?;
+                    .wrap_err("Failed to reset Namada node")?;
             }
         },
-        cmds::AnomaNode::Config(sub) => match sub {
+        cmds::NamadaNode::Config(sub) => match sub {
             cmds::Config::Gen(cmds::ConfigGen) => {
                 // If the config doesn't exist, it gets generated in the context.
                 // In here, we just need to overwrite the default chain ID, in
diff --git a/apps/src/bin/anoma-node/main.rs b/apps/src/bin/namada-node/main.rs
similarity index 100%
rename from apps/src/bin/anoma-node/main.rs
rename to apps/src/bin/namada-node/main.rs
diff --git a/apps/src/bin/anoma-wallet/README.md b/apps/src/bin/namada-wallet/README.md
similarity index 95%
rename from apps/src/bin/anoma-wallet/README.md
rename to apps/src/bin/namada-wallet/README.md
index 147cecd4fe..ec42ae2346 100644
--- a/apps/src/bin/anoma-wallet/README.md
+++ b/apps/src/bin/namada-wallet/README.md
@@ -1,4 +1,4 @@
-# Anoma CLI wallet
+# Namada CLI wallet
 
 ## Features
 
diff --git a/apps/src/bin/anoma-wallet/cli.rs b/apps/src/bin/namada-wallet/cli.rs
similarity index 98%
rename from apps/src/bin/anoma-wallet/cli.rs
rename to apps/src/bin/namada-wallet/cli.rs
index 970cf8d908..82a994b0ac 100644
--- a/apps/src/bin/anoma-wallet/cli.rs
+++ b/apps/src/bin/namada-wallet/cli.rs
@@ -1,4 +1,4 @@
-//! Anoma Wallet CLI.
+//! Namada Wallet CLI.
 
 use std::fs::File;
 use std::io::{self, Write};
@@ -16,9 +16,9 @@ use namada_apps::wallet::{DecryptionError, FindKeyError};
 use rand_core::OsRng;
 
 pub fn main() -> Result<()> {
-    let (cmd, ctx) = cli::anoma_wallet_cli()?;
+    let (cmd, ctx) = cli::namada_wallet_cli()?;
     match cmd {
-        cmds::AnomaWallet::Key(sub) => match sub {
+        cmds::NamadaWallet::Key(sub) => match sub {
             cmds::WalletKey::Gen(cmds::KeyGen(args)) => {
                 key_and_address_gen(ctx, args)
             }
@@ -28,7 +28,7 @@ pub fn main() -> Result<()> {
                 key_export(ctx, args)
             }
         },
-        cmds::AnomaWallet::Address(sub) => match sub {
+        cmds::NamadaWallet::Address(sub) => match sub {
             cmds::WalletAddress::Gen(cmds::AddressGen(args)) => {
                 key_and_address_gen(ctx, args)
             }
@@ -40,7 +40,7 @@ pub fn main() -> Result<()> {
                 address_add(ctx, args)
             }
         },
-        cmds::AnomaWallet::Masp(sub) => match sub {
+        cmds::NamadaWallet::Masp(sub) => match sub {
             cmds::WalletMasp::GenSpendKey(cmds::MaspGenSpendKey(args)) => {
                 spending_key_gen(ctx, args)
             }
@@ -455,8 +455,7 @@ fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) {
             message."
        );
    } else if args.alias.is_some() {
-        if let Some(address) =
-            wallet.find_address(&args.alias.as_ref().unwrap())
+        if let Some(address) = wallet.find_address(args.alias.as_ref().unwrap())
        {
            println!("Found address {}", address.to_pretty_string());
        } else {
diff --git a/apps/src/bin/anoma-wallet/main.rs b/apps/src/bin/namada-wallet/main.rs
similarity index 100%
rename from apps/src/bin/anoma-wallet/main.rs
rename to apps/src/bin/namada-wallet/main.rs
diff --git a/apps/src/bin/anoma/cli.rs b/apps/src/bin/namada/cli.rs
similarity index 81%
rename from apps/src/bin/anoma/cli.rs
rename to apps/src/bin/namada/cli.rs
index 5fbf363c33..88d09da0cf 100644
--- a/apps/src/bin/anoma/cli.rs
+++ b/apps/src/bin/namada/cli.rs
@@ -1,8 +1,8 @@
-//! Anoma CLI.
+//! Namada CLI.
 //!
 //! This CLI groups together the most commonly used commands inlined from the
 //! node and the client. The other commands for the node, client and wallet can
-//! be dispatched via `anoma node ...`, `anoma client ...` or `anoma wallet
+//! be dispatched via `namada node ...`, `namada client ...` or `namada wallet
 //! ...`, respectively.
 
 use std::env;
@@ -12,18 +12,18 @@ use eyre::Result;
 use namada_apps::cli;
 
 pub fn main() -> Result<()> {
-    let (cmd, raw_sub_cmd) = cli::anoma_cli();
+    let (cmd, raw_sub_cmd) = cli::namada_cli();
     handle_command(cmd, raw_sub_cmd)
 }
 
-fn handle_command(cmd: cli::cmds::Anoma, raw_sub_cmd: String) -> Result<()> {
+fn handle_command(cmd: cli::cmds::Namada, raw_sub_cmd: String) -> Result<()> {
     let args = env::args();
 
     let is_bin_sub_cmd = matches!(
         cmd,
-        cli::cmds::Anoma::Node(_)
-            | cli::cmds::Anoma::Client(_)
-            | cli::cmds::Anoma::Wallet(_)
+        cli::cmds::Namada::Node(_)
+            | cli::cmds::Namada::Client(_)
+            | cli::cmds::Namada::Wallet(_)
     );
 
     // Skip the first arg, which is the name of the binary
@@ -39,20 +39,21 @@ fn handle_command(cmd: cli::cmds::Anoma, raw_sub_cmd: String) -> Result<()> {
     }
 
     match cmd {
-        cli::cmds::Anoma::Node(_) | cli::cmds::Anoma::Ledger(_) => {
+        cli::cmds::Namada::Node(_) | cli::cmds::Namada::Ledger(_) => {
             handle_subcommand("namadan", sub_args)
         }
-        cli::cmds::Anoma::Client(_)
-        | cli::cmds::Anoma::TxCustom(_)
-        | cli::cmds::Anoma::TxTransfer(_)
-        | cli::cmds::Anoma::TxIbcTransfer(_)
-        | cli::cmds::Anoma::TxUpdateVp(_)
-        | cli::cmds::Anoma::TxInitProposal(_)
-        | cli::cmds::Anoma::TxVoteProposal(_) => {
+        cli::cmds::Namada::Client(_)
+        | cli::cmds::Namada::TxCustom(_)
+        | cli::cmds::Namada::TxTransfer(_)
+        | cli::cmds::Namada::TxIbcTransfer(_)
+        | cli::cmds::Namada::TxUpdateVp(_)
+        | cli::cmds::Namada::TxRevealPk(_)
+        | cli::cmds::Namada::TxInitProposal(_)
+        | cli::cmds::Namada::TxVoteProposal(_) => {
             handle_subcommand("namadac", sub_args)
         }
-        cli::cmds::Anoma::Wallet(_) => handle_subcommand("namadaw", sub_args),
-        cli::cmds::Anoma::EthBridgePool(_) => {
+        cli::cmds::Namada::Wallet(_) => handle_subcommand("namadaw", sub_args),
+        cli::cmds::Namada::EthBridgePool(_) => {
             handle_subcommand("namadar", sub_args)
         }
     }
@@ -73,8 +74,8 @@ fn handle_subcommand(program: &str, mut sub_args: Vec<String>) -> Result<()> {
     } else {
         // Get the full path to the program to be inside the parent directory of
         // the current process
-        let anoma_path = env::current_exe()?;
-        anoma_path.parent().unwrap().join(program)
+        let namada_path = env::current_exe()?;
+        namada_path.parent().unwrap().join(program)
     };
 
     let mut cmd = Command::new(cmd_name);
diff --git a/apps/src/bin/anoma/main.rs b/apps/src/bin/namada/main.rs
similarity index 100%
rename from apps/src/bin/anoma/main.rs
rename to apps/src/bin/namada/main.rs
diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs
index 41a21cf638..7a34db35ff 100644
--- a/apps/src/lib/cli.rs
+++ b/apps/src/lib/cli.rs
@@ -1,9 +1,9 @@
-//! The CLI commands that are re-used between the executables `anoma`,
-//! `anoma-node` and `anoma-client`.
+//! The CLI commands that are re-used between the executables `namada`,
+//! `namada-node` and `namada-client`.
 //!
-//! The `anoma` executable groups together the most commonly used commands
+//! The `namada` executable groups together the most commonly used commands
 //! inlined from the node and the client. The other commands for the node or the
-//! client can be dispatched via `anoma node ...` or `anoma client ...`,
+//! client can be dispatched via `namada node ...` or `namada client ...`,
 //! respectively.
 
 pub mod context;
@@ -20,7 +20,7 @@ include!("../../version.rs");
 
 const APP_NAME: &str = "Namada";
 
-// Main Anoma sub-commands
+// Main Namada sub-commands
 const NODE_CMD: &str = "node";
 const CLIENT_CMD: &str = "client";
 const WALLET_CMD: &str = "wallet";
@@ -33,14 +33,14 @@ pub mod cmds {
     use super::{args, ArgMatches, CLIENT_CMD, NODE_CMD, WALLET_CMD};
     use crate::cli::BRIDGE_POOL_CMD;
 
-    /// Commands for `anoma` binary.
+    /// Commands for `namada` binary.
     #[allow(clippy::large_enum_variant)]
     #[derive(Clone, Debug)]
-    pub enum Anoma {
+    pub enum Namada {
         // Sub-binary-commands
-        Node(AnomaNode),
-        Client(AnomaClient),
-        Wallet(AnomaWallet),
+        Node(NamadaNode),
+        Client(NamadaClient),
+        Wallet(NamadaWallet),
 
         // Inlined commands from the node.
         EthBridgePool(EthBridgePool),
@@ -53,13 +53,14 @@ pub mod cmds {
         TxUpdateVp(TxUpdateVp),
         TxInitProposal(TxInitProposal),
         TxVoteProposal(TxVoteProposal),
+        TxRevealPk(TxRevealPk),
     }
 
-    impl Cmd for Anoma {
+    impl Cmd for Namada {
         fn add_sub(app: App) -> App {
-            app.subcommand(AnomaNode::def())
-                .subcommand(AnomaClient::def())
-                .subcommand(AnomaWallet::def())
+            app.subcommand(NamadaNode::def())
+                .subcommand(NamadaClient::def())
+                .subcommand(NamadaWallet::def())
                 .subcommand(EthBridgePool::def())
                 .subcommand(Ledger::def())
                 .subcommand(TxCustom::def())
@@ -68,6 +69,7 @@ pub mod cmds {
                 .subcommand(TxUpdateVp::def())
                 .subcommand(TxInitProposal::def())
                 .subcommand(TxVoteProposal::def())
+                .subcommand(TxRevealPk::def())
         }
 
         fn parse(matches: &ArgMatches) -> Option<Self> {
@@ -84,6 +86,7 @@ pub mod cmds {
                 SubCmd::parse(matches).map(Self::TxInitProposal);
             let tx_vote_proposal =
                 SubCmd::parse(matches).map(Self::TxVoteProposal);
+            let tx_reveal_pk = SubCmd::parse(matches).map(Self::TxRevealPk);
             node.or(client)
                 .or(wallet)
                 .or(ledger)
@@ -93,19 +96,20 @@ pub mod cmds {
                 .or(tx_update_vp)
                 .or(tx_init_proposal)
                 .or(tx_vote_proposal)
+                .or(tx_reveal_pk)
         }
     }
 
-    /// Used as top-level commands (`Cmd` instance) in `anoman` binary.
-    /// Used as sub-commands (`SubCmd` instance) in `anoma` binary.
+    /// Used as top-level commands (`Cmd` instance) in `namadan` binary.
+    /// Used as sub-commands (`SubCmd` instance) in `namada` binary.
     #[derive(Clone, Debug)]
     #[allow(clippy::large_enum_variant)]
-    pub enum AnomaNode {
+    pub enum NamadaNode {
         Ledger(Ledger),
         Config(Config),
     }
 
-    impl Cmd for AnomaNode {
+    impl Cmd for NamadaNode {
         fn add_sub(app: App) -> App {
             app.subcommand(Ledger::def()).subcommand(Config::def())
         }
@@ -116,7 +120,7 @@ pub mod cmds {
             ledger.or(config)
         }
     }
-    impl SubCmd for AnomaNode {
+    impl SubCmd for NamadaNode {
         const CMD: &'static str = NODE_CMD;
 
         fn parse(matches: &ArgMatches) -> Option<Self> {
@@ -134,20 +138,20 @@ pub mod cmds {
         }
     }
 
-    /// Used as top-level commands (`Cmd` instance) in `anomac` binary.
-    /// Used as sub-commands (`SubCmd` instance) in `anoma` binary.
+    /// Used as top-level commands (`Cmd` instance) in `namadac` binary.
+    /// Used as sub-commands (`SubCmd` instance) in `namada` binary.
     #[derive(Clone, Debug)]
     #[allow(clippy::large_enum_variant)]
-    pub enum AnomaClient {
+    pub enum NamadaClient {
         /// The [`super::Context`] provides access to the wallet and the
         /// config. It will generate a new wallet and config, if they
         /// don't exist.
-        WithContext(AnomaClientWithContext),
+        WithContext(NamadaClientWithContext),
         /// Utils don't have [`super::Context`], only the global arguments.
         WithoutContext(Utils),
     }
 
-    impl Cmd for AnomaClient {
+    impl Cmd for NamadaClient {
         fn add_sub(app: App) -> App {
             app
                 // Simple transactions
@@ -156,11 +160,12 @@ pub mod cmds {
                 .subcommand(TxIbcTransfer::def().display_order(1))
                 .subcommand(TxUpdateVp::def().display_order(1))
                 .subcommand(TxInitAccount::def().display_order(1))
-                .subcommand(TxInitValidator::def().display_order(1))
+                .subcommand(TxRevealPk::def().display_order(1))
                 // Proposal transactions
                 .subcommand(TxInitProposal::def().display_order(1))
                 .subcommand(TxVoteProposal::def().display_order(1))
                 // PoS transactions
+                .subcommand(TxInitValidator::def().display_order(2))
                 .subcommand(Bond::def().display_order(2))
                 .subcommand(Unbond::def().display_order(2))
                 .subcommand(Withdraw::def().display_order(2))
@@ -173,7 +178,7 @@ pub mod cmds {
                 .subcommand(QueryBlock::def().display_order(3))
                 .subcommand(QueryBalance::def().display_order(3))
                 .subcommand(QueryBonds::def().display_order(3))
-                .subcommand(QueryVotingPower::def().display_order(3))
+                .subcommand(QueryBondedStake::def().display_order(3))
                 .subcommand(QuerySlashes::def().display_order(3))
                 .subcommand(QueryResult::def().display_order(3))
                 .subcommand(QueryRawBytes::def().display_order(3))
@@ -185,7 +190,7 @@ pub mod cmds {
         }
 
         fn parse(matches: &ArgMatches) -> Option<Self> {
-            use AnomaClientWithContext::*;
+            use NamadaClientWithContext::*;
             let tx_custom = Self::parse_with_ctx(matches, TxCustom);
             let tx_transfer = Self::parse_with_ctx(matches, TxTransfer);
             let tx_ibc_transfer = Self::parse_with_ctx(matches, TxIbcTransfer);
@@ -193,6 +198,7 @@ pub mod cmds {
             let tx_init_account = Self::parse_with_ctx(matches, TxInitAccount);
             let tx_init_validator =
                 Self::parse_with_ctx(matches, TxInitValidator);
+            let tx_reveal_pk = Self::parse_with_ctx(matches, TxRevealPk);
             let tx_init_proposal =
                 Self::parse_with_ctx(matches, TxInitProposal);
             let tx_vote_proposal =
@@ -207,8 +213,8 @@ pub mod cmds {
             let query_block = Self::parse_with_ctx(matches, QueryBlock);
             let query_balance = Self::parse_with_ctx(matches, QueryBalance);
             let query_bonds = Self::parse_with_ctx(matches, QueryBonds);
-            let query_voting_power =
-                Self::parse_with_ctx(matches, QueryVotingPower);
+            let query_bonded_stake =
+                Self::parse_with_ctx(matches, QueryBondedStake);
             let query_slashes = Self::parse_with_ctx(matches, QuerySlashes);
             let query_result = Self::parse_with_ctx(matches, QueryResult);
             let query_raw_bytes = Self::parse_with_ctx(matches, QueryRawBytes);
@@ -225,9 +231,10 @@ pub mod cmds {
                 .or(tx_ibc_transfer)
                 .or(tx_update_vp)
                 .or(tx_init_account)
-                .or(tx_init_validator)
+                .or(tx_reveal_pk)
                 .or(tx_init_proposal)
                 .or(tx_vote_proposal)
+                .or(tx_init_validator)
                 .or(bond)
                 .or(unbond)
                 .or(withdraw)
@@ -238,7 +245,7 @@ pub mod cmds {
                 .or(query_block)
                 .or(query_balance)
                 .or(query_bonds)
-                .or(query_voting_power)
+                .or(query_bonded_stake)
                 .or(query_slashes)
                 .or(query_result)
                 .or(query_raw_bytes)
@@ -249,18 +256,18 @@ pub mod cmds {
         }
     }
 
-    impl AnomaClient {
AnomaClient { + impl NamadaClient { /// A helper method to parse sub cmds with context fn parse_with_ctx( matches: &ArgMatches, - sub_to_self: impl Fn(T) -> AnomaClientWithContext, + sub_to_self: impl Fn(T) -> NamadaClientWithContext, ) -> Option { SubCmd::parse(matches) .map(|sub| Self::WithContext(sub_to_self(sub))) } } - impl SubCmd for AnomaClient { + impl SubCmd for NamadaClient { const CMD: &'static str = CLIENT_CMD; fn parse(matches: &ArgMatches) -> Option { @@ -279,7 +286,7 @@ pub mod cmds { } #[derive(Clone, Debug)] - pub enum AnomaClientWithContext { + pub enum NamadaClientWithContext { // Ledger cmds TxCustom(TxCustom), TxTransfer(TxTransfer), @@ -290,6 +297,7 @@ pub mod cmds { TxInitValidator(TxInitValidator), TxInitProposal(TxInitProposal), TxVoteProposal(TxVoteProposal), + TxRevealPk(TxRevealPk), Bond(Bond), Unbond(Unbond), Withdraw(Withdraw), @@ -300,7 +308,8 @@ pub mod cmds { QueryBlock(QueryBlock), QueryBalance(QueryBalance), QueryBonds(QueryBonds), - QueryVotingPower(QueryVotingPower), + QueryBondedStake(QueryBondedStake), + QueryCommissionRate(QueryCommissionRate), QuerySlashes(QuerySlashes), QueryRawBytes(QueryRawBytes), QueryProposal(QueryProposal), @@ -310,7 +319,7 @@ pub mod cmds { #[allow(clippy::large_enum_variant)] #[derive(Clone, Debug)] - pub enum AnomaWallet { + pub enum NamadaWallet { /// Key management commands Key(WalletKey), /// Address management commands @@ -319,7 +328,7 @@ pub mod cmds { Masp(WalletMasp), } - impl Cmd for AnomaWallet { + impl Cmd for NamadaWallet { fn add_sub(app: App) -> App { app.subcommand(WalletKey::def()) .subcommand(WalletAddress::def()) @@ -334,7 +343,7 @@ pub mod cmds { } } - impl SubCmd for AnomaWallet { + impl SubCmd for NamadaWallet { const CMD: &'static str = WALLET_CMD; fn parse(matches: &ArgMatches) -> Option { @@ -793,7 +802,7 @@ pub mod cmds { } fn def() -> App { - App::new(Self::CMD).about("Run Anoma ledger node.") + App::new(Self::CMD).about("Run Namada ledger node.") } } @@ -809,7 +818,7 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD).about( - "Delete Anoma ledger node's and Tendermint node's storage \ + "Delete Namada ledger node's and Tendermint node's storage \ data.", ) } @@ -1056,8 +1065,8 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about( - "Send a signed transaction to create a new validator and \ - its staking reward account.", + "Send a signed transaction to create a new validator \ + account.", ) .add_args::() } @@ -1216,21 +1225,21 @@ pub mod cmds { } #[derive(Clone, Debug)] - pub struct QueryVotingPower(pub args::QueryVotingPower); + pub struct QueryBondedStake(pub args::QueryBondedStake); - impl SubCmd for QueryVotingPower { - const CMD: &'static str = "voting-power"; + impl SubCmd for QueryBondedStake { + const CMD: &'static str = "bonded-stake"; fn parse(matches: &ArgMatches) -> Option { matches.subcommand_matches(Self::CMD).map(|matches| { - QueryVotingPower(args::QueryVotingPower::parse(matches)) + QueryBondedStake(args::QueryBondedStake::parse(matches)) }) } fn def() -> App { App::new(Self::CMD) - .about("Query PoS voting power.") - .add_args::() + .about("Query PoS bonded stake.") + .add_args::() } } @@ -1253,6 +1262,25 @@ pub mod cmds { } } + #[derive(Clone, Debug)] + pub struct QueryCommissionRate(pub args::QueryCommissionRate); + + impl SubCmd for QueryCommissionRate { + const CMD: &'static str = "commission-rate"; + + fn parse(matches: &ArgMatches) -> Option { + matches.subcommand_matches(Self::CMD).map(|matches| { + 
QueryCommissionRate(args::QueryCommissionRate::parse(matches)) + }) + } + + fn def() -> App { + App::new(Self::CMD) + .about("Query commission rate.") + .add_args::() + } + } + #[derive(Clone, Debug)] pub struct QuerySlashes(pub args::QuerySlashes); @@ -1338,6 +1366,36 @@ pub mod cmds { } } + #[derive(Clone, Debug)] + pub struct TxRevealPk(pub args::RevealPk); + + impl SubCmd for TxRevealPk { + const CMD: &'static str = "reveal-pk"; + + fn parse(matches: &ArgMatches) -> Option + where + Self: Sized, + { + matches + .subcommand_matches(Self::CMD) + .map(|matches| TxRevealPk(args::RevealPk::parse(matches))) + } + + fn def() -> App { + App::new(Self::CMD) + .about( + "Submit a tx to reveal the public key of an implicit \ account. Typically, you don't have to do this manually \ and the client will detect when a tx to reveal PK is \ needed and submit it automatically. This will write the \ PK into the account's storage so that it can be used for \ signature verification on transactions authorized by \ this account.", ) + .add_args::() + } + } + #[derive(Clone, Debug)] pub enum Utils { JoinNetwork(JoinNetwork), @@ -1390,7 +1448,7 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) - .about("Configure Anoma to join an existing network.") + .about("Configure Namada to join an existing network.") .add_args::() } } @@ -1448,9 +1506,9 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about( - "Initialize genesis validator's address, staking reward \ - address, consensus key, validator account key and \ - staking rewards key and use it in the ledger's node.", + "Initialize genesis validator's address, consensus key \ + and validator account key and use it in the ledger's \ + node.", ) .add_args::() } } @@ -1582,6 +1640,7 @@ pub mod args { use namada::types::token; use namada::types::token::Amount; use namada::types::transaction::GasLimit; + use rust_decimal::Decimal; use super::context::*; use super::utils::*; @@ -1601,7 +1660,7 @@ pub mod args { const BALANCE_OWNER: ArgOpt = arg_opt("owner"); const BASE_DIR: ArgDefault = arg_default( "base-dir", - DefaultFn(|| match env::var("ANOMA_BASE_DIR") { + DefaultFn(|| match env::var("NAMADA_BASE_DIR") { Ok(dir) => dir.into(), Err(_) => config::DEFAULT_BASE_DIR.into(), }), ); @@ -1613,6 +1672,7 @@ pub mod args { const CHANNEL_ID: Arg = arg("channel-id"); const CODE_PATH: Arg = arg("code-path"); const CODE_PATH_OPT: ArgOpt = CODE_PATH.opt(); + const COMMISSION_RATE: Arg = arg("commission-rate"); const CONSENSUS_TIMEOUT_COMMIT: ArgDefault = arg_default( "consensus-timeout-commit", DefaultFn(|| Timeout::from_str("1s").unwrap()), ); @@ -1630,6 +1690,7 @@ pub mod args { arg_default("fee-amount", DefaultFn(|| token::Amount::from(0))); const FEE_PAYER: Arg = arg("fee-payer"); const FORCE: ArgFlag = flag("force"); + const DONT_PREFETCH_WASM: ArgFlag = flag("dont-prefetch-wasm"); const GAS_AMOUNT: ArgDefault = arg_default("gas-amount", DefaultFn(|| token::Amount::from(0))); const GAS_LIMIT: ArgDefault = @@ -1651,6 +1712,8 @@ pub mod args { const LEDGER_ADDRESS: Arg = arg("ledger-address"); const LOCALHOST: ArgFlag = flag("localhost"); const MASP_VALUE: Arg = arg("value"); + const MAX_COMMISSION_RATE_CHANGE: Arg = + arg("max-commission-rate-change"); const MODE: ArgOpt = arg_opt("mode"); const NET_ADDRESS: Arg = arg("net-address"); const NO_CONVERSIONS: ArgFlag = flag("no-conversions"); @@ -1671,8 +1734,6 @@ pub mod args { const RAW_ADDRESS_OPT: ArgOpt
= RAW_ADDRESS.opt(); const RAW_PUBLIC_KEY_OPT: ArgOpt = arg_opt("public-key"); const RECEIVER: Arg = arg("receiver"); - const REWARDS_CODE_PATH: ArgOpt = arg_opt("rewards-code-path"); - const REWARDS_KEY: ArgOpt = arg_opt("rewards-key"); const SCHEME: ArgDefault = arg_default("scheme", DefaultFn(|| SchemeType::Ed25519)); const SIGNER: ArgOpt = arg_opt("signer"); @@ -1737,18 +1798,18 @@ pub mod args { .arg(BASE_DIR.def().about( "The base directory is where the nodes, client and wallet \ configuration and state is stored. This value can also \ - be set via `ANOMA_BASE_DIR` environment variable, but \ + be set via `NAMADA_BASE_DIR` environment variable, but \ the argument takes precedence, if specified. Defaults to \ - `.anoma`.", + `.namada`.", )) .arg(WASM_DIR.def().about( "Directory with built WASM validity predicates, \ transactions. This value can also be set via \ - `ANOMA_WASM_DIR` environment variable, but the argument \ + `NAMADA_WASM_DIR` environment variable, but the argument \ takes precedence, if specified.", )) .arg(MODE.def().about( - "The mode in which to run Anoma. Options are \n\t * \ + "The mode in which to run Namada. Options are \n\t * \ Validator (default)\n\t * Full\n\t * Seed", )) } @@ -2117,10 +2178,10 @@ pub mod args { pub consensus_key: Option, pub eth_cold_key: Option, pub eth_hot_key: Option, - pub rewards_account_key: Option, pub protocol_key: Option, + pub commission_rate: Decimal, + pub max_commission_rate_change: Decimal, pub validator_vp_code_path: Option, - pub rewards_vp_code_path: Option, pub unsafe_dont_encrypt: bool, } @@ -2133,10 +2194,11 @@ pub mod args { let consensus_key = VALIDATOR_CONSENSUS_KEY.parse(matches); let eth_cold_key = VALIDATOR_ETH_COLD_KEY.parse(matches); let eth_hot_key = VALIDATOR_ETH_HOT_KEY.parse(matches); - let rewards_account_key = REWARDS_KEY.parse(matches); let protocol_key = PROTOCOL_KEY.parse(matches); + let commission_rate = COMMISSION_RATE.parse(matches); + let max_commission_rate_change = + MAX_COMMISSION_RATE_CHANGE.parse(matches); let validator_vp_code_path = VALIDATOR_CODE_PATH.parse(matches); - let rewards_vp_code_path = REWARDS_CODE_PATH.parse(matches); let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); Self { tx, @@ -2146,10 +2208,10 @@ pub mod args { consensus_key, eth_cold_key, eth_hot_key, - rewards_account_key, protocol_key, + commission_rate, + max_commission_rate_change, validator_vp_code_path, - rewards_vp_code_path, unsafe_dont_encrypt, } } @@ -2182,24 +2244,26 @@ pub mod args { be generated if none given. Note that this must be \ secp256k1.", )) - .arg(REWARDS_KEY.def().about( - "A public key for the staking reward account. A new one \ - will be generated if none given.", - )) .arg(PROTOCOL_KEY.def().about( "A public key for signing protocol transactions. A new \ one will be generated if none given.", )) + .arg(COMMISSION_RATE.def().about( + "The commission rate charged by the validator for \ + delegation rewards. Expressed as a decimal between 0 and \ + 1. This is a required parameter.", + )) + .arg(MAX_COMMISSION_RATE_CHANGE.def().about( + "The maximum change per epoch in the commission rate \ + charged by the validator for delegation rewards. \ + Expressed as a decimal between 0 and 1. This is a \ + required parameter.", + )) .arg(VALIDATOR_CODE_PATH.def().about( "The path to the validity predicate WASM code to be used \ for the validator account. 
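Both commission arguments are parsed as `rust_decimal::Decimal` values that must fall in the inclusive range [0, 1]. A minimal sketch of that range check, assuming only the `rust_decimal` crate; it mirrors the validation performed in `submit_init_validator` later in this diff rather than reproducing it:

    use rust_decimal::Decimal;

    /// Reject rates outside the inclusive [0, 1] range.
    fn validate_rate(name: &str, rate: Decimal) -> Result<(), String> {
        if rate < Decimal::ZERO || rate > Decimal::ONE {
            return Err(format!("{name} must be between 0 and 1, got {rate}"));
        }
        Ok(())
    }

    fn main() {
        // `Decimal::new(mantissa, scale)`: 5 * 10^-2 = 0.05.
        assert!(validate_rate("commission-rate", Decimal::new(5, 2)).is_ok());
        // 1.1 is out of range.
        assert!(validate_rate("max-commission-rate-change", Decimal::new(11, 1)).is_err());
    }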
Uses the default validator VP \ if none specified.", )) - .arg(REWARDS_CODE_PATH.def().about( - "The path to the validity predicate WASM code to be used \ - for the staking reward account. Uses the default staking \ - reward VP if none specified.", - )) .arg(UNSAFE_DONT_ENCRYPT.def().about( "UNSAFE: Do not encrypt the generated keypairs. Do not \ use this for keys used in a live network.", @@ -2428,6 +2492,28 @@ pub mod args { } } + #[derive(Clone, Debug)] + pub struct RevealPk { + /// Common tx arguments + pub tx: Tx, + /// A public key to be revealed on-chain + pub public_key: WalletPublicKey, + } + + impl Args for RevealPk { + fn parse(matches: &ArgMatches) -> Self { + let tx = Tx::parse(matches); + let public_key = PUBLIC_KEY.parse(matches); + + Self { tx, public_key } + } + + fn def(app: App) -> App { + app.add_args::() + .arg(PUBLIC_KEY.def().about("A public key to reveal.")) + } + } + #[derive(Clone, Debug)] pub struct QueryProposal { /// Common query args @@ -2720,18 +2806,18 @@ pub mod args { } } - /// Query PoS voting power + /// Query PoS bonded stake #[derive(Clone, Debug)] - pub struct QueryVotingPower { + pub struct QueryBondedStake { /// Common query args pub query: Query, /// Address of a validator pub validator: Option, - /// Epoch in which to find voting power + /// Epoch in which to find bonded stake pub epoch: Option, } - impl Args for QueryVotingPower { + impl Args for QueryBondedStake { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); let validator = VALIDATOR_OPT.parse(matches); @@ -2746,7 +2832,78 @@ pub mod args { fn def(app: App) -> App { app.add_args::() .arg(VALIDATOR_OPT.def().about( - "The validator's address whose voting power to query.", + "The validator's address whose bonded stake to query.", + )) + .arg(EPOCH.def().about( + "The epoch at which to query (last committed, if not \ + specified).", + )) + } + } + + #[derive(Clone, Debug)] + /// Commission rate change args + pub struct TxCommissionRateChange { + /// Common tx arguments + pub tx: Tx, + /// Validator address (should be self) + pub validator: WalletAddress, + /// Value to which the tx changes the commission rate + pub rate: Decimal, + } + + impl Args for TxCommissionRateChange { + fn parse(matches: &ArgMatches) -> Self { + let tx = Tx::parse(matches); + let validator = VALIDATOR.parse(matches); + let rate = COMMISSION_RATE.parse(matches); + Self { + tx, + validator, + rate, + } + } + + fn def(app: App) -> App { + app.add_args::() + .arg(VALIDATOR.def().about( + "The validator's address whose commission rate to change.", + )) + .arg( + COMMISSION_RATE + .def() + .about("The desired new commission rate."), + ) + } + } + + /// Query PoS commission rate + #[derive(Clone, Debug)] + pub struct QueryCommissionRate { + /// Common query args + pub query: Query, + /// Address of a validator + pub validator: WalletAddress, + /// Epoch in which to find commission rate + pub epoch: Option, + } + + impl Args for QueryCommissionRate { + fn parse(matches: &ArgMatches) -> Self { + let query = Query::parse(matches); + let validator = VALIDATOR.parse(matches); + let epoch = EPOCH.parse(matches); + Self { + query, + validator, + epoch, + } + } + + fn def(app: App) -> App { + app.add_args::() + .arg(VALIDATOR.def().about( + "The validator's address whose commission rate to query.", )) .arg(EPOCH.def().about( "The epoch at which to query (last committed, if not \ @@ -2872,7 +3029,7 @@ pub mod args { .arg(GAS_AMOUNT.def().about( "The amount being paid for the inclusion of this transaction", )) 
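`TxCommissionRateChange` carries only the new rate; the check that a change stays within the validator's immutable `max-commission-rate-change` is made client-side against the rate already scheduled for the upcoming epoch (see `submit_validator_commission_change` further down in this diff). A sketch of that comparison, with plain `Decimal` values standing in for the epoched `CommissionRates` lookup:

    use rust_decimal::Decimal;

    fn check_rate_change(
        new_rate: Decimal,
        rate_next_epoch: Decimal, // rate already scheduled for the next epoch
        max_change: Decimal,      // immutable per-validator limit
    ) -> Result<(), String> {
        if new_rate < Decimal::ZERO || new_rate > Decimal::ONE {
            return Err(format!("invalid commission rate {new_rate}"));
        }
        if (new_rate - rate_next_epoch).abs() > max_change {
            return Err("change exceeds the max change per epoch".to_string());
        }
        Ok(())
    }

    fn main() {
        let (current, max) = (Decimal::new(5, 2), Decimal::new(1, 2)); // 0.05, 0.01
        assert!(check_rate_change(Decimal::new(6, 2), current, max).is_ok()); // 0.06
        assert!(check_rate_change(Decimal::new(8, 2), current, max).is_err()); // 0.08
    }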
- .arg(GAS_TOKEN.def().about("The token for paying the fee")) + .arg(GAS_TOKEN.def().about("The token for paying the gas")) .arg( GAS_LIMIT.def().about( "The maximum amount of gas needed to run transaction", @@ -3440,6 +3597,8 @@ pub mod args { #[derive(Clone, Debug)] pub struct InitGenesisValidator { pub alias: String, + pub commission_rate: Decimal, + pub max_commission_rate_change: Decimal, pub net_address: SocketAddr, pub unsafe_dont_encrypt: bool, pub key_scheme: SchemeType, @@ -3448,6 +3607,9 @@ pub mod args { impl Args for InitGenesisValidator { fn parse(matches: &ArgMatches) -> Self { let alias = ALIAS.parse(matches); + let commission_rate = COMMISSION_RATE.parse(matches); + let max_commission_rate_change = + MAX_COMMISSION_RATE_CHANGE.parse(matches); let net_address = NET_ADDRESS.parse(matches); let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); let key_scheme = SCHEME.parse(matches); @@ -3456,6 +3618,8 @@ pub mod args { net_address, unsafe_dont_encrypt, key_scheme, + commission_rate, + max_commission_rate_change, } } @@ -3463,9 +3627,18 @@ pub mod args { app.arg(ALIAS.def().about("The validator address alias.")) .arg(NET_ADDRESS.def().about( "Static {host:port} of your validator node's P2P address. \ - Anoma uses port `26656` for P2P connections by default, \ + Namada uses port `26656` for P2P connections by default, \ but you can configure a different value.", )) + .arg(COMMISSION_RATE.def().about( + "The commission rate charged by the validator for \ + delegation rewards. This is a required parameter.", + )) + .arg(MAX_COMMISSION_RATE_CHANGE.def().about( + "The maximum change per epoch in the commission rate \ + charged by the validator for delegation rewards. This is \ + a required parameter.", + )) .arg(UNSAFE_DONT_ENCRYPT.def().about( "UNSAFE: Do not encrypt the generated keypairs. 
Do not \ use this for keys used in a live network.", @@ -3478,45 +3651,46 @@ pub mod args { } } -pub fn anoma_cli() -> (cmds::Anoma, String) { - let app = anoma_app(); +pub fn namada_cli() -> (cmds::Namada, String) { + let app = namada_app(); let matches = app.get_matches(); let raw_sub_cmd = matches.subcommand().map(|(raw, _matches)| raw.to_string()); - let result = cmds::Anoma::parse(&matches); + let result = cmds::Namada::parse(&matches); match (result, raw_sub_cmd) { (Some(cmd), Some(raw_sub)) => return (cmd, raw_sub), _ => { - anoma_app().print_help().unwrap(); + namada_app().print_help().unwrap(); } } safe_exit(2); } -pub fn anoma_node_cli() -> Result<(cmds::AnomaNode, Context)> { - let app = anoma_node_app(); - cmds::AnomaNode::parse_or_print_help(app) +pub fn namada_node_cli() -> Result<(cmds::NamadaNode, Context)> { + let app = namada_node_app(); + cmds::NamadaNode::parse_or_print_help(app) } -pub enum AnomaClient { +#[allow(clippy::large_enum_variant)] +pub enum NamadaClient { WithoutContext(cmds::Utils, args::Global), - WithContext(Box<(cmds::AnomaClientWithContext, Context)>), + WithContext(Box<(cmds::NamadaClientWithContext, Context)>), } -pub fn anoma_client_cli() -> Result { - let app = anoma_client_app(); - let mut app = cmds::AnomaClient::add_sub(app); +pub fn namada_client_cli() -> Result { + let app = namada_client_app(); + let mut app = cmds::NamadaClient::add_sub(app); let matches = app.clone().get_matches(); match Cmd::parse(&matches) { Some(cmd) => { let global_args = args::Global::parse(&matches); match cmd { - cmds::AnomaClient::WithContext(sub_cmd) => { + cmds::NamadaClient::WithContext(sub_cmd) => { let context = Context::new(global_args)?; - Ok(AnomaClient::WithContext(Box::new((sub_cmd, context)))) + Ok(NamadaClient::WithContext(Box::new((sub_cmd, context)))) } - cmds::AnomaClient::WithoutContext(sub_cmd) => { - Ok(AnomaClient::WithoutContext(sub_cmd, global_args)) + cmds::NamadaClient::WithoutContext(sub_cmd) => { + Ok(NamadaClient::WithoutContext(sub_cmd, global_args)) } } } @@ -3527,46 +3701,46 @@ pub fn anoma_client_cli() -> Result { } } -pub fn anoma_wallet_cli() -> Result<(cmds::AnomaWallet, Context)> { - let app = anoma_wallet_app(); - cmds::AnomaWallet::parse_or_print_help(app) +pub fn namada_wallet_cli() -> Result<(cmds::NamadaWallet, Context)> { + let app = namada_wallet_app(); + cmds::NamadaWallet::parse_or_print_help(app) } -pub fn anoma_relayer_cli() -> Result<(cmds::EthBridgePool, Context)> { - let app = anoma_relayer_app(); +pub fn namada_relayer_cli() -> Result<(cmds::EthBridgePool, Context)> { + let app = namada_relayer_app(); cmds::EthBridgePool::parse_or_print_help(app) } -fn anoma_app() -> App { +fn namada_app() -> App { let app = App::new(APP_NAME) - .version(anoma_version()) - .about("Anoma command line interface.") + .version(namada_version()) + .about("Namada command line interface.") .setting(AppSettings::SubcommandRequiredElseHelp); - cmds::Anoma::add_sub(args::Global::def(app)) + cmds::Namada::add_sub(args::Global::def(app)) } -fn anoma_node_app() -> App { +fn namada_node_app() -> App { let app = App::new(APP_NAME) - .version(anoma_version()) - .about("Anoma node command line interface.") + .version(namada_version()) + .about("Namada node command line interface.") .setting(AppSettings::SubcommandRequiredElseHelp); - cmds::AnomaNode::add_sub(args::Global::def(app)) + cmds::NamadaNode::add_sub(args::Global::def(app)) } -fn anoma_client_app() -> App { +fn namada_client_app() -> App { let app = App::new(APP_NAME) - 
.version(anoma_version()) - .about("Anoma client command line interface.") + .version(namada_version()) + .about("Namada client command line interface.") .setting(AppSettings::SubcommandRequiredElseHelp); - cmds::AnomaClient::add_sub(args::Global::def(app)) + cmds::NamadaClient::add_sub(args::Global::def(app)) } -fn anoma_wallet_app() -> App { +fn namada_wallet_app() -> App { let app = App::new(APP_NAME) - .version(anoma_version()) - .about("Anoma wallet command line interface.") + .version(namada_version()) + .about("Namada wallet command line interface.") .setting(AppSettings::SubcommandRequiredElseHelp); - cmds::AnomaWallet::add_sub(args::Global::def(app)) + cmds::NamadaWallet::add_sub(args::Global::def(app)) } fn anoma_relayer_app() -> App { diff --git a/apps/src/lib/cli/context.rs b/apps/src/lib/cli/context.rs index ed09905b44..e61fda9dfc 100644 --- a/apps/src/lib/cli/context.rs +++ b/apps/src/lib/cli/context.rs @@ -20,9 +20,9 @@ use crate::wallet::Wallet; use crate::wasm_loader; /// Env. var to set chain ID -const ENV_VAR_CHAIN_ID: &str = "ANOMA_CHAIN_ID"; +const ENV_VAR_CHAIN_ID: &str = "NAMADA_CHAIN_ID"; /// Env. var to set wasm directory -pub const ENV_VAR_WASM_DIR: &str = "ANOMA_WASM_DIR"; +pub const ENV_VAR_WASM_DIR: &str = "NAMADA_WASM_DIR"; /// A raw address (bech32m encoding) or an alias of an address that may be found /// in the wallet @@ -73,6 +73,8 @@ pub struct Context { pub config: Config, /// The context fr shielded operations pub shielded: ShieldedContext, + /// Native token's address + pub native_token: Address, } impl Context { @@ -88,14 +90,16 @@ impl Context { let chain_dir = global_args .base_dir - .join(&global_config.default_chain_id.as_str()); + .join(global_config.default_chain_id.as_str()); let genesis_file_path = global_args .base_dir .join(format!("{}.toml", global_config.default_chain_id.as_str())); - let wallet = Wallet::load_or_new_from_genesis( - &chain_dir, - genesis_config::open_genesis_config(&genesis_file_path)?, - ); + let genesis = genesis_config::read_genesis_config(&genesis_file_path); + let native_token = genesis.native_token; + let default_genesis = + genesis_config::open_genesis_config(genesis_file_path)?; + let wallet = + Wallet::load_or_new_from_genesis(&chain_dir, default_genesis); // If the WASM dir specified, put it in the config match global_args.wasm_dir.as_ref() { @@ -115,6 +119,7 @@ impl Context { global_config, config, shielded: ShieldedContext::new(chain_dir), + native_token, }) } diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index f0850b1d00..3fb4d8eea8 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -25,11 +25,9 @@ use masp_primitives::zip32::ExtendedFullViewingKey; use namada::ledger::events::Event; use namada::ledger::governance::parameters::GovParams; use namada::ledger::governance::storage as gov_storage; -use namada::ledger::governance::utils::Votes; +use namada::ledger::native_vp::governance::utils::Votes; use namada::ledger::parameters::{storage as param_storage, EpochDuration}; -use namada::ledger::pos::types::{ - Epoch as PosEpoch, VotingPower, WeightedValidator, -}; +use namada::ledger::pos::types::{decimal_mult_u64, WeightedValidator}; use namada::ledger::pos::{ self, is_validator_slashes_key, BondId, Bonds, PosParams, Slash, Unbonds, }; @@ -53,6 +51,7 @@ use namada::types::transaction::{ WrapperTx, }; use namada::types::{address, storage, token}; +use rust_decimal::Decimal; use tokio::time::{Duration, Instant}; use crate::cli::{self, args, Context}; @@ -130,7 +129,7 
@@ pub async fn query_epoch(args: args::Query) -> Epoch { /// Query the last committed block pub async fn query_block( args: args::Query, -) -> tendermint_rpc::endpoint::block::Response { +) -> crate::facade::tendermint_rpc::endpoint::block::Response { let client = HttpClient::new(args.ledger_address).unwrap(); let response = client.latest_block().await.unwrap(); println!( @@ -776,6 +775,17 @@ pub async fn query_proposal(_ctx: Context, args: args::QueryProposal) { println!("{:4}Status: pending", ""); } else if start_epoch <= current_epoch && current_epoch <= end_epoch { + let votes = get_proposal_votes(client, start_epoch, id).await; + let partial_proposal_result = + compute_tally(client, start_epoch, votes).await; + println!( + "{:4}Yay votes: {}", + "", partial_proposal_result.total_yay_power + ); + println!( + "{:4}Nay votes: {}", + "", partial_proposal_result.total_nay_power + ); println!("{:4}Status: on-going", ""); } else { let votes = get_proposal_votes(client, start_epoch, id).await; @@ -1228,7 +1238,7 @@ pub async fn query_proposal_result( cli::safe_exit(1) } - let file = File::open(&path.join("proposal")) + let file = File::open(path.join("proposal")) .expect("Proposal file must exist."); let proposal: OfflineProposal = serde_json::from_reader(file).expect( @@ -1287,7 +1297,7 @@ pub async fn query_protocol_parameters( println!("Governance Parameters\n {:4}", gov_parameters); println!("Protocol parameters"); - let key = param_storage::get_epoch_storage_key(); + let key = param_storage::get_epoch_duration_storage_key(); let epoch_duration = query_storage_value::(&client, &key) .await .expect("Parameter should be definied."); @@ -1332,12 +1342,12 @@ pub async fn query_protocol_parameters( "", pos_params.block_vote_reward ); println!( - "{:4}Duplicate vote slash rate: {}", - "", pos_params.duplicate_vote_slash_rate + "{:4}Duplicate vote minimum slash rate: {}", + "", pos_params.duplicate_vote_min_slash_rate ); println!( - "{:4}Light client attack slash rate: {}", - "", pos_params.light_client_attack_slash_rate + "{:4}Light client attack minimum slash rate: {}", + "", pos_params.light_client_attack_min_slash_rate ); println!( "{:4}Max. 
validator slots: {}", @@ -1345,7 +1355,7 @@ pub async fn query_protocol_parameters( ); println!("{:4}Pipeline length: {}", "", pos_params.pipeline_len); println!("{:4}Unbonding length: {}", "", pos_params.unbonding_len); - println!("{:4}Votes per token: {}", "", pos_params.votes_per_token); + println!("{:4}Votes per token: {}", "", pos_params.tm_votes_per_token); } /// Query PoS bond(s) @@ -1695,8 +1705,8 @@ pub async fn query_bonds(ctx: Context, args: args::QueryBonds) { } } -/// Query PoS voting power -pub async fn query_voting_power(ctx: Context, args: args::QueryVotingPower) { +/// Query PoS bonded stake +pub async fn query_bonded_stake(ctx: Context, args: args::QueryBondedStake) { let epoch = match args.epoch { Some(epoch) => epoch, None => query_epoch(args.query.clone()).await, @@ -1712,26 +1722,26 @@ pub async fn query_voting_power(ctx: Context, args: args::QueryVotingPower) { let validator_set = validator_sets .get(epoch) .expect("Validator set should be always set in the current epoch"); + match args.validator { Some(validator) => { let validator = ctx.get(&validator); - // Find voting power for the given validator - let voting_power_key = pos::validator_voting_power_key(&validator); - let voting_powers = - query_storage_value::( - &client, - &voting_power_key, - ) - .await; - match voting_powers.and_then(|data| data.get(epoch)) { - Some(voting_power_delta) => { - let voting_power: VotingPower = - voting_power_delta.try_into().expect( - "The sum voting power deltas shouldn't be negative", - ); + // Find bonded stake for the given validator + let validator_deltas_key = pos::validator_deltas_key(&validator); + let validator_deltas = query_storage_value::( + &client, + &validator_deltas_key, + ) + .await; + match validator_deltas.and_then(|data| data.get(epoch)) { + Some(val_stake) => { + let bonded_stake: u64 = val_stake.try_into().expect( + "The sum of the bonded stake deltas shouldn't be \ + negative", + ); let weighted = WeightedValidator { address: validator.clone(), - voting_power, + bonded_stake, }; let is_active = validator_set.active.contains(&weighted); if !is_active { @@ -1740,14 +1750,14 @@ pub async fn query_voting_power(ctx: Context, args: args::QueryVotingPower) { ); } println!( - "Validator {} is {}, voting power: {}", + "Validator {} is {}, bonded stake: {}", validator.encode(), if is_active { "active" } else { "inactive" }, - voting_power + bonded_stake, ) } None => { - println!("No voting power found for {}", validator.encode()) + println!("No bonded stake found for {}", validator.encode()) } } } @@ -1762,7 +1772,7 @@ pub async fn query_voting_power(ctx: Context, args: args::QueryVotingPower) { w, " {}: {}", active.address.encode(), - active.voting_power + active.bonded_stake ) .unwrap(); } @@ -1773,24 +1783,82 @@ pub async fn query_voting_power(ctx: Context, args: args::QueryVotingPower) { w, " {}: {}", inactive.address.encode(), - inactive.voting_power + inactive.bonded_stake ) .unwrap(); } } } } - let total_voting_power_key = pos::total_voting_power_key(); - let total_voting_powers = query_storage_value::( - &client, - &total_voting_power_key, - ) - .await - .expect("Total voting power should always be set"); - let total_voting_power = total_voting_powers + let total_deltas_key = pos::total_deltas_key(); + let total_deltas = + query_storage_value::(&client, &total_deltas_key) + .await + .expect("Total bonded stake should always be set"); + let total_bonded_stake = total_deltas .get(epoch) - .expect("Total voting power should be always set in the current epoch"); 
- println!("Total voting power: {}", total_voting_power); + .expect("Total bonded stake should be always set in the current epoch"); + let total_bonded_stake: u64 = total_bonded_stake + .try_into() + .expect("total_bonded_stake should be a positive value"); + + println!("Total bonded stake: {}", total_bonded_stake); +} + +/// Query PoS validator's commission rate +pub async fn query_commission_rate( + ctx: Context, + args: args::QueryCommissionRate, +) { + let epoch = match args.epoch { + Some(epoch) => epoch, + None => query_epoch(args.query.clone()).await, + }; + let client = HttpClient::new(args.query.ledger_address.clone()).unwrap(); + let validator = ctx.get(&args.validator); + let is_validator = + is_validator(&validator, args.query.ledger_address).await; + + if is_validator { + let validator_commission_key = + pos::validator_commission_rate_key(&validator); + let validator_max_commission_change_key = + pos::validator_max_commission_rate_change_key(&validator); + let commission_rates = query_storage_value::( + &client, + &validator_commission_key, + ) + .await; + let max_rate_change = query_storage_value::( + &client, + &validator_max_commission_change_key, + ) + .await; + let max_rate_change = + max_rate_change.expect("No max rate change found"); + let commission_rates = + commission_rates.expect("No commission rate found "); + match commission_rates.get(epoch) { + Some(rate) => { + println!( + "Validator {} commission rate: {}, max change per epoch: \ + {}", + validator.encode(), + *rate, + max_rate_change, + ) + } + None => { + println!( + "No commission rate found for {} in epoch {}", + validator.encode(), + epoch + ) + } + } + } else { + println!("Cannot find validator with address {}", validator); + } } /// Query PoS slashes @@ -1890,10 +1958,7 @@ pub async fn is_validator( ledger_address: TendermintAddress, ) -> bool { let client = HttpClient::new(ledger_address).unwrap(); - let key = pos::validator_state_key(address); - let state: Option = - query_storage_value(&client, &key).await; - state.is_some() + unwrap_client_response(RPC.vp().pos().is_validator(&client, address).await) } /// Check if a given address is a known delegator @@ -1945,8 +2010,8 @@ pub async fn known_address( fn apply_slashes( slashes: &[Slash], mut delta: token::Amount, - epoch_start: PosEpoch, - withdraw_epoch: Option, + epoch_start: Epoch, + withdraw_epoch: Option, mut w: Option<&mut std::io::StdoutLock>, ) -> token::Amount { let mut slashed = token::Amount::default(); @@ -1963,7 +2028,8 @@ fn apply_slashes( .unwrap(); } let raw_delta: u64 = delta.into(); - let current_slashed = token::Amount::from(slash.rate * raw_delta); + let current_slashed = + token::Amount::from(decimal_mult_u64(slash.rate, raw_delta)); slashed += current_slashed; delta -= current_slashed; } @@ -1997,8 +2063,7 @@ fn process_bonds_query( .unwrap(); delta = apply_slashes(slashes, delta, *epoch_start, None, Some(w)); current_total += delta; - let epoch_start: Epoch = (*epoch_start).into(); - if epoch >= &epoch_start { + if epoch >= epoch_start { total_active += delta; } } @@ -2053,8 +2118,7 @@ fn process_unbonds_query( Some(w), ); current_total += delta; - let epoch_end: Epoch = (*epoch_end).into(); - if epoch > &epoch_end { + if epoch > epoch_end { withdrawable += delta; } } @@ -2352,11 +2416,11 @@ pub async fn query_tx_response( // applied to the blockchain let query_event_opt = response_block_results.end_block_events.and_then(|events| { - (&events) + events .iter() .find(|event| { event.type_str == tx_query.event_type() - && 
(&event.attributes).iter().any(|tag| { + && event.attributes.iter().any(|tag| { tag.key.as_ref() == "hash" && tag.value.as_ref() == tx_query.tx_hash() }) @@ -2371,8 +2435,8 @@ pub async fn query_tx_response( ) })?; // Reformat the event attributes so as to ease value extraction - let event_map: std::collections::HashMap<&str, &str> = (&query_event - .attributes) + let event_map: std::collections::HashMap<&str, &str> = query_event + .attributes .iter() .map(|tag| (tag.key.as_ref(), tag.value.as_ref())) .collect(); @@ -2461,8 +2525,10 @@ pub async fn get_proposal_votes( .expect("Vote key should contain the voting address.") .clone(); if vote.is_yay() && validators.contains(&voter_address) { - let amount = - get_validator_stake(client, epoch, &voter_address).await; + let amount: VotePower = + get_validator_stake(client, epoch, &voter_address) + .await + .into(); yay_validators.insert(voter_address, amount); } else if !validators.contains(&voter_address) { let validator_address = @@ -2536,12 +2602,13 @@ pub async fn get_proposal_offline_votes( if proposal_vote.vote.is_yay() && validators.contains(&proposal_vote.address) { - let amount = get_validator_stake( + let amount: VotePower = get_validator_stake( client, proposal.tally_epoch, &proposal_vote.address, ) - .await; + .await + .into(); yay_validators.insert(proposal_vote.address, amount); } else if is_delegator_at( client, @@ -2569,11 +2636,8 @@ pub async fn get_proposal_offline_votes( .await .unwrap_or_default(); let mut delegated_amount: token::Amount = 0.into(); - let epoch = namada::ledger::pos::types::Epoch::from( - proposal.tally_epoch.0, - ); let bond = epoched_bonds - .get(epoch) + .get(proposal.tally_epoch) .expect("Delegation bond should be defined."); let mut to_deduct = bond.neg_deltas; for (start_epoch, &(mut delta)) in @@ -2639,9 +2703,8 @@ pub async fn compute_tally( epoch: Epoch, votes: Votes, ) -> ProposalResult { - let validators = get_all_validators(client, epoch).await; - let total_stacked_tokens = - get_total_staked_tokes(client, epoch, &validators).await; + let total_staked_tokens: VotePower = + get_total_staked_tokens(client, epoch).await.into(); let Votes { yay_validators, @@ -2649,16 +2712,16 @@ pub async fn compute_tally( nay_delegators, } = votes; - let mut total_yay_stacked_tokens = VotePower::from(0_u64); + let mut total_yay_staked_tokens = VotePower::from(0_u64); for (_, amount) in yay_validators.clone().into_iter() { - total_yay_stacked_tokens += amount; + total_yay_staked_tokens += amount; } // YAY: Add delegator amount whose validator didn't vote / voted nay for (_, vote_map) in yay_delegators.iter() { for (validator_address, vote_power) in vote_map.iter() { if !yay_validators.contains_key(validator_address) { - total_yay_stacked_tokens += vote_power; + total_yay_staked_tokens += vote_power; } } } @@ -2667,23 +2730,23 @@ pub async fn compute_tally( for (_, vote_map) in nay_delegators.iter() { for (validator_address, vote_power) in vote_map.iter() { if yay_validators.contains_key(validator_address) { - total_yay_stacked_tokens -= vote_power; + total_yay_staked_tokens -= vote_power; } } } - if total_yay_stacked_tokens >= (total_stacked_tokens / 3) * 2 { + if total_yay_staked_tokens >= (total_staked_tokens / 3) * 2 { ProposalResult { result: TallyResult::Passed, - total_voting_power: total_stacked_tokens, - total_yay_power: total_yay_stacked_tokens, + total_voting_power: total_staked_tokens, + total_yay_power: total_yay_staked_tokens, total_nay_power: 0, } } else { ProposalResult { result: TallyResult::Rejected, 
- total_voting_power: total_stacked_tokens, - total_yay_power: total_yay_stacked_tokens, + total_voting_power: total_staked_tokens, + total_yay_power: total_yay_staked_tokens, total_nay_power: 0, } } @@ -2730,8 +2793,7 @@ pub async fn get_bond_amount_at( None, None, ); - let epoch_start: Epoch = (*epoch_start).into(); - if epoch >= epoch_start { + if epoch >= *epoch_start { delegated_amount += delta; } } @@ -2745,69 +2807,42 @@ pub async fn get_bond_amount_at( pub async fn get_all_validators( client: &HttpClient, epoch: Epoch, -) -> Vec
{ - let validator_set_key = pos::validator_set_key(); - let validator_sets = - query_storage_value::(client, &validator_set_key) - .await - .expect("Validator set should always be set"); - let validator_set = validator_sets - .get(epoch) - .expect("Validator set should be always set in the current epoch"); - let all_validators = validator_set.active.union(&validator_set.inactive); - all_validators - .map(|validator| validator.address.clone()) - .collect() +) -> HashSet
{ + unwrap_client_response( + RPC.vp() + .pos() + .validator_addresses(client, &Some(epoch)) + .await, + ) } -pub async fn get_total_staked_tokes( +pub async fn get_total_staked_tokens( client: &HttpClient, epoch: Epoch, - validators: &[Address], -) -> VotePower { - let mut total = VotePower::from(0_u64); - - for validator in validators { - total += get_validator_stake(client, epoch, validator).await; - } - total +) -> token::Amount { + unwrap_client_response( + RPC.vp().pos().total_stake(client, &Some(epoch)).await, + ) } async fn get_validator_stake( client: &HttpClient, epoch: Epoch, validator: &Address, -) -> VotePower { - let total_voting_power_key = pos::validator_total_deltas_key(validator); - let total_voting_power = query_storage_value::( - client, - &total_voting_power_key, +) -> token::Amount { + unwrap_client_response( + RPC.vp() + .pos() + .validator_stake(client, validator, &Some(epoch)) + .await, ) - .await - .expect("Total deltas should be defined"); - let epoched_total_voting_power = total_voting_power.get(epoch); - - VotePower::try_from(epoched_total_voting_power.unwrap_or_default()) - .unwrap_or_default() } pub async fn get_delegators_delegation( client: &HttpClient, address: &Address, - _epoch: Epoch, -) -> Vec
{ - let key = pos::bonds_for_source_prefix(address); - let bonds_iter = query_storage_prefix::(client, &key).await; - - let mut delegation_addresses: Vec
= Vec::new(); - if let Some(bonds) = bonds_iter { - for (key, _epoched_amount) in bonds { - let validator_address = pos::get_validator_address_from_bond(&key) - .expect("Delegation key should contain validator address."); - delegation_addresses.push(validator_address); - } - } - delegation_addresses +) -> HashSet
{ + unwrap_client_response(RPC.vp().pos().delegations(client, address).await) } pub async fn get_governance_parameters(client: &HttpClient) -> GovParams { diff --git a/apps/src/lib/client/signing.rs b/apps/src/lib/client/signing.rs index 681df3f8b6..ed7ab484a9 100644 --- a/apps/src/lib/client/signing.rs +++ b/apps/src/lib/client/signing.rs @@ -109,9 +109,20 @@ pub async fn tx_signer( args.ledger_address.clone(), ) .await; + // Check if the signer is an implicit account that needs to reveal + // its PK first + if matches!(signer, Address::Implicit(_)) { + let pk: common::PublicKey = signing_key.ref_to(); + super::tx::reveal_pk_if_needed(ctx, &pk, args).await; + } + signing_key + } + TxSigningKey::SecretKey(signing_key) => { + // Check if the signing key needs to reveal its PK first + let pk: common::PublicKey = signing_key.ref_to(); + super::tx::reveal_pk_if_needed(ctx, &pk, args).await; signing_key } - TxSigningKey::SecretKey(signing_key) => signing_key, TxSigningKey::None => { panic!( "All transactions must be signed; please either specify the \ diff --git a/apps/src/lib/client/tendermint_rpc_types.rs b/apps/src/lib/client/tendermint_rpc_types.rs index 66fe1912df..537cca243f 100644 --- a/apps/src/lib/client/tendermint_rpc_types.rs +++ b/apps/src/lib/client/tendermint_rpc_types.rs @@ -72,10 +72,7 @@ impl TryFrom for TxResponse { .map(String::as_str) // TODO: fix finalize block, to return initialized accounts, // even when we reject a tx? - .or(Some("[]")) - // NOTE: at this point we only have `Some(vec)`, not `None` - .ok_or_else(|| unreachable!()) - .and_then(|initialized_accounts| { + .map_or(Ok(vec![]), |initialized_accounts| { serde_json::from_str(initialized_accounts) .map_err(|err| format!("JSON decode error: {err}")) })?; diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index 82b21c2b0f..35a27384b0 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -7,7 +7,6 @@ use std::fs::{File, OpenOptions}; use std::io::{Read, Write}; use std::ops::Deref; use std::path::PathBuf; -use std::time::Duration; use async_std::io::prelude::WriteExt; use async_std::io::{self}; @@ -39,9 +38,9 @@ use namada::ibc::Height as IbcHeight; use namada::ibc_proto::cosmos::base::v1beta1::Coin; use namada::ledger::governance::storage as gov_storage; use namada::ledger::masp; -use namada::ledger::pos::{BondId, Bonds, Unbonds}; +use namada::ledger::pos::{BondId, Bonds, CommissionRates, Unbonds}; use namada::proto::Tx; -use namada::types::address::{masp, masp_tx_key, nam, Address}; +use namada::types::address::{masp, masp_tx_key, Address}; use namada::types::governance::{ OfflineProposal, OfflineVote, Proposal, ProposalVote, }; @@ -58,11 +57,12 @@ use namada::types::transaction::governance::{ InitProposalData, VoteProposalData, }; use namada::types::transaction::{pos, InitAccount, InitValidator, UpdateVp}; -use namada::types::{address, token}; +use namada::types::{address, storage, token}; use namada::{ledger, vm}; use rand_core::{CryptoRng, OsRng, RngCore}; +use rust_decimal::Decimal; use sha2::Digest; -use tokio::time::Instant; +use tokio::time::{Duration, Instant}; use super::rpc; use super::types::ShieldedTransferContext; @@ -82,6 +82,7 @@ const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; const TX_INIT_VALIDATOR_WASM: &str = "tx_init_validator.wasm"; const TX_INIT_PROPOSAL: &str = "tx_init_proposal.wasm"; const TX_VOTE_PROPOSAL: &str = "tx_vote_proposal.wasm"; +const TX_REVEAL_PK: &str = "tx_reveal_pk.wasm"; const TX_UPDATE_VP_WASM: &str = "tx_update_vp.wasm"; const
TX_TRANSFER_WASM: &str = "tx_transfer.wasm"; const TX_IBC_WASM: &str = "tx_ibc.wasm"; @@ -89,6 +90,7 @@ const VP_USER_WASM: &str = "vp_user.wasm"; const TX_BOND_WASM: &str = "tx_bond.wasm"; const TX_UNBOND_WASM: &str = "tx_unbond.wasm"; const TX_WITHDRAW_WASM: &str = "tx_withdraw.wasm"; +const TX_CHANGE_COMMISSION_WASM: &str = "tx_change_validator_commission.wasm"; /// Timeout for requests to the `/accepted` and `/applied` /// ABCI query endpoints. @@ -202,10 +204,10 @@ pub async fn submit_init_validator( consensus_key, eth_cold_key, eth_hot_key, - rewards_account_key, protocol_key, + commission_rate, + max_commission_rate_change, validator_vp_code_path, - rewards_vp_code_path, unsafe_dont_encrypt, }: args::TxInitValidator, ) { @@ -217,7 +219,6 @@ pub async fn submit_init_validator( let validator_key_alias = format!("{}-key", alias); let consensus_key_alias = format!("{}-consensus-key", alias); - let rewards_key_alias = format!("{}-rewards-key", alias); let eth_hot_key_alias = format!("{}-eth-hot-key", alias); let eth_cold_key_alias = format!("{}-eth-cold-key", alias); let account_key = ctx.get_opt_cached(&account_key).unwrap_or_else(|| { @@ -294,19 +295,6 @@ pub async fn submit_init_validator( ) .1 }); - - let rewards_account_key = - ctx.get_opt_cached(&rewards_account_key).unwrap_or_else(|| { - println!("Generating staking reward account key..."); - ctx.wallet - .gen_key( - scheme, - Some(rewards_key_alias.clone()), - unsafe_dont_encrypt, - ) - .1 - .ref_to() - }); let protocol_key = ctx.get_opt_cached(&protocol_key); if protocol_key.is_none() { @@ -330,24 +318,32 @@ pub async fn submit_init_validator( let validator_vp_code = validator_vp_code_path .map(|path| ctx.read_wasm(path)) .unwrap_or_else(|| ctx.read_wasm(VP_USER_WASM)); - // Validate the validator VP code - if let Err(err) = vm::validate_untrusted_wasm(&validator_vp_code) { + + // Validate the commission rate data + if commission_rate > Decimal::ONE || commission_rate < Decimal::ZERO { eprintln!( - "Validator validity predicate code validation failed with {}", - err + "The validator commission rate must not exceed 1.0 or 100%, and \ + it must be 0 or positive" ); if !tx_args.force { safe_exit(1) } } - let rewards_vp_code = rewards_vp_code_path - .map(|path| ctx.read_wasm(path)) - .unwrap_or_else(|| ctx.read_wasm(VP_USER_WASM)); - // Validate the rewards VP code - if let Err(err) = vm::validate_untrusted_wasm(&rewards_vp_code) { + if max_commission_rate_change > Decimal::ONE + || max_commission_rate_change < Decimal::ZERO + { + eprintln!( + "The validator maximum change in commission rate per epoch must \ + not exceed 1.0 or 100%" + ); + if !tx_args.force { + safe_exit(1) + } + } + // Validate the validator VP code + if let Err(err) = vm::validate_untrusted_wasm(&validator_vp_code) { eprintln!( - "Staking reward account validity predicate code validation failed \ - with {}", + "Validator validity predicate code validation failed with {}", err ); if !tx_args.force { @@ -367,11 +363,11 @@ pub async fn submit_init_validator( ð_hot_key.ref_to(), ) .unwrap(), - rewards_account_key, protocol_key, dkg_key, + commission_rate, + max_commission_rate_change, validator_vp_code, - rewards_vp_code, }; let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); let tx = Tx::new(tx_code, Some(data)); @@ -379,21 +375,10 @@ pub async fn submit_init_validator( process_tx(ctx, &tx_args, tx, TxSigningKey::WalletAddress(source)) .await; if !tx_args.dry_run { - let (validator_address_alias, validator_address, rewards_address_alias) = + let 
(validator_address_alias, validator_address) = match &initialized_accounts[..] { - // There should be 2 accounts, one for the validator itself, one - // for its staking reward address. - [account_1, account_2] => { - // We need to find out which address is which - let (validator_address, rewards_address) = - if rpc::is_validator(account_1, tx_args.ledger_address) - .await - { - (account_1, account_2) - } else { - (account_2, account_1) - }; - + // There should be 1 account for the validator itself + [validator_address] => { let validator_address_alias = match tx_args .initialized_account_alias { @@ -428,23 +413,7 @@ pub async fn submit_init_validator( validator_address.encode() ); } - let rewards_address_alias = - format!("{}-rewards", validator_address_alias); - if let Some(new_alias) = ctx.wallet.add_address( - rewards_address_alias.clone(), - rewards_address.clone(), - ) { - println!( - "Added alias {} for address {}.", - new_alias, - rewards_address.encode() - ); - } - ( - validator_address_alias, - validator_address.clone(), - rewards_address_alias, - ) + (validator_address_alias, validator_address.clone()) } _ => { eprintln!("Expected two accounts to be created"); @@ -465,10 +434,8 @@ pub async fn submit_init_validator( "The validator's addresses and keys were stored in the wallet:" ); println!(" Validator address \"{}\"", validator_address_alias); - println!(" Staking reward address \"{}\"", rewards_address_alias); println!(" Validator account key \"{}\"", validator_key_alias); println!(" Consensus key \"{}\"", consensus_key_alias); - println!(" Staking reward key \"{}\"", rewards_key_alias); println!( "The ledger node has been setup to use this validator's address \ and consensus key." @@ -1362,7 +1329,7 @@ fn make_asset_type(epoch: Epoch, token: &Address) -> AssetType { AssetType::new(token_bytes.as_ref()).expect("unable to create asset type") } -/// Convert Anoma amount and token type to MASP equivalents +/// Convert Namada amount and token type to MASP equivalents fn convert_amount( epoch: Epoch, token: &Address, @@ -1650,7 +1617,11 @@ pub async fn submit_transfer(mut ctx: Context, args: args::TxTransfer) { let (default_signer, amount, token) = if source == masp_addr && target == masp_addr { // TODO Refactor me, we shouldn't rely on any specific token here. - (TxSigningKey::SecretKey(masp_tx_key()), 0.into(), nam()) + ( + TxSigningKey::SecretKey(masp_tx_key()), + 0.into(), + ctx.native_token.clone(), + ) } else if source == masp_addr { ( TxSigningKey::SecretKey(masp_tx_key()), @@ -1953,9 +1924,13 @@ pub async fn submit_init_proposal(mut ctx: Context, args: args::InitProposal) { safe_exit(1) }; - let balance = rpc::get_token_balance(&client, &nam(), &proposal.author) - .await - .unwrap_or_default(); + let balance = rpc::get_token_balance( + &client, + &ctx.native_token, + &proposal.author, + ) + .await + .unwrap_or_default(); if balance < token::Amount::from(governance_parameters.min_proposal_fund) { @@ -2071,12 +2046,9 @@ pub async fn submit_vote_proposal(mut ctx: Context, args: args::VoteProposal) { safe_exit(1) } } - let mut delegation_addresses = rpc::get_delegators_delegation( - &client, - &voter_address, - epoch, - ) - .await; + let mut delegations = + rpc::get_delegators_delegation(&client, &voter_address) + .await; // Optimize by quering if a vote from a validator // is equal to ours. 
If so, we can avoid voting, but ONLY if we @@ -2093,22 +2065,20 @@ pub async fn submit_vote_proposal(mut ctx: Context, args: args::VoteProposal) { ) .await { - delegation_addresses = filter_delegations( + delegations = filter_delegations( &client, - delegation_addresses, + delegations, proposal_id, &args.vote, ) .await; } - println!("{:?}", delegation_addresses); - let tx_data = VoteProposalData { id: proposal_id, vote: args.vote, voter: voter_address, - delegations: delegation_addresses, + delegations: delegations.into_iter().collect(), }; let data = tx_data @@ -2138,6 +2108,114 @@ pub async fn submit_vote_proposal(mut ctx: Context, args: args::VoteProposal) { } } +pub async fn submit_reveal_pk(mut ctx: Context, args: args::RevealPk) { + let args::RevealPk { + tx: args, + public_key, + } = args; + let public_key = ctx.get_cached(&public_key); + if !reveal_pk_if_needed(&mut ctx, &public_key, &args).await { + let addr: Address = (&public_key).into(); + println!("PK for {addr} is already revealed, nothing to do."); + } +} + +pub async fn reveal_pk_if_needed( + ctx: &mut Context, + public_key: &common::PublicKey, + args: &args::Tx, +) -> bool { + let addr: Address = public_key.into(); + // Check if PK revealed + if args.force || !has_revealed_pk(&addr, args.ledger_address.clone()).await + { + // If not, submit it + submit_reveal_pk_aux(ctx, public_key, args).await; + true + } else { + false + } +} + +pub async fn has_revealed_pk( + addr: &Address, + ledger_address: TendermintAddress, +) -> bool { + rpc::get_public_key(addr, ledger_address).await.is_some() +} + +pub async fn submit_reveal_pk_aux( + ctx: &mut Context, + public_key: &common::PublicKey, + args: &args::Tx, +) { + let addr: Address = public_key.into(); + println!("Submitting a tx to reveal the public key for address {addr}..."); + let tx_data = public_key + .try_to_vec() + .expect("Encoding a public key shouldn't fail"); + let tx_code = ctx.read_wasm(TX_REVEAL_PK); + let tx = Tx::new(tx_code, Some(tx_data)); + + // submit_tx without signing the inner tx + let keypair = if let Some(signing_key) = &args.signing_key { + ctx.get_cached(signing_key) + } else if let Some(signer) = args.signer.as_ref() { + let signer = ctx.get(signer); + find_keypair(&mut ctx.wallet, &signer, args.ledger_address.clone()) + .await + } else { + find_keypair(&mut ctx.wallet, &addr, args.ledger_address.clone()).await + }; + let epoch = rpc::query_epoch(args::Query { + ledger_address: args.ledger_address.clone(), + }) + .await; + let to_broadcast = if args.dry_run { + TxBroadcastData::DryRun(tx) + } else { + super::signing::sign_wrapper(ctx, args, epoch, tx, &keypair).await + }; + + if args.dry_run { + if let TxBroadcastData::DryRun(tx) = to_broadcast { + rpc::dry_run_tx(&args.ledger_address, tx.to_bytes()).await; + } else { + panic!( + "Expected a dry-run transaction, received a wrapper \ + transaction instead" + ); + } + } else { + // Either broadcast or submit transaction and collect result into + // sum type + let result = if args.broadcast_only { + Left(broadcast_tx(args.ledger_address.clone(), &to_broadcast).await) + } else { + Right(submit_tx(args.ledger_address.clone(), to_broadcast).await) + }; + // Return result based on executed operation, otherwise deal with + // the encountered errors uniformly + match result { + Right(Err(err)) => { + eprintln!( + "Encountered error while broadcasting transaction: {}", + err + ); + safe_exit(1) + } + Left(Err(err)) => { + eprintln!( + "Encountered error while broadcasting transaction: {}", + err + ); + 
safe_exit(1) + } + _ => {} + } + } +} + /// Check if current epoch is in the last third of the voting period of the /// proposal. This ensures that it is safe to optimize the vote writing to /// storage. @@ -2157,7 +2235,7 @@ async fn is_safe_voting_window( match proposal_end_epoch { Some(proposal_end_epoch) => { - !namada::ledger::governance::vp::is_valid_validator_voting_period( + !namada::ledger::native_vp::governance::utils::is_valid_validator_voting_period( current_epoch, proposal_start_epoch, proposal_end_epoch, @@ -2174,33 +2252,37 @@ async fn is_safe_voting_window( /// vote) async fn filter_delegations( client: &HttpClient, - mut delegation_addresses: Vec
, + delegations: HashSet
, proposal_id: u64, delegator_vote: &ProposalVote, -) -> Vec
{ - let mut remove_indexes: Vec = vec![]; - - for (index, validator_address) in delegation_addresses.iter().enumerate() { - let vote_key = gov_storage::get_vote_proposal_key( - proposal_id, - validator_address.to_owned(), - validator_address.to_owned(), - ); - - if let Some(validator_vote) = - rpc::query_storage_value::(client, &vote_key).await - { - if &validator_vote == delegator_vote { - remove_indexes.push(index); - } - } - } - - for index in remove_indexes { - delegation_addresses.swap_remove(index); - } +) -> HashSet
{ + // Filter delegations by their validator's vote concurrently + let delegations = futures::future::join_all( + delegations + .into_iter() + // we cannot use `filter/filter_map` directly because we want to + // return a future + .map(|validator_address| async { + let vote_key = gov_storage::get_vote_proposal_key( + proposal_id, + validator_address.to_owned(), + validator_address.to_owned(), + ); - delegation_addresses + if let Some(validator_vote) = + rpc::query_storage_value::(client, &vote_key) + .await + { + if &validator_vote == delegator_vote { + return None; + } + } + Some(validator_address) + }), + ) + .await; + // Take out the `None`s + delegations.into_iter().flatten().collect() } pub async fn submit_bond(ctx: Context, args: args::Bond) { @@ -2232,7 +2314,7 @@ pub async fn submit_bond(ctx: Context, args: args::Bond) { // Check bond's source (source for delegation or validator for self-bonds) // balance let bond_source = source.as_ref().unwrap_or(&validator); - let balance_key = token::balance_key(&address::nam(), bond_source); + let balance_key = token::balance_key(&ctx.native_token, bond_source); let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); match rpc::query_storage_value::(&client, &balance_key).await { @@ -2421,6 +2503,88 @@ pub async fn submit_withdraw(ctx: Context, args: args::Withdraw) { .await; } +pub async fn submit_validator_commission_change( + ctx: Context, + args: args::TxCommissionRateChange, +) { + let epoch = rpc::query_epoch(args::Query { + ledger_address: args.tx.ledger_address.clone(), + }) + .await; + + let tx_code = ctx.read_wasm(TX_CHANGE_COMMISSION_WASM); + let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); + + let validator = ctx.get(&args.validator); + if rpc::is_validator(&validator, args.tx.ledger_address.clone()).await { + if args.rate < Decimal::ZERO || args.rate > Decimal::ONE { + eprintln!("Invalid new commission rate, received {}", args.rate); + if !args.tx.force { + safe_exit(1) + } + } + + let commission_rate_key = + ledger::pos::validator_commission_rate_key(&validator); + let max_commission_rate_change_key = + ledger::pos::validator_max_commission_rate_change_key(&validator); + let commission_rates = rpc::query_storage_value::( + &client, + &commission_rate_key, + ) + .await; + let max_change = rpc::query_storage_value::( + &client, + &max_commission_rate_change_key, + ) + .await; + + match (commission_rates, max_change) { + (Some(rates), Some(max_change)) => { + // Assuming that pipeline length = 2 + let rate_next_epoch = rates.get(epoch.next()).unwrap(); + if (args.rate - rate_next_epoch).abs() > max_change { + eprintln!( + "New rate is too large of a change with respect to \ + the predecessor epoch in which the rate will take \ + effect." + ); + if !args.tx.force { + safe_exit(1) + } + } + } + _ => { + eprintln!("Error retrieving from storage"); + if !args.tx.force { + safe_exit(1) + } + } + } + } else { + eprintln!("The given address {validator} is not a validator."); + if !args.tx.force { + safe_exit(1) + } + } + + let data = pos::CommissionChange { + validator: ctx.get(&args.validator), + new_rate: args.rate, + }; + let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); + + let tx = Tx::new(tx_code, Some(data)); + let default_signer = args.validator; + process_tx( + ctx, + &args.tx, + tx, + TxSigningKey::WalletAddress(default_signer), + ) + .await; +} + /// Submit transaction and wait for result. Returns a list of addresses /// initialized in the transaction if any. 
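`filter_delegations` above cannot use `Iterator::filter` because its predicate is async, so each delegation is mapped to a future that resolves to `Option<Address>`, the futures are awaited together with `join_all`, and the `None`s are flattened away afterwards. The same pattern in miniature, assuming the `futures` and `tokio` crates:

    use futures::future::join_all;

    // Keep only even numbers, deciding asynchronously: map each item to a
    // future yielding `Option`, await them all, then drop the `None`s.
    async fn keep_even(items: Vec<u64>) -> Vec<u64> {
        let checked = join_all(items.into_iter().map(|n| async move {
            if n % 2 == 0 { Some(n) } else { None }
        }))
        .await;
        checked.into_iter().flatten().collect()
    }

    #[tokio::main]
    async fn main() {
        assert_eq!(keep_even(vec![1, 2, 3, 4]).await, vec![2, 4]);
    }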
In dry run, this is always empty. pub async fn process_tx( diff --git a/apps/src/lib/client/types.rs b/apps/src/lib/client/types.rs index 1f94838d25..5a26244474 100644 --- a/apps/src/lib/client/types.rs +++ b/apps/src/lib/client/types.rs @@ -8,11 +8,11 @@ use namada::types::masp::{TransferSource, TransferTarget}; use namada::types::storage::Epoch; use namada::types::transaction::GasLimit; use namada::types::{key, token}; -use tendermint_config::net::Address as TendermintAddress; use super::rpc; use crate::cli::{args, Context}; use crate::client::tx::Conversions; +use crate::facade::tendermint_config::net::Address as TendermintAddress; #[derive(Clone, Debug)] pub struct ParsedTxArgs { diff --git a/apps/src/lib/client/utils.rs b/apps/src/lib/client/utils.rs index 5be3bb73fc..99e7d4b7d4 100644 --- a/apps/src/lib/client/utils.rs +++ b/apps/src/lib/client/utils.rs @@ -16,6 +16,7 @@ use namada::types::key::*; use prost::bytes::Bytes; use rand::prelude::ThreadRng; use rand::thread_rng; +use rust_decimal::Decimal; use serde_json::json; use sha2::{Digest, Sha256}; @@ -34,15 +35,16 @@ use crate::wasm_loader; pub const NET_ACCOUNTS_DIR: &str = "setup"; pub const NET_OTHER_ACCOUNTS_DIR: &str = "other"; -/// Github URL prefix of released Anoma network configs -pub const ENV_VAR_NETWORK_CONFIGS_SERVER: &str = "ANOMA_NETWORK_CONFIGS_SERVER"; +/// Github URL prefix of released Namada network configs +pub const ENV_VAR_NETWORK_CONFIGS_SERVER: &str = + "NAMADA_NETWORK_CONFIGS_SERVER"; const DEFAULT_NETWORK_CONFIGS_SERVER: &str = "https://github.com/heliaxdev/anoma-network-config/releases/download"; /// We do pre-genesis validator set up in this directory pub const PRE_GENESIS_DIR: &str = "pre-genesis"; -/// Configure Anoma to join an existing network. The chain must be released in +/// Configure Namada to join an existing network. The chain must be released in /// the repository. pub async fn join_network( global_args: args::Global, @@ -158,7 +160,7 @@ pub async fn join_network( // Rename the base-dir from the default and rename wasm-dir, if non-default. if non_default_dir { - // For compatibility for networks released with Anoma <= v0.4: + // For compatibility for networks released with Namada <= v0.4: // The old releases include the WASM directory at root path of the // archive. This has been moved into the chain directory, so if the // WASM dir is found at the old path, we move it to the new path. @@ -400,8 +402,7 @@ pub fn init_network( archive_dir, }: args::InitNetwork, ) { - let mut config = - genesis_config::open_genesis_config(&genesis_path).unwrap(); + let mut config = genesis_config::open_genesis_config(genesis_path).unwrap(); // Update the WASM checksums let checksums = @@ -475,10 +476,7 @@ pub fn init_network( // Generate account and reward addresses let address = address::gen_established_address("validator account"); - let reward_address = - address::gen_established_address("validator reward account"); config.address = Some(address.to_string()); - config.staking_reward_address = Some(reward_address.to_string()); // Generate the consensus, account and reward keys, unless they're // pre-defined. 
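A note on the concurrent vote-filtering rewrite in `client/tx.rs` above: `Iterator::filter`/`filter_map` cannot return futures, so each delegation is mapped to an `async` block yielding an `Option`, all futures are awaited together with `futures::future::join_all`, and the `None`s are dropped with `flatten`. A minimal, self-contained sketch of the same idiom, with illustrative names (`passes_check` stands in for the storage query and is not part of the codebase):

```rust
// Sketch of the async-filter idiom: map to Option-returning futures,
// await them all concurrently, then drop the `None`s.
async fn passes_check(n: u32) -> Option<u32> {
    // stand-in for an async storage query
    if n % 2 == 0 { Some(n) } else { None }
}

async fn filter_concurrently(items: Vec<u32>) -> Vec<u32> {
    futures::future::join_all(items.into_iter().map(passes_check))
        .await
        .into_iter()
        .flatten()
        .collect()
}
```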
@@ -518,24 +516,6 @@ pub fn init_network( keypair.ref_to() }); - let staking_reward_pk = try_parse_public_key( - format!("validator {name} staking reward key"), - &config.staking_reward_public_key, - ) - .unwrap_or_else(|| { - let alias = format!("{}-reward-key", name); - println!( - "Generating validator {} staking reward account key...", - name - ); - let (_alias, keypair) = wallet.gen_key( - SchemeType::Ed25519, - Some(alias), - unsafe_dont_encrypt, - ); - keypair.ref_to() - }); - let protocol_pk = try_parse_public_key( format!("validator {name} protocol key"), &config.protocol_public_key, @@ -614,8 +594,6 @@ pub fn init_network( Some(genesis_config::HexString(consensus_pk.to_string())); config.account_public_key = Some(genesis_config::HexString(account_pk.to_string())); - config.staking_reward_public_key = - Some(genesis_config::HexString(staking_reward_pk.to_string())); config.eth_cold_key = Some(genesis_config::HexString(eth_cold_pk.to_string())); config.eth_hot_key = @@ -628,7 +606,6 @@ pub fn init_network( // Write keypairs to wallet wallet.add_address(name.clone(), address); - wallet.add_address(format!("{}-reward", &name), reward_address); wallet.save().unwrap(); }); @@ -647,18 +624,16 @@ pub fn init_network( }) } - if let Some(token) = &mut config.token { - token.iter_mut().for_each(|(name, config)| { - if config.address.is_none() { - let address = address::gen_established_address("token"); - config.address = Some(address.to_string()); - wallet.add_address(name.clone(), address); - } - if config.vp.is_none() { - config.vp = Some("vp_token".to_string()); - } - }) - } + config.token.iter_mut().for_each(|(name, config)| { + if config.address.is_none() { + let address = address::gen_established_address("token"); + config.address = Some(address.to_string()); + wallet.add_address(name.clone(), address); + } + if config.vp.is_none() { + config.vp = Some("vp_token".to_string()); + } + }); if let Some(implicit) = &mut config.implicit { implicit.iter_mut().for_each(|(name, config)| { @@ -714,7 +689,7 @@ pub fn init_network( fs::rename(&temp_dir, &chain_dir).unwrap(); // Copy the WASM checksums - let wasm_dir_full = chain_dir.join(&config::DEFAULT_WASM_DIR); + let wasm_dir_full = chain_dir.join(config::DEFAULT_WASM_DIR); fs::create_dir_all(&wasm_dir_full).unwrap(); fs::copy( &wasm_checksums_path, @@ -731,14 +706,14 @@ pub fn init_network( .join(config::DEFAULT_BASE_DIR); let temp_validator_chain_dir = validator_dir.join(temp_chain_id.as_str()); - let validator_chain_dir = validator_dir.join(&chain_id.as_str()); + let validator_chain_dir = validator_dir.join(chain_id.as_str()); // Rename the generated directories for validators from `temp_chain_id` // to `chain_id` - std::fs::rename(&temp_validator_chain_dir, &validator_chain_dir) + std::fs::rename(temp_validator_chain_dir, &validator_chain_dir) .unwrap(); // Copy the WASM checksums - let wasm_dir_full = validator_chain_dir.join(&config::DEFAULT_WASM_DIR); + let wasm_dir_full = validator_chain_dir.join(config::DEFAULT_WASM_DIR); fs::create_dir_all(&wasm_dir_full).unwrap(); fs::copy( &wasm_checksums_path, @@ -931,18 +906,36 @@ fn init_established_account( } } -/// Initialize genesis validator's address, staking reward address, -/// consensus key, validator account key and staking rewards key and use -/// it in the ledger's node. +/// Initialize genesis validator's address, consensus key and validator account +/// key and use it in the ledger's node. 
pub fn init_genesis_validator( global_args: args::Global, args::InitGenesisValidator { alias, + commission_rate, + max_commission_rate_change, net_address, unsafe_dont_encrypt, key_scheme, }: args::InitGenesisValidator, ) { + // Validate the commission rate data + if commission_rate > Decimal::ONE || commission_rate < Decimal::ZERO { + eprintln!( + "The validator commission rate must not exceed 1.0 or 100%, and \ + it must be 0 or positive" + ); + cli::safe_exit(1) + } + if max_commission_rate_change > Decimal::ONE + || max_commission_rate_change < Decimal::ZERO + { + eprintln!( + "The validator maximum change in commission rate per epoch must \ + not exceed 1.0 or 100%" + ); + cli::safe_exit(1) + } let pre_genesis_dir = validator_pre_genesis_dir(&global_args.base_dir, &alias); println!("Generating validator keys..."); @@ -979,9 +972,6 @@ pub fn init_genesis_validator( account_public_key: Some(HexString( pre_genesis.account_key.ref_to().to_string(), )), - staking_reward_public_key: Some(HexString( - pre_genesis.rewards_key.ref_to().to_string(), - )), protocol_public_key: Some(HexString( pre_genesis .store @@ -1000,6 +990,8 @@ pub fn init_genesis_validator( .public() .to_string(), )), + commission_rate: Some(commission_rate), + max_commission_rate_change: Some(max_commission_rate_change), tendermint_node_key: Some(HexString( pre_genesis.tendermint_node_key.ref_to().to_string(), )), @@ -1091,7 +1083,7 @@ pub fn write_tendermint_node_key( .create(true) .write(true) .truncate(true) - .open(&node_key_path) + .open(node_key_path) .expect("Couldn't create validator node key file"); serde_json::to_writer_pretty(file, &tm_node_keypair_json) .expect("Couldn't write validator node key file"); diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index d27827a1d9..c62333f139 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -8,15 +8,16 @@ use borsh::{BorshDeserialize, BorshSerialize}; use derivative::Derivative; use namada::ledger::eth_bridge::parameters::EthereumBridgeConfig; use namada::ledger::governance::parameters::GovParams; -use namada::ledger::parameters::Parameters; +use namada::ledger::parameters::EpochDuration; use namada::ledger::pos::{GenesisValidator, PosParams}; use namada::types::address::Address; #[cfg(not(feature = "dev"))] use namada::types::chain::ChainId; use namada::types::key::dkg_session_keys::DkgPublicKey; use namada::types::key::*; -use namada::types::time::DateTimeUtc; +use namada::types::time::{DateTimeUtc, DurationSecs}; use namada::types::{storage, token}; +use rust_decimal::Decimal; /// Genesis configuration file format pub mod genesis_config { @@ -29,20 +30,20 @@ pub mod genesis_config { use data_encoding::HEXLOWER; use eyre::Context; use namada::ledger::governance::parameters::GovParams; - use namada::ledger::parameters::{EpochDuration, Parameters}; - use namada::ledger::pos::types::BasisPoints; + use namada::ledger::parameters::EpochDuration; use namada::ledger::pos::{GenesisValidator, PosParams}; use namada::types::address::Address; use namada::types::key::dkg_session_keys::DkgPublicKey; use namada::types::key::*; use namada::types::time::Rfc3339String; use namada::types::{storage, token}; + use rust_decimal::Decimal; use serde::{Deserialize, Serialize}; use thiserror::Error; use super::{ EstablishedAccount, EthereumBridgeConfig, Genesis, ImplicitAccount, - TokenAccount, Validator, + Parameters, TokenAccount, Validator, }; use crate::cli; @@ -106,10 +107,13 @@ pub mod genesis_config { pub struct GenesisConfig { 
        // Genesis timestamp
        pub genesis_time: Rfc3339String,
+        // Name of the native token - this must be one of the tokens included
+        // in the `token` field
+        pub native_token: String,
        // Initial validator set
        pub validator: HashMap<String, ValidatorConfig>,
        // Token accounts present at genesis
-        pub token: Option<HashMap<String, TokenAccountConfig>>,
+        pub token: HashMap<String, TokenAccountConfig>,
        // Established accounts present at genesis
        pub established: Option<HashMap<String, EstablishedAccountConfig>>,
        // Implicit accounts present at genesis
@@ -167,8 +171,6 @@ pub mod genesis_config {
        pub eth_hot_key: Option<HexString>,
        // Public key for validator account. (default: generate)
        pub account_public_key: Option<HexString>,
-        // Public key for staking reward account. (default: generate)
-        pub staking_reward_public_key: Option<HexString>,
        // Public protocol signing key for validator account. (default:
        // generate)
        pub protocol_public_key: Option<HexString>,
@@ -176,18 +178,19 @@ pub mod genesis_config {
        pub dkg_public_key: Option<HexString>,
        // Validator address (default: generate).
        pub address: Option<String>,
-        // Staking reward account address (default: generate).
-        pub staking_reward_address: Option<String>,
        // Total number of tokens held at genesis.
        // XXX: u64 doesn't work with toml-rs!
        pub tokens: Option<u64>,
        // Unstaked balance at genesis.
        // XXX: u64 doesn't work with toml-rs!
        pub non_staked_balance: Option<u64>,
+        /// Commission rate charged on rewards for delegators (bounded inside
+        /// 0-1)
+        pub commission_rate: Option<Decimal>,
+        /// Maximum change in commission rate permitted per epoch
+        pub max_commission_rate_change: Option<Decimal>,
        // Filename of validator VP. (default: default validator VP)
        pub validator_vp: Option<String>,
-        // Filename of staking reward account VP. (default: user VP)
-        pub staking_reward_vp: Option<String>,
        // IP:port of the validator. (used in generation only)
        pub net_address: Option<String>,
        /// Tendermint node key is used to derive Tendermint node ID for node
@@ -229,9 +232,6 @@ pub mod genesis_config {
        // Minimum number of blocks per epoch.
        // XXX: u64 doesn't work with toml-rs!
        pub min_num_of_blocks: u64,
-        // Minimum duration of an epoch (in seconds).
-        // TODO: this is i64 because datetime wants it
-        pub min_duration: i64,
        // Maximum duration per block (in seconds).
        // TODO: this is i64 because datetime wants it
        pub max_expected_time_per_block: i64,
@@ -241,6 +241,14 @@ pub mod genesis_config {
        // Hashes of whitelisted txs array. `None` value or an empty array
        // disables whitelisting.
        pub tx_whitelist: Option<Vec<String>>,
+        /// Filename of implicit accounts validity predicate WASM code
+        pub implicit_vp: String,
+        /// Expected number of epochs per year
+        pub epochs_per_year: u64,
+        /// PoS gain p
+        pub pos_gain_p: Decimal,
+        /// PoS gain d
+        pub pos_gain_d: Decimal,
    }

    #[derive(Clone, Debug, Deserialize, Serialize)]
@@ -254,23 +262,28 @@ pub mod genesis_config {
        // Unbonding length (in epochs).
        // XXX: u64 doesn't work with toml-rs!
        pub unbonding_len: u64,
-        // Votes per token (in basis points).
+        // Votes per token.
        // XXX: u64 doesn't work with toml-rs!
-        pub votes_per_token: u64,
+        pub tm_votes_per_token: Decimal,
        // Reward for proposing a block.
        // XXX: u64 doesn't work with toml-rs!
-        pub block_proposer_reward: u64,
+        pub block_proposer_reward: Decimal,
        // Reward for voting on a block.
        // XXX: u64 doesn't work with toml-rs!
-        pub block_vote_reward: u64,
+        pub block_vote_reward: Decimal,
+        // Maximum staking APY
+        // XXX: u64 doesn't work with toml-rs!
+        pub max_inflation_rate: Decimal,
+        // Target ratio of staked NAM tokens to total NAM tokens
+        pub target_staked_ratio: Decimal,
        // Portion of a validator's stake that should be slashed on a
-        // duplicate vote (in basis points).
+        // duplicate vote.
// XXX: u64 doesn't work with toml-rs! - pub duplicate_vote_slash_rate: u64, + pub duplicate_vote_min_slash_rate: Decimal, // Portion of a validator's stake that should be slashed on a - // light client attack (in basis points). + // light client attack. // XXX: u64 doesn't work with toml-rs! - pub light_client_attack_slash_rate: u64, + pub light_client_attack_min_slash_rate: Decimal, } #[derive(Clone, Debug, Deserialize, Serialize)] @@ -285,17 +298,11 @@ pub mod genesis_config { ) -> Validator { let validator_vp_name = config.validator_vp.as_ref().unwrap(); let validator_vp_config = wasm.get(validator_vp_name).unwrap(); - let reward_vp_name = config.staking_reward_vp.as_ref().unwrap(); - let reward_vp_config = wasm.get(reward_vp_name).unwrap(); Validator { pos_data: GenesisValidator { - address: Address::decode(&config.address.as_ref().unwrap()) + address: Address::decode(config.address.as_ref().unwrap()) .unwrap(), - staking_reward_address: Address::decode( - &config.staking_reward_address.as_ref().unwrap(), - ) - .unwrap(), tokens: token::Amount::whole(config.tokens.unwrap_or_default()), consensus_key: config .consensus_public_key @@ -303,12 +310,6 @@ pub mod genesis_config { .unwrap() .to_public_key() .unwrap(), - staking_reward_key: config - .staking_reward_public_key - .as_ref() - .unwrap() - .to_public_key() - .unwrap(), eth_cold_key: config .eth_cold_key .as_ref() @@ -321,6 +322,29 @@ pub mod genesis_config { .unwrap() .to_public_key() .unwrap(), + commission_rate: config + .commission_rate + .and_then(|rate| { + if rate >= Decimal::ZERO && rate <= Decimal::ONE { + Some(rate) + } else { + None + } + }) + .expect("Commission rate must be between 0.0 and 1.0"), + max_commission_rate_change: config + .max_commission_rate_change + .and_then(|rate| { + if rate >= Decimal::ZERO && rate <= Decimal::ONE { + Some(rate) + } else { + None + } + }) + .expect( + "Max commission rate change must be between 0.0 and \ + 1.0", + ), }, account_key: config .account_public_key @@ -350,16 +374,6 @@ pub mod genesis_config { .unwrap() .to_sha256_bytes() .unwrap(), - reward_vp_code_path: reward_vp_config.filename.to_owned(), - reward_vp_sha256: reward_vp_config - .sha256 - .clone() - .unwrap_or_else(|| { - eprintln!("Unknown validator VP WASM sha256"); - cli::safe_exit(1); - }) - .to_sha256_bytes() - .unwrap(), } } @@ -374,8 +388,7 @@ pub mod genesis_config { let token_vp_config = wasm.get(token_vp_name).unwrap(); TokenAccount { - address: Address::decode(&config.address.as_ref().unwrap()) - .unwrap(), + address: Address::decode(config.address.as_ref().unwrap()).unwrap(), vp_code_path: token_vp_config.filename.to_owned(), vp_sha256: token_vp_config .sha256 @@ -393,7 +406,7 @@ pub mod genesis_config { .iter() .map(|(alias_or_address, amount)| { ( - match Address::decode(&alias_or_address) { + match Address::decode(alias_or_address) { Ok(address) => address, Err(decode_err) => { if let Some(alias) = @@ -456,8 +469,7 @@ pub mod genesis_config { let account_vp_config = wasm.get(account_vp_name).unwrap(); EstablishedAccount { - address: Address::decode(&config.address.as_ref().unwrap()) - .unwrap(), + address: Address::decode(config.address.as_ref().unwrap()).unwrap(), vp_code_path: account_vp_config.filename.to_owned(), vp_sha256: account_vp_config .sha256 @@ -479,7 +491,7 @@ pub mod genesis_config { .iter() .map(|(address, hex)| { ( - storage::Key::parse(&address).unwrap(), + storage::Key::parse(address).unwrap(), hex.to_bytes().unwrap(), ) }) @@ -499,32 +511,54 @@ pub mod genesis_config { } pub fn 
load_genesis_config(config: GenesisConfig) -> Genesis {
-        let wasms = config.wasm;
-        let validators: HashMap<String, Validator> = config
-            .validator
-            .iter()
-            .map(|(name, cfg)| (name.clone(), load_validator(cfg, &wasms)))
-            .collect();
-        let established_accounts: HashMap<String, EstablishedAccount> = config
-            .established
-            .unwrap_or_default()
+        let GenesisConfig {
+            genesis_time,
+            native_token,
+            validator,
+            token,
+            established,
+            implicit,
+            parameters,
+            pos_params,
+            gov_params,
+            wasm,
+        } = config;
+
+        let native_token = Address::decode(
+            token
+                .get(&native_token)
+                .expect(
+                    "Native token's alias must be present in the declared \
                     tokens",
+                )
+                .address
+                .as_ref()
+                .expect("Missing native token address"),
+        )
+        .expect("Invalid address");
+
+        let validators: HashMap<String, Validator> = validator
            .iter()
-            .map(|(name, cfg)| (name.clone(), load_established(cfg, &wasms)))
+            .map(|(name, cfg)| (name.clone(), load_validator(cfg, &wasm)))
            .collect();
-        let implicit_accounts: HashMap<String, ImplicitAccount> = config
-            .implicit
+        let established_accounts: HashMap<String, EstablishedAccount> =
+            established
+                .unwrap_or_default()
+                .iter()
+                .map(|(name, cfg)| (name.clone(), load_established(cfg, &wasm)))
+                .collect();
+        let implicit_accounts: HashMap<String, ImplicitAccount> = implicit
            .unwrap_or_default()
            .iter()
            .map(|(name, cfg)| (name.clone(), load_implicit(cfg)))
            .collect();
-        let token_accounts = config
-            .token
-            .unwrap_or_default()
+        #[allow(clippy::iter_kv_map)]
+        let token_accounts = token
            .iter()
            .map(|(_name, cfg)| {
                load_token(
                    cfg,
-                    &wasms,
+                    &wasm,
                    &validators,
                    &established_accounts,
                    &implicit_accounts,
                )
            })
            .collect();

+        let implicit_vp_config = wasm.get(&parameters.implicit_vp).unwrap();
+        let implicit_vp_code_path = implicit_vp_config.filename.to_owned();
+        let implicit_vp_sha256 = implicit_vp_config
+            .sha256
+            .clone()
+            .unwrap_or_else(|| {
+                eprintln!("Unknown implicit VP WASM sha256");
+                cli::safe_exit(1);
+            })
+            .to_sha256_bytes()
+            .unwrap();
+
+        let min_duration: i64 =
+            60 * 60 * 24 * 365 / (parameters.epochs_per_year as i64);
        let parameters = Parameters {
            epoch_duration: EpochDuration {
-                min_num_of_blocks: config.parameters.min_num_of_blocks,
+                min_num_of_blocks: parameters.min_num_of_blocks,
                min_duration: namada::types::time::Duration::seconds(
-                    config.parameters.min_duration,
+                    min_duration,
                )
                .into(),
            },
            max_expected_time_per_block: namada::types::time::Duration::seconds(
-                config.parameters.max_expected_time_per_block,
+                parameters.max_expected_time_per_block,
            )
            .into(),
-            vp_whitelist: config.parameters.vp_whitelist.unwrap_or_default(),
-            tx_whitelist: config.parameters.tx_whitelist.unwrap_or_default(),
+            vp_whitelist: parameters.vp_whitelist.unwrap_or_default(),
+            tx_whitelist: parameters.tx_whitelist.unwrap_or_default(),
+            implicit_vp_code_path,
+            implicit_vp_sha256,
+            epochs_per_year: parameters.epochs_per_year,
+            pos_gain_p: parameters.pos_gain_p,
+            pos_gain_d: parameters.pos_gain_d,
+            staked_ratio: Decimal::ZERO,
+            pos_inflation_amount: 0,
        };

+        let GovernanceParamsConfig {
+            min_proposal_fund,
+            max_proposal_code_size,
+            min_proposal_period,
+            max_proposal_content_size,
+            min_proposal_grace_epochs,
+            max_proposal_period,
+        } = gov_params;
        let gov_params = GovParams {
-            min_proposal_fund: config.gov_params.min_proposal_fund,
-            max_proposal_code_size: config.gov_params.max_proposal_code_size,
-            min_proposal_period: config.gov_params.min_proposal_period,
-            max_proposal_period: config.gov_params.max_proposal_period,
-            max_proposal_content_size: config
-                .gov_params
-                .max_proposal_content_size,
-            min_proposal_grace_epochs: config
-                .gov_params
-                .min_proposal_grace_epochs,
+            min_proposal_fund,
+            max_proposal_code_size,
+            min_proposal_period,
+            max_proposal_content_size,
+            min_proposal_grace_epochs,
+            max_proposal_period,
        };

+        let PosParamsConfig {
+            max_validator_slots,
+            pipeline_len,
+            unbonding_len,
+            tm_votes_per_token,
+            block_proposer_reward,
+            block_vote_reward,
+            max_inflation_rate,
+            target_staked_ratio,
+            duplicate_vote_min_slash_rate,
+            light_client_attack_min_slash_rate,
+        } = pos_params;
        let pos_params = PosParams {
-            max_validator_slots: config.pos_params.max_validator_slots,
-            pipeline_len: config.pos_params.pipeline_len,
-            unbonding_len: config.pos_params.unbonding_len,
-            votes_per_token: BasisPoints::new(
-                config.pos_params.votes_per_token,
-            ),
-            block_proposer_reward: config.pos_params.block_proposer_reward,
-            block_vote_reward: config.pos_params.block_vote_reward,
-            duplicate_vote_slash_rate: BasisPoints::new(
-                config.pos_params.duplicate_vote_slash_rate,
-            ),
-            light_client_attack_slash_rate: BasisPoints::new(
-                config.pos_params.light_client_attack_slash_rate,
-            ),
+            max_validator_slots,
+            pipeline_len,
+            unbonding_len,
+            tm_votes_per_token,
+            block_proposer_reward,
+            block_vote_reward,
+            max_inflation_rate,
+            target_staked_ratio,
+            duplicate_vote_min_slash_rate,
+            light_client_attack_min_slash_rate,
        };

        let mut genesis = Genesis {
-            genesis_time: config.genesis_time.try_into().unwrap(),
+            genesis_time: genesis_time.try_into().unwrap(),
+            native_token,
            validators: validators.into_values().collect(),
            token_accounts,
            established_accounts: established_accounts.into_values().collect(),
@@ -629,6 +697,7 @@ pub mod genesis_config {
 #[borsh_init(init)]
 pub struct Genesis {
     pub genesis_time: DateTimeUtc,
+    pub native_token: Address,
     pub validators: Vec<Validator>,
     pub token_accounts: Vec<TokenAccount>,
     pub established_accounts: Vec<EstablishedAccount>,
@@ -674,17 +743,13 @@ pub struct Validator {
     pub protocol_key: common::PublicKey,
     /// The public DKG session key used during the DKG protocol
     pub dkg_public_key: DkgPublicKey,
-    /// These tokens are no staked and hence do not contribute to the
+    /// These tokens are not staked and hence do not contribute to the
     /// validator's voting power
     pub non_staked_balance: token::Amount,
     /// Validity predicate code WASM
     pub validator_vp_code_path: String,
     /// Expected SHA-256 hash of the validator VP
     pub validator_vp_sha256: [u8; 32],
-    /// Staking reward account code WASM
-    pub reward_vp_code_path: String,
-    /// Expected SHA-256 hash of the staking reward VP
-    pub reward_vp_sha256: [u8; 32],
 }

 #[derive(
@@ -737,6 +802,46 @@ pub struct ImplicitAccount {
     pub public_key: common::PublicKey,
 }

+/// Protocol parameters. This is almost the same as
+/// `ledger::parameters::Parameters`, but instead of having the `implicit_vp`
+/// WASM code bytes, it only has the filename and SHA-256 hash, as the actual
+/// code is loaded on `init_chain`
+#[derive(
+    Clone,
+    Debug,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Hash,
+    BorshSerialize,
+    BorshDeserialize,
+)]
+pub struct Parameters {
+    /// Epoch duration
+    pub epoch_duration: EpochDuration,
+    /// Maximum expected time per block
+    pub max_expected_time_per_block: DurationSecs,
+    /// Whitelisted validity predicate hashes
+    pub vp_whitelist: Vec<String>,
+    /// Whitelisted tx hashes
+    pub tx_whitelist: Vec<String>,
+    /// Implicit accounts validity predicate code WASM
+    pub implicit_vp_code_path: String,
+    /// Expected SHA-256 hash of the implicit VP
+    pub implicit_vp_sha256: [u8; 32],
+    /// Expected number of epochs per year (read only)
+    pub epochs_per_year: u64,
+    /// PoS gain p (read only)
+    pub pos_gain_p: Decimal,
+    /// PoS gain d (read only)
+    pub pos_gain_d: Decimal,
+    /// PoS staked ratio (read + write for every epoch)
+    pub staked_ratio: Decimal,
+    /// PoS inflation amount from the last epoch (read + write for every epoch)
+    pub pos_inflation_amount: u64,
+}
+
 #[cfg(not(feature = "dev"))]
 pub fn genesis(base_dir: impl AsRef<Path>, chain_id: &ChainId) -> Genesis {
     let path = base_dir
@@ -746,58 +851,48 @@ pub fn genesis(base_dir: impl AsRef<Path>, chain_id: &ChainId) -> Genesis {
 }
 #[cfg(feature = "dev")]
 pub fn genesis() -> Genesis {
-    use namada::ledger::parameters::EpochDuration;
     use namada::types::address;
+    use rust_decimal_macros::dec;

     use crate::wallet;

+    let vp_implicit_path = "vp_implicit.wasm";
     let vp_token_path = "vp_token.wasm";
     let vp_user_path = "vp_user.wasm";

     // NOTE When the validator's key changes, tendermint must be reset with
-    // `anoma reset` command. To generate a new validator, use the
+    // `namada reset` command. To generate a new validator, use the
     // `tests::gen_genesis_validator` below.
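The epoch-duration wiring in `load_genesis_config` above bottoms out in simple arithmetic: `min_duration` is seconds per year divided by `epochs_per_year`. A quick check of the dev-genesis value used just below (a standalone illustrative snippet, not part of the patch):

```rust
fn main() {
    // 60 * 60 * 24 * 365 = 31_536_000 seconds per year; the dev genesis
    // declares 525_600 epochs per year, i.e. one epoch per minute.
    let epochs_per_year: i64 = 525_600;
    let min_duration = 60 * 60 * 24 * 365 / epochs_per_year;
    assert_eq!(min_duration, 60);
}
```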
let consensus_keypair = wallet::defaults::validator_keypair(); let account_keypair = wallet::defaults::validator_keypair(); - let ed_staking_reward_keypair = ed25519::SecretKey::try_from_slice(&[ - 61, 198, 87, 204, 44, 94, 234, 228, 217, 72, 245, 27, 40, 2, 151, 174, - 24, 247, 69, 6, 9, 30, 44, 16, 88, 238, 77, 162, 243, 125, 240, 206, - ]) - .unwrap(); - let secp_eth_cold_keypair = secp256k1::SecretKey::try_from_slice(&[ 90, 83, 107, 155, 193, 251, 120, 27, 76, 1, 188, 8, 116, 121, 90, 99, 65, 17, 187, 6, 238, 141, 63, 188, 76, 38, 102, 7, 47, 185, 28, 52, ]) .unwrap(); - let staking_reward_keypair = - common::SecretKey::try_from_sk(&ed_staking_reward_keypair).unwrap(); let eth_cold_keypair = common::SecretKey::try_from_sk(&secp_eth_cold_keypair).unwrap(); let address = wallet::defaults::validator_address(); - let staking_reward_address = Address::decode("atest1v4ehgw36xcersvee8qerxd35x9prsw2xg5erxv6pxfpygd2x89z5xsf5xvmnysejgv6rwd2rnj2avt").unwrap(); let (protocol_keypair, eth_bridge_keypair, dkg_keypair) = wallet::defaults::validator_keys(); let validator = Validator { pos_data: GenesisValidator { address, - staking_reward_address, tokens: token::Amount::whole(200_000), consensus_key: consensus_keypair.ref_to(), - staking_reward_key: staking_reward_keypair.ref_to(), eth_cold_key: eth_cold_keypair.ref_to(), eth_hot_key: eth_bridge_keypair.ref_to(), + commission_rate: dec!(0.05), + max_commission_rate_change: dec!(0.01), }, account_key: account_keypair.ref_to(), protocol_key: protocol_keypair.ref_to(), dkg_public_key: dkg_keypair.public(), non_staked_balance: token::Amount::whole(100_000), - // TODO replace with https://github.com/anoma/anoma/issues/25) + // TODO replace with https://github.com/anoma/namada/issues/25) validator_vp_code_path: vp_user_path.into(), validator_vp_sha256: Default::default(), - reward_vp_code_path: vp_user_path.into(), - reward_vp_sha256: Default::default(), }; let parameters = Parameters { epoch_duration: EpochDuration { @@ -807,6 +902,14 @@ pub fn genesis() -> Genesis { max_expected_time_per_block: namada::types::time::DurationSecs(30), vp_whitelist: vec![], tx_whitelist: vec![], + implicit_vp_code_path: vp_implicit_path.into(), + implicit_vp_sha256: Default::default(), + epochs_per_year: 525_600, /* seconds in yr (60*60*24*365) div seconds + * per epoch (60 = min_duration) */ + pos_gain_p: dec!(0.1), + pos_gain_d: dec!(0.1), + staked_ratio: dec!(0.0), + pos_inflation_amount: 0, }; let albert = EstablishedAccount { address: wallet::defaults::albert_address(), @@ -864,8 +967,8 @@ pub fn genesis() -> Genesis { ((&validator.account_key).into(), default_key_tokens), ]); let token_accounts = address::tokens() - .into_iter() - .map(|(address, _)| TokenAccount { + .into_keys() + .map(|address| TokenAccount { address, vp_code_path: vp_token_path.into(), vp_sha256: Default::default(), @@ -882,6 +985,7 @@ pub fn genesis() -> Genesis { pos_params: PosParams::default(), gov_params: GovParams::default(), ethereum_bridge_params: None, + native_token: address::nam(), } } @@ -896,18 +1000,14 @@ pub mod tests { use crate::wallet; /// Run `cargo test gen_genesis_validator -- --nocapture` to generate a - /// new genesis validator address, staking reward address and keypair. + /// new genesis validator address and keypair. 
#[test] fn gen_genesis_validator() { let address = gen_established_address(); - let staking_reward_address = gen_established_address(); let mut rng: ThreadRng = thread_rng(); let keypair: common::SecretKey = ed25519::SigScheme::generate(&mut rng).try_to_sk().unwrap(); let kp_arr = keypair.try_to_vec().unwrap(); - let staking_reward_keypair: common::SecretKey = - ed25519::SigScheme::generate(&mut rng).try_to_sk().unwrap(); - let srkp_arr = staking_reward_keypair.try_to_vec().unwrap(); let (protocol_keypair, _eth_hot_bridge_keypair, dkg_keypair) = wallet::defaults::validator_keys(); @@ -922,9 +1022,7 @@ pub mod tests { .unwrap(); println!("address: {}", address); - println!("staking_reward_address: {}", staking_reward_address); println!("keypair: {:?}", kp_arr); - println!("staking_reward_keypair: {:?}", srkp_arr); println!("protocol_keypair: {:?}", protocol_keypair); println!("dkg_keypair: {:?}", dkg_keypair.try_to_vec().unwrap()); println!( diff --git a/apps/src/lib/config/mod.rs b/apps/src/lib/config/mod.rs index b435433c76..e89aced251 100644 --- a/apps/src/lib/config/mod.rs +++ b/apps/src/lib/config/mod.rs @@ -21,17 +21,17 @@ use crate::facade::tendermint::Timeout; use crate::facade::tendermint_config::net::Address as TendermintAddress; /// Base directory contains global config and chain directories. -pub const DEFAULT_BASE_DIR: &str = ".anoma"; +pub const DEFAULT_BASE_DIR: &str = ".namada"; /// Default WASM dir. pub const DEFAULT_WASM_DIR: &str = "wasm"; /// The WASM checksums file contains the hashes of built WASMs. It is inside the /// WASM dir. pub const DEFAULT_WASM_CHECKSUMS_FILE: &str = "checksums.json"; -/// Chain-specific Anoma configuration. Nested in chain dirs. +/// Chain-specific Namada configuration. Nested in chain dirs. pub const FILENAME: &str = "config.toml"; /// Chain-specific Tendermint configuration. Nested in chain dirs. pub const TENDERMINT_DIR: &str = "tendermint"; -/// Chain-specific Anoma DB. Nested in chain dirs. +/// Chain-specific Namada DB. Nested in chain dirs. pub const DB_DIR: &str = "db"; #[derive(Clone, Debug, Serialize, Deserialize)] @@ -165,7 +165,7 @@ impl Ledger { IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 26661, ), - instrumentation_namespace: "anoman_tm".to_string(), + instrumentation_namespace: "namadan_tm".to_string(), }, ethereum_bridge: ethereum_bridge::ledger::Config::default(), } @@ -297,7 +297,7 @@ impl Config { .and_then(|c| c.merge(config::File::with_name(file_name))) .and_then(|c| { c.merge( - config::Environment::with_prefix("anoma").separator("__"), + config::Environment::with_prefix("namada").separator("__"), ) }) .map_err(Error::ReadError)?; diff --git a/apps/src/lib/logging.rs b/apps/src/lib/logging.rs index a71559a7f9..b60bab1f69 100644 --- a/apps/src/lib/logging.rs +++ b/apps/src/lib/logging.rs @@ -7,10 +7,10 @@ use tracing_log::LogTracer; use tracing_subscriber::filter::{Directive, EnvFilter}; use tracing_subscriber::fmt::Subscriber; -pub const ENV_KEY: &str = "ANOMA_LOG"; +pub const ENV_KEY: &str = "NAMADA_LOG"; // Env var to enable/disable color log -const COLOR_ENV_KEY: &str = "ANOMA_LOG_COLOR"; +const COLOR_ENV_KEY: &str = "NAMADA_LOG_COLOR"; pub fn init_from_env_or(default: impl Into) -> Result<()> { let filter = filter_from_env_or(default); diff --git a/apps/src/lib/mod.rs b/apps/src/lib/mod.rs index d8ab71236c..65d0472e9e 100644 --- a/apps/src/lib/mod.rs +++ b/apps/src/lib/mod.rs @@ -1,7 +1,7 @@ //! Shared code for the node, client etc. 
-#![doc(html_favicon_url = "https://dev.anoma.net/master/favicon.png")] -#![doc(html_logo_url = "https://dev.anoma.net/master/rustdoc-logo.png")] +#![doc(html_favicon_url = "https://dev.namada.net/master/favicon.png")] +#![doc(html_logo_url = "https://dev.namada.net/master/rustdoc-logo.png")] #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] diff --git a/apps/src/lib/node/ledger/mod.rs b/apps/src/lib/node/ledger/mod.rs index 99769d9254..f6eb4c5f5b 100644 --- a/apps/src/lib/node/ledger/mod.rs +++ b/apps/src/lib/node/ledger/mod.rs @@ -1,7 +1,6 @@ mod abortable; mod broadcaster; mod ethereum_node; -pub mod rpc; mod shell; mod shims; pub mod storage; @@ -32,6 +31,7 @@ use crate::config::{ethereum_bridge, TendermintMode}; use crate::facade::tendermint_proto::abci::CheckTxType; use crate::facade::tower_abci::{response, split, Server}; use crate::node::ledger::broadcaster::Broadcaster; +use crate::node::ledger::config::genesis; use crate::node::ledger::ethereum_node::oracle; use crate::node::ledger::shell::{Error, MempoolTxType, Shell}; use crate::node::ledger::shims::abcipp_shim::AbcippShim; @@ -39,10 +39,10 @@ use crate::node::ledger::shims::abcipp_shim_types::shim::{Request, Response}; use crate::{config, wasm_loader}; /// Env. var to set a number of Tokio RT worker threads -const ENV_VAR_TOKIO_THREADS: &str = "ANOMA_TOKIO_THREADS"; +const ENV_VAR_TOKIO_THREADS: &str = "NAMADA_TOKIO_THREADS"; /// Env. var to set a number of Rayon global worker threads -const ENV_VAR_RAYON_THREADS: &str = "ANOMA_RAYON_THREADS"; +const ENV_VAR_RAYON_THREADS: &str = "NAMADA_RAYON_THREADS"; /// The maximum number of Ethereum events the channel between /// the oracle and the shell can hold. @@ -146,7 +146,7 @@ impl Shell { CheckTxType::New => MempoolTxType::NewTransaction, CheckTxType::Recheck => MempoolTxType::RecheckTransaction, }; - Ok(Response::CheckTx(self.mempool_validate(&*tx.tx, r#type))) + Ok(Response::CheckTx(self.mempool_validate(&tx.tx, r#type))) } Request::ListSnapshots(_) => { Ok(Response::ListSnapshots(Default::default())) @@ -287,7 +287,7 @@ async fn run_aux(config: config::Ledger, wasm_dir: PathBuf) { } } - tracing::info!("Anoma ledger node has shut down."); + tracing::info!("Namada ledger node has shut down."); let res = task::block_in_place(move || shell_handler.join()); @@ -456,6 +456,10 @@ fn start_abci_broadcaster_shell( // Construct our ABCI application. let tendermint_mode = config.tendermint.tendermint_mode.clone(); let ledger_address = config.shell.ledger_address; + #[cfg(not(feature = "dev"))] + let genesis = genesis::genesis(&config.shell.base_dir, &config.chain_id); + #[cfg(feature = "dev")] + let genesis = genesis::genesis(); let (shell, abci_service) = AbcippShim::new( config, wasm_dir, @@ -464,6 +468,7 @@ fn start_abci_broadcaster_shell( &db_cache, vp_wasm_compilation_cache, tx_wasm_compilation_cache, + genesis.native_token, ); // Channel for signalling shut down to ABCI server @@ -486,7 +491,7 @@ fn start_abci_broadcaster_shell( let thread_builder = thread::Builder::new().name("ledger-shell".into()); let shell_handler = thread_builder .spawn(move || { - tracing::info!("Anoma ledger node started."); + tracing::info!("Namada ledger node started."); match tendermint_mode { TendermintMode::Validator => { tracing::info!("This node is a validator"); diff --git a/apps/src/lib/node/ledger/rpc.rs b/apps/src/lib/node/ledger/rpc.rs deleted file mode 100644 index b7a1ebcfad..0000000000 --- a/apps/src/lib/node/ledger/rpc.rs +++ /dev/null @@ -1,123 +0,0 @@ -//! 
RPC endpoint is used for ledger state queries - -use std::fmt::Display; -use std::str::FromStr; - -use masp_primitives::asset_type::AssetType; -use namada::types::address::Address; -use namada::types::storage; -use namada::types::token::CONVERSION_KEY_PREFIX; -use thiserror::Error; - -use crate::facade::tendermint::abci::Path as AbciPath; - -/// RPC query path -#[derive(Debug, Clone)] -pub enum Path { - /// Dry run a transaction - DryRunTx, - /// Epoch of the last committed block - Epoch, - /// Results of all committed blocks - Results, - /// Read a storage value with exact storage key - Value(storage::Key), - /// Read a range of storage values with a matching key prefix - Prefix(storage::Key), - /// Check if the given storage key exists - HasKey(storage::Key), - /// Conversion associated with given asset type - Conversion(AssetType), -} - -#[derive(Debug, Clone)] -pub struct BalanceQuery { - #[allow(dead_code)] - owner: Option
<Address>,
-    #[allow(dead_code)]
-    token: Option<Address>
, -} - -const DRY_RUN_TX_PATH: &str = "dry_run_tx"; -const EPOCH_PATH: &str = "epoch"; -const RESULTS_PATH: &str = "results"; -const VALUE_PREFIX: &str = "value"; -const PREFIX_PREFIX: &str = "prefix"; -const HAS_KEY_PREFIX: &str = "has_key"; - -impl Display for Path { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Path::DryRunTx => write!(f, "{}", DRY_RUN_TX_PATH), - Path::Epoch => write!(f, "{}", EPOCH_PATH), - Path::Results => write!(f, "{}", RESULTS_PATH), - Path::Value(storage_key) => { - write!(f, "{}/{}", VALUE_PREFIX, storage_key) - } - Path::Prefix(storage_key) => { - write!(f, "{}/{}", PREFIX_PREFIX, storage_key) - } - Path::HasKey(storage_key) => { - write!(f, "{}/{}", HAS_KEY_PREFIX, storage_key) - } - Path::Conversion(asset_type) => { - write!(f, "{}/{}", CONVERSION_KEY_PREFIX, asset_type) - } - } - } -} - -impl FromStr for Path { - type Err = PathParseError; - - fn from_str(s: &str) -> Result { - match s { - DRY_RUN_TX_PATH => Ok(Self::DryRunTx), - EPOCH_PATH => Ok(Self::Epoch), - RESULTS_PATH => Ok(Self::Results), - _ => match s.split_once('/') { - Some((VALUE_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::Value(key)) - } - Some((PREFIX_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::Prefix(key)) - } - Some((HAS_KEY_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::HasKey(key)) - } - Some((CONVERSION_KEY_PREFIX, asset_type)) => { - let key = AssetType::from_str(asset_type) - .map_err(PathParseError::InvalidAssetType)?; - Ok(Self::Conversion(key)) - } - _ => Err(PathParseError::InvalidPath(s.to_string())), - }, - } - } -} - -impl From for AbciPath { - fn from(path: Path) -> Self { - let path = path.to_string(); - // TODO: update in tendermint-rs to allow to construct this from owned - // string. It's what `from_str` does anyway - AbciPath::from_str(&path).unwrap() - } -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum PathParseError { - #[error("Unrecognized query path: {0}")] - InvalidPath(String), - #[error("Invalid storage key: {0}")] - InvalidStorageKey(storage::Error), - #[error("Unrecognized asset type: {0}")] - InvalidAssetType(std::io::Error), -} diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index e26506566c..df76865d88 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -1,5 +1,6 @@ //! 
Implementation of the `FinalizeBlock` ABCI++ method for the Shell +use namada::ledger::pos::types::into_tm_voting_power; use namada::ledger::protocol; use namada::types::storage::{BlockHash, BlockResults, Header}; use namada::types::transaction::protocol::ProtocolTxType; @@ -318,21 +319,16 @@ where .begin_block(hash, height) .expect("Beginning a block shouldn't fail"); + let header_time = header.time; self.storage .set_header(header) .expect("Setting a header shouldn't fail"); self.byzantine_validators = byzantine_validators; - let header = self - .storage - .header - .as_ref() - .expect("Header must have been set in prepare_proposal."); - let time = header.time; let new_epoch = self .storage - .update_epoch(height, time) + .update_epoch(height, header_time) .expect("Must be able to update epoch"); self.slash(); @@ -344,18 +340,19 @@ where fn update_epoch(&self, response: &mut shim::response::FinalizeBlock) { // Apply validator set update let (current_epoch, _gas) = self.storage.get_current_epoch(); + let pos_params = self.storage.read_pos_params(); // TODO ABCI validator updates on block H affects the validator set // on block H+2, do we need to update a block earlier? self.storage.validator_set_update(current_epoch, |update| { let (consensus_key, power) = match update { ValidatorSetUpdate::Active(ActiveValidator { consensus_key, - voting_power, + bonded_stake, }) => { - let power: u64 = voting_power.into(); - let power: i64 = power - .try_into() - .expect("unexpected validator's voting power"); + let power: i64 = into_tm_voting_power( + pos_params.tm_votes_per_token, + bonded_stake, + ); (consensus_key, power) } ValidatorSetUpdate::Deactivated(consensus_key) => { @@ -412,7 +409,7 @@ mod test_finalize_block { let wrapper = WrapperTx::new( Fee { amount: i.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -483,7 +480,7 @@ mod test_finalize_block { let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -535,7 +532,7 @@ mod test_finalize_block { let wrapper = WrapperTx { fee: Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, pk: keypair.ref_to(), epoch: Epoch(0), @@ -601,7 +598,7 @@ mod test_finalize_block { let wrapper_tx = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -632,7 +629,7 @@ mod test_finalize_block { let wrapper_tx = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), diff --git a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index f6a064c9c4..f4771f40fe 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -1,15 +1,15 @@ +use namada::core::ledger::slash_fund::ADDRESS as slash_fund_address; use namada::ledger::events::EventType; -use namada::ledger::governance::storage as gov_storage; -use namada::ledger::governance::utils::{ +use namada::ledger::governance::{ + storage as gov_storage, ADDRESS as gov_address, +}; +use namada::ledger::native_vp::governance::utils::{ compute_tally, get_proposal_votes, ProposalEvent, }; -use namada::ledger::governance::vp::ADDRESS as gov_address; use namada::ledger::protocol; -use namada::ledger::slash_fund::ADDRESS as slash_fund_address; -use namada::ledger::storage::traits::StorageHasher; use 
namada::ledger::storage::types::encode; -use namada::ledger::storage::{DBIter, DB}; -use namada::types::address::{nam, Address}; +use namada::ledger::storage::{DBIter, StorageHasher, DB}; +use namada::types::address::Address; use namada::types::governance::TallyResult; use namada::types::storage::Epoch; use namada::types::token; @@ -51,11 +51,12 @@ where })?; let votes = get_proposal_votes(&shell.storage, proposal_end_epoch, id); - let tally_result = - compute_tally(&shell.storage, proposal_end_epoch, votes); + let is_accepted = votes.and_then(|votes| { + compute_tally(&shell.storage, proposal_end_epoch, votes) + }); - let transfer_address = match tally_result { - TallyResult::Passed => { + let transfer_address = match is_accepted { + Ok(true) => { let proposal_author_key = gov_storage::get_author_key(id); let proposal_author = shell .read_storage_key::
(&proposal_author_key) @@ -163,7 +164,7 @@ where } } } - TallyResult::Rejected | TallyResult::Unknown => { + Ok(false) => { let proposal_event: Event = ProposalEvent::new( EventType::Proposal.to_string(), TallyResult::Rejected, @@ -175,14 +176,35 @@ where response.events.push(proposal_event); proposals_result.rejected.push(id); + slash_fund_address + } + Err(err) => { + tracing::error!( + "Unexpectedly failed to tally proposal ID {id} with error \ + {err}" + ); + let proposal_event: Event = ProposalEvent::new( + EventType::Proposal.to_string(), + TallyResult::Failed, + id, + false, + false, + ) + .into(); + response.events.push(proposal_event); + slash_fund_address } }; + let native_token = shell.storage.native_token.clone(); // transfer proposal locked funds - shell - .storage - .transfer(&nam(), funds, &gov_address, &transfer_address); + shell.storage.transfer( + &native_token, + funds, + &gov_address, + &transfer_address, + ); } Ok(proposals_result) diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs index 6bc119a6e0..1819d48525 100644 --- a/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/apps/src/lib/node/ledger/shell/init_chain.rs @@ -2,7 +2,8 @@ use std::collections::HashMap; use std::hash::Hash; -use namada::ledger::pos::PosParams; +use namada::ledger::parameters::Parameters; +use namada::ledger::pos::{into_tm_voting_power, PosParams}; use namada::ledger::storage::traits::StorageHasher; use namada::ledger::storage::{DBIter, DB}; use namada::ledger::{ibc, pos}; @@ -69,7 +70,55 @@ where .expect("genesis time should be a valid timestamp") .into(); - genesis.parameters.init_storage(&mut self.storage); + // Initialize protocol parameters + let genesis::Parameters { + epoch_duration, + max_expected_time_per_block, + vp_whitelist, + tx_whitelist, + implicit_vp_code_path, + implicit_vp_sha256, + epochs_per_year, + pos_gain_p, + pos_gain_d, + staked_ratio, + pos_inflation_amount, + } = genesis.parameters; + // borrow necessary for release build, annoys clippy on dev build + #[allow(clippy::needless_borrow)] + let implicit_vp = + wasm_loader::read_wasm(&self.wasm_dir, &implicit_vp_code_path) + .map_err(Error::ReadingWasm)?; + // In dev, we don't check the hash + #[cfg(feature = "dev")] + let _ = implicit_vp_sha256; + #[cfg(not(feature = "dev"))] + { + let mut hasher = Sha256::new(); + hasher.update(&implicit_vp); + let vp_code_hash = hasher.finalize(); + assert_eq!( + vp_code_hash.as_slice(), + &implicit_vp_sha256, + "Invalid implicit account's VP sha256 hash for {}", + implicit_vp_code_path + ); + } + let parameters = Parameters { + epoch_duration, + max_expected_time_per_block, + vp_whitelist, + tx_whitelist, + implicit_vp, + epochs_per_year, + pos_gain_p, + pos_gain_d, + staked_ratio, + pos_inflation_amount, + }; + parameters.init_storage(&mut self.storage); + + // Initialize governance parameters genesis.gov_params.init_storage(&mut self.storage); // configure the Ethereum bridge if the configuration is set. 
if let Some(config) = genesis.ethereum_bridge_params { @@ -78,11 +127,7 @@ where // Depends on parameters being initialized self.storage - .init_genesis_epoch( - initial_height, - genesis_time, - &genesis.parameters, - ) + .init_genesis_epoch(initial_height, genesis_time, ¶meters) .expect("Initializing genesis epoch must not fail"); // Loaded VP code cache to avoid loading the same files multiple times @@ -289,7 +334,7 @@ where // Account balance (tokens no staked in PoS) self.storage .write( - &token::balance_key(&address::nam(), addr), + &token::balance_key(&self.storage.native_token, addr), validator .non_staked_balance .try_to_vec() @@ -344,10 +389,10 @@ where sum: Some(key_to_tendermint(&consensus_key).unwrap()), }; abci_validator.pub_key = Some(pub_key); - let power: u64 = validator.pos_data.voting_power(pos_params).into(); - abci_validator.power = power - .try_into() - .expect("unexpected validator's voting power"); + abci_validator.power = into_tm_voting_power( + pos_params.tm_votes_per_token, + validator.pos_data.tokens, + ); response.validators.push(abci_validator); } response diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 8768f5d7e4..dc4e25e10d 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -1,10 +1,10 @@ -//! The ledger shell connects the ABCI++ interface with the Anoma ledger app. +//! The ledger shell connects the ABCI++ interface with the Namada ledger app. //! //! Any changes applied before [`Shell::finalize_block`] might have to be //! reverted, so any changes applied in the methods [`Shell::prepare_proposal`] //! and [`Shell::process_proposal`] must be also reverted //! (unless we can simply overwrite them in the next block). -//! More info in . +//! More info in . 
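Both `finalize_block` and `init_chain` above now derive Tendermint voting power from bonded stake with `into_tm_voting_power(tm_votes_per_token, stake)` instead of carrying `VotingPower(Delta)` values through PoS. A rough sketch of what such a conversion does, assuming `rust_decimal`; the real helper is `namada::ledger::pos::types::into_tm_voting_power` and its exact signature and rounding may differ:

```rust
use rust_decimal::prelude::ToPrimitive;
use rust_decimal::Decimal;

// Scale the whole-token bonded stake by the votes-per-token parameter and
// truncate to the i64 that ABCI validator updates expect.
fn into_tm_voting_power_sketch(
    tm_votes_per_token: Decimal,
    bonded_stake: u64,
) -> i64 {
    (tm_votes_per_token * Decimal::from(bonded_stake))
        .floor()
        .to_i64()
        .expect("voting power must fit in i64")
}
```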
mod finalize_block; mod governance; mod init_chain; @@ -34,8 +34,7 @@ use namada::ledger::storage::write_log::WriteLog; use namada::ledger::storage::{DBIter, Storage, DB}; use namada::ledger::{pos, protocol}; use namada::proto::{self, Tx}; -use namada::types::address; -use namada::types::address::{masp, masp_tx_key}; +use namada::types::address::{masp, masp_tx_key, Address}; use namada::types::chain::ChainId; use namada::types::ethereum_events::EthereumEvent; use namada::types::key::*; @@ -154,7 +153,7 @@ pub type Result = std::result::Result; pub fn reset(config: config::Ledger) -> Result<()> { // simply nuke the DB files let db_path = &config.db_dir(); - match std::fs::remove_dir_all(&db_path) { + match std::fs::remove_dir_all(db_path) { Err(e) if e.kind() == std::io::ErrorKind::NotFound => (), res => res.map_err(Error::RemoveDB)?, }; @@ -355,6 +354,7 @@ where db_cache: Option<&D::Cache>, vp_wasm_compilation_cache: u64, tx_wasm_compilation_cache: u64, + native_token: Address, ) -> Self { let chain_id = config.chain_id; let db_path = config.shell.db_dir(&chain_id); @@ -364,10 +364,11 @@ where config.shell.storage_read_past_height_limit; if !Path::new(&base_dir).is_dir() { std::fs::create_dir(&base_dir) - .expect("Creating directory for Anoma should not fail"); + .expect("Creating directory for Namada should not fail"); } // load last state from storage - let mut storage = Storage::open(db_path, chain_id.clone(), db_cache); + let mut storage = + Storage::open(db_path, chain_id.clone(), native_token, db_cache); storage .load_last_state() .map_err(|e| { @@ -828,9 +829,8 @@ mod test_utils { #[cfg(not(feature = "abcipp"))] use namada::ledger::pos::namada_proof_of_stake::types::VotingPower; use namada::ledger::storage::mockdb::MockDB; - use namada::ledger::storage::traits::Sha256Hasher; - use namada::ledger::storage::{BlockStateWrite, MerkleTree}; - use namada::types::address::{nam, EstablishedAddressGen}; + use namada::ledger::storage::{BlockStateWrite, MerkleTree, Sha256Hasher}; + use namada::types::address::EstablishedAddressGen; use namada::types::chain::ChainId; use namada::types::hash::Hash; use namada::types::key::*; @@ -981,6 +981,7 @@ mod test_utils { None, vp_wasm_compilation_cache, tx_wasm_compilation_cache, + address::nam(), ); shell.storage.last_height = height.into(); (Self { shell }, receiver, eth_sender) @@ -1108,6 +1109,7 @@ mod test_utils { tokio::sync::mpsc::channel(ORACLE_CHANNEL_BUFFER_SIZE); let vp_wasm_compilation_cache = 50 * 1024 * 1024; // 50 kiB let tx_wasm_compilation_cache = 50 * 1024 * 1024; // 50 kiB + let native_token = address::nam(); let mut shell = Shell::::new( config::Ledger::new( base_dir.clone(), @@ -1120,6 +1122,7 @@ mod test_utils { None, vp_wasm_compilation_cache, tx_wasm_compilation_cache, + native_token.clone(), ); let keypair = gen_keypair(); // enqueue a wrapper tx @@ -1130,7 +1133,7 @@ mod test_utils { let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: native_token, }, &keypair, Epoch(0), @@ -1181,6 +1184,7 @@ mod test_utils { None, vp_wasm_compilation_cache, tx_wasm_compilation_cache, + address::nam(), ); assert!(!shell.storage.tx_queue.is_empty()); } diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index da4c1adf42..c4497c1fde 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -802,7 +802,7 @@ mod test_prepare_proposal { WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + 
token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -860,7 +860,7 @@ mod test_prepare_proposal { let wrapper_tx = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 4d8f0e61d5..5a79eb68fa 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -546,7 +546,6 @@ mod test_process_proposal { use assert_matches::assert_matches; use borsh::BorshDeserialize; use namada::proto::SignedTxData; - use namada::types::address::nam; use namada::types::ethereum_events::EthereumEvent; use namada::types::hash::Hash; use namada::types::key::*; @@ -889,7 +888,7 @@ mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -952,7 +951,7 @@ mod test_process_proposal { let mut wrapper = WrapperTx::new( Fee { amount: 100.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1052,7 +1051,7 @@ mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: 1.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1115,7 +1114,7 @@ mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: Amount::whole(1_000_100), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1180,7 +1179,7 @@ mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: i.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1275,7 +1274,7 @@ mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1343,7 +1342,7 @@ mod test_process_proposal { let mut wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1406,7 +1405,7 @@ mod test_process_proposal { let wrapper = WrapperTx { fee: Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, pk: keypair.ref_to(), epoch: Epoch(0), diff --git a/apps/src/lib/node/ledger/shell/queries.rs b/apps/src/lib/node/ledger/shell/queries.rs index 9217ffdc70..860142633a 100644 --- a/apps/src/lib/node/ledger/shell/queries.rs +++ b/apps/src/lib/node/ledger/shell/queries.rs @@ -1,5 +1,8 @@ //! 
Shell methods for querying state +use borsh::{BorshDeserialize, BorshSerialize}; +use ferveo_common::TendermintValidator; +use namada::ledger::pos::into_tm_voting_power; use namada::ledger::queries::{RequestCtx, ResponseQuery}; use super::*; @@ -172,4 +175,58 @@ mod test_queries { (2, 28, false), ], } + + /// Lookup data about a validator from their protocol signing key + #[allow(dead_code)] + pub fn get_validator_from_protocol_pk( + &self, + pk: &key::common::PublicKey, + ) -> Option> { + let pk_bytes = pk + .try_to_vec() + .expect("Serializing public key should not fail"); + // get the current epoch + let (current_epoch, _) = self.storage.get_current_epoch(); + // get the PoS params + let pos_params = self.storage.read_pos_params(); + // get the active validator set + self.storage + .read_validator_set() + .get(current_epoch) + .expect("Validators for the next epoch should be known") + .active + .iter() + .find(|validator| { + let pk_key = key::protocol_pk_key(&validator.address); + match self.storage.read(&pk_key) { + Ok((Some(bytes), _)) => bytes == pk_bytes, + _ => false, + } + }) + .map(|validator| { + let dkg_key = + key::dkg_session_keys::dkg_pk_key(&validator.address); + let bytes = self + .storage + .read(&dkg_key) + .expect("Validator should have public dkg key") + .0 + .expect("Validator should have public dkg key"); + let dkg_publickey = + &::deserialize( + &mut bytes.as_ref(), + ) + .expect( + "DKG public key in storage should be deserializable", + ); + TendermintValidator { + power: into_tm_voting_power( + pos_params.tm_votes_per_token, + validator.bonded_stake, + ) as u64, + address: validator.address.to_string(), + public_key: dkg_publickey.into(), + } + }) + } } diff --git a/apps/src/lib/node/ledger/shims/abcipp_shim.rs b/apps/src/lib/node/ledger/shims/abcipp_shim.rs index b3e143c2ca..7ee55b608b 100644 --- a/apps/src/lib/node/ledger/shims/abcipp_shim.rs +++ b/apps/src/lib/node/ledger/shims/abcipp_shim.rs @@ -5,6 +5,7 @@ use std::pin::Pin; use std::task::{Context, Poll}; use futures::future::FutureExt; +use namada::types::address::Address; use namada::types::ethereum_events::EthereumEvent; #[cfg(not(feature = "abcipp"))] use namada::types::hash::Hash; @@ -52,6 +53,7 @@ impl AbcippShim { db_cache: &rocksdb::Cache, vp_wasm_compilation_cache: u64, tx_wasm_compilation_cache: u64, + native_token: Address, ) -> (Self, AbciService) { // We can use an unbounded channel here, because tower-abci limits the // the number of requests that can come in @@ -66,6 +68,7 @@ impl AbcippShim { Some(db_cache), vp_wasm_compilation_cache, tx_wasm_compilation_cache, + native_token, ), #[cfg(not(feature = "abcipp"))] begin_block_request: None, diff --git a/apps/src/lib/node/ledger/storage/mod.rs b/apps/src/lib/node/ledger/storage/mod.rs index 0f1e42d9ce..4a275a15bf 100644 --- a/apps/src/lib/node/ledger/storage/mod.rs +++ b/apps/src/lib/node/ledger/storage/mod.rs @@ -46,12 +46,13 @@ impl fmt::Debug for PersistentStorageHasher { } fn new_blake2b() -> Blake2b { - Blake2bBuilder::new(32).personal(b"anoma storage").build() + Blake2bBuilder::new(32).personal(b"namada storage").build() } #[cfg(test)] mod tests { use namada::ledger::storage::types; + use namada::types::address; use namada::types::chain::ChainId; use namada::types::storage::{BlockHash, BlockHeight, Key}; use proptest::collection::vec; @@ -65,8 +66,12 @@ mod tests { fn test_crud_value() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = - PersistentStorage::open(db_path.path(), 
ChainId::default(), None); + let mut storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + ); let key = Key::parse("key").expect("cannot parse the key string"); let value: u64 = 1; let value_bytes = types::encode(&value); @@ -89,7 +94,7 @@ mod tests { assert_eq!(gas, key.len() as u64); let (result, gas) = storage.read(&key).expect("read failed"); let read_value: u64 = - types::decode(&result.expect("value doesn't exist")) + types::decode(result.expect("value doesn't exist")) .expect("decoding failed"); assert_eq!(read_value, value); assert_eq!(gas, key.len() as u64 + value_bytes_len as u64); @@ -108,8 +113,12 @@ mod tests { fn test_commit_block() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = - PersistentStorage::open(db_path.path(), ChainId::default(), None); + let mut storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + ); storage .begin_block(BlockHash::default(), BlockHeight(100)) .expect("begin_block failed"); @@ -130,8 +139,12 @@ mod tests { drop(storage); // load the last state - let mut storage = - PersistentStorage::open(db_path.path(), ChainId::default(), None); + let mut storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + ); storage .load_last_state() .expect("loading the last state failed"); @@ -149,8 +162,12 @@ mod tests { fn test_iter() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = - PersistentStorage::open(db_path.path(), ChainId::default(), None); + let mut storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + ); storage .begin_block(BlockHash::default(), BlockHeight(100)) .expect("begin_block failed"); @@ -189,8 +206,12 @@ mod tests { fn test_validity_predicate() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = - PersistentStorage::open(db_path.path(), ChainId::default(), None); + let mut storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + ); storage .begin_block(BlockHash::default(), BlockHeight(100)) .expect("begin_block failed"); @@ -243,8 +264,12 @@ mod tests { ) -> namada::ledger::storage::Result<()> { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = - PersistentStorage::open(db_path.path(), ChainId::default(), None); + let mut storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + ); // 1. For each `blocks_write_value`, write the current block height if // true or delete otherwise. diff --git a/apps/src/lib/node/ledger/storage/rocksdb.rs b/apps/src/lib/node/ledger/storage/rocksdb.rs index 60cafa37c2..e0c7581ea0 100644 --- a/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -37,9 +37,9 @@ use namada::ledger::storage::{ types, BlockStateRead, BlockStateWrite, DBIter, DBWriteBatch, Error, MerkleTreeStoresRead, Result, StoreType, DB, }; +use namada::types::internal::TxQueue; use namada::types::storage::{ - BlockHeight, BlockResults, Header, Key, KeySeg, TxQueue, - KEY_SEGMENT_SEPARATOR, + BlockHeight, BlockResults, Header, Key, KeySeg, KEY_SEGMENT_SEPARATOR, }; use namada::types::time::DateTimeUtc; use rocksdb::{ @@ -53,7 +53,7 @@ use crate::config::utils::num_of_threads; /// Env. 
var to set a number of Rayon global worker threads const ENV_VAR_ROCKSDB_COMPACTION_THREADS: &str = - "ANOMA_ROCKSDB_COMPACTION_THREADS"; + "NAMADA_ROCKSDB_COMPACTION_THREADS"; /// RocksDB handle #[derive(Debug)] @@ -758,7 +758,7 @@ impl DB for RocksDB { // Write the new key-val self.0 - .put(&subspace_key.to_string(), value) + .put(subspace_key.to_string(), value) .map_err(|e| Error::DBError(e.into_string()))?; Ok(size_diff) diff --git a/apps/src/lib/node/ledger/tendermint_node.rs b/apps/src/lib/node/ledger/tendermint_node.rs index dc932f0ada..f8b3820a13 100644 --- a/apps/src/lib/node/ledger/tendermint_node.rs +++ b/apps/src/lib/node/ledger/tendermint_node.rs @@ -26,7 +26,7 @@ use crate::facade::tendermint_config::{ }; /// Env. var to output Tendermint log to stdout -pub const ENV_VAR_TM_STDOUT: &str = "ANOMA_TM_STDOUT"; +pub const ENV_VAR_TM_STDOUT: &str = "NAMADA_TM_STDOUT"; #[cfg(feature = "abciplus")] pub const VERSION_REQUIREMENTS: &str = ">= 0.37.0-alpha.2, <0.38.0"; @@ -160,7 +160,7 @@ pub async fn run( // init and run a tendermint node child process let output = Command::new(&tendermint_path) - .args(&["init", &mode, "--home", &home_dir_string]) + .args(["init", &mode, "--home", &home_dir_string]) .output() .await .map_err(Error::Init)?; @@ -184,7 +184,7 @@ pub async fn run( update_tendermint_config(&home_dir, config).await?; let mut tendermint_node = Command::new(&tendermint_path); - tendermint_node.args(&[ + tendermint_node.args([ "start", "--proxy_app", &ledger_address, @@ -244,7 +244,7 @@ pub fn reset(tendermint_dir: impl AsRef<Path>) -> Result<()> { let tendermint_dir = tendermint_dir.as_ref().to_string_lossy(); // reset all the Tendermint state, if any std::process::Command::new(tendermint_path) - .args(&[ + .args([ "reset-state", "unsafe-all", // NOTE: log config: https://docs.tendermint.com/master/nodes/logging.html#configuring-log-levels diff --git a/apps/src/lib/wallet/alias.rs b/apps/src/lib/wallet/alias.rs index 25fcf03d11..13d977b852 100644 --- a/apps/src/lib/wallet/alias.rs +++ b/apps/src/lib/wallet/alias.rs @@ -97,11 +97,6 @@ pub fn validator_consensus_key(validator_alias: &Alias) -> Alias { format!("{validator_alias}-consensus-key").into() } -/// Default alias of a validator's staking rewards key -pub fn validator_rewards_key(validator_alias: &Alias) -> Alias { - format!("{validator_alias}-rewards-key").into() -} - /// Default alias of a validator's Tendermint node key pub fn validator_tendermint_node_key(validator_alias: &Alias) -> Alias { format!("{validator_alias}-tendermint-node-key").into() } diff --git a/apps/src/lib/wallet/defaults.rs b/apps/src/lib/wallet/defaults.rs index 9fbda9b76b..5cbdd11d9d 100644 --- a/apps/src/lib/wallet/defaults.rs +++ b/apps/src/lib/wallet/defaults.rs @@ -33,16 +33,14 @@ pub fn addresses_from_genesis(genesis: GenesisConfig) -> Vec<(Alias, Address)> { }); addresses.extend(validator_addresses); // Genesis tokens - if let Some(accounts) = genesis.token { - let token_addresses = accounts.into_iter().map(|(alias, token)| { - // The address must be set in the genesis config file - ( - alias.into(), - Address::decode(token.address.unwrap()).unwrap(), - ) - }); - addresses.extend(token_addresses); - } + let token_addresses = genesis.token.into_iter().map(|(alias, token)| { - // The address must be set in the genesis config file + ( + alias.into(), + Address::decode(token.address.unwrap()).unwrap(), + ) + }); + addresses.extend(token_addresses); // Genesis established accounts if let Some(accounts) = genesis.established { let est_addresses = 
accounts.into_iter().map(|(alias, established)| { @@ -127,7 +125,7 @@ mod dev { let mut addresses: Vec<(Alias, Address)> = vec![ ("pos".into(), pos::ADDRESS), ("pos_slash_pool".into(), pos::SLASH_POOL_ADDRESS), - ("governance".into(), governance::vp::ADDRESS), + ("governance".into(), governance::ADDRESS), ("validator".into(), validator_address()), ("albert".into(), albert_address()), ("bertha".into(), bertha_address()), diff --git a/apps/src/lib/wallet/mod.rs b/apps/src/lib/wallet/mod.rs index 37d8cc3a63..c6a806912f 100644 --- a/apps/src/lib/wallet/mod.rs +++ b/apps/src/lib/wallet/mod.rs @@ -558,10 +558,10 @@ pub fn read_and_confirm_pwd(unsafe_dont_encrypt: bool) -> Option<String> { /// Read the password for encryption/decryption from the file/env/stdin. Panics /// if all options are empty/invalid. pub fn read_password(prompt_msg: &str) -> String { - let pwd = match env::var("ANOMA_WALLET_PASSWORD_FILE") { + let pwd = match env::var("NAMADA_WALLET_PASSWORD_FILE") { Ok(path) => fs::read_to_string(path) .expect("Something went wrong reading the file"), - Err(_) => match env::var("ANOMA_WALLET_PASSWORD") { + Err(_) => match env::var("NAMADA_WALLET_PASSWORD") { Ok(password) => password, Err(_) => rpassword::read_password_from_tty(Some(prompt_msg)) .unwrap_or_default(), diff --git a/apps/src/lib/wallet/pre_genesis.rs b/apps/src/lib/wallet/pre_genesis.rs index 2b665d7546..9b7572d8b8 100644 --- a/apps/src/lib/wallet/pre_genesis.rs +++ b/apps/src/lib/wallet/pre_genesis.rs @@ -43,8 +43,6 @@ pub struct ValidatorWallet { pub eth_cold_key: common::SecretKey, /// Cryptographic keypair for eth hot key pub eth_hot_key: common::SecretKey, - /// Cryptographic keypair for rewards key - pub rewards_key: common::SecretKey, /// Cryptographic keypair for Tendermint node key pub tendermint_node_key: common::SecretKey, } @@ -59,8 +57,6 @@ pub struct ValidatorStore { pub consensus_key: wallet::StoredKeypair, /// Cryptographic keypair for eth cold key pub eth_cold_key: wallet::StoredKeypair, - /// Cryptographic keypair for rewards key - pub rewards_key: wallet::StoredKeypair, /// Cryptographic keypair for Tendermint node key pub tendermint_node_key: wallet::StoredKeypair, /// Special validator keys. Contains the ETH hot key. 
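For context on how these encrypted keys get unlocked, the password resolution order that `read_password` above implements after the env-var rename is: a password file pointed to by `NAMADA_WALLET_PASSWORD_FILE`, then a plain `NAMADA_WALLET_PASSWORD` variable, then an interactive prompt. A minimal standalone sketch of the same chain (the function name and the stubbed TTY prompt are illustrative, not part of the codebase):

```rust
use std::{env, fs};

/// Illustrative re-statement of the lookup chain in `read_password`:
/// password file env var first, then inline env var, then a TTY prompt.
fn resolve_wallet_password(prompt_msg: &str) -> String {
    match env::var("NAMADA_WALLET_PASSWORD_FILE") {
        Ok(path) => fs::read_to_string(path)
            .expect("Something went wrong reading the file"),
        Err(_) => match env::var("NAMADA_WALLET_PASSWORD") {
            Ok(password) => password,
            // The real code calls `rpassword::read_password_from_tty` here.
            Err(_) => prompt_on_tty(prompt_msg),
        },
    }
}

fn prompt_on_tty(prompt_msg: &str) -> String {
    // Stub so the sketch compiles without a terminal dependency.
    eprintln!("{prompt_msg}");
    String::new()
}
```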
@@ -112,7 +108,6 @@ impl ValidatorWallet { let password = if store.account_key.is_encrypted() || store.consensus_key.is_encrypted() - || store.rewards_key.is_encrypted() || store.account_key.is_encrypted() { Some(wallet::read_password("Enter decryption password: ")) @@ -128,9 +123,6 @@ impl ValidatorWallet { store.eth_cold_key.get(true, password.clone())?; let eth_hot_key = store.validator_keys.eth_bridge_keypair.clone(); - - let rewards_key = - store.rewards_key.get(true, password.clone())?; let tendermint_node_key = store.tendermint_node_key.get(true, password)?; @@ -140,7 +132,6 @@ impl ValidatorWallet { consensus_key, eth_cold_key, eth_hot_key, - rewards_key, tendermint_node_key, }) } @@ -163,8 +154,6 @@ impl ValidatorWallet { ); let (eth_cold_key, eth_cold_sk) = gen_key_to_store(SchemeType::Secp256k1, &password); - - let (rewards_key, rewards_sk) = gen_key_to_store(scheme, &password); let (tendermint_node_key, tendermint_node_sk) = gen_key_to_store( // Note that TM only allows ed25519 for node IDs SchemeType::Ed25519, @@ -177,7 +166,6 @@ impl ValidatorWallet { account_key, consensus_key, eth_cold_key, - rewards_key, tendermint_node_key, validator_keys, }; @@ -187,7 +175,6 @@ impl ValidatorWallet { consensus_key: consensus_sk, eth_cold_key: eth_cold_sk, eth_hot_key, - rewards_key: rewards_sk, tendermint_node_key: tendermint_node_sk, } } diff --git a/apps/src/lib/wallet/store.rs b/apps/src/lib/wallet/store.rs index d7af231d86..6606486f83 100644 --- a/apps/src/lib/wallet/store.rs +++ b/apps/src/lib/wallet/store.rs @@ -64,7 +64,7 @@ pub struct Store { payment_addrs: HashMap<Alias, PaymentAddress>, /// Cryptographic keypairs keys: HashMap<Alias, StoredKeypair<common::SecretKey>>, - /// Anoma address book + /// Namada address book addresses: BiHashMap<Alias, Address>, /// Known mappings of public key hashes to their aliases in the `keys` /// field. Used for look-up by a public key. @@ -435,7 +435,7 @@ impl Store { if alias.is_empty() { println!( "Empty alias given, defaulting to {}.", - alias = Into::<String>::into(pkh.to_string()) + Into::<String>::into(pkh.to_string()) ); } // Addresses and keypairs can share aliases, so first remove any @@ -587,10 +587,7 @@ impl Store { address: Address, ) -> Option<Alias> { if alias.is_empty() { - println!( - "Empty alias given, defaulting to {}.", - alias = address.encode() - ); + println!("Empty alias given, defaulting to {}.", address.encode()); } // Addresses and keypairs can share aliases, so first remove any keys // sharing the same namesake before checking if alias has been used. 
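The `Store` fields above keep aliases in a bidirectional map so lookups work from either side (alias to address, and address back to alias). A toy model of that bookkeeping, assuming the `bimap` crate that provides `BiHashMap` (`String` stands in for the wallet's own `Alias` and `Address` types):

```rust
use bimap::BiHashMap;

fn main() {
    // Alias <-> address book, mirroring the `addresses` field above.
    let mut addresses: BiHashMap<String, String> = BiHashMap::new();
    addresses.insert("albert".to_string(), "address-of-albert".to_string());

    // Forward lookup: alias -> address.
    assert_eq!(
        addresses.get_by_left("albert").map(String::as_str),
        Some("address-of-albert")
    );
    // Reverse lookup: address -> alias.
    assert_eq!(
        addresses.get_by_right("address-of-albert").map(String::as_str),
        Some("albert")
    );
}
```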
@@ -645,7 +642,6 @@ impl Store { other: pre_genesis::ValidatorWallet, ) { let account_key_alias = alias::validator_key(&validator_alias); - let rewards_key_alias = alias::validator_rewards_key(&validator_alias); let consensus_key_alias = alias::validator_consensus_key(&validator_alias); let tendermint_node_key_alias = @@ -653,7 +649,6 @@ impl Store { let keys = [ (account_key_alias.clone(), other.store.account_key), - (rewards_key_alias.clone(), other.store.rewards_key), (consensus_key_alias.clone(), other.store.consensus_key), ( tendermint_node_key_alias.clone(), @@ -663,12 +658,10 @@ impl Store { self.keys.extend(keys.into_iter()); let account_pk = other.account_key.ref_to(); - let rewards_pk = other.rewards_key.ref_to(); let consensus_pk = other.consensus_key.ref_to(); let tendermint_node_pk = other.tendermint_node_key.ref_to(); let addresses = [ (account_key_alias.clone(), (&account_pk).into()), - (rewards_key_alias.clone(), (&rewards_pk).into()), (consensus_key_alias.clone(), (&consensus_pk).into()), ( tendermint_node_key_alias.clone(), @@ -679,7 +672,6 @@ impl Store { let pkhs = [ ((&account_pk).into(), account_key_alias), - ((&rewards_pk).into(), rewards_key_alias), ((&consensus_pk).into(), consensus_key_alias), ((&tendermint_node_pk).into(), tendermint_node_key_alias), ]; diff --git a/apps/src/lib/wasm_loader/mod.rs b/apps/src/lib/wasm_loader/mod.rs index e82bb92452..9a075fbcf8 100644 --- a/apps/src/lib/wasm_loader/mod.rs +++ b/apps/src/lib/wasm_loader/mod.rs @@ -112,7 +112,7 @@ pub async fn pre_fetch_wasm(wasm_directory: impl AsRef<Path>) { // If the checksums file doesn't exist ... if tokio::fs::canonicalize(&checksums_path).await.is_err() { tokio::fs::create_dir_all(&wasm_directory).await.unwrap(); - // ... 
try to copy checksums from the Namada WASM root dir if tokio::fs::copy( std::env::current_dir() .unwrap() @@ -161,7 +161,7 @@ ); #[cfg(feature = "dev")] { - // try to copy built file from the Anoma WASM root dir + // try to copy built file from the Namada WASM root dir if tokio::fs::copy( std::env::current_dir() .unwrap() @@ -204,7 +204,7 @@ pub async fn pre_fetch_wasm(wasm_directory: impl AsRef<Path>) { std::io::ErrorKind::NotFound => { #[cfg(feature = "dev")] { - // try to copy built file from the Anoma WASM root + // try to copy built file from the Namada WASM root // dir if tokio::fs::copy( std::env::current_dir() diff --git a/core/Cargo.toml b/core/Cargo.toml new file mode 100644 index 0000000000..5749522136 --- /dev/null +++ b/core/Cargo.toml @@ -0,0 +1,112 @@ +[package] +authors = ["Heliax AG <hello@heliax.dev>"] +edition = "2021" +license = "GPL-3.0" +name = "namada_core" +resolver = "2" +version = "0.11.0" + +[features] +default = [] +ferveo-tpke = [ + "ferveo", + "tpke", + "ark-ec", + "rand_core", + "rand", +] +wasm-runtime = [ + "rayon", +] +# secp256k1 key signing and verification, disabled in WASM build by default as +# it bloats the build a lot +secp256k1-sign-verify = [ + "libsecp256k1/hmac", +] + +abcipp = [ + "ibc-proto-abcipp", + "ibc-abcipp", + "tendermint-abcipp", + "tendermint-proto-abcipp" ] +abciplus = [ + "ibc", + "ibc-proto", + "tendermint", + "tendermint-proto", +] + +ibc-mocks = [ + "ibc/mocks", ] +ibc-mocks-abcipp = [ + "ibc-abcipp/mocks", ] + +# for integration tests and test utilities +testing = [ + "rand", + "rand_core", + "proptest", ] + +[dependencies] +ark-bls12-381 = {version = "0.3"} +ark-ec = {version = "0.3", optional = true} +ark-serialize = {version = "0.3"} +# We switch off "blake2b" because it cannot be compiled to wasm +# branch = "bat/arse-merkle-tree" +arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "04ad1eeb28901b57a7599bbe433b3822965dabe8", default-features = false, features = ["std", "borsh"]} +bech32 = "0.8.0" +bellman = "0.11.2" +bit-vec = "0.6.3" +borsh = "0.9.0" +chrono = {version = "0.4.22", default-features = false, features = ["clock", "std"]} +data-encoding = "2.3.2" +derivative = "2.2.0" +ed25519-consensus = "1.2.0" +ferveo = {optional = true, git = "https://github.com/anoma/ferveo"} +ferveo-common = {git = "https://github.com/anoma/ferveo"} +tpke = {package = "group-threshold-cryptography", optional = true, git = "https://github.com/anoma/ferveo"} +# TODO using the same version of tendermint-rs as we do here. 
+ibc = {version = "0.14.0", default-features = false, optional = true} +ibc-proto = {version = "0.17.1", default-features = false, optional = true} +ibc-abcipp = {package = "ibc", git = "https://github.com/heliaxdev/ibc-rs", rev = "9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d", default-features = false, optional = true} +ibc-proto-abcipp = {package = "ibc-proto", git = "https://github.com/heliaxdev/ibc-rs", rev = "9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d", default-features = false, optional = true} +ics23 = "0.7.0" +itertools = "0.10.0" +libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9", default-features = false, features = ["std", "static-context"]} +masp_primitives = { git = "https://github.com/anoma/masp", rev = "bee40fc465f6afbd10558d12fe96eb1742eee45c" } +proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm", optional = true} +prost = "0.9.0" +prost-types = "0.9.0" +rand = {version = "0.8", optional = true} +rand_core = {version = "0.6", optional = true} +rayon = {version = "=1.5.3", optional = true} +rust_decimal = { version = "1.26.1", features = ["borsh"] } +rust_decimal_macros = "1.26.1" +serde = {version = "1.0.125", features = ["derive"]} +serde_json = "1.0.62" +sha2 = "0.9.3" +tendermint = {version = "0.23.6", optional = true} +tendermint-proto = {version = "0.23.6", optional = true} +tendermint-abcipp = {package = "tendermint", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} +tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} +thiserror = "1.0.30" +tracing = "0.1.30" +zeroize = {version = "1.5.5", features = ["zeroize_derive"]} + +[dev-dependencies] +assert_matches = "1.5.0" +libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9"} +pretty_assertions = "0.7.2" +# A fork with state machine testing +proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm"} +rand = {version = "0.8"} +rand_core = {version = "0.6"} +test-log = {version = "0.2.7", default-features = false, features = ["trace"]} +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[build-dependencies] +tonic-build = "0.6.0" diff --git a/core/build.rs b/core/build.rs new file mode 100644 index 0000000000..c5b251c519 --- /dev/null +++ b/core/build.rs @@ -0,0 +1,50 @@ +use std::fs::read_to_string; +use std::process::Command; +use std::{env, str}; + +/// Path to the .proto source files, relative to `core` directory +const PROTO_SRC: &str = "./proto"; + +/// The version should match the one we use in the `Makefile` +const RUSTFMT_TOOLCHAIN_SRC: &str = "../rust-nightly-version"; + +fn main() { + if let Ok(val) = env::var("COMPILE_PROTO") { + if val.to_ascii_lowercase() == "false" { + // Skip compiling proto files + return; + } + } + + // Tell Cargo that if the given file changes, to rerun this build script. + println!("cargo:rerun-if-changed={}", PROTO_SRC); + + let mut use_rustfmt = false; + + // The version should match the one we use in the `Makefile` + if let Ok(rustfmt_toolchain) = read_to_string(RUSTFMT_TOOLCHAIN_SRC) { + // Try to find the path to rustfmt. 
+ if let Ok(output) = Command::new("rustup") + .args(["which", "rustfmt", "--toolchain", rustfmt_toolchain.trim()]) + .output() + { + if let Ok(rustfmt) = str::from_utf8(&output.stdout) { + // Set the command to be used by tonic_build below to format the + // generated files + let rustfmt = rustfmt.trim(); + if !rustfmt.is_empty() { + println!("using rustfmt from path \"{}\"", rustfmt); + env::set_var("RUSTFMT", rustfmt); + use_rustfmt = true + } + } + } + } + + tonic_build::configure() + .out_dir("src/proto/generated") + .format(use_rustfmt) + .protoc_arg("--experimental_allow_proto3_optional") + .compile(&[format!("{}/types.proto", PROTO_SRC)], &[PROTO_SRC]) + .unwrap(); +} diff --git a/shared/proto b/core/proto similarity index 100% rename from shared/proto rename to core/proto diff --git a/shared/src/bytes.rs b/core/src/bytes.rs similarity index 100% rename from shared/src/bytes.rs rename to core/src/bytes.rs diff --git a/shared/src/ledger/gas.rs b/core/src/ledger/gas.rs similarity index 99% rename from shared/src/ledger/gas.rs rename to core/src/ledger/gas.rs index c7da7b132c..99eb606b7b 100644 --- a/shared/src/ledger/gas.rs +++ b/core/src/ledger/gas.rs @@ -208,7 +208,7 @@ impl VpsGas { let parallel_gas = self.rest.iter().sum::() / PARALLEL_GAS_DIVIDER; self.max .unwrap_or_default() - .checked_add(parallel_gas as u64) + .checked_add(parallel_gas) .ok_or(Error::GasOverflow) } } diff --git a/core/src/ledger/governance/mod.rs b/core/src/ledger/governance/mod.rs new file mode 100644 index 0000000000..8e3fb977f3 --- /dev/null +++ b/core/src/ledger/governance/mod.rs @@ -0,0 +1,11 @@ +//! Governance library code + +use crate::types::address::{Address, InternalAddress}; + +/// governance parameters +pub mod parameters; +/// governance storage +pub mod storage; + +/// The governance internal address +pub const ADDRESS: Address = Address::Internal(InternalAddress::Governance); diff --git a/shared/src/ledger/governance/parameters.rs b/core/src/ledger/governance/parameters.rs similarity index 100% rename from shared/src/ledger/governance/parameters.rs rename to core/src/ledger/governance/parameters.rs diff --git a/shared/src/ledger/governance/storage.rs b/core/src/ledger/governance/storage.rs similarity index 84% rename from shared/src/ledger/governance/storage.rs rename to core/src/ledger/governance/storage.rs index 9d2f0a4e4a..fb4ecaf76b 100644 --- a/shared/src/ledger/governance/storage.rs +++ b/core/src/ledger/governance/storage.rs @@ -1,4 +1,4 @@ -use super::vp::ADDRESS; +use crate::ledger::governance::ADDRESS; use crate::types::address::Address; use crate::types::storage::{DbKeySeg, Key, KeySeg}; @@ -175,121 +175,74 @@ pub fn is_end_epoch_key(key: &Key) -> bool { /// Check if key is counter key pub fn is_counter_key(key: &Key) -> bool { - match &key.segments[..] { - [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(counter)] - if addr == &ADDRESS && counter == COUNTER_KEY => - { - true - } - _ => false, - } + matches!(&key.segments[..], [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(counter)] if addr == &ADDRESS && counter == COUNTER_KEY) } /// Check if key is a proposal fund parameter key pub fn is_min_proposal_fund_key(key: &Key) -> bool { - match &key.segments[..] 
{ - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(min_funds_param), - ] if addr == &ADDRESS && min_funds_param == MIN_PROPOSAL_FUND_KEY => { - true - } - _ => false, - } + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(min_funds_param), + ] if addr == &ADDRESS && min_funds_param == MIN_PROPOSAL_FUND_KEY) } /// Check if key is a proposal max content parameter key pub fn is_max_content_size_key(key: &Key) -> bool { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(max_content_size_param), - ] if addr == &ADDRESS - && max_content_size_param == MAX_PROPOSAL_CONTENT_SIZE_KEY => - { - true - } - _ => false, - } + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(max_content_size_param), + ] if addr == &ADDRESS + && max_content_size_param == MAX_PROPOSAL_CONTENT_SIZE_KEY) } /// Check if key is a max proposal size key pub fn is_max_proposal_code_size_key(key: &Key) -> bool { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(max_content_size_param), - ] if addr == &ADDRESS - && max_content_size_param == MAX_PROPOSAL_CONTENT_SIZE_KEY => - { - true - } - _ => false, - } + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(max_proposal_code_size_param), + ] if addr == &ADDRESS + && max_proposal_code_size_param == MAX_PROPOSAL_CODE_SIZE_KEY) } /// Check if key is a min proposal period param key pub fn is_min_proposal_period_key(key: &Key) -> bool { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(min_proposal_period_param), - ] if addr == &ADDRESS - && min_proposal_period_param == MIN_PROPOSAL_PERIOD_KEY => - { - true - } - _ => false, - } + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(min_proposal_period_param), + ] if addr == &ADDRESS + && min_proposal_period_param == MIN_PROPOSAL_PERIOD_KEY) } /// Check if key is a max proposal period param key pub fn is_max_proposal_period_key(key: &Key) -> bool { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(max_proposal_period_param), - ] if addr == &ADDRESS - && max_proposal_period_param == MAX_PROPOSAL_PERIOD_KEY => - { - true - } - _ => false, - } + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(max_proposal_period_param), + ] if addr == &ADDRESS + && max_proposal_period_param == MAX_PROPOSAL_PERIOD_KEY) } /// Check if key is a commit proposal key pub fn is_commit_proposal_key(key: &Key) -> bool { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::StringSeg(epoch_prefix), - DbKeySeg::StringSeg(_epoch), - DbKeySeg::StringSeg(_id), - ] if addr == &ADDRESS - && prefix == PROPOSAL_PREFIX - && epoch_prefix == PROPOSAL_COMMITTING_EPOCH => - { - true - } - _ => false, - } + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::StringSeg(epoch_prefix), + DbKeySeg::StringSeg(_epoch), + DbKeySeg::StringSeg(_id), + ] if addr == &ADDRESS + && prefix == PROPOSAL_PREFIX + && epoch_prefix == PROPOSAL_COMMITTING_EPOCH + ) }
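The rewrites above are purely mechanical: each `match` that returned `true`/`false` collapses into a single `matches!` with the same slice pattern and guard. A reduced, self-contained sketch of the pattern (with simplified stand-ins for `Key`, `DbKeySeg`, and the governance address):

```rust
enum DbKeySeg {
    AddressSeg(String),
    StringSeg(String),
}

struct Key {
    segments: Vec<DbKeySeg>,
}

const ADDRESS: &str = "governance";
const COUNTER_KEY: &str = "counter";

/// Same shape as the predicates above: destructure the segments and
/// guard on the address and the constant sub-key.
fn is_counter_key(key: &Key) -> bool {
    matches!(
        &key.segments[..],
        [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(counter)]
            if addr == ADDRESS && counter == COUNTER_KEY
    )
}

fn main() {
    let key = Key {
        segments: vec![
            DbKeySeg::AddressSeg("governance".into()),
            DbKeySeg::StringSeg("counter".into()),
        ],
    };
    assert!(is_counter_key(&key));
}
```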
/// Check if key is a min grace epoch key pub fn is_min_grace_epoch_key(key: &Key) -> bool { - match &key.segments[..] { - [ DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(min_grace_epoch_param), ] if addr == &ADDRESS - && min_grace_epoch_param == MIN_GRACE_EPOCH_KEY => - { - true - } - _ => false, - } + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(min_grace_epoch_param), + ] if addr == &ADDRESS + && min_grace_epoch_param == MIN_GRACE_EPOCH_KEY) } /// Check if key is a parameter key diff --git a/shared/src/ledger/ibc/handler.rs b/core/src/ledger/ibc/actions.rs similarity index 99% rename from shared/src/ledger/ibc/handler.rs rename to core/src/ledger/ibc/actions.rs index 0c76f086eb..4e09f269c2 100644 --- a/shared/src/ledger/ibc/handler.rs +++ b/core/src/ledger/ibc/actions.rs @@ -68,16 +68,16 @@ use crate::ibc::events::IbcEvent; #[cfg(any(feature = "ibc-mocks-abcipp", feature = "ibc-mocks"))] use crate::ibc::mock::client_state::{MockClientState, MockConsensusState}; use crate::ibc::timestamp::Timestamp; +use crate::ledger::ibc::data::{ + Error as IbcDataError, FungibleTokenPacketData, IbcMessage, PacketAck, + PacketReceipt, +}; use crate::ledger::ibc::storage; use crate::ledger::storage_api; use crate::tendermint::Time; use crate::tendermint_proto::{Error as ProtoError, Protobuf}; use crate::types::address::{Address, InternalAddress}; -use crate::types::ibc::data::{ - Error as IbcDataError, FungibleTokenPacketData, IbcMessage, PacketAck, - PacketReceipt, -}; -use crate::types::ibc::IbcEvent as AnomaIbcEvent; +use crate::types::ibc::IbcEvent as NamadaIbcEvent; use crate::types::storage::{BlockHeight, Key}; use crate::types::time::Rfc3339String; use crate::types::token::{self, Amount}; @@ -157,7 +157,7 @@ pub trait IbcActions { /// Emit an IBC event fn emit_ibc_event( &mut self, - event: AnomaIbcEvent, + event: NamadaIbcEvent, ) -> std::result::Result<(), Self::Error>; /// Transfer token @@ -939,7 +939,7 @@ pub trait IbcActions { if let Some(hash) = storage::token_hash_from_denom(&data.denom) .map_err(Error::IbcStorage)? { - let denom_key = storage::ibc_denom_key(&hash); + let denom_key = storage::ibc_denom_key(hash); let denom_bytes = self.read_ibc_data(&denom_key)?.ok_or_else(|| { Error::SendingToken(format!( @@ -1339,7 +1339,7 @@ pub fn channel_counterparty( ChanCounterparty::new(port_id, Some(channel_id)) } -/// Returns Anoma commitment prefix +/// Returns Namada commitment prefix pub fn commitment_prefix() -> CommitmentPrefix { CommitmentPrefix::try_from(COMMITMENT_PREFIX.to_vec()) .expect("the conversion shouldn't fail") diff --git a/shared/src/types/ibc/data.rs b/core/src/ledger/ibc/data.rs similarity index 100% rename from shared/src/types/ibc/data.rs rename to core/src/ledger/ibc/data.rs diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs new file mode 100644 index 0000000000..f98fb2e432 --- /dev/null +++ b/core/src/ledger/ibc/mod.rs @@ -0,0 +1,5 @@ +//! IBC library code + +pub mod actions; +pub mod data; +pub mod storage; diff --git a/shared/src/ledger/ibc/storage.rs b/core/src/ledger/ibc/storage.rs similarity index 100% rename from shared/src/ledger/ibc/storage.rs rename to core/src/ledger/ibc/storage.rs diff --git a/core/src/ledger/mod.rs b/core/src/ledger/mod.rs new file mode 100644 index 0000000000..83568c0da7 --- /dev/null +++ b/core/src/ledger/mod.rs @@ -0,0 +1,12 @@ +//! 
The ledger modules +pub mod gas; +pub mod governance; +#[cfg(any(feature = "abciplus", feature = "abcipp"))] +pub mod ibc; +pub mod parameters; +pub mod slash_fund; +pub mod storage; +pub mod storage_api; +pub mod tx_env; +pub mod vp_env; diff --git a/core/src/ledger/parameters/mod.rs b/core/src/ledger/parameters/mod.rs new file mode 100644 index 0000000000..cb84bd56e7 --- /dev/null +++ b/core/src/ledger/parameters/mod.rs @@ -0,0 +1,491 @@ +//! Protocol parameters +pub mod storage; + +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use rust_decimal::Decimal; +use thiserror::Error; + +use super::storage::types::{decode, encode}; +use super::storage::{types, Storage}; +use crate::ledger::storage::{self as ledger_storage}; +use crate::types::address::{Address, InternalAddress}; +use crate::types::storage::Key; +use crate::types::time::DurationSecs; + +const ADDRESS: Address = Address::Internal(InternalAddress::Parameters); + +/// Protocol parameters +#[derive( + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + BorshSerialize, + BorshDeserialize, + BorshSchema, +)] +pub struct Parameters { + /// Epoch duration (read only) + pub epoch_duration: EpochDuration, + /// Maximum expected time per block (read only) + pub max_expected_time_per_block: DurationSecs, + /// Whitelisted validity predicate hashes (read only) + pub vp_whitelist: Vec<String>, + /// Whitelisted tx hashes (read only) + pub tx_whitelist: Vec<String>, + /// Implicit accounts validity predicate WASM code + pub implicit_vp: Vec<u8>, + /// Expected number of epochs per year (read only) + pub epochs_per_year: u64, + /// PoS gain p (read only) + pub pos_gain_p: Decimal, + /// PoS gain d (read only) + pub pos_gain_d: Decimal, + /// PoS staked ratio (read + write for every epoch) + pub staked_ratio: Decimal, + /// PoS inflation amount from the last epoch (read + write for every epoch) + pub pos_inflation_amount: u64, +} + +/// Epoch duration. A new epoch begins as soon as both the `min_num_of_blocks` +/// and `min_duration` have passed since the beginning of the current epoch. +#[derive( + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + BorshSerialize, + BorshDeserialize, + BorshSchema, +)] +pub struct EpochDuration { + /// Minimum number of blocks in an epoch + pub min_num_of_blocks: u64, + /// Minimum duration of an epoch + pub min_duration: DurationSecs, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ReadError { + #[error("Storage error: {0}")] + StorageError(ledger_storage::Error), + #[error("Storage type error: {0}")] + StorageTypeError(types::Error), + #[error("Protocol parameters are missing, they must be always set")] + ParametersMissing, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum WriteError { + #[error("Storage error: {0}")] + StorageError(ledger_storage::Error), + #[error("Serialize error: {0}")] + SerializeError(String), +}
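To make the field types concrete, here is a hedged sketch of constructing a `Parameters` value by hand; the literal values are purely illustrative, and `DurationSecs` is assumed to be the seconds newtype from `types::time`:

```rust
use rust_decimal_macros::dec;

// Illustrative only: these are not suggested production values.
fn example_parameters() -> Parameters {
    Parameters {
        epoch_duration: EpochDuration {
            min_num_of_blocks: 4,
            min_duration: DurationSecs(3600),
        },
        max_expected_time_per_block: DurationSecs(30),
        vp_whitelist: vec![],
        tx_whitelist: vec![],
        implicit_vp: vec![],
        epochs_per_year: 365,
        pos_gain_p: dec!(0.1),
        pos_gain_d: dec!(0.1),
        staked_ratio: dec!(0.5),
        pos_inflation_amount: 0,
    }
}
```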
+impl Parameters { + /// Initialize parameters in storage in the genesis block. + pub fn init_storage<DB, H>(&self, storage: &mut Storage<DB, H>) + where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, + { + let Self { + epoch_duration, + max_expected_time_per_block, + vp_whitelist, + tx_whitelist, + implicit_vp, + epochs_per_year, + pos_gain_p, + pos_gain_d, + staked_ratio, + pos_inflation_amount, + } = self; + + // write epoch parameters + let epoch_key = storage::get_epoch_duration_storage_key(); + let epoch_value = encode(epoch_duration); + storage.write(&epoch_key, epoch_value).expect( + "Epoch parameters must be initialized in the genesis block", + ); + + // write vp whitelist parameter + let vp_whitelist_key = storage::get_vp_whitelist_storage_key(); + let vp_whitelist_value = encode(&vp_whitelist); + storage.write(&vp_whitelist_key, vp_whitelist_value).expect( + "Vp whitelist parameter must be initialized in the genesis block", + ); + + // write tx whitelist parameter + let tx_whitelist_key = storage::get_tx_whitelist_storage_key(); + let tx_whitelist_value = encode(&tx_whitelist); + storage.write(&tx_whitelist_key, tx_whitelist_value).expect( + "Tx whitelist parameter must be initialized in the genesis block", + ); + + // write max expected time per block parameter + let max_expected_time_per_block_key = + storage::get_max_expected_time_per_block_key(); + let max_expected_time_per_block_value = + encode(&max_expected_time_per_block); + storage + .write( + &max_expected_time_per_block_key, + max_expected_time_per_block_value, + ) + .expect( + "Max expected time per block parameter must be initialized in \ + the genesis block", + ); + + // write implicit vp parameter + let implicit_vp_key = storage::get_implicit_vp_key(); + storage.write(&implicit_vp_key, implicit_vp).expect( + "Implicit VP parameter must be initialized in the genesis block", + ); + + let epochs_per_year_key = storage::get_epochs_per_year_key(); + let epochs_per_year_value = encode(epochs_per_year); + storage + .write(&epochs_per_year_key, epochs_per_year_value) + .expect( + "Epochs per year parameter must be initialized in the genesis \ + block", + ); + + let pos_gain_p_key = storage::get_pos_gain_p_key(); + let pos_gain_p_value = encode(pos_gain_p); + storage.write(&pos_gain_p_key, pos_gain_p_value).expect( + "PoS P-gain parameter must be initialized in the genesis block", + ); + + let pos_gain_d_key = storage::get_pos_gain_d_key(); + let pos_gain_d_value = encode(pos_gain_d); + storage.write(&pos_gain_d_key, pos_gain_d_value).expect( + "PoS D-gain parameter must be initialized in the genesis block", + ); + + let staked_ratio_key = storage::get_staked_ratio_key(); + let staked_ratio_val = encode(staked_ratio); + storage.write(&staked_ratio_key, staked_ratio_val).expect( + "PoS staked ratio parameter must be initialized in the genesis \ + block", + ); + + let pos_inflation_key = storage::get_pos_inflation_amount_key(); + let pos_inflation_val = encode(pos_inflation_amount); + storage.write(&pos_inflation_key, pos_inflation_val).expect( + "PoS inflation rate parameter must be initialized in the genesis \ + block", + ); + } +}
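A hedged sketch of the genesis round trip this enables, reusing the test scaffolding from `apps/src/lib/node/ledger/storage/mod.rs` earlier in this diff (`TempDir`, `PersistentStorage`, `address::nam()`) together with the `example_parameters` sketch above:

```rust
#[test]
fn init_and_read_genesis_parameters() {
    let db_path =
        TempDir::new().expect("Unable to create a temporary DB directory");
    let mut storage = PersistentStorage::open(
        db_path.path(),
        ChainId::default(),
        address::nam(),
        None,
    );

    // Write every protocol parameter under its storage key.
    let params = example_parameters();
    params.init_storage(&mut storage);

    // `read` returns the whole `Parameters` struct plus the gas charged.
    let (read_params, _gas) = read(&storage).expect("parameters must exist");
    assert_eq!(read_params, params);
}
```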
+/// Update the max_expected_time_per_block parameter in storage. Returns the +/// gas cost. +pub fn update_max_expected_time_per_block_parameter<DB, H>( + storage: &mut Storage<DB, H>, + value: &DurationSecs, +) -> std::result::Result<u64, WriteError> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_max_expected_time_per_block_key(); + update(storage, value, key) +} + +/// Update the vp whitelist parameter in storage. Returns the gas +/// cost. +pub fn update_vp_whitelist_parameter<DB, H>( + storage: &mut Storage<DB, H>, + value: Vec<String>, +) -> std::result::Result<u64, WriteError> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_vp_whitelist_storage_key(); + update(storage, &value, key) +} + +/// Update the tx whitelist parameter in storage. Returns the gas +/// cost. +pub fn update_tx_whitelist_parameter<DB, H>( + storage: &mut Storage<DB, H>, + value: Vec<String>, +) -> std::result::Result<u64, WriteError> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_tx_whitelist_storage_key(); + update(storage, &value, key) +} + +/// Update the epoch parameter in storage. Returns the gas +/// cost. +pub fn update_epoch_parameter<DB, H>( + storage: &mut Storage<DB, H>, + value: &EpochDuration, +) -> std::result::Result<u64, WriteError> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_epoch_duration_storage_key(); + update(storage, value, key) +} + +/// Update the epochs_per_year parameter in storage. Returns the +/// gas cost. +pub fn update_epochs_per_year_parameter<DB, H>( + storage: &mut Storage<DB, H>, + value: &EpochDuration, +) -> std::result::Result<u64, WriteError> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_epochs_per_year_key(); + update(storage, value, key) +} + +/// Update the PoS P-gain parameter in storage. Returns the gas +/// cost. +pub fn update_pos_gain_p_parameter<DB, H>( + storage: &mut Storage<DB, H>, + value: &EpochDuration, +) -> std::result::Result<u64, WriteError> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_pos_gain_p_key(); + update(storage, value, key) +} + +/// Update the PoS D-gain parameter in storage. Returns the gas +/// cost. +pub fn update_pos_gain_d_parameter<DB, H>( + storage: &mut Storage<DB, H>, + value: &EpochDuration, +) -> std::result::Result<u64, WriteError> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_pos_gain_d_key(); + update(storage, value, key) +} + +/// Update the PoS staked ratio parameter in storage. Returns the +/// gas cost. +pub fn update_staked_ratio_parameter<DB, H>( + storage: &mut Storage<DB, H>, + value: &EpochDuration, +) -> std::result::Result<u64, WriteError> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_staked_ratio_key(); + update(storage, value, key) +}
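Each helper above only selects the right storage key and defers to the generic `update` defined below, which Borsh-encodes the value and returns the gas charged for the write. Continuing the hedged test sketch from above:

```rust
// Tighten the epoch duration at a (hypothetical) protocol upgrade.
let new_duration = EpochDuration {
    min_num_of_blocks: 10,
    min_duration: DurationSecs(600),
};
let _gas = update_epoch_parameter(&mut storage, &new_duration)
    .expect("updating a protocol parameter should succeed");
```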
+/// Update the PoS inflation rate parameter in storage. Returns the +/// gas cost. +pub fn update_pos_inflation_amount_parameter<DB, H>( + storage: &mut Storage<DB, H>, + value: &EpochDuration, +) -> std::result::Result<u64, WriteError> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_pos_inflation_amount_key(); + update(storage, value, key) +} + +/// Update the implicit VP parameter in storage. Returns the gas cost. +pub fn update_implicit_vp<DB, H>( + storage: &mut Storage<DB, H>, + implicit_vp: &[u8], +) -> std::result::Result<u64, WriteError> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_implicit_vp_key(); + // Not using `fn update` here, because implicit_vp doesn't need to be + // encoded, it's bytes already. + let (gas, _size_diff) = storage + .write(&key, implicit_vp) + .map_err(WriteError::StorageError)?; + Ok(gas) +} + +/// Update a parameter in storage. Returns the gas +/// cost. +pub fn update<DB, H, T>( + storage: &mut Storage<DB, H>, + value: &T, + key: Key, +) -> std::result::Result<u64, WriteError> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, + T: BorshSerialize, +{ + let serialized_value = value + .try_to_vec() + .map_err(|e| WriteError::SerializeError(e.to_string()))?; + let (gas, _size_diff) = storage + .write(&key, serialized_value) + .map_err(WriteError::StorageError)?; + Ok(gas) +} + +/// Read the epoch duration parameter from storage +pub fn read_epoch_duration_parameter<DB, H>( + storage: &Storage<DB, H>, +) -> std::result::Result<(EpochDuration, u64), ReadError> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + // read epoch + let epoch_key = storage::get_epoch_duration_storage_key(); + let (value, gas) = + storage.read(&epoch_key).map_err(ReadError::StorageError)?; + let epoch_duration: EpochDuration = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + Ok((epoch_duration, gas)) +} + +/// Read all the parameters from storage. Returns the parameters and gas +/// cost. +pub fn read<DB, H>( + storage: &Storage<DB, H>, +) -> std::result::Result<(Parameters, u64), ReadError> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + // read epoch duration + let (epoch_duration, gas_epoch) = read_epoch_duration_parameter(storage) + .expect("Couldn't read epoch duration parameters"); + + // read vp whitelist + let vp_whitelist_key = storage::get_vp_whitelist_storage_key(); + let (value, gas_vp) = storage + .read(&vp_whitelist_key) + .map_err(ReadError::StorageError)?; + let vp_whitelist: Vec<String> = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + // read tx whitelist + let tx_whitelist_key = storage::get_tx_whitelist_storage_key(); + let (value, gas_tx) = storage + .read(&tx_whitelist_key) + .map_err(ReadError::StorageError)?; + let tx_whitelist: Vec<String> = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + let max_expected_time_per_block_key = + storage::get_max_expected_time_per_block_key(); + let (value, gas_time) = storage + .read(&max_expected_time_per_block_key) + .map_err(ReadError::StorageError)?; + let max_expected_time_per_block: DurationSecs = + decode(value.ok_or(ReadError::ParametersMissing)?) 
+ .map_err(ReadError::StorageTypeError)?; + + let implicit_vp_key = storage::get_implicit_vp_key(); + let (value, gas_implicit_vp) = storage + .read(&implicit_vp_key) + .map_err(ReadError::StorageError)?; + let implicit_vp = value.ok_or(ReadError::ParametersMissing)?; + + // read epochs per year + let epochs_per_year_key = storage::get_epochs_per_year_key(); + let (value, gas_epy) = storage + .read(&epochs_per_year_key) + .map_err(ReadError::StorageError)?; + let epochs_per_year: u64 = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + // read PoS gain P + let pos_gain_p_key = storage::get_pos_gain_p_key(); + let (value, gas_gain_p) = storage + .read(&pos_gain_p_key) + .map_err(ReadError::StorageError)?; + let pos_gain_p: Decimal = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + // read PoS gain D + let pos_gain_d_key = storage::get_pos_gain_d_key(); + let (value, gas_gain_d) = storage + .read(&pos_gain_d_key) + .map_err(ReadError::StorageError)?; + let pos_gain_d: Decimal = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + // read staked ratio + let staked_ratio_key = storage::get_staked_ratio_key(); + let (value, gas_staked) = storage + .read(&staked_ratio_key) + .map_err(ReadError::StorageError)?; + let staked_ratio: Decimal = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + // read PoS inflation rate + let pos_inflation_key = storage::get_pos_inflation_amount_key(); + let (value, gas_reward) = storage + .read(&pos_inflation_key) + .map_err(ReadError::StorageError)?; + let pos_inflation_amount: u64 = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + Ok(( + Parameters { + epoch_duration, + max_expected_time_per_block, + vp_whitelist, + tx_whitelist, + implicit_vp, + epochs_per_year, + pos_gain_p, + pos_gain_d, + staked_ratio, + pos_inflation_amount, + }, + gas_epoch + + gas_tx + + gas_vp + + gas_time + + gas_implicit_vp + + gas_epy + + gas_gain_p + + gas_gain_d + + gas_staked + + gas_reward, + )) +} diff --git a/core/src/ledger/parameters/storage.rs b/core/src/ledger/parameters/storage.rs new file mode 100644 index 0000000000..b8dc84fd76 --- /dev/null +++ b/core/src/ledger/parameters/storage.rs @@ -0,0 +1,207 @@ +//! Parameters storage +use super::ADDRESS; +use crate::types::storage::{DbKeySeg, Key}; + +const EPOCH_DURATION_KEY: &str = "epoch_duration"; +const VP_WHITELIST_KEY: &str = "vp_whitelist"; +const TX_WHITELIST_KEY: &str = "tx_whitelist"; +const MAX_EXPECTED_TIME_PER_BLOCK_KEY: &str = "max_expected_time_per_block"; +const IMPLICIT_VP_KEY: &str = "implicit_vp"; +const EPOCHS_PER_YEAR_KEY: &str = "epochs_per_year"; +const POS_GAIN_P_KEY: &str = "pos_gain_p"; +const POS_GAIN_D_KEY: &str = "pos_gain_d"; +const STAKED_RATIO_KEY: &str = "staked_ratio_key"; +const POS_INFLATION_AMOUNT_KEY: &str = "pos_inflation_amount_key"; + +/// Returns if the key is a parameter key. +pub fn is_parameter_key(key: &Key) -> bool { + matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &ADDRESS) +} + +/// Returns if the key is a protocol parameter key. +pub fn is_protocol_parameter_key(key: &Key) -> bool { + is_epoch_duration_storage_key(key) + || is_max_expected_time_per_block_key(key) + || is_tx_whitelist_key(key) + || is_vp_whitelist_key(key) +} + +/// Returns if the key is an epoch storage key. 
+pub fn is_epoch_duration_storage_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(epoch_duration), + ] if addr == &ADDRESS && epoch_duration == EPOCH_DURATION_KEY) +} + +/// Returns if the key is the max_expected_time_per_block key. +pub fn is_max_expected_time_per_block_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(max_expected_time_per_block), + ] if addr == &ADDRESS && max_expected_time_per_block == MAX_EXPECTED_TIME_PER_BLOCK_KEY) +} + +/// Returns if the key is the tx_whitelist key. +pub fn is_tx_whitelist_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(tx_whitelist), + ] if addr == &ADDRESS && tx_whitelist == TX_WHITELIST_KEY) +} + +/// Returns if the key is the vp_whitelist key. +pub fn is_vp_whitelist_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(vp_whitelist), + ] if addr == &ADDRESS && vp_whitelist == VP_WHITELIST_KEY) +} + +/// Returns if the key is the implicit VP key. +pub fn is_implicit_vp_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(sub_key), + ] if addr == &ADDRESS && sub_key == IMPLICIT_VP_KEY) +} + +/// Returns if the key is the epoch_per_year key. +pub fn is_epochs_per_year_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(epochs_per_year), + ] if addr == &ADDRESS && epochs_per_year == EPOCHS_PER_YEAR_KEY) +} + +/// Returns if the key is the pos_gain_p key. +pub fn is_pos_gain_p_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(pos_gain_p), + ] if addr == &ADDRESS && pos_gain_p == POS_GAIN_P_KEY) +} + +/// Returns if the key is the pos_gain_d key. +pub fn is_pos_gain_d_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(pos_gain_d), + ] if addr == &ADDRESS && pos_gain_d == POS_GAIN_D_KEY) +} + +/// Returns if the key is the staked ratio key. +pub fn is_staked_ratio_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(staked_ratio), + ] if addr == &ADDRESS && staked_ratio == STAKED_RATIO_KEY) +} + +/// Returns if the key is the PoS reward rate key. +pub fn is_pos_inflation_amount_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(pos_inflation_amount), + ] if addr == &ADDRESS && pos_inflation_amount == POS_INFLATION_AMOUNT_KEY) +} + +/// Storage key used for epoch parameter. +pub fn get_epoch_duration_storage_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(EPOCH_DURATION_KEY.to_string()), + ], + } +} + +/// Storage key used for vp whitelist parameter. +pub fn get_vp_whitelist_storage_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(VP_WHITELIST_KEY.to_string()), + ], + } +} + +/// Storage key used for tx whitelist parameter. +pub fn get_tx_whitelist_storage_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(TX_WHITELIST_KEY.to_string()), + ], + } +} + +/// Storage key used for max_expected_time_per_block parameter. 
+pub fn get_max_expected_time_per_block_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(MAX_EXPECTED_TIME_PER_BLOCK_KEY.to_string()), + ], + } +} + +/// Storage key used for implicit VP parameter. +pub fn get_implicit_vp_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(IMPLICIT_VP_KEY.to_string()), + ], + } +} + +/// Storage key used for epochs_per_year parameter. +pub fn get_epochs_per_year_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(EPOCHS_PER_YEAR_KEY.to_string()), + ], + } +} + +/// Storage key used for pos_gain_p parameter. +pub fn get_pos_gain_p_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(POS_GAIN_P_KEY.to_string()), + ], + } +} + +/// Storage key used for pos_gain_d parameter. +pub fn get_pos_gain_d_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(POS_GAIN_D_KEY.to_string()), + ], + } +} + +/// Storage key used for staked ratio parameter. +pub fn get_staked_ratio_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(STAKED_RATIO_KEY.to_string()), + ], + } +} + +/// Storage key used for the inflation amount parameter. +pub fn get_pos_inflation_amount_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(POS_INFLATION_AMOUNT_KEY.to_string()), + ], + } +} diff --git a/core/src/ledger/slash_fund/mod.rs b/core/src/ledger/slash_fund/mod.rs new file mode 100644 index 0000000000..7a7d53963b --- /dev/null +++ b/core/src/ledger/slash_fund/mod.rs @@ -0,0 +1,8 @@ +//! SlashFund library code + +use crate::types::address::{Address, InternalAddress}; + +/// Internal SlashFund address +pub const ADDRESS: Address = Address::Internal(InternalAddress::SlashFund); + +pub mod storage; diff --git a/shared/src/ledger/slash_fund/storage.rs b/core/src/ledger/slash_fund/storage.rs similarity index 80% rename from shared/src/ledger/slash_fund/storage.rs rename to core/src/ledger/slash_fund/storage.rs index 60d29f0f48..9c437da591 100644 --- a/shared/src/ledger/slash_fund/storage.rs +++ b/core/src/ledger/slash_fund/storage.rs @@ -1,7 +1,8 @@ -use super::ADDRESS; +//! 
Slash fund storage + use crate::types::storage::{DbKeySeg, Key}; /// Check if a key is a slash fund key pub fn is_slash_fund_key(key: &Key) -> bool { - matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &ADDRESS) + matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &super::ADDRESS) } diff --git a/shared/src/ledger/storage/ics23_specs.rs b/core/src/ledger/storage/ics23_specs.rs similarity index 100% rename from shared/src/ledger/storage/ics23_specs.rs rename to core/src/ledger/storage/ics23_specs.rs diff --git a/shared/src/ledger/storage/merkle_tree.rs b/core/src/ledger/storage/merkle_tree.rs similarity index 94% rename from shared/src/ledger/storage/merkle_tree.rs rename to core/src/ledger/storage/merkle_tree.rs index edd4d2b452..d5f9163880 100644 --- a/shared/src/ledger/storage/merkle_tree.rs +++ b/core/src/ledger/storage/merkle_tree.rs @@ -10,23 +10,21 @@ use arse_merkle_tree::{ use borsh::{BorshDeserialize, BorshSerialize}; use ics23::commitment_proof::Proof as Ics23Proof; use ics23::{CommitmentProof, ExistenceProof, NonExistenceProof}; -use prost::Message; use thiserror::Error; use super::traits::{StorageHasher, SubTreeRead, SubTreeWrite}; -use super::IBC_KEY_LIMIT; use crate::bytes::ByteBuf; use crate::ledger::eth_bridge::storage::bridge_pool::{ get_signed_root_key, BridgePoolTree, }; use crate::ledger::storage::ics23_specs::ibc_leaf_spec; use crate::ledger::storage::{ics23_specs, types}; -use crate::tendermint::merkle::proof::{Proof, ProofOp}; use crate::types::address::{Address, InternalAddress}; use crate::types::hash::Hash; use crate::types::keccak::KeccakHash; use crate::types::storage::{ - DbKeySeg, Error as StorageError, Key, MembershipProof, StringKey, TreeBytes, + self, DbKeySeg, Error as StorageError, Key, MembershipProof, StringKey, + TreeBytes, TreeKeyError, IBC_KEY_LIMIT, }; #[allow(missing_docs)] @@ -36,6 +34,8 @@ pub enum Error { InvalidKey(StorageError), #[error("Invalid key for merkle tree: {0}")] InvalidMerkleKey(String), + #[error("Storage tree key error: {0}")] + StorageTreeKey(#[from] TreeKeyError), #[error("Empty Key: {0}")] EmptyKey(String), #[error("Merkle Tree error: {0}")] @@ -59,10 +59,15 @@ type Result = std::result::Result; pub(super) type StorageBytes<'a> = &'a [u8]; /// Type aliases for the different merkle trees and backing stores +/// Sparse-merkle-tree store pub type SmtStore = DefaultStore; +/// Arse-merkle-tree store pub type AmtStore = DefaultStore; +/// Bridge pool store pub type BridgePoolStore = std::collections::BTreeSet; +/// Sparse-merkle-tree pub type Smt = ArseMerkleTree; +/// Arse-merkle-tree pub type Amt = ArseMerkleTree; @@ -106,6 +111,7 @@ pub enum Store { } impl Store { + /// Convert to a `StoreRef` with borrowed store pub fn as_ref(&self) -> StoreRef { match self { Self::Base(store) => StoreRef::Base(store), @@ -432,25 +438,15 @@ } // Get a proof of the sub tree - self.get_tendermint_proof(key, nep) + self.get_sub_tree_proof(key, nep) } /// Get the Tendermint proof with the base proof - pub fn get_tendermint_proof( + pub fn get_sub_tree_proof( &self, key: &Key, sub_proof: CommitmentProof, ) -> Result<Proof> { - let mut data = vec![]; - sub_proof - .encode(&mut data) - .expect("Encoding proof shouldn't fail"); - let sub_proof_op = ProofOp { - field_type: "ics23_CommitmentProof".to_string(), - key: key.to_string().as_bytes().to_vec(), - data, - }; - // Get a membership proof of the base tree because the sub root should // exist let (store_type, _) = StoreType::sub_key(key)?; @@ -469,19 +465,10 @@ 
impl<H: StorageHasher + Default> MerkleTree<H> { _ => unreachable!(), }; - let mut data = vec![]; - base_proof - .encode(&mut data) - .expect("Encoding proof shouldn't fail"); - let base_proof_op = ProofOp { - field_type: "ics23_CommitmentProof".to_string(), - key: key.to_string().as_bytes().to_vec(), - data, - }; - - // Set ProofOps from leaf to root Ok(Proof { - ops: vec![sub_proof_op, base_proof_op], + key: key.clone(), + sub_proof, + base_proof, }) } } @@ -598,6 +585,57 @@ impl From<MtError> for Error { } +/// A storage key existence or non-existence proof +#[derive(Debug)] +pub struct Proof { + /// Storage key + pub key: storage::Key, + /// Sub proof + pub sub_proof: CommitmentProof, + /// Base proof + pub base_proof: CommitmentProof, +} + +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +impl From<Proof> for crate::tendermint::merkle::proof::Proof { + fn from( + Proof { + key, + sub_proof, + base_proof, + }: Proof, + ) -> Self { + use prost::Message; + + use crate::tendermint::merkle::proof::{Proof, ProofOp}; + + let mut data = vec![]; + sub_proof + .encode(&mut data) + .expect("Encoding proof shouldn't fail"); + let sub_proof_op = ProofOp { + field_type: "ics23_CommitmentProof".to_string(), + key: key.to_string().as_bytes().to_vec(), + data, + }; + + let mut data = vec![]; + base_proof + .encode(&mut data) + .expect("Encoding proof shouldn't fail"); + let base_proof_op = ProofOp { + field_type: "ics23_CommitmentProof".to_string(), + key: key.to_string().as_bytes().to_vec(), + data, + }; + + // Set ProofOps from leaf to root + Proof { + ops: vec![sub_proof_op, base_proof_op], + } + } +} + #[cfg(test)] mod test { use super::*; @@ -642,9 +680,7 @@ mod test { let nep = tree .get_non_existence_proof(&ibc_non_key) .expect("Test failed"); - let subtree_nep = nep.ops.get(0).expect("Test failed"); - let nep_commitment_proof = - CommitmentProof::decode(&*subtree_nep.data).expect("Test failed"); + let nep_commitment_proof = nep.sub_proof; let non_existence_proof = match nep_commitment_proof.clone().proof.expect("Test failed") { Ics23Proof::Nonexist(nep) => nep, @@ -668,9 +704,7 @@ sub_key.to_string().as_bytes(), ); assert!(nep_verification_res); - let basetree_ep = nep.ops.get(1).unwrap(); - let basetree_ep_commitment_proof = - CommitmentProof::decode(&*basetree_ep.data).unwrap(); + let basetree_ep_commitment_proof = nep.base_proof; let basetree_ics23_ep = match basetree_ep_commitment_proof.clone().proof.unwrap() { Ics23Proof::Exist(ep) => ep, @@ -740,17 +774,19 @@ mod test { MembershipProof::ICS23(proof) => proof, _ => panic!("Test failed"), }; - let proof = tree.get_tendermint_proof(&ibc_key, proof).unwrap(); + let proof = tree.get_sub_tree_proof(&ibc_key, proof).unwrap(); let (store_type, sub_key) = StoreType::sub_key(&ibc_key).unwrap(); let paths = vec![sub_key.to_string(), store_type.to_string()]; let mut sub_root = ibc_val.clone(); let mut value = ibc_val; // First, the sub proof is verified. 
Next the base proof is verified // with the sub root - for ((p, spec), key) in - proof.ops.iter().zip(specs.iter()).zip(paths.iter()) + for ((commitment_proof, spec), key) in + [proof.sub_proof, proof.base_proof] + .into_iter() + .zip(specs.iter()) + .zip(paths.iter()) { - let commitment_proof = CommitmentProof::decode(&*p.data).unwrap(); let existence_proof = match commitment_proof.clone().proof.unwrap() { Ics23Proof::Exist(ep) => ep, @@ -800,17 +836,19 @@ mod test { _ => panic!("Test failed"), }; - let proof = tree.get_tendermint_proof(&pos_key, proof).unwrap(); + let proof = tree.get_sub_tree_proof(&pos_key, proof).unwrap(); let (store_type, sub_key) = StoreType::sub_key(&pos_key).unwrap(); let paths = vec![sub_key.to_string(), store_type.to_string()]; let mut sub_root = pos_val.clone(); let mut value = pos_val; // First, the sub proof is verified. Next the base proof is verified // with the sub root - for ((p, spec), key) in - proof.ops.iter().zip(specs.iter()).zip(paths.iter()) + for ((commitment_proof, spec), key) in + [proof.sub_proof, proof.base_proof] + .into_iter() + .zip(specs.iter()) + .zip(paths.iter()) { - let commitment_proof = CommitmentProof::decode(&*p.data).unwrap(); let existence_proof = match commitment_proof.clone().proof.unwrap() { Ics23Proof::Exist(ep) => ep, @@ -850,9 +888,7 @@ mod test { let nep = tree .get_non_existence_proof(&ibc_non_key) .expect("Test failed"); - let subtree_nep = nep.ops.get(0).expect("Test failed"); - let nep_commitment_proof = - CommitmentProof::decode(&*subtree_nep.data).expect("Test failed"); + let nep_commitment_proof = nep.sub_proof; let non_existence_proof = match nep_commitment_proof.clone().proof.expect("Test failed") { Ics23Proof::Nonexist(nep) => nep, @@ -876,9 +912,7 @@ mod test { sub_key.to_string().as_bytes(), ); assert!(nep_verification_res); - let basetree_ep = nep.ops.get(1).unwrap(); - let basetree_ep_commitment_proof = - CommitmentProof::decode(&*basetree_ep.data).unwrap(); + let basetree_ep_commitment_proof = nep.base_proof; let basetree_ics23_ep = match basetree_ep_commitment_proof.clone().proof.unwrap() { Ics23Proof::Exist(ep) => ep, diff --git a/shared/src/ledger/storage/mockdb.rs b/core/src/ledger/storage/mockdb.rs similarity index 99% rename from shared/src/ledger/storage/mockdb.rs rename to core/src/ledger/storage/mockdb.rs index 5f4e583c08..950084acc8 100644 --- a/shared/src/ledger/storage/mockdb.rs +++ b/core/src/ledger/storage/mockdb.rs @@ -14,7 +14,7 @@ use super::{ }; use crate::ledger::storage::types::{self, KVBytes, PrefixIterator}; #[cfg(feature = "ferveo-tpke")] -use crate::types::storage::TxQueue; +use crate::types::internal::TxQueue; use crate::types::storage::{ BlockHeight, BlockResults, Header, Key, KeySeg, KEY_SEGMENT_SEPARATOR, }; diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs new file mode 100644 index 0000000000..3b2c5b59e8 --- /dev/null +++ b/core/src/ledger/storage/mod.rs @@ -0,0 +1,1373 @@ +//! 
Ledger's state storage with key-value backed store and a merkle tree + +pub mod ics23_specs; +pub mod merkle_tree; +#[cfg(any(test, feature = "testing"))] +pub mod mockdb; +pub mod traits; +pub mod types; + +use core::fmt::Debug; +use std::array; +use std::collections::BTreeMap; + +use borsh::{BorshDeserialize, BorshSerialize}; +use masp_primitives::asset_type::AssetType; +use masp_primitives::convert::AllowedConversion; +use masp_primitives::merkle_tree::FrozenCommitmentTree; +use masp_primitives::sapling::Node; +pub use merkle_tree::{ + MerkleTree, MerkleTreeStoresRead, MerkleTreeStoresWrite, StoreType, +}; +#[cfg(feature = "wasm-runtime")] +use rayon::iter::{ + IndexedParallelIterator, IntoParallelIterator, ParallelIterator, +}; +#[cfg(feature = "wasm-runtime")] +use rayon::prelude::ParallelSlice; +use thiserror::Error; +pub use traits::{Sha256Hasher, StorageHasher}; + +use self::merkle_tree::StorageBytes; +use crate::ledger::gas::MIN_STORAGE_GAS; +use crate::ledger::parameters::{self, EpochDuration, Parameters}; +use crate::ledger::storage::merkle_tree::{ + Error as MerkleTreeError, MerkleRoot, +}; +use crate::ledger::storage_api; +use crate::ledger::storage_api::{ResultExt, StorageRead, StorageWrite}; +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +use crate::tendermint::merkle::proof::Proof; +use crate::types::address::{ + masp, Address, EstablishedAddressGen, InternalAddress, +}; +use crate::types::chain::{ChainId, CHAIN_ID_LENGTH}; +// TODO +#[cfg(feature = "ferveo-tpke")] +use crate::types::internal::TxQueue; +use crate::types::storage::{ + BlockHash, BlockHeight, BlockResults, Epoch, Epochs, Header, Key, KeySeg, + MembershipProof, TxIndex, BLOCK_HASH_LENGTH, +}; +use crate::types::time::DateTimeUtc; +use crate::types::token; + +/// A result of a function that may fail +pub type Result<T> = std::result::Result<T, Error>; +/// A representation of the conversion state +#[derive(Debug, Default, BorshSerialize, BorshDeserialize)] +pub struct ConversionState { + /// The merkle root from the previous epoch + pub prev_root: Node, + /// The tree currently containing all the conversions + pub tree: FrozenCommitmentTree<Node>, + /// Map assets to their latest conversion and position in Merkle tree + pub assets: BTreeMap<AssetType, (Address, Epoch, AllowedConversion, usize)>, +} + +/// The storage data +#[derive(Debug)] +pub struct Storage<D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// The database for the storage + pub db: D, + /// The ID of the chain + pub chain_id: ChainId, + /// The address of the native token - this is not stored in DB, but read + /// from genesis + pub native_token: Address, + /// Block storage data + pub block: BlockStorage<H>, + /// During `FinalizeBlock`, this is the header of the block that is + /// going to be committed. After a block is committed, this is reset to + /// `None` until the next `FinalizeBlock` phase is reached. + pub header: Option<Header>,
+ /// The height of the most recently committed block, or `BlockHeight(0)` if + /// no block has been committed for this chain yet. + pub last_height: BlockHeight, + /// The epoch of the most recently committed block. If it is `Epoch(0)`, + /// then no block may have been committed for this chain yet. + pub last_epoch: Epoch, + /// Minimum block height at which the next epoch may start + pub next_epoch_min_start_height: BlockHeight, + /// Minimum block time at which the next epoch may start + pub next_epoch_min_start_time: DateTimeUtc, + /// The current established address generator + pub address_gen: EstablishedAddressGen, + /// The shielded transaction index + pub tx_index: TxIndex, + /// The currently saved conversion state + pub conversion_state: ConversionState, + /// Wrapper txs to be decrypted in the next block proposal + #[cfg(feature = "ferveo-tpke")] + pub tx_queue: TxQueue, +} + +/// The block storage data +#[derive(Debug)] +pub struct BlockStorage<H: StorageHasher> { + /// Merkle tree of all the other data in block storage + pub tree: MerkleTree<H>, + /// During `FinalizeBlock`, this is updated to be the hash of the block + /// that is going to be committed. If it is `BlockHash::default()`, + /// then no `FinalizeBlock` stage has been reached yet. + pub hash: BlockHash, + /// From the start of `FinalizeBlock` until the end of `Commit`, this is + /// the height of the block that is going to be committed. Otherwise, it is + /// the height of the most recently committed block, or `BlockHeight(0)` if + /// no block has been committed yet. + pub height: BlockHeight, + /// From the start of `FinalizeBlock` until the end of `Commit`, this is + /// the epoch of the block that is going to be committed. Otherwise it is + /// the epoch of the most recently committed block, or `Epoch(0)` if no + /// block has been committed yet. + pub epoch: Epoch, + /// Results of applying transactions + pub results: BlockResults, + /// Predecessor block epochs + pub pred_epochs: Epochs, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("TEMPORARY error: {error}")] + Temporary { error: String }, + #[error("Found an unknown key: {key}")] + UnknownKey { key: String }, + #[error("Storage key error: {0}")] + KeyError(crate::types::storage::Error), + #[error("Coding error: {0}")] + CodingError(types::Error), + #[error("Merkle tree error: {0}")] + MerkleTreeError(MerkleTreeError), + #[error("DB error: {0}")] + DBError(String), + #[error("Borsh (de)-serialization error: {0}")] + BorshCodingError(std::io::Error), + #[error("Merkle tree at the height {height} is not stored")] + NoMerkleTree { height: BlockHeight }, +} + +/// The block's state as stored in the database. +pub struct BlockStateRead { + /// Merkle tree stores + pub merkle_tree_stores: MerkleTreeStoresRead, + /// Hash of the block + pub hash: BlockHash, + /// Height of the block + pub height: BlockHeight, + /// Epoch of the block + pub epoch: Epoch, + /// Predecessor block epochs + pub pred_epochs: Epochs, + /// Minimum block height at which the next epoch may start + pub next_epoch_min_start_height: BlockHeight, + /// Minimum block time at which the next epoch may start + pub next_epoch_min_start_time: DateTimeUtc, + /// Established address generator + pub address_gen: EstablishedAddressGen, + /// Results of applying transactions + pub results: BlockResults, + /// Wrapper txs to be decrypted in the next block proposal + #[cfg(feature = "ferveo-tpke")] + pub tx_queue: TxQueue, +}
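A minimal sketch of the `FinalizeBlock`/`Commit` bookkeeping described by the doc comments above, run against the mock backend (assuming the crate is importable as `namada_core` and built with its `testing` feature; `TestStorage` is the helper defined in this module's `testing` submodule):

```rust
use namada_core::ledger::storage::testing::TestStorage;
use namada_core::types::storage::{BlockHash, BlockHeight};

fn main() {
    let mut storage = TestStorage::default();
    // Nothing committed yet: the committed height is 0.
    assert_eq!(storage.last_height, BlockHeight(0));

    // `FinalizeBlock` begins: the in-progress block gets its hash and height,
    // but `last_height` still refers to the previously committed block.
    storage
        .begin_block(BlockHash::default(), BlockHeight(1))
        .unwrap();
    assert_eq!(storage.block.height, BlockHeight(1));
    assert_eq!(storage.last_height, BlockHeight(0));

    // `Commit` persists the block state and advances `last_height`.
    storage.commit().unwrap();
    assert_eq!(storage.last_height, BlockHeight(1));
}
```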
/// The block's state to write into the database. +pub struct BlockStateWrite<'a> { + /// Merkle tree stores + pub merkle_tree_stores: MerkleTreeStoresWrite<'a>, + /// Header of the block + pub header: Option<&'a Header>, + /// Hash of the block + pub hash: &'a BlockHash, + /// Height of the block + pub height: BlockHeight, + /// Epoch of the block + pub epoch: Epoch, + /// Predecessor block epochs + pub pred_epochs: &'a Epochs, + /// Minimum block height at which the next epoch may start + pub next_epoch_min_start_height: BlockHeight, + /// Minimum block time at which the next epoch may start + pub next_epoch_min_start_time: DateTimeUtc, + /// Established address generator + pub address_gen: &'a EstablishedAddressGen, + /// Results of applying transactions + pub results: &'a BlockResults, + /// Wrapper txs to be decrypted in the next block proposal + #[cfg(feature = "ferveo-tpke")] + pub tx_queue: &'a TxQueue, +} + +/// A database backend. +pub trait DB: std::fmt::Debug { + /// A DB's cache + type Cache; + /// A handle for batch writes + type WriteBatch: DBWriteBatch; + + /// Open the database from provided path + fn open( + db_path: impl AsRef<std::path::Path>, + cache: Option<&Self::Cache>, + ) -> Self; + + /// Flush data from memory onto persistent storage + fn flush(&self, wait: bool) -> Result<()>; + + /// Read the last committed block's metadata + fn read_last_block(&mut self) -> Result<Option<BlockStateRead>>; + + /// Write block's metadata + fn write_block(&mut self, state: BlockStateWrite) -> Result<()>; + + /// Read the block header with the given height from the DB + fn read_block_header(&self, height: BlockHeight) -> Result<Option<Header>>; + + /// Read the merkle tree stores with the given height + fn read_merkle_tree_stores( + &self, + height: BlockHeight, + ) -> Result<Option<MerkleTreeStoresRead>>; + + /// Read the latest value for account subspace key from the DB + fn read_subspace_val(&self, key: &Key) -> Result<Option<Vec<u8>>>; + + /// Read the value for account subspace key at the given height from the + /// DB. In our `PersistentStorage` (rocksdb), finding a value at an + /// arbitrary height requires looking for diffs from the given `height`, + /// possibly up to the `last_height`. + fn read_subspace_val_with_height( + &self, + key: &Key, + height: BlockHeight, + last_height: BlockHeight, + ) -> Result<Option<Vec<u8>>>; + + /// Write the value with the given height and account subspace key to the + /// DB. Returns the size difference from previous value, if any, or the + /// size of the value otherwise. + fn write_subspace_val( + &mut self, + height: BlockHeight, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result<i64>; + + /// Delete the value with the given height and account subspace key from + /// the DB. Returns the size of the removed value, if any, or 0 if no + /// previous value was found. + fn delete_subspace_val( + &mut self, + height: BlockHeight, + key: &Key, + ) -> Result<i64>; + + /// Start write batch. + fn batch() -> Self::WriteBatch; + + /// Execute write batch. + fn exec_batch(&mut self, batch: Self::WriteBatch) -> Result<()>; + + /// Batch write the value with the given height and account subspace key to + /// the DB. Returns the size difference from previous value, if any, or + /// the size of the value otherwise. + fn batch_write_subspace_val( + &self, + batch: &mut Self::WriteBatch, + height: BlockHeight, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result<i64>; + + /// Batch delete the value with the given height and account subspace key + /// from the DB. Returns the size of the removed value, if any, or 0 if no + /// previous value was found. + fn batch_delete_subspace_val( + &self, + batch: &mut Self::WriteBatch, + height: BlockHeight, + key: &Key, + ) -> Result<i64>; +}
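The batch half of this trait is driven through `Storage::batch`/`Storage::exec_batch` (defined further down). A hedged sketch of the workflow, under the same `namada_core`/`testing` assumptions as above; note that the mock DB applies batched writes eagerly, which is what makes the final read-back work:

```rust
use namada_core::ledger::storage::testing::TestStorage;
use namada_core::types::storage::Key;

fn main() {
    let mut storage = TestStorage::default();
    let key = Key::parse("demo/value").expect("valid key");

    // Stage writes in a batch instead of hitting the DB once per key.
    let mut batch = TestStorage::batch();
    storage
        .batch_write_subspace_val(&mut batch, &key, [1u8, 2, 3])
        .expect("batch write failed");
    // Execute the whole batch.
    storage.exec_batch(batch).expect("batch execution failed");

    assert_eq!(storage.read(&key).unwrap().0, Some(vec![1u8, 2, 3]));
}
```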
+ +/// A database prefix iterator. +pub trait DBIter<'iter> { + /// The concrete type of the iterator + type PrefixIter: Debug + Iterator<Item = (String, Vec<u8>, u64)>; + + /// Read account subspace key value pairs with the given prefix from the + /// DB, ordered by the storage keys. + fn iter_prefix(&'iter self, prefix: &Key) -> Self::PrefixIter; + + /// Read account subspace key value pairs with the given prefix from the + /// DB, reverse ordered by the storage keys. + fn rev_iter_prefix(&'iter self, prefix: &Key) -> Self::PrefixIter; + + /// Read results subspace key value pairs from the DB + fn iter_results(&'iter self) -> Self::PrefixIter; +} + +/// Atomic batch write. +pub trait DBWriteBatch { + /// Insert a value into the database under the given key. + fn put<K, V>(&mut self, key: K, value: V) + where + K: AsRef<[u8]>, + V: AsRef<[u8]>; + + /// Removes the database entry for key. Does nothing if the key was not + /// found. + fn delete<K: AsRef<[u8]>>(&mut self, key: K); +} + +impl<D, H> Storage<D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Open up a new instance of the storage given the path to DB and chain + /// ID + pub fn open( + db_path: impl AsRef<std::path::Path>, + chain_id: ChainId, + native_token: Address, + cache: Option<&D::Cache>, + ) -> Self { + let block = BlockStorage { + tree: MerkleTree::default(), + hash: BlockHash::default(), + height: BlockHeight::default(), + epoch: Epoch::default(), + pred_epochs: Epochs::default(), + results: BlockResults::default(), + }; + Storage::<D, H> { + db: D::open(db_path, cache), + chain_id, + block, + header: None, + last_height: BlockHeight(0), + last_epoch: Epoch::default(), + next_epoch_min_start_height: BlockHeight::default(), + next_epoch_min_start_time: DateTimeUtc::now(), + address_gen: EstablishedAddressGen::new( + "Privacy is a function of liberty.", + ), + tx_index: TxIndex::default(), + conversion_state: ConversionState::default(), + #[cfg(feature = "ferveo-tpke")] + tx_queue: TxQueue::default(), + native_token, + } + } + + /// Load the full state at the last committed height, if any. + pub fn load_last_state(&mut self) -> Result<()> { + if let Some(BlockStateRead { + merkle_tree_stores, + hash, + height, + epoch, + pred_epochs, + next_epoch_min_start_height, + next_epoch_min_start_time, + results, + address_gen, + #[cfg(feature = "ferveo-tpke")] + tx_queue, + }) = self.db.read_last_block()?
+ { + self.block.tree = MerkleTree::new(merkle_tree_stores); + self.block.hash = hash; + self.block.height = height; + self.block.epoch = epoch; + self.block.results = results; + self.block.pred_epochs = pred_epochs; + self.last_height = height; + self.last_epoch = epoch; + self.next_epoch_min_start_height = next_epoch_min_start_height; + self.next_epoch_min_start_time = next_epoch_min_start_time; + self.address_gen = address_gen; + if self.last_epoch.0 > 1 { + // The derived conversions will be placed in MASP address space + let masp_addr = masp(); + let key_prefix: Key = masp_addr.to_db_key().into(); + // Load up the conversions currently being given as query + // results + let state_key = key_prefix + .push(&(token::CONVERSION_KEY_PREFIX.to_owned())) + .map_err(Error::KeyError)?; + self.conversion_state = types::decode( + self.read(&state_key) + .expect("unable to read conversion state") + .0 + .expect("unable to find conversion state"), + ) + .expect("unable to decode conversion state") + } + #[cfg(feature = "ferveo-tpke")] + { + self.tx_queue = tx_queue; + } + tracing::debug!("Loaded storage from DB"); + } else { + tracing::info!("No state could be found"); + } + Ok(()) + } + + /// Returns the Merkle root hash and the height of the committed block. If + /// no block exists, returns None. + pub fn get_state(&self) -> Option<(MerkleRoot, u64)> { + if self.block.height.0 != 0 { + Some((self.block.tree.root(), self.block.height.0)) + } else { + None + } + } + + /// Persist the current block's state to the database + pub fn commit(&mut self) -> Result<()> { + let state = BlockStateWrite { + merkle_tree_stores: self.block.tree.stores(), + header: self.header.as_ref(), + hash: &self.block.hash, + height: self.block.height, + epoch: self.block.epoch, + results: &self.block.results, + pred_epochs: &self.block.pred_epochs, + next_epoch_min_start_height: self.next_epoch_min_start_height, + next_epoch_min_start_time: self.next_epoch_min_start_time, + address_gen: &self.address_gen, + #[cfg(feature = "ferveo-tpke")] + tx_queue: &self.tx_queue, + }; + self.db.write_block(state)?; + self.last_height = self.block.height; + self.last_epoch = self.block.epoch; + self.header = None; + Ok(()) + } + + /// Find the root hash of the merkle tree + pub fn merkle_root(&self) -> MerkleRoot { + self.block.tree.root() + } + + /// Check if the given key is present in storage. Returns the result and the + /// gas cost. + pub fn has_key(&self, key: &Key) -> Result<(bool, u64)> { + Ok((self.block.tree.has_key(key)?, key.len() as _)) + } + + /// Returns a value from the specified subspace and the gas cost + pub fn read(&self, key: &Key) -> Result<(Option>, u64)> { + tracing::debug!("storage read key {}", key); + let (present, gas) = self.has_key(key)?; + if !present { + return Ok((None, gas)); + } + + match self.db.read_subspace_val(key)? { + Some(v) => { + let gas = key.len() + v.len(); + Ok((Some(v), gas as _)) + } + None => Ok((None, key.len() as _)), + } + } + + /// Returns a value from the specified subspace at the given height and the + /// gas cost + pub fn read_with_height( + &self, + key: &Key, + height: BlockHeight, + ) -> Result<(Option>, u64)> { + if height >= self.last_height { + self.read(key) + } else { + match self.db.read_subspace_val_with_height( + key, + height, + self.last_height, + )? 
{ + Some(v) => { + let gas = key.len() + v.len(); + Ok((Some(v), gas as _)) + } + None => Ok((None, key.len() as _)), + } + } + } + + /// Returns a prefix iterator, ordered by storage keys, and the gas cost + pub fn iter_prefix( + &self, + prefix: &Key, + ) -> (<D as DBIter<'_>>::PrefixIter, u64) { + (self.db.iter_prefix(prefix), prefix.len() as _) + } + + /// Returns a prefix iterator, reverse ordered by storage keys, and the gas + /// cost + pub fn rev_iter_prefix( + &self, + prefix: &Key, + ) -> (<D as DBIter<'_>>::PrefixIter, u64) { + (self.db.rev_iter_prefix(prefix), prefix.len() as _) + } + + /// Returns a prefix iterator and the gas cost + pub fn iter_results(&self) -> (<D as DBIter<'_>>::PrefixIter, u64) { + (self.db.iter_results(), 0) + } + + /// Write a value to the specified subspace and return the gas cost and the + /// size difference + pub fn write( + &mut self, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result<(u64, i64)> { + // Note that this method is the same as `StorageWrite::write_bytes`, + // but with gas and storage bytes len diff accounting + tracing::debug!("storage write key {}", key,); + let value = value.as_ref(); + self.block.tree.update(key, value)?; + + let len = value.len(); + let gas = key.len() + len; + let size_diff = + self.db.write_subspace_val(self.block.height, key, value)?; + Ok((gas as _, size_diff)) + } + + /// Delete the specified subspace and return the gas cost and the size + /// difference + pub fn delete(&mut self, key: &Key) -> Result<(u64, i64)> { + // Note that this method is the same as `StorageWrite::delete`, + // but with gas and storage bytes len diff accounting + let mut deleted_bytes_len = 0; + if self.has_key(key)?.0 { + self.block.tree.delete(key)?; + deleted_bytes_len = + self.db.delete_subspace_val(self.block.height, key)?; + } + let gas = key.len() + deleted_bytes_len as usize; + Ok((gas as _, deleted_bytes_len)) + } + + /// Set the block header. + /// The header is not in the Merkle tree as it's tracked by Tendermint. + /// Hence, we don't update the tree when this is set. + pub fn set_header(&mut self, header: Header) -> Result<()> { + self.header = Some(header); + Ok(()) + } + + /// Block data is not in the Merkle tree as it's tracked by Tendermint in + /// the block header. Hence, we don't update the tree when this is set. + pub fn begin_block( + &mut self, + hash: BlockHash, + height: BlockHeight, + ) -> Result<()> { + self.block.hash = hash; + self.block.height = height; + Ok(()) + } + + /// Get a validity predicate for the given account address and the gas cost + /// for reading it. + pub fn validity_predicate( + &self, + addr: &Address, + ) -> Result<(Option<Vec<u8>>, u64)> { + let key = if let Address::Implicit(_) = addr { + parameters::storage::get_implicit_vp_key() + } else { + Key::validity_predicate(addr) + }; + self.read(&key) + } + + #[allow(dead_code)] + /// Check if the given address exists on chain and return the gas cost. + pub fn exists(&self, addr: &Address) -> Result<(bool, u64)> { + let key = Key::validity_predicate(addr); + self.has_key(&key) + } + + /// Get the chain ID as a raw string + pub fn get_chain_id(&self) -> (String, u64) { + (self.chain_id.to_string(), CHAIN_ID_LENGTH as _) + } + + /// Get the block height + pub fn get_block_height(&self) -> (BlockHeight, u64) { + (self.block.height, MIN_STORAGE_GAS) + } + + /// Get the block hash + pub fn get_block_hash(&self) -> (BlockHash, u64) { + (self.block.hash.clone(), BLOCK_HASH_LENGTH as _) + }
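Reads and writes are gas-metered on key length plus value length, as a quick sketch shows (same `namada_core`/`testing` assumptions as the earlier examples; the exact `size_diff` value follows the mock DB backend, which reports the full value length for a previously unset key):

```rust
use namada_core::ledger::storage::testing::TestStorage;
use namada_core::types::storage::Key;

fn main() {
    let mut storage = TestStorage::default();
    let key = Key::parse("demo/value").expect("valid key");

    // Gas for a write is key length + value length; `size_diff` is the
    // change in stored bytes (here: a fresh 3-byte value).
    let (gas, size_diff) = storage.write(&key, [7u8; 3]).unwrap();
    assert_eq!(gas, key.len() as u64 + 3);
    assert_eq!(size_diff, 3);

    // A successful read returns the value and also charges key + value.
    let (value, read_gas) = storage.read(&key).unwrap();
    assert_eq!(value, Some(vec![7u8; 3]));
    assert_eq!(read_gas, key.len() as u64 + 3);
}
```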
+ /// Get a Tendermint-compatible existence proof. + /// + /// Proofs from the Ethereum bridge pool are not + /// Tendermint-compatible. Requesting a proof for a key + /// belonging to the bridge pool will cause this + /// method to error. + #[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] + pub fn get_existence_proof( + &self, + key: &Key, + value: StorageBytes, + height: BlockHeight, + ) -> Result<Proof> { + if height >= self.get_block_height().0 { + if let MembershipProof::ICS23(proof) = self + .block + .tree + .get_sub_tree_existence_proof(array::from_ref(key), vec![value]) + .map_err(Error::MerkleTreeError)? + { + self.block + .tree + .get_sub_tree_proof(key, proof) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } else { + Err(Error::MerkleTreeError(MerkleTreeError::TendermintProof)) + } + } else { + match self.db.read_merkle_tree_stores(height)? { + Some(stores) => { + let tree = MerkleTree::<H>::new(stores); + if let MembershipProof::ICS23(proof) = tree + .get_sub_tree_existence_proof( + array::from_ref(key), + vec![value], + ) + .map_err(Error::MerkleTreeError)? + { + tree.get_sub_tree_proof(key, proof) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } else { + Err(Error::MerkleTreeError( + MerkleTreeError::TendermintProof, + )) + } + } + None => Err(Error::NoMerkleTree { height }), + } + } + } + + /// Get the non-existence proof + #[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] + pub fn get_non_existence_proof( + &self, + key: &Key, + height: BlockHeight, + ) -> Result<Proof> { + if height >= self.last_height { + Ok(self.block.tree.get_non_existence_proof(key)?.into()) + } else { + match self.db.read_merkle_tree_stores(height)? { + Some(stores) => Ok(MerkleTree::<H>::new(stores) + .get_non_existence_proof(key)? + .into()), + None => Err(Error::NoMerkleTree { height }), + } + } + } + + /// Get the current (yet to be committed) block epoch + pub fn get_current_epoch(&self) -> (Epoch, u64) { + (self.block.epoch, MIN_STORAGE_GAS) + } + + /// Get the epoch of the last committed block + pub fn get_last_epoch(&self) -> (Epoch, u64) { + (self.last_epoch, MIN_STORAGE_GAS) + } + + /// Initialize the first epoch. The first epoch begins at genesis time. + pub fn init_genesis_epoch( + &mut self, + initial_height: BlockHeight, + genesis_time: DateTimeUtc, + parameters: &Parameters, + ) -> Result<()> { + let EpochDuration { + min_num_of_blocks, + min_duration, + } = parameters.epoch_duration; + self.next_epoch_min_start_height = initial_height + min_num_of_blocks; + self.next_epoch_min_start_time = genesis_time + min_duration; + self.update_epoch_in_merkle_tree() + } + + /// Get the block header + pub fn get_block_header( + &self, + height: Option<BlockHeight>, + ) -> Result<(Option<Header>
, u64)> { + match height { + Some(h) if h == self.get_block_height().0 => { + Ok((self.header.clone(), MIN_STORAGE_GAS)) + } + Some(h) => match self.db.read_block_header(h)? { + Some(header) => { + let gas = header.encoded_len() as u64; + Ok((Some(header), gas)) + } + None => Ok((None, MIN_STORAGE_GAS)), + }, + None => Ok((self.header.clone(), MIN_STORAGE_GAS)), + } + } + + /// Initialize a new epoch when the current epoch is finished. Returns + /// `true` on a new epoch. + #[cfg(feature = "wasm-runtime")] + pub fn update_epoch( + &mut self, + height: BlockHeight, + time: DateTimeUtc, + ) -> Result { + let (parameters, _gas) = + parameters::read(self).expect("Couldn't read protocol parameters"); + + // Check if the current epoch is over + let new_epoch = height >= self.next_epoch_min_start_height + && time >= self.next_epoch_min_start_time; + if new_epoch { + // Begin a new epoch + self.block.epoch = self.block.epoch.next(); + let EpochDuration { + min_num_of_blocks, + min_duration, + } = parameters.epoch_duration; + self.next_epoch_min_start_height = height + min_num_of_blocks; + self.next_epoch_min_start_time = time + min_duration; + // TODO put this into PoS parameters and pass it to tendermint + // `consensus_params` on `InitChain` and `EndBlock` + let evidence_max_age_num_blocks: u64 = 100000; + self.block + .pred_epochs + .new_epoch(height, evidence_max_age_num_blocks); + tracing::info!("Began a new epoch {}", self.block.epoch); + self.update_allowed_conversions()?; + } + self.update_epoch_in_merkle_tree()?; + Ok(new_epoch) + } + + /// Get the current conversions + pub fn get_conversion_state(&self) -> &ConversionState { + &self.conversion_state + } + + // Construct MASP asset type with given timestamp for given token + #[cfg(feature = "wasm-runtime")] + fn encode_asset_type(addr: Address, epoch: Epoch) -> AssetType { + let new_asset_bytes = (addr, epoch.0) + .try_to_vec() + .expect("unable to serialize address and epoch"); + AssetType::new(new_asset_bytes.as_ref()) + .expect("unable to derive asset identifier") + } + + #[cfg(feature = "wasm-runtime")] + /// Update the MASP's allowed conversions + fn update_allowed_conversions(&mut self) -> Result<()> { + use masp_primitives::ff::PrimeField; + use masp_primitives::transaction::components::Amount as MaspAmount; + + use crate::types::address::{masp_rewards, nam}; + + // The derived conversions will be placed in MASP address space + let masp_addr = masp(); + let key_prefix: Key = masp_addr.to_db_key().into(); + + let masp_rewards = masp_rewards(); + // The total transparent value of the rewards being distributed + let mut total_reward = token::Amount::from(0); + + // Construct MASP asset type for rewards. Always timestamp reward tokens + // with the zeroth epoch to minimize the number of convert notes clients + // have to use. This trick works under the assumption that reward tokens + // from different epochs are exactly equivalent. 
+ let reward_asset_bytes = (nam(), 0u64) + .try_to_vec() + .expect("unable to serialize address and epoch"); + let reward_asset = AssetType::new(reward_asset_bytes.as_ref()) + .expect("unable to derive asset identifier"); + // Conversions from the previous to current asset for each address + let mut current_convs = BTreeMap::<Address, AllowedConversion>::new(); + // Reward all tokens according to above reward rates + for (addr, reward) in &masp_rewards { + // Dispense a transparent reward in parallel to the shielded rewards + let token_key = self.read(&token::balance_key(addr, &masp_addr)); + if let Ok((Some(addr_balance), _)) = token_key { + // The reward for each reward.1 units of the current asset is + // reward.0 units of the reward token + let addr_bal: token::Amount = + types::decode(addr_balance).expect("invalid balance"); + // Since floor(a) + floor(b) <= floor(a+b), there will always be + // enough rewards to reimburse users + total_reward += (addr_bal * *reward).0; + } + // Provide an allowed conversion from previous timestamp. The + // negative sign allows each instance of the old asset to be + // cancelled out/replaced with the new asset + let old_asset = + Self::encode_asset_type(addr.clone(), self.last_epoch); + let new_asset = + Self::encode_asset_type(addr.clone(), self.block.epoch); + current_convs.insert( + addr.clone(), + (MaspAmount::from_pair(old_asset, -(reward.1 as i64)).unwrap() + + MaspAmount::from_pair(new_asset, reward.1).unwrap() + + MaspAmount::from_pair(reward_asset, reward.0).unwrap()) + .into(), + ); + // Add a conversion from the previous asset type + self.conversion_state.assets.insert( + old_asset, + (addr.clone(), self.last_epoch, MaspAmount::zero().into(), 0), + ); + } + + // Try to distribute Merkle leaf updating as evenly as possible across + // multiple cores + let num_threads = rayon::current_num_threads(); + // Put assets into vector to enable computation batching + let assets: Vec<_> = self + .conversion_state + .assets + .values_mut() + .enumerate() + .collect(); + // ceil(assets.len() / num_threads) + let notes_per_thread_max = (assets.len() - 1) / num_threads + 1; + // floor(assets.len() / num_threads) + let notes_per_thread_min = assets.len() / num_threads; + // Now on each core, add the latest conversion to each conversion + let conv_notes: Vec<Node> = assets + .into_par_iter() + .with_min_len(notes_per_thread_min) + .with_max_len(notes_per_thread_max) + .map(|(idx, (addr, _epoch, conv, pos))| { + // Use transitivity to update conversion + *conv += current_convs[addr].clone(); + // Update conversion position to leaf we are about to create + *pos = idx; + // The merkle tree need only provide the conversion commitment, + // the remaining information is provided through the storage API + Node::new(conv.cmu().to_repr()) + }) + .collect(); + + // Update the MASP's transparent reward token balance to ensure that it + // is sufficiently backed to redeem rewards + let reward_key = token::balance_key(&nam(), &masp_addr); + if let Ok((Some(addr_bal), _)) = self.read(&reward_key) { + // If there is already a balance, then add to it + let addr_bal: token::Amount = + types::decode(addr_bal).expect("invalid balance"); + let new_bal = types::encode(&(addr_bal + total_reward)); + self.write(&reward_key, new_bal) + .expect("unable to update MASP transparent balance"); + } else { + // Otherwise the rewards form the entirety of the reward token + // balance + self.write(&reward_key, types::encode(&total_reward)) + .expect("unable to update MASP transparent balance"); + } + // Try to distribute Merkle tree construction as evenly as possible + // across multiple cores + // Merkle trees must have exactly 2^n leaves to be mergeable + let mut notes_per_thread_rounded = 1; + while notes_per_thread_max > notes_per_thread_rounded * 4 { + notes_per_thread_rounded *= 2; + } + // Make the sub-Merkle trees in parallel + let tree_parts: Vec<_> = conv_notes + .par_chunks(notes_per_thread_rounded) + .map(FrozenCommitmentTree::new) + .collect(); + + // Keep the merkle root from the old tree for transactions constructed + // close to the epoch boundary + self.conversion_state.prev_root = self.conversion_state.tree.root(); + + // Convert conversion vector into tree so that Merkle paths can be + // obtained + self.conversion_state.tree = FrozenCommitmentTree::merge(&tree_parts); + + // Add purely decoding entries to the assets map. These will be + // overwritten before the creation of the next commitment tree + for addr in masp_rewards.keys() { + // Add the decoding entry for the new asset type. An uncommitted + // node position is used since this is not a conversion. + let new_asset = + Self::encode_asset_type(addr.clone(), self.block.epoch); + self.conversion_state.assets.insert( + new_asset, + ( + addr.clone(), + self.block.epoch, + MaspAmount::zero().into(), + self.conversion_state.tree.size(), + ), + ); + } + + // Save the current conversion state in order to avoid computing + // conversion commitments from scratch in the next epoch + let state_key = key_prefix + .push(&(token::CONVERSION_KEY_PREFIX.to_owned())) + .map_err(Error::KeyError)?; + self.write(&state_key, types::encode(&self.conversion_state)) + .expect("unable to save current conversion state"); + Ok(()) + }
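The power-of-two rounding above is what keeps the parallel sub-trees mergeable: `FrozenCommitmentTree::merge` needs every chunk except the last to hold 2^n leaves. A self-contained sketch of just that arithmetic (plain Rust, no namada types):

```rust
// Round the per-thread chunk size down to a power of two: the largest x = 2^n
// with notes_per_thread_max <= 4x, mirroring the loop in the code above.
fn rounded_chunk_size(notes_per_thread_max: usize) -> usize {
    let mut notes_per_thread_rounded = 1;
    while notes_per_thread_max > notes_per_thread_rounded * 4 {
        notes_per_thread_rounded *= 2;
    }
    notes_per_thread_rounded
}

fn main() {
    // E.g. 100 conversion notes over 8 threads: the ceiling estimate is
    // ceil(100 / 8) = 13 notes per thread, which rounds to chunks of 4.
    let notes = 100;
    let threads = 8;
    let max = (notes - 1) / threads + 1;
    assert_eq!(max, 13);
    assert_eq!(rounded_chunk_size(max), 4);
}
```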
+ /// Update the merkle tree with epoch data + fn update_epoch_in_merkle_tree(&mut self) -> Result<()> { + let key_prefix: Key = + Address::Internal(InternalAddress::PoS).to_db_key().into(); + + let key = key_prefix + .push(&"epoch_start_height".to_string()) + .map_err(Error::KeyError)?; + self.block + .tree + .update(&key, types::encode(&self.next_epoch_min_start_height))?; + + let key = key_prefix + .push(&"epoch_start_time".to_string()) + .map_err(Error::KeyError)?; + self.block + .tree + .update(&key, types::encode(&self.next_epoch_min_start_time))?; + + let key = key_prefix + .push(&"current_epoch".to_string()) + .map_err(Error::KeyError)?; + self.block + .tree + .update(&key, types::encode(&self.block.epoch))?; + + Ok(()) + } + + /// Start write batch. + pub fn batch() -> D::WriteBatch { + D::batch() + } + + /// Execute write batch. + pub fn exec_batch(&mut self, batch: D::WriteBatch) -> Result<()> { + self.db.exec_batch(batch) + } + + /// Batch write the value with the given height and account subspace key to + /// the DB. Returns the size difference from previous value, if any, or + /// the size of the value otherwise. + pub fn batch_write_subspace_val( + &mut self, + batch: &mut D::WriteBatch, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result<i64> { + let value = value.as_ref(); + self.block.tree.update(key, value)?; + self.db + .batch_write_subspace_val(batch, self.block.height, key, value) + } + + /// Batch delete the value with the given height and account subspace key + /// from the DB. Returns the size of the removed value, if any, or 0 if no + /// previous value was found.
+ pub fn batch_delete_subspace_val( + &mut self, + batch: &mut D::WriteBatch, + key: &Key, + ) -> Result { + self.block.tree.delete(key)?; + self.db + .batch_delete_subspace_val(batch, self.block.height, key) + } +} + +impl<'iter, D, H> StorageRead<'iter> for Storage +where + D: DB + for<'iter_> DBIter<'iter_>, + H: StorageHasher, +{ + type PrefixIter = >::PrefixIter; + + fn read_bytes( + &self, + key: &crate::types::storage::Key, + ) -> std::result::Result>, storage_api::Error> { + self.db.read_subspace_val(key).into_storage_result() + } + + fn has_key( + &self, + key: &crate::types::storage::Key, + ) -> std::result::Result { + self.block.tree.has_key(key).into_storage_result() + } + + fn iter_prefix( + &'iter self, + prefix: &crate::types::storage::Key, + ) -> std::result::Result { + Ok(self.db.iter_prefix(prefix)) + } + + fn rev_iter_prefix( + &'iter self, + prefix: &crate::types::storage::Key, + ) -> std::result::Result { + Ok(self.db.rev_iter_prefix(prefix)) + } + + fn iter_next( + &self, + iter: &mut Self::PrefixIter, + ) -> std::result::Result)>, storage_api::Error> + { + Ok(iter.next().map(|(key, val, _gas)| (key, val))) + } + + fn get_chain_id(&self) -> std::result::Result { + Ok(self.chain_id.to_string()) + } + + fn get_block_height( + &self, + ) -> std::result::Result { + Ok(self.block.height) + } + + fn get_block_hash( + &self, + ) -> std::result::Result { + Ok(self.block.hash.clone()) + } + + fn get_block_epoch( + &self, + ) -> std::result::Result { + Ok(self.block.epoch) + } + + fn get_tx_index(&self) -> std::result::Result { + Ok(self.tx_index) + } + + fn get_native_token( + &self, + ) -> std::result::Result { + Ok(self.native_token.clone()) + } +} + +impl StorageWrite for Storage +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + fn write_bytes( + &mut self, + key: &crate::types::storage::Key, + val: impl AsRef<[u8]>, + ) -> storage_api::Result<()> { + // Note that this method is the same as `Storage::write`, but without + // gas and storage bytes len diff accounting, because it can only be + // used by the protocol that has a direct mutable access to storage + let val = val.as_ref(); + self.block.tree.update(key, val).into_storage_result()?; + let _ = self + .db + .write_subspace_val(self.block.height, key, val) + .into_storage_result()?; + Ok(()) + } + + fn delete( + &mut self, + key: &crate::types::storage::Key, + ) -> storage_api::Result<()> { + // Note that this method is the same as `Storage::delete`, but without + // gas and storage bytes len diff accounting, because it can only be + // used by the protocol that has a direct mutable access to storage + self.block.tree.delete(key).into_storage_result()?; + let _ = self + .db + .delete_subspace_val(self.block.height, key) + .into_storage_result()?; + Ok(()) + } +} + +impl StorageWrite for &mut Storage +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + fn write( + &mut self, + key: &crate::types::storage::Key, + val: T, + ) -> storage_api::Result<()> { + let val = val.try_to_vec().unwrap(); + self.write_bytes(key, val) + } + + fn write_bytes( + &mut self, + key: &crate::types::storage::Key, + val: impl AsRef<[u8]>, + ) -> storage_api::Result<()> { + let _ = self + .db + .write_subspace_val(self.block.height, key, val) + .into_storage_result()?; + Ok(()) + } + + fn delete( + &mut self, + key: &crate::types::storage::Key, + ) -> storage_api::Result<()> { + let _ = self + .db + .delete_subspace_val(self.block.height, key) + .into_storage_result()?; + Ok(()) + } +} + +impl From for Error { 
+ fn from(error: MerkleTreeError) -> Self { + Self::MerkleTreeError(error) + } +} + +/// Helpers for testing components that depend on storage +#[cfg(any(test, feature = "testing"))] +pub mod testing { + use super::mockdb::MockDB; + use super::*; + use crate::ledger::storage::traits::Sha256Hasher; + use crate::types::address; + /// Storage with a mock DB for testing + pub type TestStorage = Storage; + + impl Default for TestStorage { + fn default() -> Self { + let chain_id = ChainId::default(); + let tree = MerkleTree::default(); + let block = BlockStorage { + tree, + hash: BlockHash::default(), + height: BlockHeight::default(), + epoch: Epoch::default(), + pred_epochs: Epochs::default(), + results: BlockResults::default(), + }; + Self { + db: MockDB::default(), + chain_id, + block, + header: None, + last_height: BlockHeight(0), + last_epoch: Epoch::default(), + next_epoch_min_start_height: BlockHeight::default(), + next_epoch_min_start_time: DateTimeUtc::now(), + address_gen: EstablishedAddressGen::new( + "Test address generator seed", + ), + tx_index: TxIndex::default(), + conversion_state: ConversionState::default(), + #[cfg(feature = "ferveo-tpke")] + tx_queue: TxQueue::default(), + native_token: address::nam(), + } + } + } +} + +#[cfg(test)] +mod tests { + use chrono::{TimeZone, Utc}; + use proptest::prelude::*; + use rust_decimal_macros::dec; + + use super::testing::*; + use super::*; + use crate::ledger::parameters::{self, Parameters}; + use crate::types::time::{self, Duration}; + + prop_compose! { + /// Setup test input data with arbitrary epoch duration, epoch start + /// height and time, and a block height and time that are greater than + /// the epoch start height and time, and the change to be applied to + /// the epoch duration parameters. + fn arb_and_epoch_duration_start_and_block() + ( + start_height in 0..1000_u64, + start_time in 0..10000_i64, + min_num_of_blocks in 1..10_u64, + min_duration in 1..100_i64, + max_expected_time_per_block in 1..100_i64, + ) + ( + min_num_of_blocks in Just(min_num_of_blocks), + min_duration in Just(min_duration), + max_expected_time_per_block in Just(max_expected_time_per_block), + start_height in Just(start_height), + start_time in Just(start_time), + block_height in start_height + 1..(start_height + 2 * min_num_of_blocks), + block_time in start_time + 1..(start_time + 2 * min_duration), + // Delta will be applied on the `min_num_of_blocks` parameter + min_blocks_delta in -(min_num_of_blocks as i64 - 1)..5, + // Delta will be applied on the `min_duration` parameter + min_duration_delta in -(min_duration - 1)..50, + // Delta will be applied on the `max_expected_time_per_block` parameter + max_time_per_block_delta in -(max_expected_time_per_block - 1)..50, + ) -> (EpochDuration, i64, BlockHeight, DateTimeUtc, BlockHeight, DateTimeUtc, + i64, i64, i64) { + let epoch_duration = EpochDuration { + min_num_of_blocks, + min_duration: Duration::seconds(min_duration).into(), + }; + (epoch_duration, max_expected_time_per_block, + BlockHeight(start_height), Utc.timestamp_opt(start_time, 0).single().expect("expected valid timestamp").into(), + BlockHeight(block_height), Utc.timestamp_opt(block_time, 0).single().expect("expected valid timestamp").into(), + min_blocks_delta, min_duration_delta, max_time_per_block_delta) + } + } + + proptest! { + /// Test that: + /// 1. When the minimum blocks have been created since the epoch + /// start height and minimum time passed since the epoch start time, + /// a new epoch must start. + /// 2. 
When the epoch duration parameters change, the current epoch's + /// duration doesn't change, but the next one does. + #[test] + fn update_epoch_after_its_duration( + (epoch_duration, max_expected_time_per_block, start_height, start_time, block_height, block_time, + min_blocks_delta, min_duration_delta, max_time_per_block_delta) + in arb_and_epoch_duration_start_and_block()) + { + let mut storage = TestStorage { + next_epoch_min_start_height: + start_height + epoch_duration.min_num_of_blocks, + next_epoch_min_start_time: + start_time + epoch_duration.min_duration, + ..Default::default() + }; + let mut parameters = Parameters { + epoch_duration: epoch_duration.clone(), + max_expected_time_per_block: Duration::seconds(max_expected_time_per_block).into(), + vp_whitelist: vec![], + tx_whitelist: vec![], + implicit_vp: vec![], + epochs_per_year: 100, + pos_gain_p: dec!(0.1), + pos_gain_d: dec!(0.1), + staked_ratio: dec!(0.1), + pos_inflation_amount: 0, + }; + parameters.init_storage(&mut storage); + + let epoch_before = storage.last_epoch; + assert_eq!(epoch_before, storage.block.epoch); + + // Try to apply the epoch update + storage.update_epoch(block_height, block_time).unwrap(); + + // Test for 1. + if block_height.0 - start_height.0 + >= epoch_duration.min_num_of_blocks + && time::duration_passed( + block_time, + start_time, + epoch_duration.min_duration, + ) + { + assert_eq!(storage.block.epoch, epoch_before.next()); + assert_eq!(storage.next_epoch_min_start_height, + block_height + epoch_duration.min_num_of_blocks); + assert_eq!(storage.next_epoch_min_start_time, + block_time + epoch_duration.min_duration); + assert_eq!( + storage.block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), + Some(epoch_before)); + assert_eq!( + storage.block.pred_epochs.get_epoch(block_height), + Some(epoch_before.next())); + } else { + assert_eq!(storage.block.epoch, epoch_before); + assert_eq!( + storage.block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), + Some(epoch_before)); + assert_eq!( + storage.block.pred_epochs.get_epoch(block_height), + Some(epoch_before)); + } + // Last epoch should only change when the block is committed + assert_eq!(storage.last_epoch, epoch_before); + + // Update the epoch duration parameters + parameters.epoch_duration.min_num_of_blocks = + (parameters.epoch_duration.min_num_of_blocks as i64 + min_blocks_delta) as u64; + let min_duration: i64 = parameters.epoch_duration.min_duration.0 as _; + parameters.epoch_duration.min_duration = + Duration::seconds(min_duration + min_duration_delta).into(); + parameters.max_expected_time_per_block = + Duration::seconds(max_expected_time_per_block + max_time_per_block_delta).into(); + parameters::update_max_expected_time_per_block_parameter(&mut storage, ¶meters.max_expected_time_per_block).unwrap(); + parameters::update_epoch_parameter(&mut storage, ¶meters.epoch_duration).unwrap(); + + // Test for 2. 
+ let epoch_before = storage.block.epoch; + let height_of_update = storage.next_epoch_min_start_height.0 ; + let time_of_update = storage.next_epoch_min_start_time; + let height_before_update = BlockHeight(height_of_update - 1); + let height_of_update = BlockHeight(height_of_update); + let time_before_update = time_of_update - Duration::seconds(1); + + // No update should happen before both epoch duration conditions are + // satisfied + storage.update_epoch(height_before_update, time_before_update).unwrap(); + assert_eq!(storage.block.epoch, epoch_before); + storage.update_epoch(height_of_update, time_before_update).unwrap(); + assert_eq!(storage.block.epoch, epoch_before); + storage.update_epoch(height_before_update, time_of_update).unwrap(); + assert_eq!(storage.block.epoch, epoch_before); + + // Update should happen at this or after this height and time + storage.update_epoch(height_of_update, time_of_update).unwrap(); + assert_eq!(storage.block.epoch, epoch_before.next()); + // The next epoch's minimum duration should change + assert_eq!(storage.next_epoch_min_start_height, + height_of_update + parameters.epoch_duration.min_num_of_blocks); + assert_eq!(storage.next_epoch_min_start_time, + time_of_update + parameters.epoch_duration.min_duration); + } + } +} diff --git a/shared/src/ledger/storage/traits.rs b/core/src/ledger/storage/traits.rs similarity index 98% rename from shared/src/ledger/storage/traits.rs rename to core/src/ledger/storage/traits.rs index b615abaa98..6e109ee53e 100644 --- a/shared/src/ledger/storage/traits.rs +++ b/core/src/ledger/storage/traits.rs @@ -10,13 +10,15 @@ use ics23::commitment_proof::Proof as Ics23Proof; use ics23::{CommitmentProof, ExistenceProof}; use sha2::{Digest, Sha256}; +use super::ics23_specs; use super::merkle_tree::{Amt, Error, Smt}; -use super::{ics23_specs, IBC_KEY_LIMIT}; use crate::ledger::eth_bridge::storage::bridge_pool::BridgePoolTree; use crate::ledger::storage::merkle_tree::StorageBytes; use crate::types::eth_bridge_pool::PendingTransfer; use crate::types::hash::Hash; -use crate::types::storage::{Key, MembershipProof, StringKey, TreeBytes}; +use crate::types::storage::{ + Key, MembershipProof, StringKey, TreeBytes, IBC_KEY_LIMIT, +}; /// Trait for reading from a merkle tree that is a sub-tree /// of the global merkle tree. 
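The `storage_api` traits these tests build on (and which the new `storage_api::key` module below uses) make storage helpers backend-agnostic. A hedged sketch of the pattern, assuming the core crate is importable as `namada_core`; the `bump_counter` helper is hypothetical and works against `TestStorage` or the real ledger storage alike:

```rust
use namada_core::ledger::storage_api::{self, StorageRead, StorageWrite};
use namada_core::types::storage::Key;

/// Round-trip a Borsh-encoded `u64` through any storage backend.
fn bump_counter<S>(storage: &mut S, key: &Key) -> storage_api::Result<u64>
where
    S: StorageWrite + for<'iter> StorageRead<'iter>,
{
    // `read` Borsh-decodes the stored value; a missing key yields `None`.
    let current: u64 = storage.read(key)?.unwrap_or_default();
    // `write` Borsh-encodes the new value.
    storage.write(key, current + 1)?;
    Ok(current + 1)
}
```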
diff --git a/shared/src/ledger/storage/types.rs b/core/src/ledger/storage/types.rs similarity index 100% rename from shared/src/ledger/storage/types.rs rename to core/src/ledger/storage/types.rs diff --git a/shared/src/ledger/storage_api/collections/lazy_map.rs b/core/src/ledger/storage_api/collections/lazy_map.rs similarity index 100% rename from shared/src/ledger/storage_api/collections/lazy_map.rs rename to core/src/ledger/storage_api/collections/lazy_map.rs diff --git a/shared/src/ledger/storage_api/collections/lazy_vec.rs b/core/src/ledger/storage_api/collections/lazy_vec.rs similarity index 100% rename from shared/src/ledger/storage_api/collections/lazy_vec.rs rename to core/src/ledger/storage_api/collections/lazy_vec.rs diff --git a/shared/src/ledger/storage_api/collections/mod.rs b/core/src/ledger/storage_api/collections/mod.rs similarity index 100% rename from shared/src/ledger/storage_api/collections/mod.rs rename to core/src/ledger/storage_api/collections/mod.rs diff --git a/shared/src/ledger/storage_api/error.rs b/core/src/ledger/storage_api/error.rs similarity index 100% rename from shared/src/ledger/storage_api/error.rs rename to core/src/ledger/storage_api/error.rs diff --git a/core/src/ledger/storage_api/key.rs b/core/src/ledger/storage_api/key.rs new file mode 100644 index 0000000000..06b3c76bad --- /dev/null +++ b/core/src/ledger/storage_api/key.rs @@ -0,0 +1,26 @@ +//! Cryptographic signature keys storage API + +use super::*; +use crate::types::address::Address; +use crate::types::key::*; + +/// Get the public key associated with the given address. Returns `Ok(None)` if +/// not found. +pub fn get<S>(storage: &S, owner: &Address) -> Result<Option<common::PublicKey>> +where + S: for<'iter> StorageRead<'iter>, +{ + let key = pk_key(owner); + storage.read(&key) +} + +/// Reveal the PK of an implicit account - the PK is written into the storage +/// of the address derived from the PK. +pub fn reveal_pk<S>(storage: &mut S, pk: &common::PublicKey) -> Result<()> +where + S: StorageWrite, +{ + let addr: Address = pk.into(); + let key = pk_key(&addr); + storage.write(&key, pk) +} diff --git a/shared/src/ledger/storage_api/mod.rs b/core/src/ledger/storage_api/mod.rs similarity index 98% rename from shared/src/ledger/storage_api/mod.rs rename to core/src/ledger/storage_api/mod.rs index a762cabb67..1a44abfbcd 100644 --- a/shared/src/ledger/storage_api/mod.rs +++ b/core/src/ledger/storage_api/mod.rs @@ -3,12 +3,14 @@ pub mod collections; mod error; +pub mod key; pub mod queries; pub mod validation; use borsh::{BorshDeserialize, BorshSerialize}; pub use error::{CustomError, Error, OptionExt, Result, ResultExt}; +use crate::types::address::Address; use crate::types::storage::{self, BlockHash, BlockHeight, Epoch, TxIndex}; /// Common storage read interface @@ -100,6 +102,9 @@ pub trait StorageRead<'iter> { /// Get the transaction index. fn get_tx_index(&self) -> Result<TxIndex>; + + /// Get the native token address + fn get_native_token(&self) -> Result<Address>
; } /// Common storage write interface diff --git a/shared/src/ledger/storage_api/queries.rs b/core/src/ledger/storage_api/queries.rs similarity index 100% rename from shared/src/ledger/storage_api/queries.rs rename to core/src/ledger/storage_api/queries.rs diff --git a/shared/src/ledger/storage_api/validation/mod.rs b/core/src/ledger/storage_api/validation/mod.rs similarity index 100% rename from shared/src/ledger/storage_api/validation/mod.rs rename to core/src/ledger/storage_api/validation/mod.rs diff --git a/shared/src/ledger/tx_env.rs b/core/src/ledger/tx_env.rs similarity index 100% rename from shared/src/ledger/tx_env.rs rename to core/src/ledger/tx_env.rs diff --git a/core/src/ledger/vp_env.rs b/core/src/ledger/vp_env.rs new file mode 100644 index 0000000000..49bd5d515c --- /dev/null +++ b/core/src/ledger/vp_env.rs @@ -0,0 +1,180 @@ +//! Validity predicate environment contains functions that can be called from +//! inside validity predicates. + +use borsh::BorshDeserialize; + +use super::storage_api::{self, StorageRead}; +use crate::types::address::Address; +use crate::types::hash::Hash; +use crate::types::key::common; +use crate::types::storage::{BlockHash, BlockHeight, Epoch, Key, TxIndex}; + +/// Validity predicate's environment is available for native VPs and WASM VPs +pub trait VpEnv<'view> { + /// Storage read prefix iterator + type PrefixIter; + + /// Type to read storage state before the transaction execution + type Pre: StorageRead<'view, PrefixIter = Self::PrefixIter>; + + /// Type to read storage state after the transaction execution + type Post: StorageRead<'view, PrefixIter = Self::PrefixIter>; + + /// Read storage state before the transaction execution + fn pre(&'view self) -> Self::Pre; + + /// Read storage state after the transaction execution + fn post(&'view self) -> Self::Post; + + /// Storage read temporary state Borsh encoded value (after tx execution). + /// It will try to read from only the write log and then decode it if + /// found. + fn read_temp( + &self, + key: &Key, + ) -> Result, storage_api::Error>; + + /// Storage read temporary state raw bytes (after tx execution). It will try + /// to read from only the write log. + fn read_bytes_temp( + &self, + key: &Key, + ) -> Result>, storage_api::Error>; + + /// Getting the chain ID. + fn get_chain_id(&'view self) -> Result; + + /// Getting the block height. The height is that of the block to which the + /// current transaction is being applied. + fn get_block_height(&'view self) + -> Result; + + /// Getting the block hash. The height is that of the block to which the + /// current transaction is being applied. + fn get_block_hash(&'view self) -> Result; + + /// Getting the block epoch. The epoch is that of the block to which the + /// current transaction is being applied. + fn get_block_epoch(&'view self) -> Result; + + /// Get the shielded transaction index. + fn get_tx_index(&'view self) -> Result; + + /// Get the address of the native token. + fn get_native_token(&'view self) -> Result; + + /// Storage prefix iterator, ordered by storage keys. It will try to get an + /// iterator from the storage. + fn iter_prefix( + &'view self, + prefix: &Key, + ) -> Result; + + /// Storage prefix iterator, reverse ordered by storage keys. It will try to + /// get an iterator from the storage. + fn rev_iter_prefix( + &self, + prefix: &Key, + ) -> Result; + + /// Evaluate a validity predicate with given data. 
The address, changed + /// storage keys and verifiers will have the same values as the input to + /// caller's validity predicate. + /// + /// If the execution fails for whatever reason, this will return `false`. + /// Otherwise returns the result of evaluation. + fn eval( + &self, + vp_code: Vec, + input_data: Vec, + ) -> Result; + + /// Verify a transaction signature. The signature is expected to have been + /// produced on the encoded transaction [`crate::proto::Tx`] + /// using [`crate::proto::Tx::sign`]. + fn verify_tx_signature( + &self, + pk: &common::PublicKey, + sig: &common::Signature, + ) -> Result; + + /// Get a tx hash + fn get_tx_code_hash(&self) -> Result; + + /// Verify a MASP transaction + fn verify_masp(&self, tx: Vec) -> Result; + + // ---- Methods below have default implementation via `pre/post` ---- + + /// Storage read prior state Borsh encoded value (before tx execution). It + /// will try to read from the storage and decode it if found. + fn read_pre( + &'view self, + key: &Key, + ) -> Result, storage_api::Error> { + self.pre().read(key) + } + + /// Storage read prior state raw bytes (before tx execution). It + /// will try to read from the storage. + fn read_bytes_pre( + &'view self, + key: &Key, + ) -> Result>, storage_api::Error> { + self.pre().read_bytes(key) + } + + /// Storage read posterior state Borsh encoded value (after tx execution). + /// It will try to read from the write log first and if no entry found + /// then from the storage and then decode it if found. + fn read_post( + &'view self, + key: &Key, + ) -> Result, storage_api::Error> { + self.post().read(key) + } + + /// Storage read posterior state raw bytes (after tx execution). It will try + /// to read from the write log first and if no entry found then from the + /// storage. + fn read_bytes_post( + &'view self, + key: &Key, + ) -> Result>, storage_api::Error> { + self.post().read_bytes(key) + } + + /// Storage `has_key` in prior state (before tx execution). It will try to + /// read from the storage. + fn has_key_pre(&'view self, key: &Key) -> Result { + self.pre().has_key(key) + } + + /// Storage `has_key` in posterior state (after tx execution). It will try + /// to check the write log first and if no entry found then the storage. + fn has_key_post( + &'view self, + key: &Key, + ) -> Result { + self.post().has_key(key) + } + + /// Storage prefix iterator for prior state (before tx execution). It will + /// try to read from the storage. + fn iter_pre_next( + &'view self, + iter: &mut Self::PrefixIter, + ) -> Result)>, storage_api::Error> { + self.pre().iter_next(iter) + } + + /// Storage prefix iterator next for posterior state (after tx execution). + /// It will try to read from the write log first and if no entry found + /// then from the storage. + fn iter_post_next( + &'view self, + iter: &mut Self::PrefixIter, + ) -> Result)>, storage_api::Error> { + self.post().iter_next(iter) + } +} diff --git a/core/src/lib.rs b/core/src/lib.rs new file mode 100644 index 0000000000..c9bd40084e --- /dev/null +++ b/core/src/lib.rs @@ -0,0 +1,26 @@ +//! The core public types, storage_api, VpEnv and TxEnv. 
+ +#![doc(html_favicon_url = "https://dev.namada.net/master/favicon.png")] +#![doc(html_logo_url = "https://dev.namada.net/master/rustdoc-logo.png")] +#![warn(missing_docs)] +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(rustdoc::private_intra_doc_links)] + +pub mod bytes; +pub mod ledger; +pub mod proto; +pub mod types; + +#[cfg(feature = "abciplus")] +pub use {ibc, ibc_proto, tendermint, tendermint_proto}; +#[cfg(feature = "abcipp")] +pub use { + ibc_abcipp as ibc, ibc_proto_abcipp as ibc_proto, + tendermint_abcipp as tendermint, + tendermint_proto_abcipp as tendermint_proto, +}; + +// A handy macro for tests +#[cfg(test)] +#[macro_use] +extern crate assert_matches; diff --git a/shared/src/proto/generated.rs b/core/src/proto/generated.rs similarity index 100% rename from shared/src/proto/generated.rs rename to core/src/proto/generated.rs diff --git a/shared/src/proto/generated/.gitignore b/core/src/proto/generated/.gitignore similarity index 100% rename from shared/src/proto/generated/.gitignore rename to core/src/proto/generated/.gitignore diff --git a/shared/src/proto/mod.rs b/core/src/proto/mod.rs similarity index 100% rename from shared/src/proto/mod.rs rename to core/src/proto/mod.rs diff --git a/shared/src/proto/types.rs b/core/src/proto/types.rs similarity index 83% rename from shared/src/proto/types.rs rename to core/src/proto/types.rs index c771a21b88..a34fddf414 100644 --- a/shared/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -7,25 +7,11 @@ use borsh::schema::{Declaration, Definition}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use prost::Message; use serde::{Deserialize, Serialize}; -#[cfg(not(feature = "ABCI"))] -#[cfg(feature = "ferveo-tpke")] -use tendermint_proto::abci::Event; -#[cfg(not(feature = "ABCI"))] -#[cfg(feature = "ferveo-tpke")] -use tendermint_proto::abci::EventAttribute; -#[cfg(not(feature = "ABCI"))] -use tendermint_proto::abci::ResponseDeliverTx; -#[cfg(feature = "ABCI")] -#[cfg(feature = "ferveo-tpke")] -use tendermint_proto_abci::abci::Event; -#[cfg(feature = "ABCI")] -#[cfg(feature = "ferveo-tpke")] -use tendermint_proto_abci::abci::EventAttribute; -#[cfg(feature = "ABCI")] -use tendermint_proto_abci::abci::ResponseDeliverTx; use thiserror::Error; use super::generated::types; +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +use crate::tendermint_proto::abci::ResponseDeliverTx; use crate::types::key::*; use crate::types::time::DateTimeUtc; #[cfg(feature = "ferveo-tpke")] @@ -186,6 +172,100 @@ impl> Signed { } } +/// A Tx with its code replaced by a hash salted with the Borsh +/// serialized timestamp of the transaction. This structure will almost +/// certainly be smaller than a Tx, yet in the usual cases it contains +/// enough information to confirm that the Tx is as intended and make a +/// non-malleable signature. +#[derive( + Clone, Debug, PartialEq, BorshSerialize, BorshDeserialize, BorshSchema, Hash, +)] +pub struct SigningTx { + pub code_hash: [u8; 32], + pub data: Option>, + pub timestamp: DateTimeUtc, +} + +impl SigningTx { + pub fn hash(&self) -> [u8; 32] { + let timestamp = Some(self.timestamp.into()); + let mut bytes = vec![]; + types::Tx { + code: self.code_hash.to_vec(), + data: self.data.clone(), + timestamp, + } + .encode(&mut bytes) + .expect("encoding a transaction failed"); + hash_tx(&bytes).0 + } + + /// Sign a transaction using [`SignedTxData`]. 
+ pub fn sign(self, keypair: &common::SecretKey) -> Self { + let to_sign = self.hash(); + let sig = common::SigScheme::sign(keypair, to_sign); + let signed = SignedTxData { + data: self.data, + sig, + } + .try_to_vec() + .expect("Encoding transaction data shouldn't fail"); + SigningTx { + code_hash: self.code_hash, + data: Some(signed), + timestamp: self.timestamp, + } + } + + /// Verify that the transaction has been signed by the secret key + /// counterpart of the given public key. + pub fn verify_sig( + &self, + pk: &common::PublicKey, + sig: &common::Signature, + ) -> std::result::Result<(), VerifySigError> { + // Try to get the transaction data from decoded `SignedTxData` + let tx_data = self.data.clone().ok_or(VerifySigError::MissingData)?; + let signed_tx_data = SignedTxData::try_from_slice(&tx_data[..]) + .expect("Decoding transaction data shouldn't fail"); + let data = signed_tx_data.data; + let tx = SigningTx { + code_hash: self.code_hash, + data, + timestamp: self.timestamp, + }; + let signed_data = tx.hash(); + common::SigScheme::verify_signature_raw(pk, &signed_data, sig) + } + + /// Expand this reduced Tx using the supplied code only if the code + /// hashes to the stored code hash + pub fn expand(self, code: Vec<u8>) -> Option<Tx> { + if hash_tx(&code).0 == self.code_hash { + Some(Tx { + code, + data: self.data, + timestamp: self.timestamp, + }) + } else { + None + } + } +} + +impl From<Tx> for SigningTx { + fn from(tx: Tx) -> SigningTx { + SigningTx { + code_hash: hash_tx(&tx.code).0, + data: tx.data, + timestamp: tx.timestamp, + } + } +} + +/// A SigningTx but with the full code embedded. This structure will almost +/// certainly be bigger than a SigningTx and contains enough information to +/// execute the transaction. #[derive( Clone, Debug, PartialEq, BorshSerialize, BorshDeserialize, BorshSchema, Hash, )] @@ -223,6 +303,7 @@ impl From<Tx> for types::Tx { } } +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] impl From<Tx> for ResponseDeliverTx { #[cfg(not(feature = "ferveo-tpke"))] fn from(_tx: Tx) -> ResponseDeliverTx { @@ -232,6 +313,8 @@ impl From<Tx> for ResponseDeliverTx { /// Annotate the Tx with meta-data based on its contents #[cfg(feature = "ferveo-tpke")] fn from(tx: Tx) -> ResponseDeliverTx { + use crate::tendermint_proto::abci::{Event, EventAttribute}; + #[cfg(feature = "ABCI")] fn encode_str(x: &str) -> Vec<u8> { x.as_bytes().to_vec() @@ -320,28 +403,20 @@ impl Tx { } pub fn hash(&self) -> [u8; 32] { - hash_tx(&self.to_bytes()).0 + SigningTx::from(self.clone()).hash() } pub fn code_hash(&self) -> [u8; 32] { - hash_tx(&self.code).0 + SigningTx::from(self.clone()).code_hash }
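For orientation, the intended round trip is roughly the following. This is a sketch, not part of the diff: `code`, `data` and `keypair` are hypothetical values, and `Tx::new` is assumed from the surrounding module.

    // Signing detaches the code as a salted hash; expanding re-attaches it.
    let tx = Tx::new(code.clone(), Some(data));
    let signed = SigningTx::from(tx).sign(&keypair);
    // Succeeds only because `code` hashes to the stored code hash:
    let full: Tx = signed.expand(code).expect("same code, same hash");

/// Sign a transaction using [`SignedTxData`].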
pub fn sign(self, keypair: &common::SecretKey) -> Self { - let to_sign = self.hash(); - let sig = common::SigScheme::sign(keypair, &to_sign); - let signed = SignedTxData { - data: self.data, - sig, - } - .try_to_vec() - .expect("Encoding transaction data shouldn't fail"); - Tx { - code: self.code, - data: Some(signed), - timestamp: self.timestamp, - } + let code = self.code.clone(); + SigningTx::from(self) + .sign(keypair) + .expand(code) + .expect("code hashes to unexpected value") } /// Verify that the transaction has been signed by the secret key @@ -351,18 +426,7 @@ impl Tx { pk: &common::PublicKey, sig: &common::Signature, ) -> std::result::Result<(), VerifySigError> { - // Try to get the transaction data from decoded `SignedTxData` - let tx_data = self.data.clone().ok_or(VerifySigError::MissingData)?; - let signed_tx_data = SignedTxData::try_from_slice(&tx_data[..]) - .expect("Decoding transaction data shouldn't fail"); - let data = signed_tx_data.data; - let tx = Tx { - code: self.code.clone(), - data, - timestamp: self.timestamp, - }; - let signed_data = tx.hash(); - common::SigScheme::verify_signature_raw(pk, &signed_data, sig) + SigningTx::from(self.clone()).verify_sig(pk, sig) } } diff --git a/shared/src/types/address.rs b/core/src/types/address.rs similarity index 100% rename from shared/src/types/address.rs rename to core/src/types/address.rs diff --git a/shared/src/types/chain.rs b/core/src/types/chain.rs similarity index 98% rename from shared/src/types/chain.rs rename to core/src/types/chain.rs index 18627903cb..06a5d3938c 100644 --- a/shared/src/types/chain.rs +++ b/core/src/types/chain.rs @@ -18,11 +18,11 @@ pub const CHAIN_ID_PREFIX_SEP: char = '.'; /// Development default chain ID. Must be [`CHAIN_ID_LENGTH`] long. #[cfg(feature = "dev")] -pub const DEFAULT_CHAIN_ID: &str = "anoma-devchain.000000000000000"; +pub const DEFAULT_CHAIN_ID: &str = "namada-devchain.00000000000000"; /// Release default chain ID. Must be [`CHAIN_ID_LENGTH`] long. 
#[cfg(not(feature = "dev"))] -pub const DEFAULT_CHAIN_ID: &str = "anoma-internal.000000000000000"; +pub const DEFAULT_CHAIN_ID: &str = "namada-internal.00000000000000"; /// Chain ID #[derive( diff --git a/shared/src/types/governance.rs b/core/src/types/governance.rs similarity index 85% rename from shared/src/types/governance.rs rename to core/src/types/governance.rs index 5f82335cb2..438017a370 100644 --- a/shared/src/types/governance.rs +++ b/core/src/types/governance.rs @@ -9,13 +9,12 @@ use rust_decimal::Decimal; use serde::{Deserialize, Serialize}; use thiserror::Error; -use super::address::Address; -use super::hash::Hash; -use super::key::common::{self, Signature}; -use super::key::SigScheme; -use super::storage::Epoch; -use super::token::SCALE; -use super::transaction::governance::InitProposalData; +use crate::types::address::Address; +use crate::types::hash::Hash; +use crate::types::key::common::{self, Signature}; +use crate::types::key::SigScheme; +use crate::types::storage::Epoch; +use crate::types::token::SCALE; /// Type alias for vote power pub type VotePower = u128; @@ -84,8 +83,8 @@ pub enum TallyResult { Passed, /// Proposal was rejected Rejected, - /// Proposal result is unknown - Unknown, + /// A critical error in tally computation + Failed, } /// The result with votes of a proposal @@ -124,7 +123,7 @@ impl Display for TallyResult { match self { TallyResult::Passed => write!(f, "passed"), TallyResult::Rejected => write!(f, "rejected"), - TallyResult::Unknown => write!(f, "unknown"), + TallyResult::Failed => write!(f, "failed"), } } } @@ -163,31 +162,6 @@ pub enum ProposalError { InvalidProposalData, } -impl TryFrom for InitProposalData { - type Error = ProposalError; - - fn try_from(proposal: Proposal) -> Result { - let proposal_code = if let Some(path) = proposal.proposal_code_path { - match std::fs::read(path) { - Ok(bytes) => Some(bytes), - Err(_) => return Err(Self::Error::InvalidProposalData), - } - } else { - None - }; - - Ok(InitProposalData { - id: proposal.id, - content: proposal.content.try_to_vec().unwrap(), - author: proposal.author, - voting_start_epoch: proposal.voting_start_epoch, - voting_end_epoch: proposal.voting_end_epoch, - grace_epoch: proposal.grace_epoch, - proposal_code, - }) - } -} - #[derive( Debug, Clone, BorshSerialize, BorshDeserialize, Serialize, Deserialize, )] @@ -224,9 +198,9 @@ impl OfflineProposal { tally_epoch_serialized, ] .concat(); - let proposal_data_hash = Hash::sha256(&proposal_serialized); + let proposal_data_hash = Hash::sha256(proposal_serialized); let signature = - common::SigScheme::sign(signing_key, &proposal_data_hash); + common::SigScheme::sign(signing_key, proposal_data_hash); Self { content: proposal.content, author: proposal.author, @@ -261,7 +235,7 @@ impl OfflineProposal { tally_epoch_serialized, ] .concat(); - Hash::sha256(&proposal_serialized) + Hash::sha256(proposal_serialized) } } @@ -297,7 +271,7 @@ impl OfflineVote { .expect("Conversion to bytes shouldn't fail."); let vote_serialized = &[proposal_hash_data, proposal_vote_data].concat(); - let signature = common::SigScheme::sign(signing_key, &vote_serialized); + let signature = common::SigScheme::sign(signing_key, vote_serialized); Self { proposal_hash, vote, diff --git a/shared/src/types/hash.rs b/core/src/types/hash.rs similarity index 68% rename from shared/src/types/hash.rs rename to core/src/types/hash.rs index 99d058967d..0e178adbd6 100644 --- a/shared/src/types/hash.rs +++ b/core/src/types/hash.rs @@ -5,16 +5,14 @@ use std::ops::Deref; use 
std::str::FromStr; use arse_merkle_tree::traits::Value; +use arse_merkle_tree::{Hash as TreeHash, H256}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use hex::FromHex; +use data_encoding::HEXUPPER; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use thiserror::Error; -use crate::tendermint::abci::transaction; -use crate::tendermint::Hash as TmHash; - -/// The length of the raw transaction hash. +/// The length of the raw transaction hash in bytes pub const HASH_LENGTH: usize = 32; /// The length of the hex encoded transaction hash. @@ -28,7 +26,7 @@ pub enum Error { #[error("Failed trying to convert slice to a hash: {0}")] ConversionFailed(std::array::TryFromSliceError), #[error("Failed to convert string into a hash: {0}")] - FromStringError(hex::FromHexError), + FromStringError(data_encoding::DecodeError), } /// Result for functions that may fail @@ -48,14 +46,11 @@ pub type HashResult<T> = std::result::Result<T, Error>; Deserialize, )] /// A hash, typically a sha-2 hash of a tx -pub struct Hash(pub [u8; 32]); +pub struct Hash(pub [u8; HASH_LENGTH]); impl Display for Hash { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - for byte in &self.0 { - write!(f, "{:02X}", byte)?; - } - Ok(()) + write!(f, "{}", HEXUPPER.encode(&self.0)) } } @@ -66,7 +61,7 @@ impl AsRef<[u8]> for Hash { } impl Deref for Hash { - type Target = [u8; 32]; + type Target = [u8; HASH_LENGTH]; fn deref(&self) -> &Self::Target { &self.0 @@ -86,7 +81,7 @@ impl TryFrom<&[u8]> for Hash { ), }); } - let hash: [u8; 32] = + let hash: [u8; HASH_LENGTH] = TryFrom::try_from(value).map_err(Error::ConversionFailed)?; Ok(Hash(hash)) } @@ -104,16 +99,18 @@ impl TryFrom<&str> for Hash { type Error = self::Error; fn try_from(string: &str) -> HashResult<Self> { - Ok(Self( - <[u8; HASH_LENGTH]>::from_hex(string) - .map_err(Error::FromStringError)?, - )) + let vec = HEXUPPER + .decode(string.as_ref()) + .map_err(Error::FromStringError)?; + Self::try_from(&vec[..]) } } -impl From<Hash> for transaction::Hash { - fn from(hash: Hash) -> Self { - Self::new(hash.0) +impl FromStr for Hash { + type Err = self::Error; + + fn from_str(str: &str) -> Result<Self, Self::Err> { + Self::try_from(str) } } @@ -132,15 +129,27 @@ impl Hash { Self(*digest.as_ref()) } + fn zero() -> Self { + Self([0u8; HASH_LENGTH]) + } + /// Check if the hash is all zeros pub fn is_zero(&self) -> bool { self == &Self::zero() } } -impl From<Hash> for TmHash { +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +impl From<Hash> for crate::tendermint::abci::transaction::Hash { + fn from(hash: Hash) -> Self { + Self::new(hash.0) + } +} + +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +impl From<Hash> for crate::tendermint::Hash { fn from(hash: Hash) -> Self { - TmHash::Sha256(hash.0) + Self::Sha256(hash.0) } } @@ -163,3 +172,32 @@ mod tests { } } } + +impl Value for Hash { + fn as_slice(&self) -> &[u8] { + self.0.as_slice() + } + + fn zero() -> Self { + Hash([0u8; HASH_LENGTH]) + } +} + +impl From<Hash> for H256 { + fn from(hash: Hash) -> Self { + hash.0.into() + } +} + +impl From<H256> for Hash { + fn from(hash: H256) -> Self { + Self(hash.into()) + } +} + +impl From<&H256> for Hash { + fn from(hash: &H256) -> Self { + let hash = hash.to_owned(); + Self(hash.into()) + } +} diff --git a/core/src/types/ibc.rs b/core/src/types/ibc.rs new file mode 100644 index 0000000000..3d537cb025 --- /dev/null +++ b/core/src/types/ibc.rs @@ -0,0 +1,74 @@ +//!
IBC event without IBC-related data types + +use std::collections::HashMap; + +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; + +/// Wrapped IbcEvent +#[derive( + Debug, Clone, BorshSerialize, BorshDeserialize, BorshSchema, PartialEq, Eq, +)] +pub struct IbcEvent { + /// The IBC event type + pub event_type: String, + /// The attributes of the IBC event + pub attributes: HashMap<String, String>, +} + +impl std::fmt::Display for IbcEvent { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let attributes = self + .attributes + .iter() + .map(|(k, v)| format!("{}: {};", k, v)) + .collect::<Vec<String>>() + .join(", "); + write!( + f, + "Event type: {}, Attributes: {}", + self.event_type, attributes + ) + } +} + +#[cfg(any(feature = "abciplus", feature = "abcipp"))] +mod ibc_rs_conversion { + use std::collections::HashMap; + + use thiserror::Error; + + use super::IbcEvent; + use crate::ibc::events::{Error as IbcEventError, IbcEvent as RawIbcEvent}; + use crate::tendermint::abci::Event as AbciEvent; + + #[allow(missing_docs)] + #[derive(Error, Debug)] + pub enum Error { + #[error("IBC event error: {0}")] + IbcEvent(IbcEventError), + } + + /// Conversion functions result + pub type Result<T> = std::result::Result<T, Error>; + + impl TryFrom<RawIbcEvent> for IbcEvent { + type Error = Error; + + fn try_from(e: RawIbcEvent) -> Result<Self> { + let event_type = e.event_type().as_str().to_string(); + let abci_event = AbciEvent::try_from(e).map_err(Error::IbcEvent)?; + let attributes: HashMap<_, _> = abci_event + .attributes + .iter() + .map(|tag| (tag.key.to_string(), tag.value.to_string())) + .collect(); + Ok(Self { + event_type, + attributes, + }) + } + } +} + +#[cfg(any(feature = "abciplus", feature = "abcipp"))] +pub use ibc_rs_conversion::*;
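A small usage sketch (hypothetical attribute values, not from this diff): because the wrapped event is plain strings, consumers can store and match attributes without depending on ibc-rs types.

    use std::collections::HashMap;

    let event = IbcEvent {
        event_type: "send_packet".to_string(),
        attributes: HashMap::from([
            ("packet_src_port".to_string(), "transfer".to_string()),
            ("packet_sequence".to_string(), "1".to_string()),
        ]),
    };
    assert_eq!(
        event.attributes.get("packet_sequence").map(String::as_str),
        Some("1")
    );

diff --git a/core/src/types/internal.rs b/core/src/types/internal.rs new file mode 100644 index 0000000000..848c09bec1 --- /dev/null +++ b/core/src/types/internal.rs @@ -0,0 +1,82 @@ +//! Shared internal types between the host env and guest (wasm). + +use borsh::{BorshDeserialize, BorshSerialize}; + +/// A result of a wasm call to host functions that may fail. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum HostEnvResult { + /// A success + Success = 1, + /// A non-fatal failure does **not** interrupt WASM execution + Fail = -1, +} + +/// A key-value pair representing data from an account's subspace. +/// It is used for the prefix iterator's WASM host_env functions.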
+#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] +pub struct KeyVal { + /// The storage key + pub key: String, + /// The value as arbitrary bytes + pub val: Vec<u8>, +} + +impl HostEnvResult { + /// Convert result to `i64`, which can be passed to wasm + pub fn to_i64(self) -> i64 { + self as _ + } + + /// Check if the given result as `i64` is a success + pub fn is_success(int: i64) -> bool { + int == Self::Success.to_i64() + } + + /// Check if the given result as `i64` is a non-fatal failure + pub fn is_fail(int: i64) -> bool { + int == Self::Fail.to_i64() + } +} + +impl From<bool> for HostEnvResult { + fn from(success: bool) -> Self { + if success { Self::Success } else { Self::Fail } + } +} + +#[cfg(feature = "ferveo-tpke")] +mod tx_queue { + use borsh::{BorshDeserialize, BorshSerialize}; + + use crate::types::transaction::WrapperTx; + + #[derive(Default, Debug, Clone, BorshDeserialize, BorshSerialize)] + /// Wrapper txs to be decrypted in the next block proposal + pub struct TxQueue(std::collections::VecDeque<WrapperTx>); + + impl TxQueue { + /// Add a new wrapper at the back of the queue + pub fn push(&mut self, wrapper: WrapperTx) { + self.0.push_back(wrapper); + } + + /// Remove the wrapper at the head of the queue + pub fn pop(&mut self) -> Option<WrapperTx> { + self.0.pop_front() + } + + /// Get an iterator over the queue + pub fn iter(&self) -> impl std::iter::Iterator<Item = &WrapperTx> { + self.0.iter() + } + + /// Check if there are any txs in the queue + #[allow(dead_code)] + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + } +} + +#[cfg(feature = "ferveo-tpke")] +pub use tx_queue::TxQueue; diff --git a/shared/src/types/key/common.rs b/core/src/types/key/common.rs similarity index 96% rename from shared/src/types/key/common.rs rename to core/src/types/key/common.rs index 8144acf466..633367053c 100644 --- a/shared/src/types/key/common.rs +++ b/core/src/types/key/common.rs @@ -6,16 +6,15 @@ use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use data_encoding::HEXLOWER; -use namada_proof_of_stake::types::PublicKeyTmRawHash; #[cfg(feature = "rand")] use rand::{CryptoRng, RngCore}; use serde::{Deserialize, Serialize}; use thiserror::Error; use super::{ - ed25519, secp256k1, tm_consensus_key_raw_hash, ParsePublicKeyError, - ParseSecretKeyError, ParseSignatureError, RefTo, SchemeType, - SigScheme as SigSchemeTrait, VerifySigError, + ed25519, secp256k1, ParsePublicKeyError, ParseSecretKeyError, + ParseSignatureError, RefTo, SchemeType, SigScheme as SigSchemeTrait, + VerifySigError, }; use crate::types::ethereum_events::EthAddress; @@ -345,9 +344,3 @@ impl super::SigScheme for SigScheme { } } } - -impl PublicKeyTmRawHash for PublicKey { - fn tm_raw_hash(&self) -> String { - tm_consensus_key_raw_hash(self) - } -} diff --git a/shared/src/types/key/dkg_session_keys.rs b/core/src/types/key/dkg_session_keys.rs similarity index 100% rename from shared/src/types/key/dkg_session_keys.rs rename to core/src/types/key/dkg_session_keys.rs diff --git a/shared/src/types/key/ed25519.rs b/core/src/types/key/ed25519.rs similarity index 100% rename from shared/src/types/key/ed25519.rs rename to core/src/types/key/ed25519.rs diff --git a/core/src/types/key/mod.rs b/core/src/types/key/mod.rs new file mode 100644 index 0000000000..3a500addb0 --- /dev/null +++ b/core/src/types/key/mod.rs @@ -0,0 +1,537 @@ +//!
Cryptographic keys + +pub mod common; +/// Elliptic curve keys for the DKG +pub mod dkg_session_keys; +pub mod ed25519; +pub mod secp256k1; + +use std::fmt::{Debug, Display}; +use std::hash::Hash; +use std::str::FromStr; + +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use data_encoding::HEXUPPER; +#[cfg(feature = "rand")] +use rand::{CryptoRng, RngCore}; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; +use thiserror::Error; + +use super::address::Address; +use super::storage::{self, DbKeySeg, Key, KeySeg}; +use crate::types::address; + +const PK_STORAGE_KEY: &str = "public_key"; +const PROTOCOL_PK_STORAGE_KEY: &str = "protocol_public_key"; + +/// Obtain a storage key for user's public key. +pub fn pk_key(owner: &Address) -> storage::Key { + Key::from(owner.to_db_key()) + .push(&PK_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Check if the given storage key is a public key. If it is, returns the owner. +pub fn is_pk_key(key: &Key) -> Option<&Address> { + match &key.segments[..] { + [DbKeySeg::AddressSeg(owner), DbKeySeg::StringSeg(key)] + if key == PK_STORAGE_KEY => + { + Some(owner) + } + _ => None, + } +} + +/// Obtain a storage key for user's protocol public key. +pub fn protocol_pk_key(owner: &Address) -> storage::Key { + Key::from(owner.to_db_key()) + .push(&PROTOCOL_PK_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Check if the given storage key is a public key. If it is, returns the owner. +pub fn is_protocol_pk_key(key: &Key) -> Option<&Address> { + match &key.segments[..] { + [DbKeySeg::AddressSeg(owner), DbKeySeg::StringSeg(key)] + if key == PROTOCOL_PK_STORAGE_KEY => + { + Some(owner) + } + _ => None, + } +} + +/// Represents an error in signature verification +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum VerifySigError { + #[error("Signature verification failed: {0}")] + SigVerifyError(String), + #[error("Signature verification failed to encode the data: {0}")] + DataEncodingError(std::io::Error), + #[error("Transaction doesn't have any data with a signature.")] + MissingData, + #[error("Signature belongs to a different scheme from the public key.")] + MismatchedScheme, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ParsePublicKeyError { + #[error("Invalid public key hex: {0}")] + InvalidHex(data_encoding::DecodeError), + #[error("Invalid public key encoding: {0}")] + InvalidEncoding(std::io::Error), + #[error("Parsed public key does not belong to desired scheme")] + MismatchedScheme, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ParseSignatureError { + #[error("Invalid signature hex: {0}")] + InvalidHex(data_encoding::DecodeError), + #[error("Invalid signature encoding: {0}")] + InvalidEncoding(std::io::Error), + #[error("Parsed signature does not belong to desired scheme")] + MismatchedScheme, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ParseSecretKeyError { + #[error("Invalid secret key hex: {0}")] + InvalidHex(data_encoding::DecodeError), + #[error("Invalid secret key encoding: {0}")] + InvalidEncoding(std::io::Error), + #[error("Parsed secret key does not belong to desired scheme")] + MismatchedScheme, +} + +/// A value-to-value conversion that consumes the input value. + +pub trait RefTo { + /// Performs the conversion. + fn ref_to(&self) -> T; +} + +/// Simple and safe type conversions that may fail in a controlled +/// way under some circumstances. 
+ +pub trait TryFromRef<T: ?Sized>: Sized { + /// The type returned in the event of a conversion error. + type Error; + /// Performs the conversion. + fn try_from_ref(value: &T) -> Result<Self, Self::Error>; +} + +/// Type capturing signature scheme IDs +#[derive(PartialEq, Eq, Copy, Clone, Debug)] +pub enum SchemeType { + /// Type identifier for Ed25519 scheme + Ed25519, + /// Type identifier for Secp256k1 scheme + Secp256k1, + /// Type identifier for Common + Common, +} + +impl FromStr for SchemeType { + type Err = (); + + fn from_str(input: &str) -> Result<Self, Self::Err> { + match input.to_lowercase().as_str() { + "ed25519" => Ok(Self::Ed25519), + "secp256k1" => Ok(Self::Secp256k1), + "common" => Ok(Self::Common), + _ => Err(()), + } + } +} + +/// Represents a signature + +pub trait Signature: + Hash + PartialOrd + Serialize + BorshSerialize + BorshDeserialize + BorshSchema +{ + /// The scheme type of this implementation + const TYPE: SchemeType; + /// Convert from one Signature type to another + fn try_from_sig<SIG: Signature>( + sig: &SIG, + ) -> Result<Self, ParseSignatureError> { + if SIG::TYPE == Self::TYPE { + let sig_arr = sig.try_to_vec().unwrap(); + let res = Self::try_from_slice(sig_arr.as_ref()); + res.map_err(ParseSignatureError::InvalidEncoding) + } else { + Err(ParseSignatureError::MismatchedScheme) + } + } + /// Convert from self to another Signature type + fn try_to_sig<SIG: Signature>(&self) -> Result<SIG, ParseSignatureError> { + SIG::try_from_sig(self) + } +} + +/// Represents a public key + +pub trait PublicKey: + BorshSerialize + + BorshDeserialize + + BorshSchema + + Ord + + Clone + + Display + + Debug + + PartialOrd + + FromStr + + Hash + + Send + + Sync +{ + /// The scheme type of this implementation + const TYPE: SchemeType; + /// Convert from one PublicKey type to another + fn try_from_pk<PK: PublicKey>( + pk: &PK, + ) -> Result<Self, ParsePublicKeyError> { + if Self::TYPE == PK::TYPE { + let pk_arr = pk.try_to_vec().unwrap(); + let res = Self::try_from_slice(pk_arr.as_ref()); + res.map_err(ParsePublicKeyError::InvalidEncoding) + } else { + Err(ParsePublicKeyError::MismatchedScheme) + } + } + /// Convert from self to another PublicKey type + fn try_to_pk<PK: PublicKey>(&self) -> Result<PK, ParsePublicKeyError> { + PK::try_from_pk(self) + } +} + +/// Represents a secret key + +pub trait SecretKey: + BorshSerialize + + BorshDeserialize + + BorshSchema + + Display + + Debug + + RefTo<Self::PublicKey> + + FromStr + + Clone + + Sync + + Send +{ + /// The scheme type of this implementation + const TYPE: SchemeType; + /// Represents the public part of this keypair + type PublicKey: PublicKey; + /// Convert from one SecretKey type to self + fn try_from_sk<SK: SecretKey>( + sk: &SK, + ) -> Result<Self, ParseSecretKeyError> { + if SK::TYPE == Self::TYPE { + let sk_vec = sk.try_to_vec().unwrap(); + let res = Self::try_from_slice(sk_vec.as_ref()); + res.map_err(ParseSecretKeyError::InvalidEncoding) + } else { + Err(ParseSecretKeyError::MismatchedScheme) + } + } + /// Convert from self to another SecretKey type + fn try_to_sk<SK: SecretKey>(&self) -> Result<SK, ParseSecretKeyError> { + SK::try_from_sk(self) + } +} + +/// Represents a digital signature scheme. More precisely this trait captures +/// the concepts of public keys, private keys, and signatures as well as +/// the algorithms over these concepts to generate keys, sign messages, and +/// verify signatures. + +pub trait SigScheme: Eq + Ord + Debug + Serialize + Default { + /// Represents the signature for this scheme + type Signature: 'static + Signature; + /// Represents the public key for this scheme + type PublicKey: 'static + PublicKey; + /// Represents the secret key for this scheme + type SecretKey: 'static + SecretKey; + /// The scheme type of this implementation + const TYPE: SchemeType; + /// Generate a keypair.
+ #[cfg(feature = "rand")] + fn generate<R>(csprng: &mut R) -> Self::SecretKey + where + R: CryptoRng + RngCore; + /// Sign the data with a key. + fn sign( + keypair: &Self::SecretKey, + data: impl AsRef<[u8]>, + ) -> Self::Signature; + /// Check that the public key matches the signature on the given data. + fn verify_signature<T: BorshSerialize + BorshSchema>( + pk: &Self::PublicKey, + data: &T, + sig: &Self::Signature, + ) -> Result<(), VerifySigError>; + /// Check that the public key matches the signature on the given raw data. + fn verify_signature_raw( + pk: &Self::PublicKey, + data: &[u8], + sig: &Self::Signature, + ) -> Result<(), VerifySigError>; +} + +/// Public key hash derived from `common::PublicKey` borsh encoded bytes (hex +/// string of the first 40 chars of sha256 hash) +#[derive( + Debug, + Clone, + BorshSerialize, + BorshDeserialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, +)] +#[serde(transparent)] +pub struct PublicKeyHash(pub(crate) String); + +const PKH_HASH_LEN: usize = address::HASH_LEN; + +impl From<PublicKeyHash> for String { + fn from(pkh: PublicKeyHash) -> Self { + pkh.0 + } +} + +impl Display for PublicKeyHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl FromStr for PublicKeyHash { + type Err = PkhFromStringError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + if s.len() != PKH_HASH_LEN { + return Err(Self::Err::UnexpectedLen(s.len())); + } + Ok(Self(s.to_owned())) + } +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum PkhFromStringError { + #[error("Wrong PKH len. Expected {PKH_HASH_LEN}, got {0}")] + UnexpectedLen(usize), +} + +impl<PK: PublicKey> From<&PK> for PublicKeyHash { + fn from(pk: &PK) -> Self { + let pk_bytes = + pk.try_to_vec().expect("Public key encoding shouldn't fail"); + let mut hasher = Sha256::new(); + hasher.update(pk_bytes); + // hex of the first 40 chars of the hash + PublicKeyHash(format!( + "{:.width$X}", + hasher.finalize(), + width = PKH_HASH_LEN + )) + } +} + +/// Derive Tendermint raw hash from the public key +pub trait PublicKeyTmRawHash { + /// Derive Tendermint raw hash from the public key + fn tm_raw_hash(&self) -> String; +} + +impl PublicKeyTmRawHash for common::PublicKey { + fn tm_raw_hash(&self) -> String { + tm_consensus_key_raw_hash(self) + } +} + +/// Convert validator's consensus key into address raw hash that is compatible +/// with Tendermint +pub fn tm_consensus_key_raw_hash(pk: &common::PublicKey) -> String { + match pk { + common::PublicKey::Ed25519(pk) => { + let pkh = PublicKeyHash::from(pk); + pkh.0 + } + common::PublicKey::Secp256k1(pk) => { + let pkh = PublicKeyHash::from(pk); + pkh.0 + } + } +} + +/// Convert Tendermint validator's raw hash bytes to Namada raw hash string +pub fn tm_raw_hash_to_string(raw_hash: impl AsRef<[u8]>) -> String { + HEXUPPER.encode(raw_hash.as_ref()) +}
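Taken together, a short sketch of how these pieces compose (hypothetical key material; assumes the `testing` feature for `gen_keypair` from the module below):

    // Wrap a concrete ed25519 key into the scheme-agnostic `common` key,
    // then derive the 40-char hex hash used for Tendermint addresses.
    let sk: common::SecretKey = testing::gen_keypair::<ed25519::SigScheme>()
        .try_to_sk()
        .unwrap();
    let pk: common::PublicKey = sk.ref_to();
    let pkh = PublicKeyHash::from(&pk);
    assert_eq!(pkh.to_string().len(), 40); // = address::HASH_LEN
    // Converting into a mismatched concrete scheme should fail:
    assert!(secp256k1::SecretKey::try_from_sk(&sk).is_err());

+ +/// Helpers for testing with keys.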
+#[cfg(any(test, feature = "testing"))] +pub mod testing { + use borsh::BorshDeserialize; + use proptest::prelude::*; + use rand::prelude::{StdRng, ThreadRng}; + use rand::{thread_rng, SeedableRng}; + + use super::SigScheme; + use crate::types::key::*; + + /// A keypair for tests + pub fn keypair_1() -> <common::SigScheme as SigScheme>::SecretKey { + // generated from `cargo test gen_keypair -- --nocapture` + let bytes = [ + 33, 82, 91, 186, 100, 168, 220, 158, 185, 140, 63, 172, 3, 88, 52, + 113, 94, 30, 213, 84, 175, 184, 235, 169, 70, 175, 36, 252, 45, + 190, 138, 79, + ]; + ed25519::SecretKey::try_from_slice(bytes.as_ref()) + .unwrap() + .try_to_sk() + .unwrap() + } + + /// A keypair for tests + pub fn keypair_2() -> <common::SigScheme as SigScheme>::SecretKey { + // generated from `cargo test gen_keypair -- --nocapture` + let bytes = [ + 27, 238, 157, 32, 131, 242, 184, 142, 146, 189, 24, 249, 68, 165, + 205, 71, 213, 158, 25, 253, 52, 217, 87, 52, 171, 225, 110, 131, + 238, 58, 94, 56, + ]; + ed25519::SecretKey::try_from_slice(bytes.as_ref()) + .unwrap() + .try_to_sk() + .unwrap() + } + + /// Generate an arbitrary [`super::SecretKey`]. + pub fn arb_keypair<S: SigScheme>() -> impl Strategy<Value = S::SecretKey> { + any::<[u8; 32]>().prop_map(move |seed| { + let mut rng = StdRng::from_seed(seed); + S::generate(&mut rng) + }) + } + + /// Generate an arbitrary [`common::SecretKey`]. + pub fn arb_common_keypair() -> impl Strategy<Value = common::SecretKey> { + arb_keypair::<ed25519::SigScheme>() + .prop_map(|keypair| keypair.try_to_sk().unwrap()) + } + + /// Generate a new random [`super::SecretKey`]. + pub fn gen_keypair<S: SigScheme>() -> S::SecretKey { + let mut rng: ThreadRng = thread_rng(); + S::generate(&mut rng) + } +} + +#[cfg(test)] +macro_rules! sigscheme_test { + ($name:ident, $type:ty) => { + pub mod $name { + use super::*; + + /// Run `cargo test gen_keypair -- --nocapture` to generate a + /// keypair. + #[test] + fn gen_keypair0() { + use rand::prelude::ThreadRng; + use rand::thread_rng; + + let mut rng: ThreadRng = thread_rng(); + let keypair = <$type>::generate(&mut rng); + println!( + "keypair {:?}", + keypair.try_to_vec().unwrap().as_slice() + ); + } + /// Run `cargo test gen_keypair -- --nocapture` to generate a + /// new keypair. + #[test] + fn gen_keypair1() { + let secret_key = testing::gen_keypair::<$type>(); + let public_key = secret_key.ref_to(); + println!("Public key: {}", public_key); + println!("Secret key: {}", secret_key); + } + + /// Sign a simple message and verify the signature. + #[test] + fn gen_sign_verify() { + use rand::prelude::ThreadRng; + use rand::thread_rng; + + let mut rng: ThreadRng = thread_rng(); + let sk = <$type>::generate(&mut rng); + let sig = <$type>::sign(&sk, b"hello"); + assert!( + <$type>::verify_signature_raw(&sk.ref_to(), b"hello", &sig) + .is_ok() + ); + } + } + }; +} + +#[cfg(test)] +sigscheme_test! {ed25519_test, ed25519::SigScheme} +#[cfg(test)] +sigscheme_test!
{secp256k1_test, secp256k1::SigScheme} + +#[cfg(test)] +mod more_tests { + use super::*; + + #[test] + fn zeroize_keypair_ed25519() { + use rand::thread_rng; + + let sk = ed25519::SigScheme::generate(&mut thread_rng()); + let sk_bytes = sk.0.as_bytes(); + let len = sk_bytes.len(); + let ptr = sk_bytes.as_ptr(); + + drop(sk); + + assert_eq!(&[0u8; 32], unsafe { + core::slice::from_raw_parts(ptr, len) + }); + } + + #[test] + fn zeroize_keypair_secp256k1() { + use rand::thread_rng; + + let mut sk = secp256k1::SigScheme::generate(&mut thread_rng()); + let sk_scalar = sk.0.to_scalar_ref(); + let len = sk_scalar.0.len(); + let ptr = sk_scalar.0.as_ref().as_ptr(); + + let original_data = sk_scalar.0; + + drop(sk); + + assert_ne!(&original_data, unsafe { + core::slice::from_raw_parts(ptr, len) + }); + } +} diff --git a/shared/src/types/key/secp256k1.rs b/core/src/types/key/secp256k1.rs similarity index 100% rename from shared/src/types/key/secp256k1.rs rename to core/src/types/key/secp256k1.rs diff --git a/shared/src/types/masp.rs b/core/src/types/masp.rs similarity index 100% rename from shared/src/types/masp.rs rename to core/src/types/masp.rs diff --git a/core/src/types/mod.rs b/core/src/types/mod.rs new file mode 100644 index 0000000000..0550060498 --- /dev/null +++ b/core/src/types/mod.rs @@ -0,0 +1,15 @@ +//! Types definitions. + +pub mod address; +pub mod chain; +pub mod governance; +pub mod hash; +pub mod ibc; +pub mod internal; +pub mod key; +pub mod masp; +pub mod storage; +pub mod time; +pub mod token; +pub mod transaction; +pub mod validity_predicate; diff --git a/shared/src/types/named_address.rs b/core/src/types/named_address.rs similarity index 100% rename from shared/src/types/named_address.rs rename to core/src/types/named_address.rs diff --git a/shared/src/types/storage.rs b/core/src/types/storage.rs similarity index 92% rename from shared/src/types/storage.rs rename to core/src/types/storage.rs index 78cdf912a0..8dac327075 100644 --- a/shared/src/types/storage.rs +++ b/core/src/types/storage.rs @@ -1,29 +1,30 @@ //! 
Storage types use std::convert::{TryFrom, TryInto}; use std::fmt::Display; +use std::io::Write; use std::num::ParseIntError; use std::ops::{Add, Deref, Div, Mul, Rem, Sub}; use std::str::FromStr; -use arse_merkle_tree::InternalKey; +use arse_merkle_tree::traits::Value; +use arse_merkle_tree::{InternalKey, Key as TreeKey}; use bit_vec::BitVec; -use borsh::maybestd::io::Write; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use data_encoding::BASE32HEX_NOPAD; use ics23::CommitmentProof; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use thiserror::Error; -#[cfg(feature = "ferveo-tpke")] -use super::transaction::WrapperTx; use crate::bytes::ByteBuf; use crate::ledger::eth_bridge::storage::bridge_pool::BridgePoolProof; -use crate::ledger::storage::IBC_KEY_LIMIT; use crate::types::address::{self, Address}; use crate::types::hash::Hash; use crate::types::keccak::{KeccakHash, TryFromError}; use crate::types::time::DateTimeUtc; +/// The maximum size of an IBC key (in bytes) allowed in merkle-ized storage +pub const IBC_KEY_LIMIT: usize = 120; + #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { @@ -284,7 +285,7 @@ impl core::fmt::Debug for BlockHash { } /// The data from Tendermint header -/// relevant for Anoma storage +/// relevant for Namada storage #[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] pub struct Header { /// Merkle root hash of block @@ -359,6 +360,42 @@ pub struct StringKey { pub length: usize, } +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum TreeKeyError { + #[error("Invalid key for merkle tree: {0}")] + InvalidMerkleKey(String), +} + +impl TreeKey<IBC_KEY_LIMIT> for StringKey { + type Error = TreeKeyError; + + fn as_slice(&self) -> &[u8] { + &self.original.as_slice()[..self.length] + } + + fn try_from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Error> { + let mut tree_key = [0u8; IBC_KEY_LIMIT]; + let mut original = [0u8; IBC_KEY_LIMIT]; + let mut length = 0; + for (i, byte) in bytes.iter().enumerate() { + if i >= IBC_KEY_LIMIT { + return Err(TreeKeyError::InvalidMerkleKey( + "Input IBC key is too large".into(), + )); + } + original[i] = *byte; + tree_key[i] = byte.wrapping_add(1); + length += 1; + } + Ok(Self { + original, + tree_key: tree_key.into(), + length, + }) + } +} + impl Deref for StringKey { type Target = InternalKey<IBC_KEY_LIMIT>; @@ -440,6 +477,16 @@ impl From<CommitmentProof> for MembershipProof { } } +impl Value for TreeBytes { + fn as_slice(&self) -> &[u8] { + self.0.as_slice() + } + + fn zero() -> Self { + TreeBytes::zero() + } +} + impl From<BridgePoolProof> for MembershipProof { fn from(proof: BridgePoolProof) -> Self { Self::BridgePool(proof) } } @@ -924,6 +971,53 @@ impl Epoch { pub fn prev(&self) -> Self { Self(self.0 - 1) } + + /// Iterate a range of consecutive epochs starting from `self` of a given + /// length. Work-around for a `Step` implementation, pending stabilization + /// of the `Step` trait. + pub fn iter_range(self, len: u64) -> impl Iterator<Item = Epoch> + Clone { + let start_ix: u64 = self.into(); + let end_ix: u64 = start_ix + len; + (start_ix..end_ix).map(Epoch::from) + } + + /// Checked epoch subtraction. Computes self - rhs, returning None if + /// overflow occurred. + #[must_use = "this returns the result of the operation, without modifying \ + the original"] + pub fn checked_sub(self, rhs: Epoch) -> Option<Self> { + if rhs.0 > self.0 { + None + } else { + Some(Self(self.0 - rhs.0)) + } + }
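The epoch helpers behave like saturating arithmetic over the underlying `u64`; a small illustrative sketch (values are hypothetical; `sub_or_default` is defined just below):

    let e = Epoch(5);
    assert_eq!(e.checked_sub(Epoch(7)), None);
    assert_eq!(e.sub_or_default(Epoch(7)), Epoch(0));
    // Three consecutive epochs starting from `e`:
    let range: Vec<Epoch> = e.iter_range(3).collect();
    assert_eq!(range, vec![Epoch(5), Epoch(6), Epoch(7)]);

+ + /// Checked epoch subtraction. Computes self - rhs, returning default + /// `Epoch(0)` if overflow occurred.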
+ #[must_use = "this returns the result of the operation, without modifying \ + the original"] + pub fn sub_or_default(self, rhs: Epoch) -> Self { + self.checked_sub(rhs).unwrap_or_default() + } +} + +impl From<u64> for Epoch { + fn from(epoch: u64) -> Self { + Epoch(epoch) + } +} + +impl From<Epoch> for u64 { + fn from(epoch: Epoch) -> Self { + epoch.0 + } +} + +// TODO remove this once it's not being used +impl From<Epoch> for usize { + fn from(epoch: Epoch) -> Self { + epoch.0 as usize + } } impl Add<u64> for Epoch { @@ -934,6 +1028,15 @@ impl Add<u64> for Epoch { } } +// TODO remove this once it's not being used +impl Add<usize> for Epoch { + type Output = Self; + + fn add(self, rhs: usize) -> Self::Output { + Epoch(self.0 + rhs as u64) + } +} + impl Sub<u64> for Epoch { type Output = Epoch; @@ -942,6 +1045,14 @@ } } +impl Sub for Epoch { + type Output = Self; + + fn sub(self, rhs: Epoch) -> Self::Output { + Epoch(self.0 - rhs.0) + } +} + impl Mul<u64> for Epoch { type Output = Epoch; @@ -966,14 +1077,6 @@ impl Rem<u64> for Epoch { } } -impl Sub for Epoch { - type Output = Epoch; - - fn sub(self, rhs: Self) -> Self::Output { - Self(self.0 - rhs.0) - } -} - impl Add for Epoch { type Output = Epoch; @@ -990,18 +1093,6 @@ impl Mul for Epoch { } } -impl From<Epoch> for u64 { - fn from(epoch: Epoch) -> Self { - epoch.0 - } -} - -impl From<u64> for Epoch { - fn from(value: u64) -> Self { - Self(value) - } -} - /// Predecessor block epochs #[derive( Clone, @@ -1089,35 +1180,6 @@ impl Epochs { } } -#[cfg(feature = "ferveo-tpke")] -#[derive(Default, Debug, Clone, BorshDeserialize, BorshSerialize)] -/// Wrapper txs to be decrypted in the next block proposal -pub struct TxQueue(std::collections::VecDeque<WrapperTx>); - -#[cfg(feature = "ferveo-tpke")] -impl TxQueue { - /// Add a new wrapper at the back of the queue - pub fn push(&mut self, wrapper: WrapperTx) { - self.0.push_back(wrapper); - } - - /// Remove the wrapper at the head of the queue - pub fn pop(&mut self) -> Option<WrapperTx> { - self.0.pop_front() - } - - /// Get an iterator over the queue - pub fn iter(&self) -> impl std::iter::Iterator<Item = &WrapperTx> { - self.0.iter() - } - - /// Check if there are any txs in the queue - #[allow(dead_code)] - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } -} - /// A value of a storage prefix iterator. #[derive(Debug, Clone, BorshSerialize, BorshDeserialize, BorshSchema)] pub struct PrefixValue { diff --git a/shared/src/types/time.rs b/core/src/types/time.rs similarity index 89% rename from shared/src/types/time.rs rename to core/src/types/time.rs index dfca614c82..a508501d94 100644 --- a/shared/src/types/time.rs +++ b/core/src/types/time.rs @@ -7,10 +7,6 @@ use std::ops::{Add, Sub}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; pub use chrono::{DateTime, Duration, TimeZone, Utc}; -use crate::tendermint::time::Time; -use crate::tendermint::Error as TendermintError; -use crate::tendermint_proto::google::protobuf; - /// Check if the given `duration` has passed since the given `start`.
pub fn duration_passed( current: DateTimeUtc, @@ -198,10 +194,15 @@ impl From<DateTimeUtc> for prost_types::Timestamp { } } -impl TryFrom<protobuf::Timestamp> for DateTimeUtc { +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +impl TryFrom<crate::tendermint_proto::google::protobuf::Timestamp> + for DateTimeUtc +{ type Error = prost_types::TimestampOutOfSystemRangeError; - fn try_from(timestamp: protobuf::Timestamp) -> Result<Self, Self::Error> { + fn try_from( + timestamp: crate::tendermint_proto::google::protobuf::Timestamp, + ) -> Result<Self, Self::Error> { Self::try_from(prost_types::Timestamp { seconds: timestamp.seconds, nanos: timestamp.nanos, @@ -230,18 +231,20 @@ impl From<DateTimeUtc> for Rfc3339String { } } -impl TryFrom<DateTimeUtc> for Time { - type Error = TendermintError; +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +impl TryFrom<DateTimeUtc> for crate::tendermint::time::Time { + type Error = crate::tendermint::Error; fn try_from(dt: DateTimeUtc) -> Result<Self, Self::Error> { Self::parse_from_rfc3339(&DateTime::to_rfc3339(&dt.0)) } }
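The timestamp conversions are meant to round-trip through the protobuf representation; a brief sketch (illustrative only, assuming the `From`/`TryFrom` impls shown above and `DateTimeUtc::now` from this module):

    let t = DateTimeUtc::now();
    let proto: prost_types::Timestamp = t.clone().into();
    // Nanosecond precision is preserved in both directions:
    assert_eq!(DateTimeUtc::try_from(proto).unwrap(), t);

-impl TryFrom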