diff --git a/.github/workflows/docker-antithesis.yml b/.github/workflows/docker-antithesis.yml deleted file mode 100644 index a96431fafbd..00000000000 --- a/.github/workflows/docker-antithesis.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: docker antithesis - -on: - push: - branches: - - unstable - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -env: - ANTITHESIS_PASSWORD: ${{ secrets.ANTITHESIS_PASSWORD }} - ANTITHESIS_USERNAME: ${{ secrets.ANTITHESIS_USERNAME }} - ANTITHESIS_SERVER: ${{ secrets.ANTITHESIS_SERVER }} - REPOSITORY: ${{ secrets.ANTITHESIS_REPOSITORY }} - IMAGE_NAME: lighthouse - TAG: libvoidstar - -jobs: - build-docker: - runs-on: ubuntu-22.04 - steps: - - uses: actions/checkout@v3 - - name: Update Rust - run: rustup update stable - - name: Dockerhub login - run: | - echo "${ANTITHESIS_PASSWORD}" | docker login --username ${ANTITHESIS_USERNAME} https://${ANTITHESIS_SERVER} --password-stdin - - name: Build AMD64 dockerfile (with push) - run: | - docker build \ - --tag ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} \ - --file ./testing/antithesis/Dockerfile.libvoidstar . - docker push ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 91a0b734537..5819f804847 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -67,6 +67,8 @@ jobs: run: rustup update stable - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run tests in release run: make test-release release-tests-windows: @@ -88,6 +90,8 @@ jobs: npm config set msvs_version 2019 - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Install make run: choco install -y make - uses: KyleMayes/install-llvm-action@v1 @@ -143,6 +147,8 @@ jobs: run: rustup update stable - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run tests in debug run: make test-debug state-transition-vectors-ubuntu: @@ -189,6 +195,8 @@ jobs: run: rustup update stable - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run the beacon chain sim that starts from an eth1 contract run: cargo run --release --bin simulator eth1-sim merge-transition-ubuntu: @@ -201,6 +209,8 @@ jobs: run: rustup update stable - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run the beacon chain sim and go through the merge transition run: cargo run --release --bin simulator eth1-sim --post-merge no-eth1-simulator-ubuntu: @@ -223,6 +233,8 @@ jobs: run: rustup update stable - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run the syncing simulator run: cargo run --release --bin simulator syncing-sim doppelganger-protection-test: diff --git a/Cargo.lock b/Cargo.lock index 233b3901ebf..9afe6fe183f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -33,6 +33,7 @@ dependencies = [ "serde", "serde_json", "slashing_protection", + "slog", "slot_clock", "tempfile", "tokio", @@ -97,12 +98,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ "cfg-if", - "cipher", + "cipher 0.3.0", "cpufeatures", "ctr", "opaque-debug", ] +[[package]] +name = "aes" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +dependencies = [ + "cfg-if", + "cipher 0.4.4", + "cpufeatures", +] + [[package]] name = "aes-gcm" version = "0.9.4" @@ -110,8 +122,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" dependencies = [ "aead", - "aes", - "cipher", + "aes 0.7.5", + "cipher 0.3.0", "ctr", "ghash", "subtle", @@ -141,9 +153,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" dependencies = [ "memchr", ] @@ -194,9 +206,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.72" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "arbitrary" @@ -234,6 +246,45 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "asn1-rs" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "asn1_der" version = "0.7.6" @@ -262,9 +313,9 @@ dependencies = [ [[package]] name = "async-lock" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" dependencies = [ "event-listener", ] @@ -277,7 +328,7 @@ checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", ] [[package]] @@ -288,18 +339,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] name = "async-trait" -version = "0.1.72" +version = "0.1.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" +checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -310,7 +361,7 @@ checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" dependencies = [ "futures", "pharos", - "rustc_version 0.4.0", + "rustc_version", ] [[package]] @@ -323,7 +374,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", ] [[package]] @@ -378,9 +429,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.19" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a1de45611fdb535bfde7b7de4fd54f4fd2b17b1737c0a59b69bf9b92074b8c" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", "axum-core", @@ -395,7 +446,7 @@ dependencies = [ "memchr", "mime", "percent-encoding", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "rustversion", "serde", "serde_json", @@ -555,7 +606,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "4.3.0" +version = "4.4.0" dependencies = [ "beacon_chain", "clap", @@ -649,9 +700,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" [[package]] name = "bitvec" @@ -732,31 +783,29 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a30d0edd9dd1c60ddb42b80341c7852f6f985279a5c1a83659dcb65899dec99" +checksum = "c94087b935a822949d3291a9989ad2b2051ea141eda0fd4e478a75f6aa3e604b" dependencies = [ "cc", "glob", "threadpool", - "which", "zeroize", ] [[package]] name = "bollard-stubs" -version = "1.41.0" +version = "1.42.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2f2e73fffe9455141e170fb9c1feb0ac521ec7e7dcd47a7cab72a658490fb8" +checksum = "ed59b5c00048f48d7af971b71f800fdf23e858844a6f9e4d32ca72e9399e7864" dependencies = [ - "chrono", "serde", "serde_with", ] [[package]] name = "boot_node" -version = "4.3.0" +version = "4.4.0" dependencies = [ "beacon_node", "clap", @@ -795,16 +844,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "buf_redux" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" -dependencies = [ - "memchr", - "safemem", -] - [[package]] name = "builder_client" version = "0.1.0" @@ -906,7 +945,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.18", + "semver", "serde", "serde_json", "thiserror", @@ -920,9 +959,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.79" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = 
"305fe645edc1442a0fa8b6726ba61d422798d37a52e12eaecf4b022ebbb88f01" +dependencies = [ + "jobserver", + "libc", +] [[package]] name = "cexpr" @@ -930,7 +973,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom 7.1.3", + "nom", ] [[package]] @@ -946,7 +989,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ "cfg-if", - "cipher", + "cipher 0.3.0", "cpufeatures", "zeroize", ] @@ -959,7 +1002,7 @@ checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" dependencies = [ "aead", "chacha20", - "cipher", + "cipher 0.3.0", "poly1305", "zeroize", ] @@ -972,11 +1015,7 @@ checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ "android-tzdata", "iana-time-zone", - "js-sys", "num-traits", - "serde", - "time 0.1.45", - "wasm-bindgen", "winapi", ] @@ -989,6 +1028,16 @@ dependencies = [ "generic-array", ] +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + [[package]] name = "clang-sys" version = "1.6.1" @@ -1054,7 +1103,6 @@ dependencies = [ "logging", "monitoring_api", "network", - "num_cpus", "operation_pool", "parking_lot 0.12.1", "sensitive_url", @@ -1068,7 +1116,7 @@ dependencies = [ "state_processing", "store", "task_executor", - "time 0.3.24", + "time", "timer", "tokio", "types", @@ -1109,9 +1157,15 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.4" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" + +[[package]] +name = "constant_time_eq" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "convert_case" @@ -1328,7 +1382,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" dependencies = [ - "cipher", + "cipher 0.3.0", ] [[package]] @@ -1338,7 +1392,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" dependencies = [ "nix 0.26.2", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1356,19 +1410,32 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-rc.1" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16" +checksum = "f711ade317dd348950a9910f81c5947e3d8907ebd2b83f76203ff1807e6a2bc2" dependencies = [ "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "packed_simd_2", "platforms 3.0.2", + "rustc_version", "subtle", "zeroize", ] +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +dependencies = [ + "proc-macro2", + "quote", + 
"syn 2.0.29", +] + [[package]] name = "darling" version = "0.13.4" @@ -1510,20 +1577,34 @@ dependencies = [ [[package]] name = "der" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7ed52955ce76b1554f509074bb357d3fb8ac9b51288a65a3fd480d1dfba946" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ "const-oid", "pem-rfc7468", "zeroize", ] +[[package]] +name = "der-parser" +version = "8.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + [[package]] name = "deranged" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8810e7e2cf385b1e9b50d68264908ec367ba642c96d02edfe61c39e88e2a3c01" +checksum = "7684a49fb1af197853ef7b2ee694bc1f5b4179556f1e5710e1760c5db6f5e929" [[package]] name = "derivative" @@ -1544,7 +1625,7 @@ checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -1556,7 +1637,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version 0.4.0", + "rustc_version", "syn 1.0.109", ] @@ -1566,7 +1647,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7a532c1f99a0f596f6960a60d1e119e91582b24b39e2d83a190e61262c3ef0c" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.0", "byteorder", "diesel_derives", "itoa", @@ -1583,7 +1664,7 @@ dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -1603,7 +1684,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -1683,7 +1764,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98c05fa26996c6141f78ac4fafbe297a7fa69690565ba4e0d1f2e60bde5ce501" dependencies = [ - "aes", + "aes 0.7.5", "aes-gcm", "arrayvec", "delay_map", @@ -1710,6 +1791,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "displaydoc" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.29", +] + [[package]] name = "dtoa" version = "1.0.9" @@ -1740,7 +1832,7 @@ version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" dependencies = [ - "der 0.7.7", + "der 0.7.8", "digest 0.10.7", "elliptic-curve 0.13.5", "rfc6979 0.4.0", @@ -1750,18 +1842,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" -dependencies = [ - "signature 1.6.4", -] - -[[package]] -name = "ed25519" -version = "2.2.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fb04eee5d9d907f29e80ee6b0e78f7e2c82342c63e3580d8c4f69d9d5aad963" +checksum = 
"60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" dependencies = [ "pkcs8 0.10.2", "signature 2.1.0", @@ -1769,26 +1852,12 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" -dependencies = [ - "curve25519-dalek 3.2.0", - "ed25519 1.5.3", - "rand 0.7.3", - "serde", - "sha2 0.9.9", - "zeroize", -] - -[[package]] -name = "ed25519-dalek" -version = "2.0.0-pre.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bd577ba9d4bcab443cac60003d8fd32c638e7024a3ec92c200d7af5d2c397ed" +checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" dependencies = [ - "curve25519-dalek 4.0.0-rc.1", - "ed25519 2.2.1", + "curve25519-dalek 4.0.0", + "ed25519", "rand_core 0.6.4", "serde", "sha2 0.10.7", @@ -1908,7 +1977,7 @@ checksum = "0be7b2ac146c1f99fe245c02d16af0696450d8e06c135db75e10eeb9e642c20d" dependencies = [ "base64 0.21.2", "bytes", - "ed25519-dalek 2.0.0-pre.0", + "ed25519-dalek", "hex", "k256 0.13.1", "log", @@ -1991,7 +2060,7 @@ checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" dependencies = [ "errno-dragonfly", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2077,7 +2146,7 @@ dependencies = [ "mediatype", "mime", "pretty_reqwest_error", - "procinfo", + "procfs", "proto_array", "psutil", "reqwest", @@ -2130,7 +2199,7 @@ dependencies = [ name = "eth2_keystore" version = "0.1.0" dependencies = [ - "aes", + "aes 0.7.5", "bls", "eth2_key_derivation", "hex", @@ -2155,9 +2224,16 @@ dependencies = [ "discv5", "eth2_config", "ethereum_ssz", + "logging", + "pretty_reqwest_error", + "reqwest", + "sensitive_url", "serde_yaml", + "sha2 0.10.7", + "slog", "tempfile", "types", + "url", "zip", ] @@ -2639,7 +2715,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38e2275cc4e4fc009b0669731a1e5ab7ebf11f469eaede2bab9309a5b4d6057f" dependencies = [ "memoffset 0.9.0", - "rustc_version 0.4.0", + "rustc_version", ] [[package]] @@ -2677,9 +2753,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" dependencies = [ "crc32fast", "libz-sys", @@ -2813,7 +2889,7 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "waker-fn", ] @@ -2825,18 +2901,17 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] name = "futures-rustls" -version = "0.22.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" +checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.20.8", - "webpki 0.22.0", + "rustls 0.21.6", ] [[package]] @@ -2881,7 +2956,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "pin-utils", "slab", ] @@ -3260,7 +3335,7 @@ checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", - 
"pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", ] [[package]] @@ -3340,9 +3415,9 @@ checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" @@ -3366,7 +3441,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "socket2 0.4.9", "tokio", "tower-service", @@ -3383,7 +3458,7 @@ dependencies = [ "futures-util", "http", "hyper", - "rustls 0.21.5", + "rustls 0.21.6", "tokio", "tokio-rustls 0.24.1", ] @@ -3596,6 +3671,15 @@ dependencies = [ "hashbrown 0.14.0", ] +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array", +] + [[package]] name = "instant" version = "0.1.12" @@ -3634,7 +3718,7 @@ checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ "hermit-abi 0.3.2", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -3645,7 +3729,7 @@ checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ "socket2 0.5.3", "widestring 1.0.2", - "windows-sys", + "windows-sys 0.48.0", "winreg 0.50.0", ] @@ -3701,6 +3785,15 @@ dependencies = [ "libc", ] +[[package]] +name = "jobserver" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" version = "0.3.64" @@ -3776,7 +3869,7 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" dependencies = [ - "spin", + "spin 0.5.2", ] [[package]] @@ -3787,7 +3880,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "4.3.0" +version = "4.4.0" dependencies = [ "account_utils", "beacon_chain", @@ -3882,12 +3975,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "libm" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" - [[package]] name = "libm" version = "0.2.7" @@ -3911,9 +3998,9 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.52.1" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38039ba2df4f3255842050845daef4a004cc1f26da03dbc645535088b51910ef" +checksum = "32d07d1502a027366d55afe187621c2d7895dc111a3df13b35fed698049681d7" dependencies = [ "bytes", "futures", @@ -3931,6 +4018,7 @@ dependencies = [ "libp2p-metrics", "libp2p-noise", "libp2p-plaintext", + "libp2p-quic", "libp2p-swarm", "libp2p-tcp", "libp2p-websocket", @@ -4008,9 +4096,9 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.45.0" +version = "0.45.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e378da62e8c9251f6e885ed173a561663f29b251e745586cf6ae6150b295c37" +checksum = "2d157562dba6017193e5285acf6b1054759e83540bfd79f75b69d6ce774c88da" dependencies = [ "asynchronous-codec", "base64 
0.21.2", @@ -4062,13 +4150,13 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38d6012784fe4cc14e6d443eb415b11fc7c456dc15d9f0d90d9b70bc7ac3ec1" +checksum = "686e73aff5e23efbb99bc85340ea6fd8686986aa7b283a881ba182cfca535ca9" dependencies = [ "asn1_der", "bs58 0.5.0", - "ed25519-dalek 1.0.1", + "ed25519-dalek", "libsecp256k1", "log", "multihash 0.19.0", @@ -4105,9 +4193,9 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3787ea81798dcc5bf1d8b40a8e8245cf894b168d04dd70aa48cb3ff2fff141d2" +checksum = "239ba7d28f8d0b5d77760dc6619c05c7e88e74ec8fbbe97f856f20a56745e620" dependencies = [ "instant", "libp2p-core", @@ -4119,14 +4207,33 @@ dependencies = [ "prometheus-client", ] +[[package]] +name = "libp2p-mplex" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93959ed08b6caf9810e067655e25f1362098797fef7c44d3103e63dcb6f0fabe" +dependencies = [ + "asynchronous-codec", + "bytes", + "futures", + "libp2p-core", + "libp2p-identity", + "log", + "nohash-hasher", + "parking_lot 0.12.1", + "rand 0.8.5", + "smallvec 1.11.0", + "unsigned-varint 0.7.1", +] + [[package]] name = "libp2p-noise" -version = "0.43.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87945db2b3f977af09b62b9aa0a5f3e4870995a577ecd845cdeba94cdf6bbca7" +checksum = "71ce70757f2c0d82e9a3ef738fb10ea0723d16cec37f078f719e2c247704c1bb" dependencies = [ "bytes", - "curve25519-dalek 3.2.0", + "curve25519-dalek 4.0.0", "futures", "libp2p-core", "libp2p-identity", @@ -4160,11 +4267,34 @@ dependencies = [ "unsigned-varint 0.7.1", ] +[[package]] +name = "libp2p-quic" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cb763e88f9a043546bfebd3575f340e7dd3d6c1b2cf2629600ec8965360c63a" +dependencies = [ + "bytes", + "futures", + "futures-timer", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-tls", + "log", + "parking_lot 0.12.1", + "quinn", + "rand 0.8.5", + "rustls 0.21.6", + "socket2 0.5.3", + "thiserror", + "tokio", +] + [[package]] name = "libp2p-swarm" -version = "0.43.2" +version = "0.43.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43106820057e0f65c77b01a3873593f66e676da4e40c70c3a809b239109f1d30" +checksum = "28016944851bd73526d3c146aabf0fa9bbe27c558f080f9e5447da3a1772c01a" dependencies = [ "either", "fnv", @@ -4193,7 +4323,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -4213,11 +4343,30 @@ dependencies = [ "tokio", ] +[[package]] +name = "libp2p-tls" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8218d1d5482b122ccae396bbf38abdcb283ecc96fa54760e1dfd251f0546ac61" +dependencies = [ + "futures", + "futures-rustls", + "libp2p-core", + "libp2p-identity", + "rcgen", + "ring", + "rustls 0.21.6", + "rustls-webpki", + "thiserror", + "x509-parser", + "yasna", +] + [[package]] name = "libp2p-websocket" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956d981ebc84abc3377e5875483c06d94ff57bc6b25f725047f9fd52592f72d4" +checksum = "3facf0691bab65f571bc97c6c65ffa836248ca631d631b7691ac91deb7fceb5f" dependencies = [ "either", "futures", @@ 
-4230,14 +4379,14 @@ dependencies = [ "rw-stream-sink", "soketto", "url", - "webpki-roots 0.23.1", + "webpki-roots 0.25.2", ] [[package]] name = "libp2p-yamux" -version = "0.44.0" +version = "0.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0a9b42ab6de15c6f076d8fb11dc5f48d899a10b55a2e16b12be9012a05287b0" +checksum = "8eedcb62824c4300efb9cfd4e2a6edaf3ca097b9e68b36dabe45a44469fd6a85" dependencies = [ "futures", "libp2p-core", @@ -4318,7 +4467,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "4.3.0" +version = "4.4.0" dependencies = [ "account_manager", "account_utils", @@ -4384,6 +4533,7 @@ dependencies = [ "hex", "lazy_static", "libp2p", + "libp2p-mplex", "lighthouse_metrics", "lighthouse_version", "lru 0.7.8", @@ -4434,6 +4584,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linux-raw-sys" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" + [[package]] name = "linux-raw-sys" version = "0.3.8" @@ -4487,9 +4643,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "logging" @@ -4593,9 +4749,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67827e6ea8ee8a7c4a72227ef4fc08957040acffdb5f122733b24fa12daff41b" +checksum = "ed1202b2a6f884ae56f04cff409ab315c5ce26b5e58d7412e484f01fd52f52ef" [[package]] name = "maybe-uninit" @@ -4781,7 +4937,7 @@ checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4810,6 +4966,24 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" +[[package]] +name = "multer" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01acbdc23469fd8fe07ab135923371d5f5a422fbf9c522158677c8eb15bc51c2" +dependencies = [ + "bytes", + "encoding_rs", + "futures-util", + "http", + "httparse", + "log", + "memchr", + "mime", + "spin 0.9.8", + "version_check", +] + [[package]] name = "multiaddr" version = "0.14.0" @@ -4895,24 +5069,6 @@ dependencies = [ "synstructure", ] -[[package]] -name = "multipart" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" -dependencies = [ - "buf_redux", - "httparse", - "log", - "mime", - "mime_guess", - "quick-error", - "rand 0.8.5", - "safemem", - "tempfile", - "twoway", -] - [[package]] name = "multistream-select" version = "0.13.0" @@ -5038,7 +5194,6 @@ dependencies = [ "logging", "lru_cache", "matches", - "num_cpus", "operation_pool", "parking_lot 0.12.1", "rand 0.8.5", @@ -5123,12 +5278,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" -[[package]] -name = "nom" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" - [[package]] name = "nom" version = "7.1.3" @@ -5178,7 +5327,7 @@ dependencies = [ "autocfg 0.1.8", "byteorder", "lazy_static", - "libm 0.2.7", + "libm", "num-integer", "num-iter", "num-traits", @@ -5246,6 +5395,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" +dependencies = [ + "asn1-rs", +] + [[package]] name = "once_cell" version = "1.18.0" @@ -5298,9 +5456,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.55" +version = "0.10.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" +checksum = "729b745ad4a5575dd06a3e1af1414bd330ee561c01b3899eb584baeaa8def17e" dependencies = [ "bitflags 1.3.2", "cfg-if", @@ -5319,7 +5477,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -5330,18 +5488,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.26.0+1.1.1u" +version = "111.27.0+1.1.1v" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efc62c9f12b22b8f5208c23a7200a442b2e5999f8bdf80233852122b5a4f6f37" +checksum = "06e8f197c82d7511c5b014030c9b1efeda40d7d5f99d23b4ceed3524a5e63f02" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.90" +version = "0.9.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" +checksum = "866b5f16f90776b9bb8dc1e1802ac6f0513de3a7a7465867bfbc563dc737faac" dependencies = [ "cc", "libc", @@ -5392,16 +5550,6 @@ dependencies = [ "sha2 0.10.7", ] -[[package]] -name = "packed_simd_2" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1914cd452d8fccd6f9db48147b29fd4ae05bea9dc5d9ad578509f72415de282" -dependencies = [ - "cfg-if", - "libm 0.1.4", -] - [[package]] name = "parity-scale-codec" version = "2.3.1" @@ -5505,7 +5653,18 @@ dependencies = [ "libc", "redox_syscall 0.3.5", "smallvec 1.11.0", - "windows-targets", + "windows-targets 0.48.3", +] + +[[package]] +name = "password-hash" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle", ] [[package]] @@ -5532,6 +5691,18 @@ dependencies = [ "crypto-mac 0.11.1", ] +[[package]] +name = "pbkdf2" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" +dependencies = [ + "digest 0.10.7", + "hmac 0.12.1", + "password-hash", + "sha2 0.10.7", +] + [[package]] name = "peeking_take_while" version = "0.1.2" @@ -5569,7 +5740,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" dependencies = [ "futures", - "rustc_version 0.4.0", + 
"rustc_version", ] [[package]] @@ -5592,22 +5763,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -5618,9 +5789,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.10" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" +checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" [[package]] name = "pin-utils" @@ -5644,7 +5815,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.7", + "der 0.7.8", "spki 0.7.2", ] @@ -5706,8 +5877,8 @@ dependencies = [ "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.10", - "windows-sys", + "pin-project-lite 0.2.12", + "windows-sys 0.48.0", ] [[package]] @@ -5869,7 +6040,7 @@ checksum = "70550716265d1ec349c41f70dd4f964b4fd88394efe4405f0c1da679c4799a07" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -5882,15 +6053,18 @@ dependencies = [ ] [[package]] -name = "procinfo" -version = "0.4.2" +name = "procfs" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ab1427f3d2635891f842892dda177883dca0639e05fe66796a62c9d2f23b49c" +checksum = "943ca7f9f29bab5844ecd8fdb3992c5969b6622bb9609b9502fef9b4310e3f1f" dependencies = [ + "bitflags 1.3.2", "byteorder", - "libc", - "nom 2.2.1", - "rustc_version 0.2.3", + "chrono", + "flate2", + "hex", + "lazy_static", + "rustix 0.36.15", ] [[package]] @@ -5922,13 +6096,13 @@ dependencies = [ [[package]] name = "prometheus-client-derive-encode" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b6a5217beb0ad503ee7fa752d451c905113d70721b937126158f3106a48cc1" +checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.29", ] [[package]] @@ -6032,11 +6206,59 @@ dependencies = [ "pin-project-lite 0.1.12", ] +[[package]] +name = "quinn" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" +dependencies = [ + "bytes", + "futures-io", + "pin-project-lite 0.2.12", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls 0.21.6", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "quinn-proto" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13f81c9a9d574310b8351f8666f5a93ac3b0069c45c28ad52c10291389a7cf9" +dependencies = [ + "bytes", + "rand 0.8.5", + "ring", + "rustc-hash", + 
"rustls 0.21.6", + "slab", + "thiserror", + "tinyvec", + "tracing", +] + +[[package]] +name = "quinn-udp" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" +dependencies = [ + "bytes", + "libc", + "socket2 0.5.3", + "tracing", + "windows-sys 0.48.0", +] + [[package]] name = "quote" -version = "1.0.32" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] @@ -6176,6 +6398,18 @@ dependencies = [ "num_cpus", ] +[[package]] +name = "rcgen" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" +dependencies = [ + "pem", + "ring", + "time", + "yasna", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -6207,13 +6441,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.1" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" +checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.4", + "regex-automata 0.3.6", "regex-syntax 0.7.4", ] @@ -6228,9 +6462,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.4" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7b6d6190b7594385f61bd3911cd1be99dfddcfc365a4160cc2ab5bff4aed294" +checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" dependencies = [ "aho-corasick", "memchr", @@ -6273,8 +6507,8 @@ dependencies = [ "native-tls", "once_cell", "percent-encoding", - "pin-project-lite 0.2.10", - "rustls 0.21.5", + "pin-project-lite 0.2.12", + "rustls 0.21.6", "rustls-pemfile", "serde", "serde_json", @@ -6333,7 +6567,7 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", + "spin 0.5.2", "untrusted", "web-sys", "winapi", @@ -6425,60 +6659,61 @@ checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" [[package]] name = "rustc_version" -version = "0.2.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 0.9.0", + "semver", ] [[package]] -name = "rustc_version" -version = "0.4.0" +name = "rusticata-macros" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" dependencies = [ - "semver 1.0.18", + "nom", ] [[package]] name = "rustix" -version = "0.37.23" +version = "0.36.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" +checksum = "c37f1bd5ef1b5422177b7646cba67430579cfe2ace80f284fee876bca52ad941" dependencies = [ "bitflags 1.3.2", "errno", "io-lifetimes", "libc", - "linux-raw-sys 0.3.8", - "windows-sys", + "linux-raw-sys 0.1.4", + "windows-sys 0.45.0", ] [[package]] name = "rustix" -version = "0.38.4" 
+version = "0.37.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" +checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" dependencies = [ - "bitflags 2.3.3", + "bitflags 1.3.2", "errno", + "io-lifetimes", "libc", - "linux-raw-sys 0.4.5", - "windows-sys", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", ] [[package]] -name = "rustls" -version = "0.19.1" +name = "rustix" +version = "0.38.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" dependencies = [ - "base64 0.13.1", - "log", - "ring", - "sct 0.6.1", - "webpki 0.21.4", + "bitflags 2.4.0", + "errno", + "libc", + "linux-raw-sys 0.4.5", + "windows-sys 0.48.0", ] [[package]] @@ -6489,20 +6724,20 @@ checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", - "sct 0.7.0", - "webpki 0.22.0", + "sct", + "webpki", ] [[package]] name = "rustls" -version = "0.21.5" +version = "0.21.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36" +checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" dependencies = [ "log", "ring", - "rustls-webpki 0.101.2", - "sct 0.7.0", + "rustls-webpki", + "sct", ] [[package]] @@ -6516,19 +6751,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.100.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "rustls-webpki" -version = "0.101.2" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "513722fd73ad80a71f72b61009ea1b584bcfa1483ca93949c8f290298837fa59" +checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" dependencies = [ "ring", "untrusted", @@ -6561,19 +6786,13 @@ checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" name = "safe_arith" version = "0.1.0" -[[package]] -name = "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = "salsa20" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecbd2eb639fd7cab5804a0837fe373cc2172d15437e804c054a9fb885cb923b0" dependencies = [ - "cipher", + "cipher 0.3.0", ] [[package]] @@ -6615,7 +6834,7 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -6651,16 +6870,6 @@ dependencies = [ "sha2 0.9.9", ] -[[package]] -name = "sct" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sct" version = "0.7.0" @@ -6692,7 +6901,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct 0.2.0", - "der 0.7.7", 
+ "der 0.7.8", "generic-array", "pkcs8 0.10.2", "subtle", @@ -6722,15 +6931,6 @@ dependencies = [ "libc", ] -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - [[package]] name = "semver" version = "1.0.18" @@ -6740,12 +6940,6 @@ dependencies = [ "serde", ] -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "send_wrapper" version = "0.6.0" @@ -6762,9 +6956,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.180" +version = "1.0.183" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea67f183f058fe88a4e3ec6e2788e003840893b91bac4559cabedd00863b3ed" +checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c" dependencies = [ "serde_derive", ] @@ -6802,20 +6996,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.180" +version = "1.0.183" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24e744d7782b686ab3b73267ef05697159cc0e5abbed3f47f9933165e5219036" +checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] name = "serde_json" -version = "1.0.104" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" +checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" dependencies = [ "itoa", "ryu", @@ -6840,7 +7034,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -7032,7 +7226,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.24", + "time", ] [[package]] @@ -7164,7 +7358,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.24", + "time", ] [[package]] @@ -7209,14 +7403,14 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.24", + "time", ] [[package]] name = "sloggers" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e20d36cb80da75a9c5511872f15247ddad14ead8c1dd97a86b56d1be9f5d4a0e" +checksum = "7a0a4d8569a69ee56f277bffc2f6eee637b98ed468448e8a5a84fa63efe4de9d" dependencies = [ "chrono", "libc", @@ -7269,17 +7463,17 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" +checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" dependencies = [ "aes-gcm", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-rc.1", + "curve25519-dalek 4.0.0", "rand_core 0.6.4", "ring", - "rustc_version 0.4.0", + "rustc_version", "sha2 0.10.7", "subtle", ] @@ -7301,7 +7495,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -7325,6 +7519,12 @@ version = "0.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + [[package]] name = "spki" version = "0.6.0" @@ -7342,7 +7542,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", - "der 0.7.7", + "der 0.7.8", ] [[package]] @@ -7558,9 +7758,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.28" +version = "2.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" +checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" dependencies = [ "proc-macro2", "quote", @@ -7674,15 +7874,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.7.0" +version = "3.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5486094ee78b2e5038a6382ed7645bc084dc2ec433426ca4c3cb61e2007b8998" +checksum = "dc02fddf48964c42031a0b3fe0428320ecf3a73c401040fc0096f97794310651" dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.38.4", - "windows-sys", + "rustix 0.38.8", + "windows-sys 0.48.0", ] [[package]] @@ -7724,8 +7924,7 @@ dependencies = [ [[package]] name = "testcontainers" version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e2b1567ca8a2b819ea7b28c92be35d9f76fb9edb214321dcc86eb96023d1f87" +source = "git+https://github.com/testcontainers/testcontainers-rs/?rev=0f2c9851#0f2c985160e51a200cfc847097c15b8d85ed7df1" dependencies = [ "bollard-stubs", "futures", @@ -7749,22 +7948,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.44" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" +checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.44" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" +checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -7788,20 +7987,9 @@ dependencies = [ [[package]] name = "time" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "time" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b79eabcd964882a646b3584543ccabeae7869e9ac32a46f6f22b7a5bd405308b" +checksum = "b0fdd63d58b18d663fbdf70e049f00a22c8e42be082203be7f26589213cd75ea" dependencies = [ "deranged", "itoa", @@ -7893,22 +8081,21 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.29.1" +version = "1.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" +checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" dependencies = [ - "autocfg 1.1.0", "backtrace", "bytes", "libc", "mio", "num_cpus", "parking_lot 0.12.1", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "signal-hook-registry", - "socket2 0.4.9", + "socket2 0.5.3", "tokio-macros", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -7917,7 +8104,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "tokio", ] @@ -7929,7 +8116,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -7958,7 +8145,7 @@ dependencies = [ "parking_lot 0.12.1", "percent-encoding", "phf", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "postgres-protocol", "postgres-types", "socket2 0.5.3", @@ -7966,17 +8153,6 @@ dependencies = [ "tokio-util 0.7.8", ] -[[package]] -name = "tokio-rustls" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" -dependencies = [ - "rustls 0.19.1", - "tokio", - "webpki 0.21.4", -] - [[package]] name = "tokio-rustls" version = "0.23.4" @@ -7985,7 +8161,7 @@ checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls 0.20.8", "tokio", - "webpki 0.22.0", + "webpki", ] [[package]] @@ -7994,7 +8170,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.5", + "rustls 0.21.6", "tokio", ] @@ -8005,38 +8181,37 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "tokio", "tokio-util 0.7.8", ] [[package]] name = "tokio-tungstenite" -version = "0.15.0" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" +checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", - "pin-project", + "rustls 0.20.8", "tokio", - "tungstenite 0.14.0", + "tokio-rustls 0.23.4", + "tungstenite 0.17.3", + "webpki", + "webpki-roots 0.22.6", ] [[package]] name = "tokio-tungstenite" -version = "0.17.2" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" +checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" dependencies = [ "futures-util", "log", - "rustls 0.20.8", "tokio", - "tokio-rustls 0.23.4", - "tungstenite 0.17.3", - "webpki 0.22.0", - "webpki-roots 0.22.6", + "tungstenite 0.18.0", ] [[package]] @@ -8050,7 +8225,7 @@ dependencies = [ "futures-io", "futures-sink", "log", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "slab", "tokio", ] @@ -8064,7 +8239,7 @@ dependencies = [ "bytes", "futures-core", "futures-sink", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "slab", "tokio", "tracing", @@ -8122,7 +8297,7 
@@ dependencies = [ "futures-core", "futures-util", "pin-project", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "tokio", "tower-layer", "tower-service", @@ -8149,7 +8324,7 @@ checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "tracing-attributes", "tracing-core", ] @@ -8162,7 +8337,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -8319,9 +8494,9 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tungstenite" -version = "0.14.0" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ "base64 0.13.1", "byteorder", @@ -8330,17 +8505,19 @@ dependencies = [ "httparse", "log", "rand 0.8.5", - "sha-1 0.9.8", + "rustls 0.20.8", + "sha-1 0.10.1", "thiserror", "url", "utf-8", + "webpki", ] [[package]] name = "tungstenite" -version = "0.17.3" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" +checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" dependencies = [ "base64 0.13.1", "byteorder", @@ -8349,21 +8526,10 @@ dependencies = [ "httparse", "log", "rand 0.8.5", - "rustls 0.20.8", - "sha-1 0.10.1", + "sha1", "thiserror", "url", "utf-8", - "webpki 0.22.0", -] - -[[package]] -name = "twoway" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" -dependencies = [ - "memchr", ] [[package]] @@ -8714,8 +8880,8 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.2" -source = "git+https://github.com/macladson/warp?rev=7e75acc368229a46a236a8c991bf251fe7fe50ef#7e75acc368229a46a236a8c991bf251fe7fe50ef" +version = "0.3.5" +source = "git+https://github.com/seanmonstar/warp.git?rev=149913fe#149913fed948bbe2149b52b9016170bcaef950ab" dependencies = [ "bytes", "futures-channel", @@ -8726,18 +8892,19 @@ dependencies = [ "log", "mime", "mime_guess", - "multipart", + "multer", "percent-encoding", "pin-project", + "rustls-pemfile", "scoped-tls", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls 0.22.0", + "tokio-rustls 0.23.4", "tokio-stream", - "tokio-tungstenite 0.15.0", - "tokio-util 0.6.10", + "tokio-tungstenite 0.18.0", + "tokio-util 0.7.8", "tower-service", "tracing", ] @@ -8766,12 +8933,6 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -8799,7 +8960,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", "wasm-bindgen-shared", ] @@ -8833,7 +8994,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + 
"syn 2.0.29", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8890,6 +9051,7 @@ dependencies = [ "http_api", "hyper", "log", + "logging", "network", "r2d2", "rand 0.7.3", @@ -8943,16 +9105,6 @@ dependencies = [ "zip", ] -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "webpki" version = "0.22.0" @@ -8969,28 +9121,14 @@ version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ - "webpki 0.22.0", + "webpki", ] [[package]] name = "webpki-roots" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" -dependencies = [ - "rustls-webpki 0.100.1", -] - -[[package]] -name = "which" -version = "4.4.0" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" -dependencies = [ - "either", - "libc", - "once_cell", -] +checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" [[package]] name = "widestring" @@ -9060,7 +9198,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets", + "windows-targets 0.48.3", ] [[package]] @@ -9075,35 +9213,65 @@ dependencies = [ "winapi", ] +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.3", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows-targets" -version = "0.48.1" +version = "0.48.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" +checksum = "27f51fb4c64f8b770a823c043c7fad036323e1c48f55287b7bbb7987b2fcdf3b" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm 0.48.3", + "windows_aarch64_msvc 0.48.3", + "windows_i686_gnu 0.48.3", + "windows_i686_msvc 0.48.3", + "windows_x86_64_gnu 0.48.3", + "windows_x86_64_gnullvm 0.48.3", + "windows_x86_64_msvc 0.48.3", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.42.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fde1bb55ae4ce76a597a8566d82c57432bc69c039449d61572a7a353da28f68c" [[package]] name = "windows_aarch64_msvc" @@ -9113,9 +9281,15 @@ checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1513e8d48365a78adad7322fd6b5e4c4e99d92a69db8df2d435b25b1f1f286d4" [[package]] name = "windows_i686_gnu" @@ -9125,9 +9299,15 @@ checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60587c0265d2b842298f5858e1a5d79d146f9ee0c37be5782e92a6eb5e1d7a83" [[package]] name = "windows_i686_msvc" @@ -9137,9 +9317,15 @@ checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224fe0e0ffff5d2ea6a29f82026c8f43870038a0ffc247aa95a52b47df381ac4" [[package]] name = "windows_x86_64_gnu" @@ -9149,15 +9335,27 @@ checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62fc52a0f50a088de499712cbc012df7ebd94e2d6eb948435449d76a6287e7ad" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2093925509d91ea3d69bcd20238f4c2ecdb1a29d3c281d026a09705d0dd35f3d" [[package]] name = "windows_x86_64_msvc" @@ -9167,15 +9365,21 @@ checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6ade45bc8bf02ae2aa34a9d54ba660a1a58204da34ba793c00d83ca3730b5f1" [[package]] name = "winnow" -version = "0.5.3" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46aab759304e4d7b2075a9aecba26228bb073ee8c50db796b2c72c676b5d807" +checksum = "d09770118a7eb1ccaf4a594a221334119a44a814fcb0d31c5b85e83e97227a97" dependencies = [ "memchr", ] @@ -9196,7 +9400,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ "cfg-if", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -9210,7 +9414,7 @@ dependencies = [ "js-sys", "log", "pharos", - "rustc_version 0.4.0", + "rustc_version", "send_wrapper", "thiserror", "wasm-bindgen", @@ -9244,6 +9448,23 @@ dependencies = [ "zeroize", ] +[[package]] +name = "x509-parser" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror", + "time", +] + [[package]] name = "xml-rs" version = "0.8.16" @@ -9270,18 +9491,28 @@ dependencies = [ [[package]] name = "yamux" -version = "0.10.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d9ba232399af1783a58d8eb26f6b5006fbefe2dc9ef36bd283324792d03ea5" +checksum = "0329ef377816896f014435162bb3711ea7a07729c23d0960e6f8048b21b8fe91" dependencies = [ "futures", "log", "nohash-hasher", "parking_lot 0.12.1", + "pin-project", "rand 0.8.5", "static_assertions", ] +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + [[package]] name = "zeroize" version = "1.6.0" @@ -9299,19 +9530,55 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] name = "zip" -version = "0.5.13" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ab48844d61251bb3835145c521d88aa4031d7139e8485990f60ca911fa0815" +checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" dependencies = [ + "aes 0.8.3", "byteorder", "bzip2", + "constant_time_eq", "crc32fast", + "crossbeam-utils", "flate2", - "thiserror", - "time 0.1.45", + "hmac 0.12.1", + "pbkdf2 0.11.0", + "sha1", + "time", + "zstd", +] + +[[package]] +name = "zstd" +version = "0.11.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" +dependencies = [ + 
"zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "5.0.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.8+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +dependencies = [ + "cc", + "libc", + "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 15906a03065..9930658e652 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,7 +91,8 @@ resolver = "2" [patch] [patch.crates-io] -warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } +# TODO: remove when 0.3.6 get's released. +warp = { git = "https://github.com/seanmonstar/warp.git", rev="149913fe" } [profile.maxperf] inherits = "release" diff --git a/Makefile b/Makefile index b833686e1b5..1e99b3dbb33 100644 --- a/Makefile +++ b/Makefile @@ -71,13 +71,13 @@ install-lcli: # optimized CPU functions that may not be available on some systems. This # results in a more portable binary with ~20% slower BLS verification. build-x86_64: - cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-x86_64-portable: - cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-aarch64: - cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-aarch64-portable: - cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" + cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary @@ -207,7 +207,7 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: cargo install --force cargo-audit - cargo audit --ignore RUSTSEC-2020-0071 + cargo audit --ignore RUSTSEC-2023-0052 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. 
vendor: diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 7d90cbb427d..238e4a77e08 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -1,7 +1,10 @@ [package] name = "account_manager" version = "0.3.5" -authors = ["Paul Hauner ", "Luke Anderson "] +authors = [ + "Paul Hauner ", + "Luke Anderson ", +] edition = "2021" [dependencies] @@ -19,13 +22,14 @@ tokio = { version = "1.14.0", features = ["full"] } eth2_keystore = { path = "../crypto/eth2_keystore" } account_utils = { path = "../common/account_utils" } slashing_protection = { path = "../validator_client/slashing_protection" } -eth2 = {path = "../common/eth2"} -safe_arith = {path = "../consensus/safe_arith"} +eth2 = { path = "../common/eth2" } +safe_arith = { path = "../consensus/safe_arith" } slot_clock = { path = "../common/slot_clock" } filesystem = { path = "../common/filesystem" } sensitive_url = { path = "../common/sensitive_url" } serde = { version = "1.0.116", features = ["derive"] } serde_json = "1.0.58" +slog = { version = "2.5.2" } [dev-dependencies] tempfile = "3.1.0" diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 5755a355f31..1ff61a7c01d 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -10,6 +10,7 @@ use eth2_keystore::Keystore; use eth2_network_config::Eth2NetworkConfig; use safe_arith::SafeArith; use sensitive_url::SensitiveUrl; +use slog::Logger; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::path::{Path, PathBuf}; use std::time::Duration; @@ -78,6 +79,12 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< let password_file_path: Option = clap_utils::parse_optional(matches, PASSWORD_FILE_FLAG)?; + let genesis_state_url: Option = + clap_utils::parse_optional(matches, "genesis-state-url")?; + let genesis_state_url_timeout = + clap_utils::parse_required(matches, "genesis-state-url-timeout") + .map(Duration::from_secs)?; + let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); let no_wait = matches.is_present(NO_WAIT); let no_confirmation = matches.is_present(NO_CONFIRMATION); @@ -104,6 +111,9 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< ð2_network_config, no_wait, no_confirmation, + genesis_state_url, + genesis_state_url_timeout, + env.core_context().log(), ))?; Ok(()) @@ -120,13 +130,14 @@ async fn publish_voluntary_exit( eth2_network_config: &Eth2NetworkConfig, no_wait: bool, no_confirmation: bool, + genesis_state_url: Option, + genesis_state_url_timeout: Duration, + log: &Logger, ) -> Result<(), String> { let genesis_data = get_geneisis_data(client).await?; let testnet_genesis_root = eth2_network_config - .beacon_state::() - .as_ref() - .expect("network should have valid genesis state") - .genesis_validators_root(); + .genesis_validators_root::(genesis_state_url.as_deref(), genesis_state_url_timeout, log)? + .ok_or("Genesis state is unknown")?; // Verify that the beacon node and validator being exited are on the same network. 
if genesis_data.genesis_validators_root != testnet_genesis_root { diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index f25bbd8159f..570f29b4ad6 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -7,7 +7,8 @@ use slashing_protection::{ use std::fs::File; use std::path::PathBuf; use std::str::FromStr; -use types::{BeaconState, Epoch, EthSpec, PublicKeyBytes, Slot}; +use std::time::Duration; +use types::{Epoch, EthSpec, PublicKeyBytes, Slot}; pub const CMD: &str = "slashing-protection"; pub const IMPORT_CMD: &str = "import"; @@ -82,19 +83,24 @@ pub fn cli_run( ) -> Result<(), String> { let slashing_protection_db_path = validator_base_dir.join(SLASHING_PROTECTION_FILENAME); + let genesis_state_url: Option = + clap_utils::parse_optional(matches, "genesis-state-url")?; + let genesis_state_url_timeout = + clap_utils::parse_required(matches, "genesis-state-url-timeout") + .map(Duration::from_secs)?; + + let context = env.core_context(); let eth2_network_config = env .eth2_network_config .ok_or("Unable to get testnet configuration from the environment")?; let genesis_validators_root = eth2_network_config - .beacon_state::() - .map(|state: BeaconState| state.genesis_validators_root()) - .map_err(|e| { - format!( - "Unable to get genesis state, has genesis occurred? Detail: {:?}", - e - ) - })?; + .genesis_validators_root::( + genesis_state_url.as_deref(), + genesis_state_url_timeout, + context.log(), + )? + .ok_or_else(|| "Unable to get genesis state, has genesis occurred?".to_string())?; match matches.subcommand() { (IMPORT_CMD, Some(matches)) => { diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index f55c724dc39..e7d24e8ab46 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,7 +1,10 @@ [package] name = "beacon_node" -version = "4.3.0" -authors = ["Paul Hauner ", "Age Manning ", + "Age Manning BeaconChain { self.log, "Produced block on state"; "block_size" => block_size, + "slot" => block.slot(), ); metrics::observe(&metrics::BLOCK_SIZE, block_size as f64); @@ -6163,14 +6164,16 @@ impl BeaconChain { let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt { (state, state_root) } else { - let state_root = head_block.state_root; - let state = self + let block_state_root = head_block.state_root; + let max_slot = shuffling_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let (state_root, state) = self .store .get_inconsistent_state_for_attestation_verification_only( - &state_root, - Some(head_block.slot), + &head_block_root, + max_slot, + block_state_root, )? - .ok_or(Error::MissingBeaconState(head_block.state_root))?; + .ok_or(Error::MissingBeaconState(block_state_root))?; (state, state_root) }; diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 9b2edbd8b5d..2a42b49b422 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -321,9 +321,17 @@ where .deconstruct() .0; - let state = self + let max_slot = self + .justified_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + let (_, state) = self .store - .get_state(&justified_block.state_root(), Some(justified_block.slot())) + .get_advanced_hot_state( + self.justified_checkpoint.root, + max_slot, + justified_block.state_root(), + ) .map_err(Error::FailedToReadState)? 
.ok_or_else(|| Error::MissingState(justified_block.state_root()))?; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 0a82eae3711..3654484e1f5 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1261,7 +1261,7 @@ impl ExecutionPendingBlock { // Perform a sanity check on the pre-state. let parent_slot = parent.beacon_block.slot(); - if state.slot() < parent_slot || state.slot() > parent_slot + 1 { + if state.slot() < parent_slot || state.slot() > block.slot() { return Err(BeaconChainError::BadPreState { parent_root: parent.beacon_block_root, parent_slot, @@ -1760,13 +1760,18 @@ fn load_parent( BlockError::from(BeaconChainError::MissingBeaconBlock(block.parent_root())) })?; - // Load the parent blocks state from the database, returning an error if it is not found. + // Load the parent block's state from the database, returning an error if it is not found. // It is an error because if we know the parent block we should also know the parent state. - let parent_state_root = parent_block.state_root(); - let parent_state = chain - .get_state(&parent_state_root, Some(parent_block.slot()))? + // Retrieve any state that is advanced through to at most `block.slot()`: this is + // particularly important if `block` descends from the finalized/split block, but at a slot + // prior to the finalized slot (which is invalid and inaccessible in our DB schema). + let (parent_state_root, parent_state) = chain + .store + .get_advanced_hot_state(root, block.slot(), parent_block.state_root())? .ok_or_else(|| { - BeaconChainError::DBInconsistent(format!("Missing state {:?}", parent_state_root)) + BeaconChainError::DBInconsistent( + format!("Missing state for parent block {root:?}",), + ) })?; metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES); diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 044391c415e..54739f2b8ac 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -24,8 +24,9 @@ use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use slasher::Slasher; -use slog::{crit, error, info, Logger}; +use slog::{crit, debug, error, info, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; +use state_processing::per_slot_processing; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; @@ -287,7 +288,7 @@ where let genesis_state = store .get_state(&genesis_block.state_root(), Some(genesis_block.slot())) .map_err(|e| descriptive_db_error("genesis state", &e))? - .ok_or("Genesis block not found in store")?; + .ok_or("Genesis state not found in store")?; self.genesis_time = Some(genesis_state.genesis_time()); @@ -382,6 +383,16 @@ where let (genesis, updated_builder) = self.set_genesis_state(beacon_state)?; self = updated_builder; + // Stage the database's metadata fields for atomic storage when `build` is called. + // Since v4.4.0 we will set the anchor with a dummy state upper limit in order to prevent + // historic states from being retained (unless `--reconstruct-historic-states` is set). 
+ let retain_historic_states = self.chain_config.reconstruct_historic_states; + self.pending_io_batch.push( + store + .init_anchor_info(genesis.beacon_block.message(), retain_historic_states) + .map_err(|e| format!("Failed to initialize genesis anchor: {:?}", e))?, + ); + let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis) .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; let current_slot = None; @@ -408,30 +419,28 @@ where weak_subj_block: SignedBeaconBlock, genesis_state: BeaconState, ) -> Result { - let store = self.store.clone().ok_or("genesis_state requires a store")?; - - let weak_subj_slot = weak_subj_state.slot(); - let weak_subj_block_root = weak_subj_block.canonical_root(); - let weak_subj_state_root = weak_subj_block.state_root(); - - // Check that the given block lies on an epoch boundary. Due to the database only storing - // full states on epoch boundaries and at restore points it would be difficult to support - // starting from a mid-epoch state. - if weak_subj_slot % TEthSpec::slots_per_epoch() != 0 { - return Err(format!( - "Checkpoint block at slot {} is not aligned to epoch start. \ - Please supply an aligned checkpoint with block.slot % 32 == 0", - weak_subj_block.slot(), - )); - } + let store = self + .store + .clone() + .ok_or("weak_subjectivity_state requires a store")?; + let log = self + .log + .as_ref() + .ok_or("weak_subjectivity_state requires a log")?; - // Check that the block and state have consistent slots and state roots. - if weak_subj_state.slot() != weak_subj_block.slot() { - return Err(format!( - "Slot of snapshot block ({}) does not match snapshot state ({})", - weak_subj_block.slot(), - weak_subj_state.slot(), - )); + // Ensure the state is advanced to an epoch boundary. + let slots_per_epoch = TEthSpec::slots_per_epoch(); + if weak_subj_state.slot() % slots_per_epoch != 0 { + debug!( + log, + "Advancing checkpoint state to boundary"; + "state_slot" => weak_subj_state.slot(), + "block_slot" => weak_subj_block.slot(), + ); + while weak_subj_state.slot() % slots_per_epoch != 0 { + per_slot_processing(&mut weak_subj_state, None, &self.spec) + .map_err(|e| format!("Error advancing state: {e:?}"))?; + } } // Prime all caches before storing the state in the database and computing the tree hash @@ -439,15 +448,19 @@ where weak_subj_state .build_caches(&self.spec) .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?; - - let computed_state_root = weak_subj_state + let weak_subj_state_root = weak_subj_state .update_tree_hash_cache() .map_err(|e| format!("Error computing checkpoint state root: {:?}", e))?; - if weak_subj_state_root != computed_state_root { + let weak_subj_slot = weak_subj_state.slot(); + let weak_subj_block_root = weak_subj_block.canonical_root(); + + // Validate the state's `latest_block_header` against the checkpoint block. + let state_latest_block_root = weak_subj_state.get_latest_block_root(weak_subj_state_root); + if weak_subj_block_root != state_latest_block_root { return Err(format!( - "Snapshot state root does not match block, expected: {:?}, got: {:?}", - weak_subj_state_root, computed_state_root + "Snapshot state's most recent block root does not match block, expected: {:?}, got: {:?}", + weak_subj_block_root, state_latest_block_root )); } @@ -464,10 +477,25 @@ where // Set the store's split point *before* storing genesis so that genesis is stored // immediately in the freezer DB. 
- store.set_split(weak_subj_slot, weak_subj_state_root); + store.set_split(weak_subj_slot, weak_subj_state_root, weak_subj_block_root); let (_, updated_builder) = self.set_genesis_state(genesis_state)?; self = updated_builder; + // Fill in the linear block roots between the checkpoint block's slot and the aligned + // state's slot. All slots less than the block's slot will be handled by block backfill, + // while states greater or equal to the checkpoint state will be handled by `migrate_db`. + let block_root_batch = store + .store_frozen_block_root_at_skip_slots( + weak_subj_block.slot(), + weak_subj_state.slot(), + weak_subj_block_root, + ) + .map_err(|e| format!("Error writing frozen block roots: {e:?}"))?; + store + .cold_db + .do_atomically(block_root_batch) + .map_err(|e| format!("Error writing frozen block roots: {e:?}"))?; + // Write the state and block non-atomically, it doesn't matter if they're forgotten // about on a crash restart. store @@ -480,10 +508,11 @@ where // Stage the database's metadata fields for atomic storage when `build` is called. // This prevents the database from restarting in an inconsistent state if the anchor // info or split point is written before the `PersistedBeaconChain`. + let retain_historic_states = self.chain_config.reconstruct_historic_states; self.pending_io_batch.push(store.store_split_in_batch()); self.pending_io_batch.push( store - .init_anchor_info(weak_subj_block.message()) + .init_anchor_info(weak_subj_block.message(), retain_historic_states) .map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?, ); @@ -503,13 +532,12 @@ where let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot) .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; - let current_slot = Some(snapshot.beacon_block.slot()); let fork_choice = ForkChoice::from_anchor( fc_store, snapshot.beacon_block_root, &snapshot.beacon_block, &snapshot.beacon_state, - current_slot, + Some(weak_subj_slot), &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -672,9 +700,8 @@ where Err(e) => return Err(descriptive_db_error("head block", &e)), }; - let head_state_root = head_block.state_root(); - let head_state = store - .get_state(&head_state_root, Some(head_block.slot())) + let (_head_state_root, head_state) = store + .get_advanced_hot_state(head_block_root, current_slot, head_block.state_root()) .map_err(|e| descriptive_db_error("head state", &e))? .ok_or("Head state not found in store")?; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 2b1f714362f..7fa5b015214 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -47,7 +47,8 @@ use crate::{ }; use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead}; use fork_choice::{ - ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, ResetPayloadStatuses, + ExecutionStatus, ForkChoiceStore, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, + ResetPayloadStatuses, }; use itertools::process_results; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -298,10 +299,10 @@ impl CanonicalHead { let beacon_block = store .get_full_block(&beacon_block_root)? .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; - let beacon_state_root = beacon_block.state_root(); - let beacon_state = store - .get_state(&beacon_state_root, Some(beacon_block.slot()))? 
- .ok_or(Error::MissingBeaconState(beacon_state_root))?; + let current_slot = fork_choice.fc_store().get_current_slot(); + let (_, beacon_state) = store + .get_advanced_hot_state(beacon_block_root, current_slot, beacon_block.state_root())? + .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; let snapshot = BeaconSnapshot { beacon_block_root, @@ -669,10 +670,14 @@ impl BeaconChain { .get_full_block(&new_view.head_block_root)? .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; - let beacon_state_root = beacon_block.state_root(); - let beacon_state: BeaconState = self - .get_state(&beacon_state_root, Some(beacon_block.slot()))? - .ok_or(Error::MissingBeaconState(beacon_state_root))?; + let (_, beacon_state) = self + .store + .get_advanced_hot_state( + new_view.head_block_root, + current_slot, + beacon_block.state_root(), + )? + .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; Ok(BeaconSnapshot { beacon_block: Arc::new(beacon_block), diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index fed05032374..b267cc853f8 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -21,8 +21,11 @@ pub struct ServerSentEventHandler { } impl ServerSentEventHandler { - pub fn new(log: Logger) -> Self { - Self::new_with_capacity(log, DEFAULT_CHANNEL_CAPACITY) + pub fn new(log: Logger, capacity_multiplier: usize) -> Self { + Self::new_with_capacity( + log, + capacity_multiplier.saturating_mul(DEFAULT_CHANNEL_CAPACITY), + ) } pub fn new_with_capacity(log: Logger, capacity: usize) -> Self { diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 8306b66d7b5..6353a64e007 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -266,6 +266,7 @@ impl, Cold: ItemStore> BackgroundMigrator state, @@ -319,7 +320,12 @@ impl, Cold: ItemStore> BackgroundMigrator {} Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { debug!( diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index 59c67bd1b95..605a1343210 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -841,7 +841,7 @@ mod tests { let mut store = $type::default(); let max_cap = store.max_capacity(); - let to_skip = vec![1_u64, 3, 4, 5]; + let to_skip = [1_u64, 3, 4, 5]; let periods = (0..max_cap * 3) .into_iter() .filter(|i| !to_skip.contains(i)) @@ -1012,7 +1012,7 @@ mod tests { let mut store = $type::default(); let max_cap = store.max_capacity(); - let to_skip = vec![1_u64, 3, 4, 5]; + let to_skip = [1_u64, 3, 4, 5]; let periods = (0..max_cap * 3) .into_iter() .filter(|i| !to_skip.contains(i)) @@ -1121,7 +1121,7 @@ mod tests { let mut store = $type::default(); let max_cap = store.max_capacity(); - let to_skip = vec![1_u64, 3, 4, 5]; + let to_skip = [1_u64, 3, 4, 5]; let periods = (0..max_cap * 3) .into_iter() .filter(|i| !to_skip.contains(i)) diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 5cea51090b9..7878fd14aa0 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -9,7 +9,7 @@ use beacon_chain::{ test_utils::{ test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }, - BeaconChain, 
BeaconChainError, BeaconChainTypes, WhenSlotSkipped, + BeaconChain, BeaconChainError, BeaconChainTypes, ChainConfig, WhenSlotSkipped, }; use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; @@ -47,6 +47,10 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness Vec> { fn get_harness(validator_count: usize) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 9a8c324d09f..cd4351297bc 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -7,7 +7,7 @@ use beacon_chain::otb_verification_service::{ use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, + BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, NotifyExecutionLayer, OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, @@ -59,6 +59,10 @@ impl InvalidPayloadRig { let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec) + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) .logger(test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .mock_execution_layer() diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 29027748259..ab54af42c78 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -9,14 +9,15 @@ use beacon_chain::test_utils::{ use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; use beacon_chain::{ historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain, - BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer, - ServerSentEventHandler, WhenSlotSkipped, + BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig, + NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, }; use lazy_static::lazy_static; use logging::test_logger; use maplit::hashset; use rand::Rng; -use state_processing::BlockReplayer; +use slot_clock::{SlotClock, TestingSlotClock}; +use state_processing::{state_advance::complete_state_advance, BlockReplayer}; use std::collections::HashMap; use std::collections::HashSet; use std::convert::TryInto; @@ -65,6 +66,19 @@ fn get_store_with_spec( fn get_harness( store: Arc, LevelDB>>, validator_count: usize, +) -> TestHarness { + // Most tests expect to retain historic states, so we use this as the default. 
+ let chain_config = ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }; + get_harness_generic(store, validator_count, chain_config) +} + +fn get_harness_generic( + store: Arc, LevelDB>>, + validator_count: usize, + chain_config: ChainConfig, ) -> TestHarness { let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() @@ -72,6 +86,7 @@ fn get_harness( .logger(store.logger().clone()) .fresh_disk_store(store) .mock_execution_layer() + .chain_config(chain_config) .build(); harness.advance_slot(); harness @@ -406,7 +421,7 @@ async fn forwards_iter_block_and_state_roots_until() { // The last restore point slot is the point at which the hybrid forwards iterator behaviour // changes. - let last_restore_point_slot = store.get_latest_restore_point_slot(); + let last_restore_point_slot = store.get_latest_restore_point_slot().unwrap(); assert!(last_restore_point_slot > 0); let chain = &harness.chain; @@ -460,13 +475,15 @@ async fn block_replay_with_inaccurate_state_roots() { .await; // Slot must not be 0 mod 32 or else no blocks will be replayed. - let (mut head_state, head_root) = harness.get_current_state_and_root(); + let (mut head_state, head_state_root) = harness.get_current_state_and_root(); + let head_block_root = harness.head_block_root(); assert_ne!(head_state.slot() % 32, 0); - let mut fast_head_state = store + let (_, mut fast_head_state) = store .get_inconsistent_state_for_attestation_verification_only( - &head_root, - Some(head_state.slot()), + &head_block_root, + head_state.slot(), + head_state_root, ) .unwrap() .unwrap(); @@ -565,14 +582,7 @@ async fn block_replayer_hooks() { async fn delete_blocks_and_states() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); - let validators_keypairs = - types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); - let harness = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_disk_store(store.clone()) - .mock_execution_layer() - .build(); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let unforked_blocks: u64 = 4 * E::slots_per_epoch(); @@ -1015,18 +1025,14 @@ fn check_shuffling_compatible( // Ensure blocks from abandoned forks are pruned from the Hot DB #[tokio::test] async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let slots_per_epoch = rig.slots_per_epoch(); let (mut state, state_root) = rig.get_current_state_and_root(); @@ -1125,18 +1131,14 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { #[tokio::test] async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const 
ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let slots_per_epoch = rig.slots_per_epoch(); let (state, state_root) = rig.get_current_state_and_root(); @@ -1260,15 +1262,11 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { const HONEST_VALIDATOR_COUNT: usize = 32; const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let slots_per_epoch = rig.slots_per_epoch(); let (mut state, state_root) = rig.get_current_state_and_root(); @@ -1352,18 +1350,14 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { #[tokio::test] async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let (state, state_root) = rig.get_current_state_and_root(); // Fill up 0th epoch with canonical chain blocks @@ -1497,18 +1491,14 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { // This is to check if state outside of normal block processing are pruned correctly. 
#[tokio::test] async fn prunes_skipped_slots_states() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let (state, state_root) = rig.get_current_state_and_root(); let canonical_slots_zeroth_epoch: Vec = @@ -1626,18 +1616,14 @@ async fn prunes_skipped_slots_states() { // This is to check if state outside of normal block processing are pruned correctly. #[tokio::test] async fn finalizes_non_epoch_start_slot() { - const HONEST_VALIDATOR_COUNT: usize = 32 + 0; - const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0; + const HONEST_VALIDATOR_COUNT: usize = 32; + const ADVERSARIAL_VALIDATOR_COUNT: usize = 16; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; - let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .keypairs(validators_keypairs) - .fresh_ephemeral_store() - .mock_execution_layer() - .build(); + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let rig = get_harness(store.clone(), VALIDATOR_COUNT); let (state, state_root) = rig.get_current_state_and_root(); let canonical_slots_zeroth_epoch: Vec = @@ -2053,39 +2039,82 @@ async fn garbage_collect_temp_states_from_failed_block() { } #[tokio::test] -async fn weak_subjectivity_sync() { +async fn weak_subjectivity_sync_easy() { + let num_initial_slots = E::slots_per_epoch() * 11; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); + let slots = (1..num_initial_slots).map(Slot::new).collect(); + weak_subjectivity_sync_test(slots, checkpoint_slot).await +} + +#[tokio::test] +async fn weak_subjectivity_sync_unaligned_advanced_checkpoint() { + let num_initial_slots = E::slots_per_epoch() * 11; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); + let slots = (1..num_initial_slots) + .map(Slot::new) + .filter(|&slot| { + // Skip 3 slots leading up to the checkpoint slot. + slot <= checkpoint_slot - 3 || slot > checkpoint_slot + }) + .collect(); + weak_subjectivity_sync_test(slots, checkpoint_slot).await +} + +#[tokio::test] +async fn weak_subjectivity_sync_unaligned_unadvanced_checkpoint() { + let num_initial_slots = E::slots_per_epoch() * 11; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9 - 3); + let slots = (1..num_initial_slots) + .map(Slot::new) + .filter(|&slot| { + // Skip 3 slots after the checkpoint slot. 
+ slot <= checkpoint_slot || slot > checkpoint_slot + 3 + }) + .collect(); + weak_subjectivity_sync_test(slots, checkpoint_slot).await +} + +async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { // Build an initial chain on one harness, representing a synced node with full history. - let num_initial_blocks = E::slots_per_epoch() * 11; let num_final_blocks = E::slots_per_epoch() * 2; let temp1 = tempdir().unwrap(); let full_store = get_store(&temp1); let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT); + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); harness - .extend_chain( - num_initial_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, + .add_attested_blocks_at_slots( + genesis_state.clone(), + genesis_state_root, + &slots, + &all_validators, ) .await; - let genesis_state = full_store - .get_state(&harness.chain.genesis_state_root, Some(Slot::new(0))) + let wss_block_root = harness + .chain + .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let wss_state_root = harness + .chain + .state_root_at_slot(checkpoint_slot) .unwrap() .unwrap(); - let wss_checkpoint = harness.finalized_checkpoint(); + let wss_block = harness .chain .store - .get_full_block(&wss_checkpoint.root) + .get_full_block(&wss_block_root) .unwrap() .unwrap(); let wss_state = full_store - .get_state(&wss_block.state_root(), None) + .get_state(&wss_state_root, Some(checkpoint_slot)) .unwrap() .unwrap(); - let wss_slot = wss_block.slot(); // Add more blocks that advance finalization further. harness.advance_slot(); @@ -2104,20 +2133,26 @@ async fn weak_subjectivity_sync() { let spec = test_spec::(); let seconds_per_slot = spec.seconds_per_slot; - // Initialise a new beacon chain from the finalized checkpoint + // Initialise a new beacon chain from the finalized checkpoint. + // The slot clock must be set to a time ahead of the checkpoint state. + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(harness.chain.genesis_time), + Duration::from_secs(seconds_per_slot), + ); + slot_clock.set_slot(harness.get_current_slot().as_u64()); let beacon_chain = Arc::new( BeaconChainBuilder::new(MinimalEthSpec) .store(store.clone()) .custom_spec(test_spec::()) .task_executor(harness.chain.task_executor.clone()) + .logger(log.clone()) .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) .unwrap() - .logger(log.clone()) .store_migrator_config(MigratorConfig::default().blocking()) .dummy_eth1_backend() .expect("should build dummy backend") - .testing_slot_clock(Duration::from_secs(seconds_per_slot)) - .expect("should configure testing slot clock") + .slot_clock(slot_clock) .shutdown_sender(shutdown_tx) .chain_config(ChainConfig::default()) .event_handler(Some(ServerSentEventHandler::new_with_capacity( @@ -2131,9 +2166,9 @@ async fn weak_subjectivity_sync() { // Apply blocks forward to reach head. let chain_dump = harness.chain.chain_dump().unwrap(); - let new_blocks = &chain_dump[wss_slot.as_usize() + 1..]; - - assert_eq!(new_blocks[0].beacon_block.slot(), wss_slot + 1); + let new_blocks = chain_dump + .iter() + .filter(|snapshot| snapshot.beacon_block.slot() > checkpoint_slot); for snapshot in new_blocks { let full_block = harness @@ -2219,13 +2254,17 @@ async fn weak_subjectivity_sync() { assert_eq!(forwards, expected); // All blocks can be loaded. 
+ let mut prev_block_root = Hash256::zero(); for (block_root, slot) in beacon_chain .forwards_iter_block_roots(Slot::new(0)) .unwrap() .map(Result::unwrap) { let block = store.get_blinded_block(&block_root).unwrap().unwrap(); - assert_eq!(block.slot(), slot); + if block_root != prev_block_root { + assert_eq!(block.slot(), slot); + } + prev_block_root = block_root; } // All states from the oldest state slot can be loaded. @@ -2240,14 +2279,141 @@ async fn weak_subjectivity_sync() { assert_eq!(state.canonical_root(), state_root); } - // Anchor slot is still set to the starting slot. - assert_eq!(store.get_anchor_slot(), Some(wss_slot)); + // Anchor slot is still set to the slot of the checkpoint block. + assert_eq!(store.get_anchor_slot(), Some(wss_block.slot())); // Reconstruct states. store.clone().reconstruct_historic_states().unwrap(); assert_eq!(store.get_anchor_slot(), None); } +/// Test that blocks and attestations that refer to states around an unaligned split state are +/// processed correctly. +#[tokio::test] +async fn process_blocks_and_attestations_for_unaligned_checkpoint() { + let temp = tempdir().unwrap(); + let store = get_store(&temp); + let chain_config = ChainConfig { + reconstruct_historic_states: false, + ..ChainConfig::default() + }; + let harness = get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, chain_config); + + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + + let split_slot = Slot::new(E::slots_per_epoch() * 4); + let pre_skips = 1; + let post_skips = 1; + + // Build the chain up to the intended split slot, with 3 skips before the split. + let slots = (1..=split_slot.as_u64() - pre_skips) + .map(Slot::new) + .collect::>(); + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + harness + .add_attested_blocks_at_slots( + genesis_state.clone(), + genesis_state_root, + &slots, + &all_validators, + ) + .await; + + // Before the split slot becomes finalized, create two forking blocks that build on the split + // block: + // + // - one that is invalid because it conflicts with finalization (slot <= finalized_slot) + // - one that is valid because its slot is not finalized (slot > finalized_slot) + let (unadvanced_split_state, unadvanced_split_state_root) = + harness.get_current_state_and_root(); + + let (invalid_fork_block, _) = harness + .make_block(unadvanced_split_state.clone(), split_slot) + .await; + let (valid_fork_block, _) = harness + .make_block(unadvanced_split_state.clone(), split_slot + 1) + .await; + + // Advance the chain so that the intended split slot is finalized. + // Do not attest in the epoch boundary slot, to make attestation production later easier (no + // equivocations). + let finalizing_slot = split_slot + 2 * E::slots_per_epoch(); + for _ in 0..pre_skips + post_skips { + harness.advance_slot(); + } + harness.extend_to_slot(finalizing_slot - 1).await; + harness + .add_block_at_slot(finalizing_slot, harness.get_current_state()) + .await + .unwrap(); + + // Check that the split slot is as intended. + let split = store.get_split_info(); + assert_eq!(split.slot, split_slot); + assert_eq!(split.block_root, valid_fork_block.parent_root()); + assert_ne!(split.state_root, unadvanced_split_state_root); + + // Applying the invalid block should fail. 
+ let err = harness + .chain + .process_block( + invalid_fork_block.canonical_root(), + Arc::new(invalid_fork_block.clone()), + NotifyExecutionLayer::Yes, + || Ok(()), + ) + .await + .unwrap_err(); + assert!(matches!(err, BlockError::WouldRevertFinalizedSlot { .. })); + + // Applying the valid block should succeed, but it should not become head. + harness + .chain + .process_block( + valid_fork_block.canonical_root(), + Arc::new(valid_fork_block.clone()), + NotifyExecutionLayer::Yes, + || Ok(()), + ) + .await + .unwrap(); + harness.chain.recompute_head_at_current_slot().await; + assert_ne!(harness.head_block_root(), valid_fork_block.canonical_root()); + + // Attestations to the split block in the next 2 epochs should be processed successfully. + let attestation_start_slot = harness.get_current_slot(); + let attestation_end_slot = attestation_start_slot + 2 * E::slots_per_epoch(); + let (split_state_root, mut advanced_split_state) = harness + .chain + .store + .get_advanced_hot_state(split.block_root, split.slot, split.state_root) + .unwrap() + .unwrap(); + complete_state_advance( + &mut advanced_split_state, + Some(split_state_root), + attestation_start_slot, + &harness.chain.spec, + ) + .unwrap(); + advanced_split_state + .build_caches(&harness.chain.spec) + .unwrap(); + let advanced_split_state_root = advanced_split_state.update_tree_hash_cache().unwrap(); + for slot in (attestation_start_slot.as_u64()..attestation_end_slot.as_u64()).map(Slot::new) { + let attestations = harness.make_attestations( + &all_validators, + &advanced_split_state, + advanced_split_state_root, + split.block_root.into(), + slot, + ); + harness.advance_slot(); + harness.process_attestations(attestations); + } +} + #[tokio::test] async fn finalizes_after_resuming_from_db() { let validator_count = 16; @@ -2306,6 +2472,7 @@ async fn finalizes_after_resuming_from_db() { .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .resumed_disk_store(store) + .testing_slot_clock(original_chain.slot_clock.clone()) .mock_execution_layer() .build(); @@ -2559,6 +2726,9 @@ async fn schema_downgrade_to_min_version() { SchemaVersion(11) }; + // Save the slot clock so that the new harness doesn't revert in time. + let slot_clock = harness.chain.slot_clock.clone(); + // Close the database to ensure everything is written to disk. drop(store); drop(harness); @@ -2589,11 +2759,21 @@ async fn schema_downgrade_to_min_version() { ) .expect("schema upgrade from minimum version should work"); - // Rescreate the harness. + // Recreate the harness. 
+ /* + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(harness.chain.genesis_time), + Duration::from_secs(spec.seconds_per_slot), + ); + slot_clock.set_slot(harness.get_current_slot().as_u64()); + */ + let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec()) .logger(store.logger().clone()) + .testing_slot_clock(slot_clock) .resumed_disk_store(store.clone()) .mock_execution_layer() .build(); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index c5b2892cbdb..8935c69926c 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -6,7 +6,7 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, OP_POOL_DB_KEY, }, - BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, + BeaconChain, ChainConfig, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped, }; use lazy_static::lazy_static; use operation_pool::PersistedOperationPool; @@ -28,6 +28,10 @@ lazy_static! { fn get_harness(validator_count: usize) -> BeaconChainHarness> { let harness = BeaconChainHarness::builder(MinimalEthSpec) .default_spec() + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_ephemeral_store() .mock_execution_layer() diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index bf5d8bced46..4c1da85fa5c 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -708,7 +708,6 @@ impl Stream for InboundEvents { pub struct BeaconProcessor { pub network_globals: Arc>, pub executor: TaskExecutor, - pub max_workers: usize, pub current_workers: usize, pub config: BeaconProcessorConfig, pub log: Logger, @@ -721,7 +720,7 @@ impl BeaconProcessor { /// - Performed immediately, if a worker is available. /// - Queued for later processing, if no worker is currently available. /// - /// Only `self.max_workers` will ever be spawned at one time. Each worker is a `tokio` task + /// Only `self.config.max_workers` will ever be spawned at one time. Each worker is a `tokio` task /// started with `spawn_blocking`. 
/// /// The optional `work_journal_tx` allows for an outside process to receive a log of all work @@ -896,7 +895,7 @@ impl BeaconProcessor { let _ = work_journal_tx.try_send(id); } - let can_spawn = self.current_workers < self.max_workers; + let can_spawn = self.current_workers < self.config.max_workers; let drop_during_sync = work_event .as_ref() .map_or(false, |event| event.drop_during_sync); diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 72b6a6c7d47..87e16509026 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -6,11 +6,11 @@ edition = "2021" [dev-dependencies] serde_yaml = "0.8.13" -state_processing = { path = "../../consensus/state_processing" } operation_pool = { path = "../operation_pool" } tokio = "1.14.0" [dependencies] +state_processing = { path = "../../consensus/state_processing" } beacon_chain = { path = "../beacon_chain" } store = { path = "../store" } network = { path = "../network" } @@ -44,4 +44,3 @@ slasher_service = { path = "../../slasher/service" } monitoring_api = {path = "../../common/monitoring_api"} execution_layer = { path = "../execution_layer" } beacon_processor = { path = "../beacon_processor" } -num_cpus = "1.13.0" diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 8383963b7c2..07990e3c1fb 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -29,7 +29,6 @@ use network::{NetworkConfig, NetworkSenders, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; use slog::{debug, info, warn, Logger}; -use std::cmp; use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -155,14 +154,18 @@ where let runtime_context = runtime_context.ok_or("beacon_chain_start_method requires a runtime context")?; let context = runtime_context.service_context("beacon".into()); + let log = context.log(); let spec = chain_spec.ok_or("beacon_chain_start_method requires a chain spec")?; let event_handler = if self.http_api_config.enabled { - Some(ServerSentEventHandler::new(context.log().clone())) + Some(ServerSentEventHandler::new( + context.log().clone(), + self.http_api_config.sse_capacity_multiplier, + )) } else { None }; - let execution_layer = if let Some(config) = config.execution_layer { + let execution_layer = if let Some(config) = config.execution_layer.clone() { let context = runtime_context.service_context("exec".into()); let execution_layer = ExecutionLayer::from_config( config, @@ -247,23 +250,19 @@ where )?; builder.genesis_state(genesis_state).map(|v| (v, None))? } - ClientGenesis::SszBytes { - genesis_state_bytes, - } => { + ClientGenesis::GenesisState => { info!( context.log(), "Starting from known genesis state"; ); - let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) - .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; + let genesis_state = genesis_state(&runtime_context, &config, log)?; builder.genesis_state(genesis_state).map(|v| (v, None))? 
} ClientGenesis::WeakSubjSszBytes { anchor_state_bytes, anchor_block_bytes, - genesis_state_bytes, } => { info!(context.log(), "Starting checkpoint sync"); if config.chain.genesis_backfill { @@ -277,17 +276,13 @@ where .map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?; let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec) .map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?; - let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) - .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; + let genesis_state = genesis_state(&runtime_context, &config, log)?; builder .weak_subjectivity_state(anchor_state, anchor_block, genesis_state) .map(|v| (v, None))? } - ClientGenesis::CheckpointSyncUrl { - genesis_state_bytes, - url, - } => { + ClientGenesis::CheckpointSyncUrl { url } => { info!( context.log(), "Starting checkpoint sync"; @@ -306,7 +301,6 @@ where config.chain.checkpoint_sync_url_timeout, )), ); - let slots_per_epoch = TEthSpec::slots_per_epoch(); let deposit_snapshot = if config.sync_eth1_chain { // We want to fetch deposit snapshot before fetching the finalized beacon state to @@ -353,10 +347,23 @@ where None }; - debug!(context.log(), "Downloading finalized block"); - // Find a suitable finalized block on an epoch boundary. - let mut block = remote - .get_beacon_blocks_ssz::(BlockId::Finalized, &spec) + debug!( + context.log(), + "Downloading finalized state"; + ); + let state = remote + .get_debug_beacon_states_ssz::(StateId::Finalized, &spec) + .await + .map_err(|e| format!("Error loading checkpoint state from remote: {:?}", e))? + .ok_or_else(|| "Checkpoint state missing from remote".to_string())?; + + debug!(context.log(), "Downloaded finalized state"; "slot" => ?state.slot()); + + let finalized_block_slot = state.latest_block_header().slot; + + debug!(context.log(), "Downloading finalized block"; "block_slot" => ?finalized_block_slot); + let block = remote + .get_beacon_blocks_ssz::(BlockId::Slot(finalized_block_slot), &spec) .await .map_err(|e| match e { ApiError::InvalidSsz(e) => format!( @@ -370,65 +377,14 @@ where debug!(context.log(), "Downloaded finalized block"); - let mut block_slot = block.slot(); - - while block.slot() % slots_per_epoch != 0 { - block_slot = (block_slot / slots_per_epoch - 1) * slots_per_epoch; - - debug!( - context.log(), - "Searching for aligned checkpoint block"; - "block_slot" => block_slot - ); - - if let Some(found_block) = remote - .get_beacon_blocks_ssz::(BlockId::Slot(block_slot), &spec) - .await - .map_err(|e| { - format!("Error fetching block at slot {}: {:?}", block_slot, e) - })? - { - block = found_block; - } - } - - debug!( - context.log(), - "Downloaded aligned finalized block"; - "block_root" => ?block.canonical_root(), - "block_slot" => block.slot(), - ); - - let state_root = block.state_root(); - debug!( - context.log(), - "Downloading finalized state"; - "state_root" => ?state_root - ); - let state = remote - .get_debug_beacon_states_ssz::(StateId::Root(state_root), &spec) - .await - .map_err(|e| { - format!( - "Error loading checkpoint state from remote {:?}: {:?}", - state_root, e - ) - })? 
- .ok_or_else(|| { - format!("Checkpoint state missing from remote: {:?}", state_root) - })?; - - debug!(context.log(), "Downloaded finalized state"); - - let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) - .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; + let genesis_state = genesis_state(&runtime_context, &config, log)?; info!( context.log(), "Loaded checkpoint block and state"; - "slot" => block.slot(), + "block_slot" => block.slot(), + "state_slot" => state.slot(), "block_root" => ?block.canonical_root(), - "state_root" => ?state_root, ); let service = @@ -792,7 +748,6 @@ where BeaconProcessor { network_globals: network_globals.clone(), executor: beacon_processor_context.executor.clone(), - max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, config: beacon_processor_config, log: beacon_processor_context.log().clone(), @@ -1126,3 +1081,22 @@ where Ok(self) } } + +/// Obtain the genesis state from the `eth2_network_config` in `context`. +fn genesis_state( + context: &RuntimeContext, + config: &ClientConfig, + log: &Logger, +) -> Result, String> { + let eth2_network_config = context + .eth2_network_config + .as_ref() + .ok_or("An eth2_network_config is required to obtain the genesis state")?; + eth2_network_config + .genesis_state::( + config.genesis_state_url.as_deref(), + config.genesis_state_url_timeout, + log, + )? + .ok_or_else(|| "Genesis state is unknown".to_string()) +} diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index b4deb52fc31..adaf0279847 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -7,6 +7,7 @@ use sensitive_url::SensitiveUrl; use serde_derive::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; +use std::time::Duration; use types::{Graffiti, PublicKeyBytes}; /// Default directory name for the freezer database under the top-level data dir. const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; @@ -25,18 +26,13 @@ pub enum ClientGenesis { /// contract. #[default] DepositContract, - /// Loads the genesis state from SSZ-encoded `BeaconState` bytes. - /// - /// We include the bytes instead of the `BeaconState` because the `EthSpec` type - /// parameter would be very annoying. - SszBytes { genesis_state_bytes: Vec }, + /// Loads the genesis state from the genesis state in the `Eth2NetworkConfig`. + GenesisState, WeakSubjSszBytes { - genesis_state_bytes: Vec, anchor_state_bytes: Vec, anchor_block_bytes: Vec, }, CheckpointSyncUrl { - genesis_state_bytes: Vec, url: SensitiveUrl, }, } @@ -80,8 +76,9 @@ pub struct Config { pub monitoring_api: Option, pub slasher: Option, pub logger_config: LoggerConfig, - pub always_prefer_builder_payload: bool, pub beacon_processor: BeaconProcessorConfig, + pub genesis_state_url: Option, + pub genesis_state_url_timeout: Duration, } impl Default for Config { @@ -108,8 +105,10 @@ impl Default for Config { validator_monitor_pubkeys: vec![], validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, logger_config: LoggerConfig::default(), - always_prefer_builder_payload: false, beacon_processor: <_>::default(), + genesis_state_url: <_>::default(), + // This default value should always be overwritten by the CLI default value. 
+ genesis_state_url_timeout: Duration::from_secs(60), } } } diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 8492a5159e2..f2259a4813b 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -50,4 +50,4 @@ keccak-hash = "0.10.0" hash256-std-hasher = "0.15.2" triehash = "0.8.4" hash-db = "0.15.2" -pretty_reqwest_error = { path = "../../common/pretty_reqwest_error" } \ No newline at end of file +pretty_reqwest_error = { path = "../../common/pretty_reqwest_error" } diff --git a/beacon_node/genesis/src/common.rs b/beacon_node/genesis/src/common.rs index 06bf99f9f63..e48fa362046 100644 --- a/beacon_node/genesis/src/common.rs +++ b/beacon_node/genesis/src/common.rs @@ -39,7 +39,7 @@ pub fn genesis_deposits( Ok(deposit_data .into_iter() - .zip(proofs.into_iter()) + .zip(proofs) .map(|(data, proof)| (data, proof.into())) .map(|(data, proof)| Deposit { proof, data }) .collect()) diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index aad405d56ba..f3242a2b374 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -35,7 +35,6 @@ pub fn attester_duties( .epoch(T::EthSpec::slots_per_epoch()); if request_epoch == current_epoch - || request_epoch == tolerant_current_epoch || request_epoch == current_epoch + 1 || request_epoch == tolerant_current_epoch + 1 { @@ -46,7 +45,7 @@ pub fn attester_duties( request_epoch, current_epoch ))) } else { - // request_epoch < current_epoch + // request_epoch < current_epoch, in fact we only allow `request_epoch == current_epoch-1` in this case compute_historic_attester_duties(request_epoch, request_indices, chain) } } diff --git a/beacon_node/http_api/src/builder_states.rs b/beacon_node/http_api/src/builder_states.rs new file mode 100644 index 00000000000..90203f2d60c --- /dev/null +++ b/beacon_node/http_api/src/builder_states.rs @@ -0,0 +1,72 @@ +use crate::StateId; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use safe_arith::SafeArith; +use state_processing::per_block_processing::get_expected_withdrawals; +use state_processing::state_advance::partial_state_advance; +use std::sync::Arc; +use types::{BeaconState, EthSpec, ForkName, Slot, Withdrawals}; + +const MAX_EPOCH_LOOKAHEAD: u64 = 2; + +/// Get the withdrawals computed from the specified state, that will be included in the block +/// that gets built on the specified state. +pub fn get_next_withdrawals( + chain: &Arc>, + mut state: BeaconState, + state_id: StateId, + proposal_slot: Slot, +) -> Result, warp::Rejection> { + get_next_withdrawals_sanity_checks(chain, &state, proposal_slot)?; + + // advance the state to the epoch of the proposal slot. 
+ let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch()); + let (state_root, _, _) = state_id.root(chain)?; + if proposal_epoch != state.current_epoch() { + if let Err(e) = + partial_state_advance(&mut state, Some(state_root), proposal_slot, &chain.spec) + { + return Err(warp_utils::reject::custom_server_error(format!( + "failed to advance to the epoch of the proposal slot: {:?}", + e + ))); + } + } + + match get_expected_withdrawals(&state, &chain.spec) { + Ok(withdrawals) => Ok(withdrawals), + Err(e) => Err(warp_utils::reject::custom_server_error(format!( + "failed to get expected withdrawal: {:?}", + e + ))), + } +} + +fn get_next_withdrawals_sanity_checks( + chain: &BeaconChain, + state: &BeaconState, + proposal_slot: Slot, +) -> Result<(), warp::Rejection> { + if proposal_slot <= state.slot() { + return Err(warp_utils::reject::custom_bad_request( + "proposal slot must be greater than the pre-state slot".to_string(), + )); + } + + let fork = chain.spec.fork_name_at_slot::(proposal_slot); + if let ForkName::Base | ForkName::Altair | ForkName::Merge = fork { + return Err(warp_utils::reject::custom_bad_request( + "the specified state is a pre-capella state.".to_string(), + )); + } + + let look_ahead_limit = MAX_EPOCH_LOOKAHEAD + .safe_mul(T::EthSpec::slots_per_epoch()) + .map_err(warp_utils::reject::arith_error)?; + if proposal_slot >= state.slot() + look_ahead_limit { + return Err(warp_utils::reject::custom_bad_request(format!( + "proposal slot is greater than or equal to the look ahead limit: {look_ahead_limit}" + ))); + } + + Ok(()) +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 09f0bf822e2..fb05052c3ad 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -10,6 +10,7 @@ mod attester_duties; mod block_id; mod block_packing_efficiency; mod block_rewards; +mod builder_states; mod database; mod metrics; mod proposer_duties; @@ -33,6 +34,7 @@ use beacon_chain::{ use crate::validator::{produce_block_v3, produce_block_v2}; use beacon_processor::BeaconProcessorSend; pub use block_id::BlockId; +use builder_states::get_next_withdrawals; use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ @@ -66,7 +68,10 @@ use tokio::sync::{ mpsc::{Sender, UnboundedSender}, oneshot, }; -use tokio_stream::{wrappers::BroadcastStream, StreamExt}; +use tokio_stream::{ + wrappers::{errors::BroadcastStreamRecvError, BroadcastStream}, + StreamExt, +}; use types::{ Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, @@ -133,7 +138,10 @@ pub struct Config { pub allow_sync_stalled: bool, pub spec_fork_name: Option, pub data_dir: PathBuf, + pub sse_capacity_multiplier: usize, pub enable_beacon_processor: bool, + #[serde(with = "eth2::types::serde_status_code")] + pub duplicate_block_status_code: StatusCode, } impl Default for Config { @@ -147,7 +155,9 @@ impl Default for Config { allow_sync_stalled: false, spec_fork_name: None, data_dir: PathBuf::from(DEFAULT_ROOT_DIR), + sse_capacity_multiplier: 1, enable_beacon_processor: true, + duplicate_block_status_code: StatusCode::ACCEPTED, } } } @@ -505,6 +515,8 @@ pub fn serve( let task_spawner_filter = warp::any().map(move || TaskSpawner::new(beacon_processor_send.clone())); + let duplicate_block_status_code = ctx.config.duplicate_block_status_code; + /* * * Start of HTTP method definitions. 
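To make the `get_next_withdrawals_sanity_checks` bounds above concrete, here is a minimal standalone sketch using plain integers instead of Lighthouse's `Slot` and `SafeArith` types. It assumes `SLOTS_PER_EPOCH = 32` purely for illustration: the proposal slot must be strictly after the pre-state slot and within `MAX_EPOCH_LOOKAHEAD` epochs of it.

// Not Lighthouse code: a simplified version of the proposal-slot checks.
const SLOTS_PER_EPOCH: u64 = 32;
const MAX_EPOCH_LOOKAHEAD: u64 = 2;

fn check_proposal_slot(state_slot: u64, proposal_slot: u64) -> Result<(), String> {
    if proposal_slot <= state_slot {
        return Err("proposal slot must be greater than the pre-state slot".to_string());
    }
    let look_ahead_limit = MAX_EPOCH_LOOKAHEAD * SLOTS_PER_EPOCH;
    if proposal_slot >= state_slot + look_ahead_limit {
        return Err(format!(
            "proposal slot is greater than or equal to the look ahead limit: {look_ahead_limit}"
        ));
    }
    Ok(())
}

fn main() {
    assert!(check_proposal_slot(100, 101).is_ok());
    assert!(check_proposal_slot(100, 100).is_err()); // not after the pre-state slot
    assert!(check_proposal_slot(100, 164).is_err()); // 100 + 2 * 32 hits the limit
}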
@@ -1279,11 +1291,11 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |block: Arc>, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |block: Arc>, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( None, @@ -1292,9 +1304,9 @@ pub fn serve( &network_tx, log, BroadcastValidation::default(), + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1309,11 +1321,11 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |block_bytes: Bytes, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { let block = SignedBeaconBlock::::from_ssz_bytes(&block_bytes, &chain.spec) @@ -1329,9 +1341,9 @@ pub fn serve( &network_tx, log, BroadcastValidation::default(), + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1347,12 +1359,12 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |validation_level: api_types::BroadcastValidationQuery, - block: Arc>, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |validation_level: api_types::BroadcastValidationQuery, + block: Arc>, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_block( None, @@ -1361,9 +1373,9 @@ pub fn serve( &network_tx, log, validation_level.broadcast_validation, + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1379,12 +1391,12 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |validation_level: api_types::BroadcastValidationQuery, - block_bytes: Bytes, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |validation_level: api_types::BroadcastValidationQuery, + block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { let block = SignedBeaconBlock::::from_ssz_bytes(&block_bytes, &chain.spec) @@ -1400,9 +1412,9 @@ pub fn serve( &network_tx, log, validation_level.broadcast_validation, + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1422,11 +1434,11 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |block: SignedBeaconBlock>, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |block: SignedBlindedBeaconBlock, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_blinded_block( block, @@ -1434,9 +1446,9 @@ pub fn serve( &network_tx, log, BroadcastValidation::default(), + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1452,13 +1464,13 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |block_bytes: Bytes, - 
task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { + move |block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = SignedBeaconBlock::>::from_ssz_bytes( + let block = SignedBlindedBeaconBlock::::from_ssz_bytes( &block_bytes, &chain.spec, ) @@ -1471,9 +1483,9 @@ pub fn serve( &network_tx, log, BroadcastValidation::default(), + duplicate_block_status_code, ) .await - .map(|()| warp::reply().into_response()) }) }, ); @@ -1489,87 +1501,63 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |validation_level: api_types::BroadcastValidationQuery, - block: SignedBeaconBlock>, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| { - task_spawner.spawn_async(Priority::P0, async move { - match publish_blocks::publish_blinded_block( + move |validation_level: api_types::BroadcastValidationQuery, + block: SignedBlindedBeaconBlock, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + publish_blocks::publish_blinded_block( block, chain, &network_tx, log, validation_level.broadcast_validation, + duplicate_block_status_code, ) .await - { - Ok(()) => warp::reply().into_response(), - Err(e) => match warp_utils::reject::handle_rejection(e).await { - Ok(reply) => reply.into_response(), - Err(_) => warp::reply::with_status( - StatusCode::INTERNAL_SERVER_ERROR, - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response(), - }, - } }) }, ); - let post_beacon_blinded_blocks_v2_ssz = - eth_v2 - .and(warp::path("beacon")) - .and(warp::path("blinded_blocks")) - .and(warp::query::()) - .and(warp::path::end()) - .and(warp::body::bytes()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) - .and(log_filter.clone()) - .then( - |validation_level: api_types::BroadcastValidationQuery, - block_bytes: Bytes, - chain: Arc>, - network_tx: UnboundedSender>, - log: Logger| async move { - let block = - match SignedBeaconBlock::>::from_ssz_bytes( - &block_bytes, - &chain.spec, - ) { - Ok(data) => data, - Err(_) => { - return warp::reply::with_status( - StatusCode::BAD_REQUEST, - eth2::StatusCode::BAD_REQUEST, - ) - .into_response(); - } - }; - match publish_blocks::publish_blinded_block( + let post_beacon_blinded_blocks_v2_ssz = eth_v2 + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::bytes()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .then( + move |validation_level: api_types::BroadcastValidationQuery, + block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let block = SignedBlindedBeaconBlock::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; + publish_blocks::publish_blinded_block( block, chain, &network_tx, log, validation_level.broadcast_validation, + duplicate_block_status_code, ) .await - { - Ok(()) => warp::reply().into_response(), - Err(e) => match warp_utils::reject::handle_rejection(e).await { - Ok(reply) => reply.into_response(), - Err(_) => 
warp::reply::with_status( - StatusCode::INTERNAL_SERVER_ERROR, - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response(), - }, - } - }, - ); + }) + }, + ); let block_id_or_err = warp::path::param::().or_else(|_| async { Err(warp_utils::reject::custom_bad_request( @@ -2288,6 +2276,60 @@ pub fn serve( }, ); + /* + * builder/states + */ + + let builder_states_path = eth_v1 + .and(warp::path("builder")) + .and(warp::path("states")) + .and(chain_filter.clone()); + + // GET builder/states/{state_id}/expected_withdrawals + let get_expected_withdrawals = builder_states_path + .clone() + .and(task_spawner_filter.clone()) + .and(warp::path::param::()) + .and(warp::path("expected_withdrawals")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .then( + |chain: Arc>, + task_spawner: TaskSpawner, + state_id: StateId, + query: api_types::ExpectedWithdrawalsQuery, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (state, execution_optimistic, finalized) = state_id.state(&chain)?; + let proposal_slot = query.proposal_slot.unwrap_or(state.slot() + 1); + let withdrawals = + get_next_withdrawals::(&chain, state, state_id, proposal_slot)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .header("Content-Type", "application/octet-stream") + .body(withdrawals.as_ssz_bytes().into()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => Ok(warp::reply::json( + &api_types::ExecutionOptimisticFinalizedResponse { + data: withdrawals, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }, + ) + .into_response()), + } + }) + }, + ); + /* * beacon/rewards */ @@ -4319,22 +4361,29 @@ pub fn serve( } }; - receivers.push(BroadcastStream::new(receiver).map(|msg| { - match msg { - Ok(data) => Event::default() - .event(data.topic_name()) - .json_data(data) - .map_err(|e| { - warp_utils::reject::server_sent_event_error(format!( - "{:?}", - e - )) - }), - Err(e) => Err(warp_utils::reject::server_sent_event_error( - format!("{:?}", e), - )), - } - })); + receivers.push( + BroadcastStream::new(receiver) + .map(|msg| { + match msg { + Ok(data) => Event::default() + .event(data.topic_name()) + .json_data(data) + .unwrap_or_else(|e| { + Event::default() + .comment(format!("error - bad json: {e:?}")) + }), + // Do not terminate the stream if the channel fills + // up. Just drop some messages and send a comment to + // the client. 
+ Err(BroadcastStreamRecvError::Lagged(n)) => { + Event::default().comment(format!( + "error - dropped {n} messages" + )) + } + } + }) + .map(Ok::<_, std::convert::Infallible>), + ); } } else { return Err(warp_utils::reject::custom_server_error( @@ -4344,7 +4393,7 @@ pub fn serve( let s = futures::stream::select_all(receivers); - Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s))) + Ok(warp::sse::reply(warp::sse::keep_alive().stream(s))) }) }, ); @@ -4462,6 +4511,7 @@ pub fn serve( .uor(get_lighthouse_block_packing_efficiency) .uor(get_lighthouse_merge_readiness) .uor(get_events) + .uor(get_expected_withdrawals) .uor(lighthouse_log_events.boxed()) .recover(warp_utils::reject::handle_rejection), ) diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 0f2f7b361c7..58524f06981 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -4,7 +4,7 @@ use beacon_chain::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, IntoGossipVerifiedBlock, NotifyExecutionLayer, }; -use eth2::types::BroadcastValidation; +use eth2::types::{BroadcastValidation, ErrorMessage}; use execution_layer::ProvenancedPayload; use lighthouse_network::PubsubMessage; use network::NetworkMessage; @@ -19,7 +19,8 @@ use types::{ AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, FullPayload, Hash256, SignedBeaconBlock, }; -use warp::Rejection; +use warp::http::StatusCode; +use warp::{reply::Response, Rejection, Reply}; pub enum ProvenancedBlock> { /// The payload was built using a local EE. @@ -47,7 +48,8 @@ pub async fn publish_block>( network_tx: &UnboundedSender>, log: Logger, validation_level: BroadcastValidation, -) -> Result<(), Rejection> { + duplicate_status_code: StatusCode, +) -> Result { let seen_timestamp = timestamp_now(); let (block, is_locally_built_block) = match provenanced_block { ProvenancedBlock::Local(block, _) => (block, true), @@ -75,10 +77,30 @@ pub async fn publish_block>( }; /* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */ - let gossip_verified_block = block.into_gossip_verified_block(&chain).map_err(|e| { - warn!(log, "Not publishing block, not gossip verified"; "slot" => beacon_block.slot(), "error" => ?e); - warp_utils::reject::custom_bad_request(e.to_string()) - })?; + let gossip_verified_block = match block.into_gossip_verified_block(&chain) { + Ok(b) => b, + Err(BlockError::BlockIsAlreadyKnown) => { + // Allow the status code for duplicate blocks to be overridden based on config. 
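The server-sent-events change above keeps the stream alive when a slow consumer lags behind the broadcast channel, emitting a comment instead of an error. The following is a self-contained sketch of that pattern, not Lighthouse code; it assumes the `tokio` and `tokio-stream` crates, with tokio-stream's `sync` feature enabled for `BroadcastStream`.

use tokio_stream::{
    wrappers::{errors::BroadcastStreamRecvError, BroadcastStream},
    StreamExt,
};

#[tokio::main]
async fn main() {
    // Small capacity so the receiver is guaranteed to lag.
    let (tx, rx) = tokio::sync::broadcast::channel::<u64>(2);
    for i in 0..5u64 {
        let _ = tx.send(i);
    }
    drop(tx);

    let mut stream = BroadcastStream::new(rx).map(|msg| match msg {
        Ok(v) => format!("event {v}"),
        // Do not terminate the stream on lag; report how many messages were dropped.
        Err(BroadcastStreamRecvError::Lagged(n)) => format!("comment: dropped {n} messages"),
    });

    while let Some(item) = stream.next().await {
        println!("{item}");
    }
}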
+ return Ok(warp::reply::with_status( + warp::reply::json(&ErrorMessage { + code: duplicate_status_code.as_u16(), + message: "duplicate block".to_string(), + stacktraces: vec![], + }), + duplicate_status_code, + ) + .into_response()); + } + Err(e) => { + warn!( + log, + "Not publishing block - not gossip verified"; + "slot" => beacon_block.slot(), + "error" => ?e + ); + return Err(warp_utils::reject::custom_bad_request(e.to_string())); + } + }; let block_root = block_root.unwrap_or(gossip_verified_block.block_root); @@ -167,8 +189,7 @@ pub async fn publish_block>( &log, ) } - - Ok(()) + Ok(warp::reply().into_response()) } Err(BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) => { Err(warp_utils::reject::custom_server_error( @@ -178,10 +199,6 @@ pub async fn publish_block>( Err(BlockError::Slashable) => Err(warp_utils::reject::custom_bad_request( "proposal for this slot and proposer has already been seen".to_string(), )), - Err(BlockError::BlockIsAlreadyKnown) => { - info!(log, "Block from HTTP API already known"; "block" => ?block_root); - Ok(()) - } Err(e) => { if let BroadcastValidation::Gossip = validation_level { Err(warp_utils::reject::broadcast_without_import(format!("{e}"))) @@ -208,7 +225,8 @@ pub async fn publish_blinded_block( network_tx: &UnboundedSender>, log: Logger, validation_level: BroadcastValidation, -) -> Result<(), Rejection> { + duplicate_status_code: StatusCode, +) -> Result { let block_root = block.canonical_root(); let full_block: ProvenancedBlock>> = reconstruct_block(chain.clone(), block_root, block, log.clone()).await?; @@ -219,6 +237,7 @@ pub async fn publish_blinded_block( network_tx, log, validation_level, + duplicate_status_code, ) .await } diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 5e86053771e..1a76333e2d4 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -89,9 +89,7 @@ impl StateId { } else { // This block is either old and finalized, or recent and unfinalized, so // it's safe to fallback to the optimistic status of the finalized block. - chain - .canonical_head - .fork_choice_read_lock() + fork_choice .is_optimistic_or_invalid_block(&hot_summary.latest_block_root) .map_err(BeaconChainError::ForkChoiceError) .map_err(warp_utils::reject::beacon_chain_error)? diff --git a/beacon_node/http_api/src/task_spawner.rs b/beacon_node/http_api/src/task_spawner.rs index 503faff717b..8768e057dac 100644 --- a/beacon_node/http_api/src/task_spawner.rs +++ b/beacon_node/http_api/src/task_spawner.rs @@ -159,46 +159,6 @@ impl TaskSpawner { .and_then(|x| x) } } - - /// Executes an async task which always returns a `Response`. - pub async fn spawn_async( - self, - priority: Priority, - func: impl Future + Send + Sync + 'static, - ) -> Response { - if let Some(beacon_processor_send) = &self.beacon_processor_send { - // Create a wrapper future that will execute `func` and send the - // result to a channel held by this thread. - let (tx, rx) = oneshot::channel(); - let process_fn = async move { - // Await the future, collect the return value. - let func_result = func.await; - // Send the result down the channel. Ignore any failures; the - // send can only fail if the receiver is dropped. - let _ = tx.send(func_result); - }; - - // Send the function to the beacon processor for execution at some arbitrary time. 
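For the configurable duplicate-block response above, this is a hedged sketch of returning a JSON error body under an arbitrary status code with `warp`. `ErrorBody` is a hypothetical stand-in for Lighthouse's `ErrorMessage`, and `serde` with the derive feature is assumed.

use serde::Serialize;
use warp::http::StatusCode;
use warp::Reply;

#[derive(Serialize)]
struct ErrorBody {
    code: u16,
    message: String,
}

// Build a JSON reply whose status code is supplied by configuration.
fn duplicate_block_reply(status: StatusCode) -> warp::reply::Response {
    warp::reply::with_status(
        warp::reply::json(&ErrorBody {
            code: status.as_u16(),
            message: "duplicate block".to_string(),
        }),
        status,
    )
    .into_response()
}

fn main() {
    let resp = duplicate_block_reply(StatusCode::ACCEPTED);
    assert_eq!(resp.status(), StatusCode::ACCEPTED);
}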
- let result = send_to_beacon_processor( - beacon_processor_send, - priority, - BlockingOrAsync::Async(Box::pin(process_fn)), - rx, - ) - .await; - convert_rejection(result).await - } else { - // There is no beacon processor so spawn a task directly on the - // tokio executor. - tokio::task::spawn(func).await.unwrap_or_else(|e| { - warp::reply::with_status( - warp::reply::json(&format!("Tokio did not execute task: {e:?}")), - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response() - }) - } - } } /// Send a task to the beacon processor and await execution. diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index 0367776f8de..33834d58ca0 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -23,7 +23,7 @@ use network::{NetworkReceivers, NetworkSenders}; use sensitive_url::SensitiveUrl; use slog::Logger; use std::future::Future; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; use store::MemoryStore; @@ -184,7 +184,14 @@ pub async fn create_api_server_on_port( let eth1_service = eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); - let beacon_processor_config = BeaconProcessorConfig::default(); + let beacon_processor_config = BeaconProcessorConfig { + // The number of workers must be greater than one. Tests which use the + // builder workflow sometimes require an internal HTTP request in order + // to fulfill an already in-flight HTTP request, therefore having only + // one worker will result in a deadlock. + max_workers: 2, + ..BeaconProcessorConfig::default() + }; let BeaconProcessorChannels { beacon_processor_tx, beacon_processor_rx, @@ -196,11 +203,6 @@ pub async fn create_api_server_on_port( BeaconProcessor { network_globals: network_globals.clone(), executor: test_runtime.task_executor.clone(), - // The number of workers must be greater than one. Tests which use the - // builder workflow sometimes require an internal HTTP request in order - // to fulfill an already in-flight HTTP request, therefore having only - // one worker will result in a deadlock. 
- max_workers: 2, current_workers: 0, config: beacon_processor_config, log: log.clone(), @@ -218,14 +220,9 @@ pub async fn create_api_server_on_port( let ctx = Arc::new(Context { config: Config { enabled: true, - listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), listen_port: port, - allow_origin: None, - tls_config: None, - allow_sync_stalled: false, data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), - spec_fork_name: None, - enable_beacon_processor: true, + ..Config::default() }, chain: Some(chain), network_senders: Some(network_senders), diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 00825890009..96ff37d81af 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -364,13 +364,14 @@ pub async fn consensus_partial_pass_only_consensus() { /* submit `block_b` which should induce equivocation */ let channel = tokio::sync::mpsc::unbounded_channel(); - let publication_result: Result<(), Rejection> = publish_block( + let publication_result = publish_block( None, ProvenancedBlock::local(gossip_block_b.unwrap()), tester.harness.chain.clone(), &channel.0, test_logger, validation_level.unwrap(), + StatusCode::ACCEPTED, ) .await; @@ -641,13 +642,14 @@ pub async fn equivocation_consensus_late_equivocation() { let channel = tokio::sync::mpsc::unbounded_channel(); - let publication_result: Result<(), Rejection> = publish_block( + let publication_result = publish_block( None, ProvenancedBlock::local(gossip_block_b.unwrap()), tester.harness.chain, &channel.0, test_logger, validation_level.unwrap(), + StatusCode::ACCEPTED, ) .await; @@ -1294,12 +1296,13 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let channel = tokio::sync::mpsc::unbounded_channel(); - let publication_result: Result<(), Rejection> = publish_blinded_block( + let publication_result = publish_blinded_block( block_b, tester.harness.chain, &channel.0, test_logger, validation_level.unwrap(), + StatusCode::ACCEPTED, ) .await; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 3ae495378e3..adaf1a0f2d4 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,14 +1,14 @@ use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, - BeaconChain, StateSkipConfig, WhenSlotSkipped, + BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped, }; use environment::null_logger; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *}, - BeaconNodeHttpClient, Error, Timeouts, + BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; use execution_layer::test_utils::TestingBuilder; use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI; @@ -28,6 +28,7 @@ use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_slot_processing; +use state_processing::state_advance::partial_state_advance; use std::convert::TryInto; use std::sync::Arc; use tokio::time::Duration; @@ -77,6 +78,7 @@ struct ApiTester { struct ApiTesterConfig { spec: ChainSpec, + retain_historic_states: bool, builder_threshold: Option, } @@ -86,11 +88,19 @@ impl Default for ApiTesterConfig { 
spec.shard_committee_period = 2; Self { spec, + retain_historic_states: false, builder_threshold: None, } } } +impl ApiTesterConfig { + fn retain_historic_states(mut self) -> Self { + self.retain_historic_states = true; + self + } +} + impl ApiTester { pub async fn new() -> Self { // This allows for testing voluntary exits without building out a massive chain. @@ -118,6 +128,10 @@ impl ApiTester { let harness = Arc::new( BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) + .chain_config(ChainConfig { + reconstruct_historic_states: config.retain_historic_states, + ..ChainConfig::default() + }) .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() @@ -375,6 +389,7 @@ impl ApiTester { pub async fn new_mev_tester_no_builder_threshold() -> Self { let mut config = ApiTesterConfig { builder_threshold: Some(0), + retain_historic_states: false, spec: E::default_spec(), }; config.spec.altair_fork_epoch = Some(Epoch::new(0)); @@ -1303,6 +1318,63 @@ impl ApiTester { self } + pub async fn test_post_beacon_blocks_duplicate(self) -> Self { + let block = self + .harness + .make_block( + self.harness.get_current_state(), + self.harness.get_current_slot(), + ) + .await + .0; + + assert!(self.client.post_beacon_blocks(&block).await.is_ok()); + + let blinded_block = block.clone_as_blinded(); + + // Test all the POST methods in sequence, they should all behave the same. + let responses = vec![ + self.client.post_beacon_blocks(&block).await.unwrap_err(), + self.client + .post_beacon_blocks_v2(&block, None) + .await + .unwrap_err(), + self.client + .post_beacon_blocks_ssz(&block) + .await + .unwrap_err(), + self.client + .post_beacon_blocks_v2_ssz(&block, None) + .await + .unwrap_err(), + self.client + .post_beacon_blinded_blocks(&blinded_block) + .await + .unwrap_err(), + self.client + .post_beacon_blinded_blocks_v2(&blinded_block, None) + .await + .unwrap_err(), + self.client + .post_beacon_blinded_blocks_ssz(&blinded_block) + .await + .unwrap_err(), + self.client + .post_beacon_blinded_blocks_v2_ssz(&blinded_block, None) + .await + .unwrap_err(), + ]; + for (i, response) in responses.into_iter().enumerate() { + assert_eq!( + response.status().unwrap(), + StatusCode::ACCEPTED, + "response {i}" + ); + } + + self + } + pub async fn test_beacon_blocks(self) -> Self { for block_id in self.interesting_block_ids() { let expected = block_id @@ -4327,6 +4399,72 @@ impl ApiTester { self } + pub async fn test_get_expected_withdrawals_invalid_state(self) -> Self { + let state_id = CoreStateId::Root(Hash256::zero()); + + let result = self.client.get_expected_withdrawals(&state_id).await; + + match result { + Err(e) => { + assert_eq!(e.status().unwrap(), 404); + } + _ => panic!("query did not fail correctly"), + } + + self + } + + pub async fn test_get_expected_withdrawals_capella(self) -> Self { + let slot = self.chain.slot().unwrap(); + let state_id = CoreStateId::Slot(slot); + + // calculate the expected withdrawals + let (mut state, _, _) = StateId(state_id).state(&self.chain).unwrap(); + let proposal_slot = state.slot() + 1; + let proposal_epoch = proposal_slot.epoch(E::slots_per_epoch()); + let (state_root, _, _) = StateId(state_id).root(&self.chain).unwrap(); + if proposal_epoch != state.current_epoch() { + let _ = partial_state_advance( + &mut state, + Some(state_root), + proposal_slot, + &self.chain.spec, + ); + } + let expected_withdrawals = get_expected_withdrawals(&state, &self.chain.spec).unwrap(); + + // fetch expected withdrawals from the client + let 
result = self.client.get_expected_withdrawals(&state_id).await; + match result { + Ok(withdrawal_response) => { + assert_eq!(withdrawal_response.execution_optimistic, Some(false)); + assert_eq!(withdrawal_response.finalized, Some(false)); + assert_eq!(withdrawal_response.data, expected_withdrawals.to_vec()); + } + Err(e) => { + println!("{:?}", e); + panic!("query failed incorrectly"); + } + } + + self + } + + pub async fn test_get_expected_withdrawals_pre_capella(self) -> Self { + let state_id = CoreStateId::Head; + + let result = self.client.get_expected_withdrawals(&state_id).await; + + match result { + Err(e) => { + assert_eq!(e.status().unwrap(), 400); + } + _ => panic!("query did not fail correctly"), + } + + self + } + pub async fn test_get_events_altair(self) -> Self { let topics = vec![EventTopic::ContributionAndProof]; let mut events_future = self @@ -4570,6 +4708,14 @@ async fn post_beacon_blocks_invalid() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_beacon_blocks_duplicate() { + ApiTester::new() + .await + .test_post_beacon_blocks_duplicate() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_pools_post_attestations_valid() { ApiTester::new() @@ -4705,7 +4851,7 @@ async fn get_validator_duties_attester_with_skip_slots() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_duties_proposer() { - ApiTester::new() + ApiTester::new_from_config(ApiTesterConfig::default().retain_historic_states()) .await .test_get_validator_duties_proposer() .await; @@ -4713,7 +4859,7 @@ async fn get_validator_duties_proposer() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_duties_proposer_with_skip_slots() { - ApiTester::new() + ApiTester::new_from_config(ApiTesterConfig::default().retain_historic_states()) .await .skip_slots(E::slots_per_epoch() * 2) .test_get_validator_duties_proposer() @@ -5045,6 +5191,7 @@ async fn builder_payload_chosen_by_profit() { async fn builder_works_post_capella() { let mut config = ApiTesterConfig { builder_threshold: Some(0), + retain_historic_states: false, spec: E::default_spec(), }; config.spec.altair_fork_epoch = Some(Epoch::new(0)); @@ -5108,3 +5255,37 @@ async fn optimistic_responses() { .test_check_optimistic_responses() .await; } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn expected_withdrawals_invalid_pre_capella() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_get_expected_withdrawals_pre_capella() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn expected_withdrawals_invalid_state() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_get_expected_withdrawals_invalid_state() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn expected_withdrawals_valid_capella() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_get_expected_withdrawals_capella() + .await; +} diff --git 
a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index f71845fed25..925d278ad62 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -44,6 +44,7 @@ prometheus-client = "0.21.0" unused_port = { path = "../../common/unused_port" } delay_map = "0.3.0" void = "1" +libp2p-mplex = "0.40.0" [dependencies.libp2p] version = "0.52" diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 82a371d8a20..c3819f1eb94 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -647,7 +647,7 @@ impl Discovery { if subnet_queries.len() == MAX_SUBNETS_IN_QUERY || self.queued_queries.is_empty() { // This query is for searching for peers of a particular subnet // Drain subnet_queries so we can re-use it as we continue to process the queue - let grouped_queries: Vec = subnet_queries.drain(..).collect(); + let grouped_queries: Vec = std::mem::take(&mut subnet_queries); self.start_subnet_query(grouped_queries); processed = true; } diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 4f3454f4033..f6158ed3a0c 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -969,6 +969,7 @@ impl PeerManager { macro_rules! prune_peers { ($filter: expr) => { + let filter = $filter; for (peer_id, info) in self .network_globals .peers @@ -976,7 +977,7 @@ impl PeerManager { .worst_connected_peers() .iter() .filter(|(_, info)| { - !info.has_future_duty() && !info.is_trusted() && $filter(*info) + !info.has_future_duty() && !info.is_trusted() && filter(*info) }) { if peers_to_prune.len() diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 21fd09b6b0f..b8acc4ed6d5 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -50,13 +50,21 @@ pub fn build_transport( transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone)) }; + // mplex config + let mut mplex_config = libp2p_mplex::MplexConfig::new(); + mplex_config.set_max_buffer_size(256); + mplex_config.set_max_buffer_behaviour(libp2p_mplex::MaxBufferBehaviour::Block); + // yamux config let mut yamux_config = yamux::Config::default(); yamux_config.set_window_update_mode(yamux::WindowUpdateMode::on_read()); let (transport, bandwidth) = transport .upgrade(core::upgrade::Version::V1) .authenticate(generate_noise_config(&local_private_key)) - .multiplex(yamux_config) + .multiplex(core::upgrade::SelectUpgrade::new( + yamux_config, + mplex_config, + )) .timeout(Duration::from_secs(10)) .boxed() .with_bandwidth_logging(); diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index c37b0fa45d7..715e77bcade 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -36,7 +36,6 @@ logging = { path = "../../common/logging" } task_executor = { path = "../../common/task_executor" } igd = "0.12.1" itertools = "0.10.0" -num_cpus = "1.13.0" lru_cache = { path = "../../common/lru_cache" } if-addrs = "0.6.4" strum = "0.24.0" diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index cb4d6f9c2bd..ac7479db011 100644 --- 
a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -2062,7 +2062,7 @@ impl NetworkBeaconProcessor { ); } AttnError::BeaconChainError(BeaconChainError::DBError(Error::HotColdDBError( - HotColdDBError::AttestationStateIsFinalized { .. }, + HotColdDBError::FinalizedStateNotInHotDatabase { .. }, ))) => { debug!(self.log, "Attestation for finalized state"; "peer_id" => % peer_id); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index ce5b9141172..a678edbf1ff 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -20,7 +20,6 @@ use lighthouse_network::{ Client, MessageId, NetworkGlobals, PeerId, }; use slot_clock::SlotClock; -use std::cmp; use std::iter::Iterator; use std::sync::Arc; use std::time::Duration; @@ -228,7 +227,6 @@ impl TestRig { let beacon_processor = BeaconProcessor { network_globals, executor, - max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, config: beacon_processor_config, log: log.clone(), diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index b4f52df39d3..1cae6299e1c 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -302,9 +302,16 @@ impl AttestationService { /// Gets the long lived subnets the node should be subscribed to during the current epoch and /// the remaining duration for which they remain valid. fn recompute_long_lived_subnets_inner(&mut self) -> Result { - let current_epoch = self.beacon_chain.epoch().map_err( - |e| error!(self.log, "Failed to get the current epoch from clock"; "err" => ?e), - )?; + let current_epoch = self.beacon_chain.epoch().map_err(|e| { + if !self + .beacon_chain + .slot_clock + .is_prior_to_genesis() + .unwrap_or(false) + { + error!(self.log, "Failed to get the current epoch from clock"; "err" => ?e) + } + })?; let (subnets, next_subscription_epoch) = SubnetId::compute_subnets_for_epoch::( self.node_id.raw().into(), diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 0330bd3f7cc..837625e12a7 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -382,6 +382,24 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { stalled. This is useful for very small testnets. TESTING ONLY. DO NOT USE ON \ MAINNET.") ) + .arg( + Arg::with_name("http-sse-capacity-multiplier") + .long("http-sse-capacity-multiplier") + .takes_value(true) + .default_value("1") + .value_name("N") + .help("Multiplier to apply to the length of HTTP server-sent-event (SSE) channels. \ + Increasing this value can prevent messages from being dropped.") + ) + .arg( + Arg::with_name("http-duplicate-block-status") + .long("http-duplicate-block-status") + .takes_value(true) + .default_value("202") + .value_name("STATUS_CODE") + .help("Status code to send when a block that is already known is POSTed to the \ + HTTP API.") + ) .arg( Arg::with_name("http-enable-beacon-processor") .long("http-enable-beacon-processor") @@ -1116,7 +1134,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("gui") .long("gui") - .hidden(true) .help("Enable the graphical user interface and all its requirements. 
\ This enables --http and --validator-monitor-auto and enables SSE logging.") .takes_value(false) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 21df86620df..70495777e28 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -149,9 +149,15 @@ pub fn get_config( client_config.http_api.allow_sync_stalled = true; } + client_config.http_api.sse_capacity_multiplier = + parse_required(cli_args, "http-sse-capacity-multiplier")?; + client_config.http_api.enable_beacon_processor = parse_required(cli_args, "http-enable-beacon-processor")?; + client_config.http_api.duplicate_block_status_code = + parse_required(cli_args, "http-duplicate-block-status")?; + if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? { client_config.chain.shuffling_cache_size = cache_size; } @@ -348,6 +354,9 @@ pub fn get_config( el_config.default_datadir = client_config.data_dir().clone(); el_config.builder_profit_threshold = clap_utils::parse_required(cli_args, "builder-profit-threshold")?; + el_config.always_prefer_builder_payload = + cli_args.is_present("always-prefer-builder-payload"); + let execution_timeout_multiplier = clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?; el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier); @@ -462,9 +471,30 @@ pub fn get_config( client_config.chain.checkpoint_sync_url_timeout = clap_utils::parse_required::(cli_args, "checkpoint-sync-url-timeout")?; - client_config.genesis = if let Some(genesis_state_bytes) = - eth2_network_config.genesis_state_bytes.clone() - { + client_config.genesis_state_url_timeout = + clap_utils::parse_required(cli_args, "genesis-state-url-timeout") + .map(Duration::from_secs)?; + + let genesis_state_url_opt = + clap_utils::parse_optional::(cli_args, "genesis-state-url")?; + let checkpoint_sync_url_opt = + clap_utils::parse_optional::(cli_args, "checkpoint-sync-url")?; + + // If the `--genesis-state-url` is defined, use that to download the + // genesis state bytes. If it's not defined, try `--checkpoint-sync-url`. + client_config.genesis_state_url = if let Some(genesis_state_url) = genesis_state_url_opt { + Some(genesis_state_url) + } else if let Some(checkpoint_sync_url) = checkpoint_sync_url_opt { + // If the checkpoint sync URL is going to be used to download the + // genesis state, adopt the timeout from the checkpoint sync URL too. + client_config.genesis_state_url_timeout = + Duration::from_secs(client_config.chain.checkpoint_sync_url_timeout); + Some(checkpoint_sync_url) + } else { + None + }; + + client_config.genesis = if eth2_network_config.genesis_state_is_known() { // Set up weak subjectivity sync, or start from the hardcoded genesis state. if let (Some(initial_state_path), Some(initial_block_path)) = ( cli_args.value_of("checkpoint-state"), @@ -486,7 +516,6 @@ pub fn get_config( let anchor_block_bytes = read(initial_block_path)?; ClientGenesis::WeakSubjSszBytes { - genesis_state_bytes, anchor_state_bytes, anchor_block_bytes, } @@ -494,17 +523,9 @@ pub fn get_config( let url = SensitiveUrl::parse(remote_bn_url) .map_err(|e| format!("Invalid checkpoint sync URL: {:?}", e))?; - ClientGenesis::CheckpointSyncUrl { - genesis_state_bytes, - url, - } + ClientGenesis::CheckpointSyncUrl { url } } else { - // Note: re-serializing the genesis state is not so efficient, however it avoids adding - // trait bounds to the `ClientGenesis` enum. This would have significant flow-on - // effects. 
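The CLI handling above prefers `--genesis-state-url` and falls back to `--checkpoint-sync-url`, adopting the checkpoint timeout in the fallback case. Below is a minimal sketch of that precedence only, using plain `String` and `Duration` values rather than the real `ClientConfig` fields; it is not the actual implementation.

use std::time::Duration;

// Illustrative only: choose the genesis-state download URL and its timeout.
fn select_genesis_state_url(
    genesis_url: Option<String>,
    checkpoint_url: Option<String>,
    checkpoint_timeout_secs: u64,
    default_timeout: Duration,
) -> (Option<String>, Duration) {
    if let Some(url) = genesis_url {
        (Some(url), default_timeout)
    } else if let Some(url) = checkpoint_url {
        // When falling back to the checkpoint sync URL, adopt its timeout too.
        (Some(url), Duration::from_secs(checkpoint_timeout_secs))
    } else {
        (None, default_timeout)
    }
}

fn main() {
    let (url, timeout) = select_genesis_state_url(
        None,
        Some("https://example.invalid".to_string()),
        180,
        Duration::from_secs(60),
    );
    assert_eq!(url.as_deref(), Some("https://example.invalid"));
    assert_eq!(timeout, Duration::from_secs(180));
}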
- ClientGenesis::SszBytes { - genesis_state_bytes, - } + ClientGenesis::GenesisState } } else { if cli_args.is_present("checkpoint-state") || cli_args.is_present("checkpoint-sync-url") { @@ -798,10 +819,6 @@ pub fn get_config( if cli_args.is_present("genesis-backfill") { client_config.chain.genesis_backfill = true; } - // Payload selection configs - if cli_args.is_present("always-prefer-builder-payload") { - client_config.always_prefer_builder_payload = true; - } // Backfill sync rate-limiting client_config.beacon_processor.enable_backfill_rate_limiting = diff --git a/beacon_node/store/src/chunked_iter.rs b/beacon_node/store/src/chunked_iter.rs index 8ef0b6d201d..b3322b5225d 100644 --- a/beacon_node/store/src/chunked_iter.rs +++ b/beacon_node/store/src/chunked_iter.rs @@ -30,16 +30,16 @@ where /// Create a new iterator which can yield elements from `start_vindex` up to the last /// index stored by the restore point at `last_restore_point_slot`. /// - /// The `last_restore_point` slot should be the slot of a recent restore point as obtained from - /// `HotColdDB::get_latest_restore_point_slot`. We pass it as a parameter so that the caller can + /// The `freezer_upper_limit` slot should be the slot of a recent restore point as obtained from + /// `Root::freezer_upper_limit`. We pass it as a parameter so that the caller can /// maintain a stable view of the database (see `HybridForwardsBlockRootsIterator`). pub fn new( store: &'a HotColdDB, start_vindex: usize, - last_restore_point_slot: Slot, + freezer_upper_limit: Slot, spec: &ChainSpec, ) -> Self { - let (_, end_vindex) = F::start_and_end_vindex(last_restore_point_slot, spec); + let (_, end_vindex) = F::start_and_end_vindex(freezer_upper_limit, spec); // Set the next chunk to the one containing `start_vindex`. let next_cindex = start_vindex / F::chunk_size(); diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index 73edfbb0744..537614f2817 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -299,7 +299,8 @@ macro_rules! field { } fn update_pattern(spec: &ChainSpec) -> UpdatePattern { - $update_pattern(spec) + let update_pattern = $update_pattern; + update_pattern(spec) } fn get_value( @@ -307,7 +308,8 @@ macro_rules! field { vindex: u64, spec: &ChainSpec, ) -> Result { - $get_value(state, vindex, spec) + let get_value = $get_value; + get_value(state, vindex, spec) } fn is_fixed_length() -> bool { diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index 353be6bf058..125b73a458f 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -19,6 +19,14 @@ pub trait Root: Field { end_state: BeaconState, end_root: Hash256, ) -> Result; + + /// The first slot for which this field is *no longer* stored in the freezer database. + /// + /// If `None`, then this field is not stored in the freezer database at all due to pruning + /// configuration. + fn freezer_upper_limit, Cold: ItemStore>( + store: &HotColdDB, + ) -> Option; } impl Root for BlockRoots { @@ -39,6 +47,13 @@ impl Root for BlockRoots { )?; Ok(SimpleForwardsIterator { values }) } + + fn freezer_upper_limit, Cold: ItemStore>( + store: &HotColdDB, + ) -> Option { + // Block roots are stored for all slots up to the split slot (exclusive). 
+ Some(store.get_split_slot()) + } } impl Root for StateRoots { @@ -59,6 +74,15 @@ impl Root for StateRoots { )?; Ok(SimpleForwardsIterator { values }) } + + fn freezer_upper_limit, Cold: ItemStore>( + store: &HotColdDB, + ) -> Option { + // State roots are stored for all slots up to the latest restore point (exclusive). + // There may not be a latest restore point if state pruning is enabled, in which + // case this function will return `None`. + store.get_latest_restore_point_slot() + } } /// Forwards root iterator that makes use of a flat field table in the freezer DB. @@ -118,6 +142,7 @@ impl Iterator for SimpleForwardsIterator { pub enum HybridForwardsIterator<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> { PreFinalization { iter: Box>, + end_slot: Option, /// Data required by the `PostFinalization` iterator when we get to it. continuation_data: Option, Hash256)>>, }, @@ -129,6 +154,7 @@ pub enum HybridForwardsIterator<'a, E: EthSpec, F: Root, Hot: ItemStore, C PostFinalization { iter: SimpleForwardsIterator, }, + Finished, } impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> @@ -138,8 +164,8 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> /// /// The `get_state` closure should return a beacon state and final block/state root to backtrack /// from in the case where the iterated range does not lie entirely within the frozen portion of - /// the database. If an `end_slot` is provided and it is before the database's latest restore - /// point slot then the `get_state` closure will not be called at all. + /// the database. If an `end_slot` is provided and it is before the database's freezer upper + /// limit for the field then the `get_state` closure will not be called at all. /// /// It is OK for `get_state` to hold a lock while this function is evaluated, as the returned /// iterator is as lazy as possible and won't do any work apart from calling `get_state`. @@ -155,13 +181,15 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> ) -> Result { use HybridForwardsIterator::*; - let latest_restore_point_slot = store.get_latest_restore_point_slot(); + // First slot at which this field is *not* available in the freezer. i.e. all slots less + // than this slot have their data available in the freezer. + let freezer_upper_limit = F::freezer_upper_limit(store).unwrap_or(Slot::new(0)); - let result = if start_slot < latest_restore_point_slot { + let result = if start_slot < freezer_upper_limit { let iter = Box::new(FrozenForwardsIterator::new( store, start_slot, - latest_restore_point_slot, + freezer_upper_limit, spec, )); @@ -169,13 +197,14 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> // `end_slot`. If it tries to continue further a `NoContinuationData` error will be // returned. let continuation_data = - if end_slot.map_or(false, |end_slot| end_slot < latest_restore_point_slot) { + if end_slot.map_or(false, |end_slot| end_slot < freezer_upper_limit) { None } else { Some(Box::new(get_state())) }; PreFinalization { iter, + end_slot, continuation_data, } } else { @@ -195,6 +224,7 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> match self { PreFinalization { iter, + end_slot, continuation_data, } => { match iter.next() { @@ -203,10 +233,17 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> // to a post-finalization iterator beginning from the last slot // of the pre iterator. 
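The reworked `HybridForwardsIterator::new` above only invokes the potentially expensive `get_state` closure when the requested range can outrun the freezer. Reduced to plain integers for slots, the decision looks roughly like this (illustrative helper, not the crate's API):

```rust
/// Whether the hot-DB continuation state is needed for a forwards iteration
/// from `start_slot` to the inclusive `end_slot`, given the field's exclusive
/// freezer upper limit (`None` meaning "nothing in the freezer").
fn needs_hot_state(
    start_slot: u64,
    end_slot: Option<u64>,
    freezer_upper_limit: Option<u64>,
) -> bool {
    let limit = freezer_upper_limit.unwrap_or(0);
    if start_slot < limit {
        // The frozen iterator covers the start; the hot state is only needed
        // if the inclusive end slot is not strictly below the exclusive limit.
        !end_slot.map_or(false, |end| end < limit)
    } else {
        // Range starts at or after the limit: everything comes from the hot DB.
        true
    }
}

fn main() {
    assert!(!needs_hot_state(0, Some(99), Some(100)));
    assert!(needs_hot_state(0, Some(100), Some(100)));
    assert!(needs_hot_state(200, Some(300), Some(100)));
}
```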
None => { + // If the iterator has an end slot (inclusive) which has already been + // covered by the (exclusive) frozen forwards iterator, then we're done! + let iter_end_slot = Slot::from(iter.inner.end_vindex); + if end_slot.map_or(false, |end_slot| iter_end_slot == end_slot + 1) { + *self = Finished; + return Ok(None); + } + let continuation_data = continuation_data.take(); let store = iter.inner.store; - let start_slot = Slot::from(iter.inner.end_vindex); - + let start_slot = iter_end_slot; *self = PostFinalizationLazy { continuation_data, store, @@ -230,6 +267,7 @@ impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> self.do_next() } PostFinalization { iter } => iter.next().transpose(), + Finished => Ok(None), } } } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 7695ea520e8..87f8e0ffc36 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -14,11 +14,11 @@ use crate::memory_store::MemoryStore; use crate::metadata::{ AnchorInfo, CompactionTimestamp, PruningCheckpoint, SchemaVersion, ANCHOR_INFO_KEY, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, PRUNING_CHECKPOINT_KEY, - SCHEMA_VERSION_KEY, SPLIT_KEY, + SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, }; use crate::metrics; use crate::{ - get_key_for_col, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, + get_key_for_col, ChunkWriter, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, PartialBeaconState, StoreItem, StoreOp, }; use itertools::process_results; @@ -110,10 +110,10 @@ pub enum HotColdDBError { IterationError { unexpected_key: BytesKey, }, - AttestationStateIsFinalized { + FinalizedStateNotInHotDatabase { split_slot: Slot, - request_slot: Option, - state_root: Hash256, + request_slot: Slot, + block_root: Hash256, }, } @@ -545,7 +545,7 @@ impl, Cold: ItemStore> HotColdDB /// upon that state (e.g., state roots). Additionally, only states from the hot store are /// returned. /// - /// See `Self::get_state` for information about `slot`. + /// See `Self::get_advanced_hot_state` for information about `max_slot`. /// /// ## Warning /// @@ -557,23 +557,78 @@ impl, Cold: ItemStore> HotColdDB /// - `state.block_roots` pub fn get_inconsistent_state_for_attestation_verification_only( &self, - state_root: &Hash256, - slot: Option, - ) -> Result>, Error> { + block_root: &Hash256, + max_slot: Slot, + state_root: Hash256, + ) -> Result)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT); + self.get_advanced_hot_state_with_strategy( + *block_root, + max_slot, + state_root, + StateProcessingStrategy::Inconsistent, + ) + } - let split_slot = self.get_split_slot(); + /// Get a state with `latest_block_root == block_root` advanced through to at most `max_slot`. + /// + /// The `state_root` argument is used to look up the block's un-advanced state in case an + /// advanced state is not found. + /// + /// Return the `(result_state_root, state)` satisfying: + /// + /// - `result_state_root == state.canonical_root()` + /// - `state.slot() <= max_slot` + /// - `state.get_latest_block_root(result_state_root) == block_root` + /// + /// Presently this is only used to avoid loading the un-advanced split state, but in future will + /// be expanded to return states from an in-memory cache. 
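The root substitution performed by the function that follows can be summarised in isolation: when the caller asks for the split point's block, the (advanced) split state is loaded instead of the block's own post state, which may have been pruned. A sketch with byte arrays standing in for `Hash256` (illustrative only):

```rust
type Root = [u8; 32];

/// Pick which state root to load in `get_advanced_hot_state_with_strategy`-style
/// logic: prefer the advanced split state when the request targets the split block.
fn pick_hot_state_root(
    block_root: Root,
    state_root: Root,
    max_slot: u64,
    split_slot: u64,
    split_block_root: Root,
    split_state_root: Root,
) -> Root {
    if block_root == split_block_root && split_slot <= max_slot {
        split_state_root
    } else {
        state_root
    }
}

fn main() {
    let split_block = [1u8; 32];
    let split_state = [2u8; 32];
    let unadvanced_state = [3u8; 32];
    // A request for the split block itself resolves to the advanced split state.
    assert_eq!(
        pick_hot_state_root([1u8; 32], unadvanced_state, 100, 96, split_block, split_state),
        split_state
    );
}
```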
+ pub fn get_advanced_hot_state( + &self, + block_root: Hash256, + max_slot: Slot, + state_root: Hash256, + ) -> Result)>, Error> { + self.get_advanced_hot_state_with_strategy( + block_root, + max_slot, + state_root, + StateProcessingStrategy::Accurate, + ) + } + + /// Same as `get_advanced_hot_state` but taking a `StateProcessingStrategy`. + pub fn get_advanced_hot_state_with_strategy( + &self, + block_root: Hash256, + max_slot: Slot, + state_root: Hash256, + state_processing_strategy: StateProcessingStrategy, + ) -> Result)>, Error> { + // Hold a read lock on the split point so it can't move while we're trying to load the + // state. + let split = self.split.read_recursive(); - if slot.map_or(false, |slot| slot < split_slot) { - Err(HotColdDBError::AttestationStateIsFinalized { - split_slot, - request_slot: slot, - state_root: *state_root, + // Sanity check max-slot against the split slot. + if max_slot < split.slot { + return Err(HotColdDBError::FinalizedStateNotInHotDatabase { + split_slot: split.slot, + request_slot: max_slot, + block_root, } - .into()) - } else { - self.load_hot_state(state_root, StateProcessingStrategy::Inconsistent) + .into()); } + + let state_root = if block_root == split.block_root && split.slot <= max_slot { + split.state_root + } else { + state_root + }; + let state = self + .load_hot_state(&state_root, state_processing_strategy)? + .map(|state| (state_root, state)); + drop(split); + Ok(state) } /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk. @@ -908,6 +963,9 @@ impl, Cold: ItemStore> HotColdDB ops.push(op); // 2. Store updated vector entries. + // Block roots need to be written here as well as by the `ChunkWriter` in `migrate_db` + // because states may require older block roots, and the writer only stores block roots + // between the previous split point and the new split point. let db = &self.cold_db; store_updated_vector(BlockRoots, db, state, &self.spec, ops)?; store_updated_vector(StateRoots, db, state, &self.spec, ops)?; @@ -1180,14 +1238,29 @@ impl, Cold: ItemStore> HotColdDB *self.split.read_recursive() } - pub fn set_split(&self, slot: Slot, state_root: Hash256) { - *self.split.write() = Split { slot, state_root }; + pub fn set_split(&self, slot: Slot, state_root: Hash256, block_root: Hash256) { + *self.split.write() = Split { + slot, + state_root, + block_root, + }; } - /// Fetch the slot of the most recently stored restore point. - pub fn get_latest_restore_point_slot(&self) -> Slot { - (self.get_split_slot() - 1) / self.config.slots_per_restore_point - * self.config.slots_per_restore_point + /// Fetch the slot of the most recently stored restore point (if any). + pub fn get_latest_restore_point_slot(&self) -> Option { + let split_slot = self.get_split_slot(); + let anchor = self.get_anchor_info(); + + // There are no restore points stored if the state upper limit lies in the hot database. + // It hasn't been reached yet, and may never be. + if anchor.map_or(false, |a| a.state_upper_limit >= split_slot) { + None + } else { + Some( + (split_slot - 1) / self.config.slots_per_restore_point + * self.config.slots_per_restore_point, + ) + } } /// Load the database schema version from disk. @@ -1216,25 +1289,36 @@ impl, Cold: ItemStore> HotColdDB } /// Initialise the anchor info for checkpoint sync starting from `block`. 
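The `init_anchor_info` change that follows chooses `state_upper_limit` based on whether historic states are retained at all. Just that branch, as a standalone sketch (the constant mirrors `metadata.rs`; the function itself is illustrative):

```rust
/// Slot value meaning "historic states are never stored" (mirrors
/// `STATE_UPPER_LIMIT_NO_RETAIN` in `metadata.rs`).
const STATE_UPPER_LIMIT_NO_RETAIN: u64 = u64::MAX;

/// Choose the first slot at which full states will be stored in the freezer.
fn state_upper_limit(
    anchor_slot: u64,
    slots_per_restore_point: u64,
    retain_historic_states: bool,
) -> u64 {
    if !retain_historic_states {
        STATE_UPPER_LIMIT_NO_RETAIN
    } else if anchor_slot % slots_per_restore_point == 0 {
        anchor_slot
    } else {
        // Round up to the *next* restore point boundary.
        (anchor_slot / slots_per_restore_point + 1) * slots_per_restore_point
    }
}

fn main() {
    assert_eq!(state_upper_limit(5000, 2048, true), 6144);
    assert_eq!(state_upper_limit(4096, 2048, true), 4096);
    assert_eq!(state_upper_limit(5000, 2048, false), u64::MAX);
}
```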
- pub fn init_anchor_info(&self, block: BeaconBlockRef<'_, E>) -> Result { + pub fn init_anchor_info( + &self, + block: BeaconBlockRef<'_, E>, + retain_historic_states: bool, + ) -> Result { let anchor_slot = block.slot(); let slots_per_restore_point = self.config.slots_per_restore_point; - // Set the `state_upper_limit` to the slot of the *next* restore point. - // See `get_state_upper_limit` for rationale. - let next_restore_point_slot = if anchor_slot % slots_per_restore_point == 0 { + let state_upper_limit = if !retain_historic_states { + STATE_UPPER_LIMIT_NO_RETAIN + } else if anchor_slot % slots_per_restore_point == 0 { anchor_slot } else { + // Set the `state_upper_limit` to the slot of the *next* restore point. + // See `get_state_upper_limit` for rationale. (anchor_slot / slots_per_restore_point + 1) * slots_per_restore_point }; - let anchor_info = AnchorInfo { - anchor_slot, - oldest_block_slot: anchor_slot, - oldest_block_parent: block.parent_root(), - state_upper_limit: next_restore_point_slot, - state_lower_limit: self.spec.genesis_slot, + let anchor_info = if state_upper_limit == 0 && anchor_slot == 0 { + // Genesis archive node: no anchor because we *will* store all states. + None + } else { + Some(AnchorInfo { + anchor_slot, + oldest_block_slot: anchor_slot, + oldest_block_parent: block.parent_root(), + state_upper_limit, + state_lower_limit: self.spec.genesis_slot, + }) }; - self.compare_and_set_anchor_info(None, Some(anchor_info)) + self.compare_and_set_anchor_info(None, anchor_info) } /// Get a clone of the store's anchor info. @@ -1361,11 +1445,26 @@ impl, Cold: ItemStore> HotColdDB self.hot_db.put(&CONFIG_KEY, &self.config.as_disk_config()) } - /// Load the split point from disk. - fn load_split(&self) -> Result, Error> { + /// Load the split point from disk, sans block root. + fn load_split_partial(&self) -> Result, Error> { self.hot_db.get(&SPLIT_KEY) } + /// Load the split point from disk, including block root. + fn load_split(&self) -> Result, Error> { + match self.load_split_partial()? { + Some(mut split) => { + // Load the hot state summary to get the block root. + let summary = self.load_hot_state_summary(&split.state_root)?.ok_or( + HotColdDBError::MissingSplitState(split.state_root, split.slot), + )?; + split.block_root = summary.latest_block_root; + Ok(Some(split)) + } + None => Ok(None), + } + } + /// Stage the split for storage to disk. pub fn store_split_in_batch(&self) -> KeyValueStoreOp { self.split.read_recursive().as_kv_store_op(SPLIT_KEY) @@ -1500,6 +1599,25 @@ impl, Cold: ItemStore> HotColdDB ) } + /// Update the linear array of frozen block roots with the block root for several skipped slots. + /// + /// Write the block root at all slots from `start_slot` (inclusive) to `end_slot` (exclusive). + pub fn store_frozen_block_root_at_skip_slots( + &self, + start_slot: Slot, + end_slot: Slot, + block_root: Hash256, + ) -> Result, Error> { + let mut ops = vec![]; + let mut block_root_writer = + ChunkWriter::::new(&self.cold_db, start_slot.as_usize())?; + for slot in start_slot.as_usize()..end_slot.as_usize() { + block_root_writer.set(slot, block_root, &mut ops)?; + } + block_root_writer.write(&mut ops)?; + Ok(ops) + } + /// Try to prune all execution payloads, returning early if there is no need to prune. 
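`store_frozen_block_root_at_skip_slots` (above) fills the linear freezer array for skipped slots: every slot in the half-open range gets the root of the last non-skipped block. A toy model of that fill, without the real `ChunkWriter`:

```rust
/// Produce the (slot, block_root) pairs written for a run of skipped slots,
/// i.e. the same root at every slot in `[start_slot, end_slot)`.
fn frozen_block_roots_for_skips(
    start_slot: u64,
    end_slot: u64,
    block_root: [u8; 32],
) -> Vec<(u64, [u8; 32])> {
    (start_slot..end_slot).map(|slot| (slot, block_root)).collect()
}

fn main() {
    let ops = frozen_block_roots_for_skips(10, 13, [7u8; 32]);
    assert_eq!(ops.len(), 3);
    assert!(ops.iter().all(|(_, root)| *root == [7u8; 32]));
}
```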
pub fn try_prune_execution_payloads(&self, force: bool) -> Result<(), Error> { let split = self.get_split_info(); @@ -1611,42 +1729,47 @@ impl, Cold: ItemStore> HotColdDB /// Advance the split point of the store, moving new finalized states to the freezer. pub fn migrate_database, Cold: ItemStore>( store: Arc>, - frozen_head_root: Hash256, - frozen_head: &BeaconState, + finalized_state_root: Hash256, + finalized_block_root: Hash256, + finalized_state: &BeaconState, ) -> Result<(), Error> { debug!( store.log, "Freezer migration started"; - "slot" => frozen_head.slot() + "slot" => finalized_state.slot() ); // 0. Check that the migration is sensible. - // The new frozen head must increase the current split slot, and lie on an epoch + // The new finalized state must increase the current split slot, and lie on an epoch // boundary (in order for the hot state summary scheme to work). let current_split_slot = store.split.read_recursive().slot; - let anchor_slot = store - .anchor_info - .read_recursive() - .as_ref() - .map(|a| a.anchor_slot); + let anchor_info = store.anchor_info.read_recursive().clone(); + let anchor_slot = anchor_info.as_ref().map(|a| a.anchor_slot); - if frozen_head.slot() < current_split_slot { + if finalized_state.slot() < current_split_slot { return Err(HotColdDBError::FreezeSlotError { current_split_slot, - proposed_split_slot: frozen_head.slot(), + proposed_split_slot: finalized_state.slot(), } .into()); } - if frozen_head.slot() % E::slots_per_epoch() != 0 { - return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot()).into()); + if finalized_state.slot() % E::slots_per_epoch() != 0 { + return Err(HotColdDBError::FreezeSlotUnaligned(finalized_state.slot()).into()); } - let mut hot_db_ops: Vec> = Vec::new(); + let mut hot_db_ops = vec![]; + let mut cold_db_ops = vec![]; + + // Chunk writer for the linear block roots in the freezer DB. + // Start at the new upper limit because we iterate backwards. + let new_frozen_block_root_upper_limit = finalized_state.slot().as_usize().saturating_sub(1); + let mut block_root_writer = + ChunkWriter::::new(&store.cold_db, new_frozen_block_root_upper_limit)?; - // 1. Copy all of the states between the head and the split slot, from the hot DB + // 1. Copy all of the states between the new finalized state and the split slot, from the hot DB // to the cold DB. Delete the execution payloads of these now-finalized blocks. - let state_root_iter = RootsIterator::new(&store, frozen_head); + let state_root_iter = RootsIterator::new(&store, finalized_state); for maybe_tuple in state_root_iter.take_while(|result| match result { Ok((_, _, slot)) => { slot >= ¤t_split_slot @@ -1656,13 +1779,31 @@ pub fn migrate_database, Cold: ItemStore>( }) { let (block_root, state_root, slot) = maybe_tuple?; - let mut cold_db_ops: Vec = Vec::new(); + // Delete the execution payload if payload pruning is enabled. At a skipped slot we may + // delete the payload for the finalized block itself, but that's OK as we only guarantee + // that payloads are present for slots >= the split slot. The payload fetching code is also + // forgiving of missing payloads. + if store.config.prune_payloads { + hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root)); + } - if slot % store.config.slots_per_restore_point == 0 { - let state: BeaconState = get_full_state(&store.hot_db, &state_root, &store.spec)? - .ok_or(HotColdDBError::MissingStateToFreeze(state_root))?; + // Delete the old summary, and the full state if we lie on an epoch boundary. 
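The sanity checks at the top of `migrate_database` guard two invariants: the split point never moves backwards, and the new split slot is epoch-aligned so the hot state summary scheme keeps working. A toy version of those checks (illustrative error strings, not the real `HotColdDBError` variants):

```rust
/// Validate a proposed freezer migration slot against the current split point.
fn check_freeze_slot(
    finalized_slot: u64,
    current_split_slot: u64,
    slots_per_epoch: u64,
) -> Result<(), String> {
    if finalized_slot < current_split_slot {
        return Err(format!(
            "proposed split slot {finalized_slot} is before current split slot {current_split_slot}"
        ));
    }
    if finalized_slot % slots_per_epoch != 0 {
        return Err(format!("slot {finalized_slot} is not epoch-aligned"));
    }
    Ok(())
}

fn main() {
    assert!(check_freeze_slot(160, 128, 32).is_ok());
    assert!(check_freeze_slot(96, 128, 32).is_err());
    assert!(check_freeze_slot(150, 128, 32).is_err());
}
```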
+ hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); - store.store_cold_state(&state_root, &state, &mut cold_db_ops)?; + // Store the block root for this slot in the linear array of frozen block roots. + block_root_writer.set(slot.as_usize(), block_root, &mut cold_db_ops)?; + + // Do not try to store states if a restore point is yet to be stored, or will never be + // stored (see `STATE_UPPER_LIMIT_NO_RETAIN`). Make an exception for the genesis state + // which always needs to be copied from the hot DB to the freezer and should not be deleted. + if slot != 0 + && anchor_info + .as_ref() + .map_or(false, |anchor| slot < anchor.state_upper_limit) + { + debug!(store.log, "Pruning finalized state"; "slot" => slot); + + continue; } // Store a pointer from this state root to its slot, so we can later reconstruct states @@ -1671,22 +1812,24 @@ pub fn migrate_database, Cold: ItemStore>( let op = cold_state_summary.as_kv_store_op(state_root); cold_db_ops.push(op); - // There are data dependencies between calls to `store_cold_state()` that prevent us from - // doing one big call to `store.cold_db.do_atomically()` at end of the loop. - store.cold_db.do_atomically(cold_db_ops)?; + if slot % store.config.slots_per_restore_point == 0 { + let state: BeaconState = get_full_state(&store.hot_db, &state_root, &store.spec)? + .ok_or(HotColdDBError::MissingStateToFreeze(state_root))?; - // Delete the old summary, and the full state if we lie on an epoch boundary. - hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); + store.store_cold_state(&state_root, &state, &mut cold_db_ops)?; - // Delete the execution payload if payload pruning is enabled. At a skipped slot we may - // delete the payload for the finalized block itself, but that's OK as we only guarantee - // that payloads are present for slots >= the split slot. The payload fetching code is also - // forgiving of missing payloads. - if store.config.prune_payloads { - hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root)); + // Commit the batch of cold DB ops whenever a full state is written. Each state stored + // may read the linear fields of previous states stored. + store + .cold_db + .do_atomically(std::mem::take(&mut cold_db_ops))?; } } + // Finish writing the block roots and commit the remaining cold DB ops. + block_root_writer.write(&mut cold_db_ops)?; + store.cold_db.do_atomically(cold_db_ops)?; + // Warning: Critical section. We have to take care not to put any of the two databases in an // inconsistent state if the OS process dies at any point during the freezeing // procedure. @@ -1724,8 +1867,9 @@ pub fn migrate_database, Cold: ItemStore>( // Before updating the in-memory split value, we flush it to disk first, so that should the // OS process die at this point, we pick up from the right place after a restart. let split = Split { - slot: frozen_head.slot(), - state_root: frozen_head_root, + slot: finalized_state.slot(), + state_root: finalized_state_root, + block_root: finalized_block_root, }; store.hot_db.put_sync(&SPLIT_KEY, &split)?; @@ -1741,7 +1885,7 @@ pub fn migrate_database, Cold: ItemStore>( debug!( store.log, "Freezer migration complete"; - "slot" => frozen_head.slot() + "slot" => finalized_state.slot() ); Ok(()) @@ -1750,8 +1894,16 @@ pub fn migrate_database, Cold: ItemStore>( /// Struct for storing the split slot and state root in the database. 
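Inside the migration loop above, a finalized state is only copied to the freezer if it lies at or above the anchor's state upper limit; everything below is pruned, with the genesis state as the one exception. The per-slot decision, reduced to its essentials:

```rust
/// Whether a finalized state at `slot` should be pruned rather than stored.
/// `state_upper_limit` is `None` for a genesis-synced archive node with no anchor.
fn should_prune_finalized_state(slot: u64, state_upper_limit: Option<u64>) -> bool {
    // Genesis is always copied to the freezer; otherwise prune anything below
    // the anchor's state upper limit.
    slot != 0 && state_upper_limit.map_or(false, |limit| slot < limit)
}

fn main() {
    assert!(should_prune_finalized_state(100, Some(u64::MAX)));
    assert!(!should_prune_finalized_state(0, Some(u64::MAX)));
    assert!(!should_prune_finalized_state(100, None));
}
```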
#[derive(Debug, Clone, Copy, PartialEq, Default, Encode, Decode, Deserialize, Serialize)] pub struct Split { - pub(crate) slot: Slot, - pub(crate) state_root: Hash256, + pub slot: Slot, + pub state_root: Hash256, + /// The block root of the split state. + /// + /// This is used to provide special handling for the split state in the case where there are + /// skipped slots. The split state will *always* be the advanced state, so callers + /// who only have the finalized block root should use `get_advanced_hot_state` to get this state, + /// rather than fetching `block.state_root()` (the unaligned state) which will have been pruned. + #[ssz(skip_serializing, skip_deserializing)] + pub block_root: Hash256, } impl StoreItem for Split { diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 86bd4ffaccd..7aac9f72d91 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -167,7 +167,7 @@ impl KeyValueStore for LevelDB { ) }; - for (start_key, end_key) in vec![ + for (start_key, end_key) in [ endpoints(DBColumn::BeaconStateTemporary), endpoints(DBColumn::BeaconState), ] { diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 6f50d7038f8..ccfddcf8f84 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -16,6 +16,9 @@ pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3); pub const COMPACTION_TIMESTAMP_KEY: Hash256 = Hash256::repeat_byte(4); pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5); +/// State upper limit value used to indicate that a node is not storing historic states. +pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX); + #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct SchemaVersion(pub u64); diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index 57883828947..0c375a5f009 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -75,7 +75,7 @@ Once backfill is complete, a `INFO Historical block download complete` log will > Note: Since [v4.1.0](https://github.com/sigp/lighthouse/releases/tag/v4.1.0), Lighthouse implements rate-limited backfilling to mitigate validator performance issues after a recent checkpoint sync. This means that the speed at which historical blocks are downloaded is limited, typically to less than 20 slots/sec. This will not affect validator performance. However, if you would still prefer to sync the chain as fast as possible, you can add the flag `--disable-backfill-rate-limiting` to the beacon node. -> Note: Since [v4.2.0](https://github.com/sigp/lighthouse/releases/tag/v4.2.0), Lighthouse limits the backfill sync to only sync backwards to the weak subjectivity point (approximately 5 months). This will help to save disk space. However, if you would like to sync back to the genesis, you can add the flag `--genesis-backfill` to the beacon node. +> Note: Since [v4.2.0](https://github.com/sigp/lighthouse/releases/tag/v4.2.0), Lighthouse limits the backfill sync to only sync backwards to the weak subjectivity point (approximately 5 months). This will help to save disk space. However, if you would like to sync back to the genesis, you can add the flag `--genesis-backfill` to the beacon node. ## FAQ @@ -116,8 +116,9 @@ states: database. Additionally, the genesis block is always available. * `state_lower_limit`: All states with slots _less than or equal to_ this value are available in the database. 
The minimum value is 0, indicating that the genesis state is always available. -* `state_upper_limit`: All states with slots _greater than or equal to_ this value are available - in the database. +* `state_upper_limit`: All states with slots _greater than or equal to_ `min(split.slot, + state_upper_limit)` are available in the database. In the case where the `state_upper_limit` is + higher than the `split.slot`, this means states are not being written to the freezer database. Reconstruction runs from the state lower limit to the upper limit, narrowing the window of unavailable states as it goes. It will log messages like the following to show its progress: @@ -153,18 +154,8 @@ To manually specify a checkpoint use the following two flags: * `--checkpoint-state`: accepts an SSZ-encoded `BeaconState` blob * `--checkpoint-block`: accepts an SSZ-encoded `SignedBeaconBlock` blob -_Both_ the state and block must be provided and **must** adhere to the [Alignment -Requirements](#alignment-requirements) described below. - -### Alignment Requirements - -* The block must be a finalized block from an epoch boundary, i.e. `block.slot() % 32 == 0`. -* The state must be the state corresponding to `block` with `state.slot() == block.slot()` - and `state.hash_tree_root() == block.state_root()`. - -These requirements are imposed to align with Lighthouse's database schema, and notably exclude -finalized blocks from skipped slots. You can avoid alignment issues by using -[Automatic Checkpoint Sync](#automatic-checkpoint-sync), which will search for a suitable block -and state pair. +_Both_ the state and block must be provided and the state **must** match the block. The +state may be from the same slot as the block (unadvanced), or advanced to an epoch boundary, +in which case it will be assumed to be finalized at that epoch. [weak-subj]: https://blog.ethereum.org/2014/11/25/proof-stake-learned-love-weak-subjectivity/ diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index acca0bbeb3e..bab520b569b 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -21,7 +21,7 @@ engine to a merge-ready version. ## When? -All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**, **Gnosis**) have successfully undergone the Bellatrix fork and transitioned to a post-merge Network. Your node must have a merge-ready configuration to continue operating. Table below lists the date at which Bellatrix and The Merge occurred: +All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**, **Chiado**, **Gnosis**) have successfully undergone the Bellatrix fork and transitioned to a post-merge Network. Your node must have a merge-ready configuration to continue operating. Table below lists the date at which Bellatrix and The Merge occurred:
@@ -31,6 +31,7 @@ All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln | Sepolia | 20th June 2022 | 6th July 2022 | | | Goerli | 4th August 2022 | 10th August 2022 | Previously named `Prater`| | Mainnet | 6th September 2022 | 15th September 2022 | +| Chiado | 10th October 2022 | 4th November 2022 | | Gnosis| 30th November 2022 | 8th December 2022
diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index a31aedf785a..1ea14273357 100644 --- a/book/src/run_a_node.md +++ b/book/src/run_a_node.md @@ -58,7 +58,8 @@ Notable flags: - `lighthouse --network mainnet`: Mainnet. - `lighthouse --network goerli`: Goerli (testnet). - `lighthouse --network sepolia`: Sepolia (testnet). - - `lighthouse --network gnosis`: Gnosis chain + - `lighthouse --network chiado`: Chiado (testnet). + - `lighthouse --network gnosis`: Gnosis chain. > Note: Using the correct `--network` flag is very important; using the wrong flag can result in penalties, slashings or lost deposits. As a rule of thumb, *always* diff --git a/book/src/setup.md b/book/src/setup.md index 533e1d463d3..1ae6e635408 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -9,7 +9,7 @@ particularly useful for development but still a good way to ensure you have the base dependencies. The additional requirements for developers are: -- [`anvil`](https://github.com/foundry-rs/foundry/tree/master/anvil). This is used to +- [`anvil`](https://github.com/foundry-rs/foundry/tree/master/crates/anvil). This is used to simulate the execution chain during tests. You'll get failures during tests if you don't have `anvil` available on your `PATH`. - [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index c3dd3bd1936..3db521c0e27 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "4.3.0" +version = "4.4.0" authors = ["Sigma Prime "] edition = "2021" diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index d006156bf9d..779269921a5 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -10,6 +10,7 @@ use lighthouse_network::{ use serde_derive::{Deserialize, Serialize}; use ssz::Encode; use std::net::{SocketAddrV4, SocketAddrV6}; +use std::time::Duration; use std::{marker::PhantomData, path::PathBuf}; use types::EthSpec; @@ -90,8 +91,19 @@ impl BootNodeConfig { let enr_fork = { let spec = eth2_network_config.chain_spec::()?; - if eth2_network_config.beacon_state_is_known() { - let genesis_state = eth2_network_config.beacon_state::()?; + let genesis_state_url: Option = + clap_utils::parse_optional(matches, "genesis-state-url")?; + let genesis_state_url_timeout = + clap_utils::parse_required(matches, "genesis-state-url-timeout") + .map(Duration::from_secs)?; + + if eth2_network_config.genesis_state_is_known() { + let genesis_state = eth2_network_config + .genesis_state::(genesis_state_url.as_deref(), genesis_state_url_timeout, &logger)? 
+ .ok_or_else(|| { + "The genesis state for this network is not known, this is an unsupported mode" + .to_string() + })?; slog::info!(logger, "Genesis state found"; "root" => genesis_state.canonical_root().to_string()); let enr_fork = spec.enr_fork_id::( diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index d8e1a375fd5..8cb3de3a002 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -35,14 +35,8 @@ tokio = { version = "1.14.0", features = ["full"] } [target.'cfg(target_os = "linux")'.dependencies] psutil = { version = "3.2.2", optional = true } -procinfo = { version = "0.4.2", optional = true } +procfs = { version = "0.15.1", optional = true } [features] default = ["lighthouse"] -lighthouse = [ - "proto_array", - "psutil", - "procinfo", - "store", - "slashing_protection", -] +lighthouse = ["proto_array", "psutil", "procfs", "store", "slashing_protection"] diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 44ead55d2f9..afd37782762 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1266,6 +1266,23 @@ impl BeaconNodeHttpClient { Ok(()) } + // GET builder/states/{state_id}/expected_withdrawals + pub async fn get_expected_withdrawals( + &self, + state_id: &StateId, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("builder") + .push("states") + .push(&state_id.to_string()) + .push("expected_withdrawals"); + + self.get(path).await + } + /// `POST validator/contribution_and_proofs` pub async fn post_validator_contribution_and_proofs( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 1b4bcc0e395..dfc19db4928 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -95,8 +95,8 @@ pub struct ValidatorInclusionData { #[cfg(target_os = "linux")] use { - procinfo::pid, psutil::cpu::os::linux::CpuTimesExt, - psutil::memory::os::linux::VirtualMemoryExt, psutil::process::Process, + psutil::cpu::os::linux::CpuTimesExt, psutil::memory::os::linux::VirtualMemoryExt, + psutil::process::Process, }; /// Reports on the health of the Lighthouse instance. @@ -238,7 +238,7 @@ pub struct ProcessHealth { /// The pid of this process. pub pid: u32, /// The number of threads used by this pid. - pub pid_num_threads: i32, + pub pid_num_threads: i64, /// The total resident memory used by this pid. pub pid_mem_resident_set_size: u64, /// The total virtual memory used by this pid. 
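The `procinfo` to `procfs` migration is also why `pid_num_threads` widens from `i32` to `i64`: `procfs`' `Stat::num_threads` field is an `i64`. A Linux-only sketch of the replacement call pattern, assuming the `procfs` crate added in `Cargo.toml` above:

```rust
/// Read the current process' thread count via procfs (Linux only).
fn pid_num_threads() -> Result<i64, String> {
    let me = procfs::process::Process::myself()
        .map_err(|e| format!("Unable to get process: {:?}", e))?;
    let stat = me
        .stat()
        .map_err(|e| format!("Unable to get stat: {:?}", e))?;
    Ok(stat.num_threads)
}

fn main() {
    println!("threads: {:?}", pid_num_threads());
}
```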
@@ -262,7 +262,12 @@ impl ProcessHealth { .memory_info() .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; - let stat = pid::stat_self().map_err(|e| format!("Unable to get stat: {:?}", e))?; + let me = procfs::process::Process::myself() + .map_err(|e| format!("Unable to get process: {:?}", e))?; + let stat = me + .stat() + .map_err(|e| format!("Unable to get stat: {:?}", e))?; + let process_times = process .cpu_times() .map_err(|e| format!("Unable to get process cpu times : {:?}", e))?; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index e851af71ad8..d4f40cf4bc0 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -581,6 +581,11 @@ pub struct SyncingData { pub sync_distance: Slot, } +#[derive(Serialize, Deserialize)] +pub struct ExpectedWithdrawalsQuery { + pub proposal_slot: Option, +} + #[derive(Clone, PartialEq, Debug, Deserialize)] #[serde(try_from = "String", bound = "T: FromStr")] pub struct QueryVec { @@ -1311,11 +1316,32 @@ pub struct BroadcastValidationQuery { pub broadcast_validation: BroadcastValidation, } +pub mod serde_status_code { + use crate::StatusCode; + use serde::{de::Error, Deserialize, Serialize}; + + pub fn serialize(status_code: &StatusCode, ser: S) -> Result + where + S: serde::Serializer, + { + status_code.as_u16().serialize(ser) + } + + pub fn deserialize<'de, D>(de: D) -> Result + where + D: serde::de::Deserializer<'de>, + { + let status_code = u16::deserialize(de)?; + StatusCode::try_from(status_code).map_err(D::Error::custom) + } +} + pub enum ForkVersionedBeaconBlockType { Full(ForkVersionedResponse>>), Blinded(ForkVersionedResponse>>) } + #[cfg(test)] mod tests { use super::*; diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 7e5506667ff..6487151a92c 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -23,6 +23,23 @@ pub const PREDEFINED_NETWORKS_DIR: &str = predefined_networks_dir!(); pub const GENESIS_FILE_NAME: &str = "genesis.ssz"; pub const GENESIS_ZIP_FILE_NAME: &str = "genesis.ssz.zip"; +const HOLESKY_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url { + urls: &[ + // This is an AWS S3 bucket hosted by Sigma Prime. See Paul Hauner for + // more details. + "https://sigp-public-genesis-states.s3.ap-southeast-2.amazonaws.com/holesky/", + ], + checksum: "0x76631cd0b9ddc5b2c766b496e23f16759ce1181446a4efb40e5540cd15b78a07", + genesis_validators_root: "0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1", +}; + +const CHIADO_GENESIS_STATE_SOURCE: GenesisStateSource = GenesisStateSource::Url { + // No default checkpoint sources are provided. + urls: &[], + checksum: "0xd4a039454c7429f1dfaa7e11e397ef3d0f50d2d5e4c0e4dc04919d153aa13af1", + genesis_validators_root: "0x9d642dac73058fbf39c0ae41ab1e34e4d889043cb199851ded7095bc99eb4c1e", +}; + /// The core configuration of a Lighthouse beacon node. #[derive(Debug, Clone)] pub struct Eth2Config { @@ -62,6 +79,32 @@ impl Eth2Config { } } +/// Describes how a genesis state may be obtained. +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum GenesisStateSource { + /// The genesis state for this network is not yet known. + Unknown, + /// The genesis state for this network is included in the binary via + /// `include_bytes!` or by loading from a testnet dir. + IncludedBytes, + /// The genesis state for this network should be downloaded from a URL. + Url { + /// URLs to try to download the file from, in order. 
+ urls: &'static [&'static str], + /// The SHA256 of the genesis state bytes. This is *not* a hash tree + /// root to simplify the types (i.e., to avoid getting EthSpec + /// involved). + /// + /// The format should be 0x-prefixed ASCII bytes. + checksum: &'static str, + /// The `genesis_validators_root` of the genesis state. Used to avoid + /// downloading the state for simple signing operations. + /// + /// The format should be 0x-prefixed ASCII bytes. + genesis_validators_root: &'static str, + }, +} + /// A directory that can be built by downloading files via HTTP. /// /// Used by the `eth2_network_config` crate to initialize the network directories during build and @@ -70,7 +113,7 @@ impl Eth2Config { pub struct Eth2NetArchiveAndDirectory<'a> { pub name: &'a str, pub config_dir: &'a str, - pub genesis_is_known: bool, + pub genesis_state_source: GenesisStateSource, } impl<'a> Eth2NetArchiveAndDirectory<'a> { @@ -89,15 +132,11 @@ impl<'a> Eth2NetArchiveAndDirectory<'a> { } } -/// Indicates that the `genesis.ssz.zip` file is present on the filesystem. This means that the -/// deposit ceremony has concluded and the final genesis `BeaconState` is known. -const GENESIS_STATE_IS_KNOWN: bool = true; - -#[derive(Copy, Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq)] pub struct HardcodedNet { pub name: &'static str, pub config_dir: &'static str, - pub genesis_is_known: bool, + pub genesis_state_source: GenesisStateSource, pub config: &'static [u8], pub deploy_block: &'static [u8], pub boot_enr: &'static [u8], @@ -109,7 +148,7 @@ pub struct HardcodedNet { /// It also defines a `include__file!` macro which provides a wrapper around /// `std::include_bytes`, allowing the inclusion of bytes from the specific testnet directory. macro_rules! define_archive { - ($name_ident: ident, $config_dir: tt, $genesis_is_known: ident) => { + ($name_ident: ident, $config_dir: tt, $genesis_state_source: path) => { paste! { #[macro_use] pub mod $name_ident { @@ -118,7 +157,7 @@ macro_rules! define_archive { pub const ETH2_NET_DIR: Eth2NetArchiveAndDirectory = Eth2NetArchiveAndDirectory { name: stringify!($name_ident), config_dir: $config_dir, - genesis_is_known: $genesis_is_known, + genesis_state_source: $genesis_state_source, }; /// A wrapper around `std::include_bytes` which includes a file from a specific network @@ -151,7 +190,7 @@ macro_rules! define_net { $this_crate::HardcodedNet { name: ETH2_NET_DIR.name, config_dir: ETH2_NET_DIR.config_dir, - genesis_is_known: ETH2_NET_DIR.genesis_is_known, + genesis_state_source: ETH2_NET_DIR.genesis_state_source, config: $this_crate::$include_file!($this_crate, "../", "config.yaml"), deploy_block: $this_crate::$include_file!($this_crate, "../", "deploy_block.txt"), boot_enr: $this_crate::$include_file!($this_crate, "../", "boot_enr.yaml"), @@ -199,9 +238,9 @@ macro_rules! define_nets { /// `build.rs` which will unzip the genesis states. Then, that `eth2_network_configs` crate can /// perform the final step of using `std::include_bytes` to bake the files (bytes) into the binary. macro_rules! 
define_hardcoded_nets { - ($(($name_ident: ident, $config_dir: tt, $genesis_is_known: ident)),+) => { + ($(($name_ident: ident, $config_dir: tt, $genesis_state_source: path)),+) => { $( - define_archive!($name_ident, $config_dir, $genesis_is_known); + define_archive!($name_ident, $config_dir, $genesis_state_source); )+ pub const ETH2_NET_DIRS: &[Eth2NetArchiveAndDirectory<'static>] = &[$($name_ident::ETH2_NET_DIR,)+]; @@ -242,9 +281,8 @@ define_hardcoded_nets!( // The name of the directory in the `eth2_network_config/built_in_network_configs` // directory where the configuration files are located for this network. "mainnet", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. - GENESIS_STATE_IS_KNOWN + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes ), ( // Network name (must be unique among all networks). @@ -252,9 +290,8 @@ define_hardcoded_nets!( // The name of the directory in the `eth2_network_config/built_in_network_configs` // directory where the configuration files are located for this network. "prater", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. - GENESIS_STATE_IS_KNOWN + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes ), ( // Network name (must be unique among all networks). @@ -264,9 +301,8 @@ define_hardcoded_nets!( // // The Goerli network is effectively an alias to Prater. "prater", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. - GENESIS_STATE_IS_KNOWN + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes ), ( // Network name (must be unique among all networks). @@ -274,9 +310,18 @@ define_hardcoded_nets!( // The name of the directory in the `eth2_network_config/built_in_network_configs` // directory where the configuration files are located for this network. "gnosis", + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes + ), + ( + // Network name (must be unique among all networks). + chiado, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "chiado", // Set to `true` if the genesis state can be found in the `built_in_network_configs` // directory. - GENESIS_STATE_IS_KNOWN + CHIADO_GENESIS_STATE_SOURCE ), ( // Network name (must be unique among all networks). @@ -284,8 +329,16 @@ define_hardcoded_nets!( // The name of the directory in the `eth2_network_config/built_in_network_configs` // directory where the configuration files are located for this network. "sepolia", - // Set to `true` if the genesis state can be found in the `built_in_network_configs` - // directory. - GENESIS_STATE_IS_KNOWN + // Describes how the genesis state can be obtained. + GenesisStateSource::IncludedBytes + ), + ( + // Network name (must be unique among all networks). + holesky, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "holesky", + // Describes how the genesis state can be obtained. 
+ HOLESKY_GENESIS_STATE_SOURCE ) ); diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 338a2d243bc..e73f64d5a83 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -7,15 +7,22 @@ edition = "2021" build = "build.rs" [build-dependencies] -zip = "0.5.8" -eth2_config = { path = "../eth2_config"} +zip = "0.6" +eth2_config = { path = "../eth2_config" } [dev-dependencies] tempfile = "3.1.0" [dependencies] serde_yaml = "0.8.13" -types = { path = "../../consensus/types"} +types = { path = "../../consensus/types" } ethereum_ssz = "0.5.0" -eth2_config = { path = "../eth2_config"} -discv5 = "0.3.1" \ No newline at end of file +eth2_config = { path = "../eth2_config" } +discv5 = "0.3.1" +reqwest = { version = "0.11.0", features = ["blocking"] } +pretty_reqwest_error = { path = "../pretty_reqwest_error" } +sha2 = "0.10" +url = "2.2.2" +sensitive_url = { path = "../sensitive_url" } +slog = "2.5.2" +logging = { path = "../logging" } diff --git a/common/eth2_network_config/build.rs b/common/eth2_network_config/build.rs index fa45fafa4e7..3165930f4a8 100644 --- a/common/eth2_network_config/build.rs +++ b/common/eth2_network_config/build.rs @@ -1,5 +1,7 @@ //! Extracts zipped genesis states on first run. -use eth2_config::{Eth2NetArchiveAndDirectory, ETH2_NET_DIRS, GENESIS_FILE_NAME}; +use eth2_config::{ + Eth2NetArchiveAndDirectory, GenesisStateSource, ETH2_NET_DIRS, GENESIS_FILE_NAME, +}; use std::fs::File; use std::io; use zip::ZipArchive; @@ -26,7 +28,7 @@ fn uncompress_state(network: &Eth2NetArchiveAndDirectory<'static>) -> Result<(), return Ok(()); } - if network.genesis_is_known { + if network.genesis_state_source == GenesisStateSource::IncludedBytes { // Extract genesis state from genesis.ssz.zip let archive_path = network.genesis_state_archive(); let archive_file = File::open(&archive_path) @@ -46,7 +48,8 @@ fn uncompress_state(network: &Eth2NetArchiveAndDirectory<'static>) -> Result<(), io::copy(&mut file, &mut outfile) .map_err(|e| format!("Error writing file {:?}: {}", genesis_ssz_path, e))?; } else { - // Create empty genesis.ssz if genesis is unknown + // Create empty genesis.ssz if genesis is unknown or to be downloaded via URL. + // This is a bit of a hack to make `include_bytes!` easier to deal with. 
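With genesis state sources expressed as an enum, the build script below only needs to unzip `genesis.ssz.zip` for networks that bundle their state; URL-based and unknown networks just get an empty placeholder so `include_bytes!` still has a file to include. A condensed sketch of that decision with a standalone enum (illustrative, not the real `eth2_config` type):

```rust
#[derive(PartialEq)]
enum GenesisStateSource {
    /// Genesis is not known for this network yet.
    Unknown,
    /// Genesis state is bundled in `genesis.ssz.zip` and must be extracted.
    IncludedBytes,
    /// Genesis state will be downloaded at runtime from a URL.
    Url,
}

/// `true` means "extract the bundled zip"; `false` means "create an empty
/// genesis.ssz placeholder so `include_bytes!` compiles".
fn must_extract_bundled_genesis(source: &GenesisStateSource) -> bool {
    *source == GenesisStateSource::IncludedBytes
}

fn main() {
    assert!(must_extract_bundled_genesis(&GenesisStateSource::IncludedBytes));
    assert!(!must_extract_bundled_genesis(&GenesisStateSource::Url));
    assert!(!must_extract_bundled_genesis(&GenesisStateSource::Unknown));
}
```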
File::create(genesis_ssz_path) .map_err(|e| format!("Failed to create {}: {}", GENESIS_FILE_NAME, e))?; } diff --git a/common/eth2_network_config/built_in_network_configs/chiado/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/chiado/boot_enr.yaml new file mode 100644 index 00000000000..96baffde6fb --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/chiado/boot_enr.yaml @@ -0,0 +1,8 @@ +# chiado-teku-0 +- "enr:-Ly4QLYLNqrjvSxD3lpAPBUNlxa6cIbe79JqLZLFcZZjWoCjZcw-85agLUErHiygG2weRSCLnd5V460qTbLbwJQsfZkoh2F0dG5ldHOI__________-EZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhKq7mu-Jc2VjcDI1NmsxoQP900YAYa9kdvzlSKGjVo-F3XVzATjOYp3BsjLjSophO4hzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA" +# chiado-teku-1 +- "enr:-Ly4QCGeYvTCNOGKi0mKRUd45rLj96b4pH98qG7B9TCUGXGpHZALtaL2-XfjASQyhbCqENccI4PGXVqYTIehNT9KJMQgh2F0dG5ldHOI__________-EZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhIuQrVSJc2VjcDI1NmsxoQP9iDchx2PGl3JyJ29B9fhLCvVMN6n23pPAIIeFV-sHOIhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA" +#GnosisDAO Bootnode: 3.71.132.231 +- "enr:-Ly4QAtr21x5Ps7HYhdZkIBRBgcBkvlIfEel1YNjtFWf4cV3au2LgBGICz9PtEs9-p2HUl_eME8m1WImxTxSB3AkCMwBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhANHhOeJc2VjcDI1NmsxoQNLp1QPV8-pyMCohOtj6xGtSBM_GtVTqzlbvNsCF4ezkYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA" +#GnosisDAO Bootnode: 3.69.35.13 +- "enr:-Ly4QLgn8Bx6faigkKUGZQvd1HDToV2FAxZIiENK-lczruzQb90qJK-4E65ADly0s4__dQOW7IkLMW7ZAyJy2vtiLy8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAxNnBDAgAAb___________gmlkgnY0gmlwhANFIw2Jc2VjcDI1NmsxoQMa-fWEy9UJHfOl_lix3wdY5qust78sHAqZnWwEiyqKgYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA" diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml new file mode 100644 index 00000000000..47b285a654f --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -0,0 +1,154 @@ +# Extends the mainnet preset +PRESET_BASE: gnosis +# needs to exist because of Prysm. 
Otherwise it conflicts with mainnet genesis +CONFIG_NAME: chiado + +# Genesis +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 6000 +# 10 October 2022 10:00:00 GMT+0000 +MIN_GENESIS_TIME: 1665396000 +GENESIS_DELAY: 300 + +# Projected time: 2022-11-04T15:00:00.000Z, block: 680928 +TERMINAL_TOTAL_DIFFICULTY: 231707791542740786049188744689299064356246512 + +# Deposit contract +# --------------------------------------------------------------- +# NOTE: Don't use a value too high, or Teku rejects it (4294906129 NOK) +DEPOSIT_CHAIN_ID: 10200 +DEPOSIT_NETWORK_ID: 10200 +DEPOSIT_CONTRACT_ADDRESS: 0xb97036A26259B7147018913bD58a774cf91acf25 + +# Misc +# --------------------------------------------------------------- +# 2**6 (= 64) +MAX_COMMITTEES_PER_SLOT: 64 +# 2**7 (= 128) +TARGET_COMMITTEE_SIZE: 128 +# 2**11 (= 2,048) +MAX_VALIDATORS_PER_COMMITTEE: 2048 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**12 (= 4096) +CHURN_LIMIT_QUOTIENT: 4096 +# See issue 563 +SHUFFLE_ROUND_COUNT: 90 +# 4 +HYSTERESIS_QUOTIENT: 4 +# 1 (minus 0.25) +HYSTERESIS_DOWNWARD_MULTIPLIER: 1 +# 5 (plus 1.25) +HYSTERESIS_UPWARD_MULTIPLIER: 5 +# Validator +# --------------------------------------------------------------- +# 2**10 (= 1024) ~1.4 hour +ETH1_FOLLOW_DISTANCE: 1024 +# 2**4 (= 16) +TARGET_AGGREGATORS_PER_COMMITTEE: 16 +# 2**0 (= 1) +RANDOM_SUBNETS_PER_VALIDATOR: 1 +# 2**8 (= 256) +EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256 +# 6 (estimate from xDai mainnet) +SECONDS_PER_ETH1_BLOCK: 6 + +# Gwei values +# --------------------------------------------------------------- +# 2**0 * 10**9 (= 1,000,000,000) Gwei +MIN_DEPOSIT_AMOUNT: 1000000000 +# 2**5 * 10**9 (= 32,000,000,000) Gwei +MAX_EFFECTIVE_BALANCE: 32000000000 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**0 * 10**9 (= 1,000,000,000) Gwei +EFFECTIVE_BALANCE_INCREMENT: 1000000000 +# Initial values +# --------------------------------------------------------------- +# GBC area code +GENESIS_FORK_VERSION: 0x0000006f +BLS_WITHDRAWAL_PREFIX: 0x00 +# Time parameters +# --------------------------------------------------------------- +# 5 seconds +SECONDS_PER_SLOT: 5 +# 2**0 (= 1) slots 12 seconds +MIN_ATTESTATION_INCLUSION_DELAY: 1 +# 2**4 (= 16) slots 1.87 minutes +SLOTS_PER_EPOCH: 16 +# 2**0 (= 1) epochs 1.87 minutes +MIN_SEED_LOOKAHEAD: 1 +# 2**2 (= 4) epochs 7.47 minutes +MAX_SEED_LOOKAHEAD: 4 +# 2**6 (= 64) epochs ~2 hours +EPOCHS_PER_ETH1_VOTING_PERIOD: 64 +# 2**13 (= 8,192) slots ~15.9 hours +SLOTS_PER_HISTORICAL_ROOT: 8192 +# 2**8 (= 256) epochs ~8 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~8 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**2 (= 4) epochs 7.47 minutes +MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 + +# State vector lengths +# --------------------------------------------------------------- +# 2**16 (= 65,536) epochs ~85 days +EPOCHS_PER_HISTORICAL_VECTOR: 65536 +# 2**13 (= 8,192) epochs ~10.6 days +EPOCHS_PER_SLASHINGS_VECTOR: 8192 +# 2**24 (= 16,777,216) historical roots, ~15,243 years +HISTORICAL_ROOTS_LIMIT: 16777216 +# 2**40 (= 1,099,511,627,776) validator spots +VALIDATOR_REGISTRY_LIMIT: 1099511627776 +# Reward and penalty quotients +# --------------------------------------------------------------- +# 25 +BASE_REWARD_FACTOR: 25 +# 2**9 (= 512) +WHISTLEBLOWER_REWARD_QUOTIENT: 512 +# 2**3 (= 8) +PROPOSER_REWARD_QUOTIENT: 8 +# 2**26 (= 67,108,864) +INACTIVITY_PENALTY_QUOTIENT: 67108864 +# 2**7 (= 128) (lower safety margin at Phase 0 genesis) +MIN_SLASHING_PENALTY_QUOTIENT: 128 +# 1 (lower safety margin at Phase 0 genesis) 
+PROPORTIONAL_SLASHING_MULTIPLIER: 1 +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_PROPOSER_SLASHINGS: 16 +# 2**1 (= 2) +MAX_ATTESTER_SLASHINGS: 2 +# 2**7 (= 128) +MAX_ATTESTATIONS: 128 +# 2**4 (= 16) +MAX_DEPOSITS: 16 +# 2**4 (= 16) +MAX_VOLUNTARY_EXITS: 16 +# Signature domains +# --------------------------------------------------------------- +DOMAIN_BEACON_PROPOSER: 0x00000000 +DOMAIN_BEACON_ATTESTER: 0x01000000 +DOMAIN_RANDAO: 0x02000000 +DOMAIN_DEPOSIT: 0x03000000 +DOMAIN_VOLUNTARY_EXIT: 0x04000000 +DOMAIN_SELECTION_PROOF: 0x05000000 +DOMAIN_AGGREGATE_AND_PROOF: 0x06000000 +DOMAIN_SYNC_COMMITTEE: 0x07000000 +DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF: 0x08000000 +DOMAIN_CONTRIBUTION_AND_PROOF: 0x09000000 + +# Altair +ALTAIR_FORK_VERSION: 0x0100006f +ALTAIR_FORK_EPOCH: 90 # Mon Oct 10 2022 12:00:00 GMT+0000 +# Bellatrix +BELLATRIX_FORK_VERSION: 0x0200006f +BELLATRIX_FORK_EPOCH: 180 # Mon Oct 10 2022 14:00:00 GMT+0000 +# Capella +CAPELLA_FORK_VERSION: 0x0300006f +CAPELLA_FORK_EPOCH: 244224 # Wed May 24 2023 13:12:00 GMT+0000 + +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 diff --git a/common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt @@ -0,0 +1 @@ +0 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/holesky/boot_enr.yaml new file mode 100644 index 00000000000..616d41d672a --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/holesky/boot_enr.yaml @@ -0,0 +1,8 @@ +# EF +- enr:-Iq4QJk4WqRkjsX5c2CXtOra6HnxN-BMXnWhmhEQO9Bn9iABTJGdjUOurM7Btj1ouKaFkvTRoju5vz2GPmVON2dffQKGAX53x8JigmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk +- enr:-KG4QF6d6vMSboSujAXTI4vYqArccm0eIlXfcxf2Lx_VE1q6IkQo_2D5LAO3ZSBVUs0w5rrVDmABJZuMzISe_pZundADhGV0aDKQqX6DZjABcAAAAQAAAAAAAIJpZIJ2NIJpcISygIjpiXNlY3AyNTZrMaEDF3aSa7QSCvdqLpANNd8GML4PLEZVg45fKQwMWhDZjd2DdGNwgiMog3VkcIIjKA +- enr:-Ly4QJLXSSAj3ggPBIcodvBU6IyfpU_yW7E9J-5syoJorBuvcYj_Fokcjr303bQoTdWXADf8po0ssh75Mr5wVGzZZsMBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCpfoNmMAFwAAABAAAAAAAAgmlkgnY0gmlwhJK-DYCJc2VjcDI1NmsxoQJrIlXIQDvQ6t9yDySqJYDXgZgLXzTvq8W7OI51jfmxJohzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +# Teku +- enr:-LK4QMlzEff6d-M0A1pSFG5lJ2c56i_I-ZftdojZbW3ehkGNM4pkQuHQqzVvF1BG9aDjIakjnmO23mCBFFZ2w5zOsugEh2F0dG5ldHOIAAAAAAYAAACEZXRoMpCpfoNmMAFwAAABAAAAAAAAgmlkgnY0gmlwhKyuI_mJc2VjcDI1NmsxoQIH1kQRCZW-4AIVyAeXj5o49m_IqNFKRHp6tSpfXMUrSYN0Y3CCIyiDdWRwgiMo +# Sigma Prime +- enr:-Le4QI88slOwzz66Ksq8Vnz324DPb1BzSiY-WYPvnoJIl-lceW9bmSJnwDzgNbCjp5wsBigg76x4tValvGgQPxxSjrMBhGV0aDKQqX6DZjABcAAAAQAAAAAAAIJpZIJ2NIJpcIQ5gR6Wg2lwNpAgAUHQBwEQAAAAAAAAADR-iXNlY3AyNTZrMaEDPMSNdcL92uNIyCsS177Z6KTXlbZakQqxv3aQcWawNXeDdWRwgiMohHVkcDaCI4I diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml new file mode 100644 index 00000000000..a6bfd87adec --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -0,0 +1,117 @@ +# Extends the mainnet preset +PRESET_BASE: 'mainnet' +CONFIG_NAME: holesky + +# Genesis +# --------------------------------------------------------------- +# `2**14` (= 
16,384) +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 +# Sep-15-2023 13:55:00 +UTC +MIN_GENESIS_TIME: 1694786100 +GENESIS_FORK_VERSION: 0x00017000 +# Genesis delay 5 mins +GENESIS_DELAY: 300 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x10017000 +ALTAIR_FORK_EPOCH: 0 +# Merge +BELLATRIX_FORK_VERSION: 0x20017000 +BELLATRIX_FORK_EPOCH: 0 +TERMINAL_TOTAL_DIFFICULTY: 0 +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Capella +CAPELLA_FORK_VERSION: 0x30017000 +CAPELLA_FORK_EPOCH: 256 + +# DENEB +DENEB_FORK_VERSION: 0x40017000 +DENEB_FORK_EPOCH: 18446744073709551615 + +# Time parameters +# --------------------------------------------------------------- +# 12 seconds +SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 + + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 28,000,000,000 Gwei to ensure quicker ejection +EJECTION_BALANCE: 28000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 + +# Deposit contract +# --------------------------------------------------------------- +DEPOSIT_CHAIN_ID: 17000 +DEPOSIT_NETWORK_ID: 17000 +DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242 + +# Networking +# --------------------------------------------------------------- +# `10 * 2**20` (= 10485760, 10 MiB) +GOSSIP_MAX_SIZE: 10485760 +# `2**10` (= 1024) +MAX_REQUEST_BLOCKS: 1024 +# `2**8` (= 256) +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +# `10 * 2**20` (=10485760, 10 MiB) +MAX_CHUNK_SIZE: 10485760 +# 5s +TTFB_TIMEOUT: 5 +# 10s +RESP_TIMEOUT: 10 +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**8 (= 64) +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS +ATTESTATION_SUBNET_PREFIX_BITS: 6 + +# Deneb +# `2**7` (=128) +MAX_REQUEST_BLOCKS_DENEB: 128 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 +# `2**12` (= 4096 epochs, ~18 days) +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# `6` +BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt @@ -0,0 +1 @@ +0 diff --git a/common/eth2_network_config/src/lib.rs 
b/common/eth2_network_config/src/lib.rs index 7274bbf029b..769f656e85d 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -13,10 +13,20 @@ use discv5::enr::{CombinedKey, Enr}; use eth2_config::{instantiate_hardcoded_nets, HardcodedNet}; +use pretty_reqwest_error::PrettyReqwestError; +use reqwest::blocking::Client; +use sensitive_url::SensitiveUrl; +use sha2::{Digest, Sha256}; +use slog::{info, warn, Logger}; use std::fs::{create_dir_all, File}; use std::io::{Read, Write}; use std::path::PathBuf; -use types::{BeaconState, ChainSpec, Config, EthSpec, EthSpecId}; +use std::str::FromStr; +use std::time::Duration; +use types::{BeaconState, ChainSpec, Config, EthSpec, EthSpecId, Hash256}; +use url::Url; + +pub use eth2_config::GenesisStateSource; pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt"; pub const BOOT_ENR_FILE: &str = "boot_enr.yaml"; @@ -32,6 +42,35 @@ instantiate_hardcoded_nets!(eth2_config); pub const DEFAULT_HARDCODED_NETWORK: &str = "mainnet"; +/// A simple slice-or-vec enum to avoid cloning the beacon state bytes in the +/// binary whilst also supporting loading them from a file at runtime. +#[derive(Clone, PartialEq, Debug)] +pub enum GenesisStateBytes { + Slice(&'static [u8]), + Vec(Vec<u8>), +} + +impl AsRef<[u8]> for GenesisStateBytes { + fn as_ref(&self) -> &[u8] { + match self { + GenesisStateBytes::Slice(slice) => slice, + GenesisStateBytes::Vec(vec) => vec.as_ref(), + } + } +} + +impl From<&'static [u8]> for GenesisStateBytes { + fn from(slice: &'static [u8]) -> Self { + GenesisStateBytes::Slice(slice) + } +} + +impl From<Vec<u8>> for GenesisStateBytes { + fn from(vec: Vec<u8>) -> Self { + GenesisStateBytes::Vec(vec) + } +} + /// Specifies an Eth2 network. /// /// See the crate-level documentation for more details. @@ -41,7 +80,8 @@ pub struct Eth2NetworkConfig { /// value to be the block number where the first deposit occurs. pub deposit_contract_deploy_block: u64, pub boot_enr: Option<Vec<Enr<CombinedKey>>>, - pub genesis_state_bytes: Option<Vec<u8>>, + pub genesis_state_source: GenesisStateSource, + pub genesis_state_bytes: Option<GenesisStateBytes>, pub config: Config, } @@ -65,8 +105,10 @@ impl Eth2NetworkConfig { serde_yaml::from_reader(net.boot_enr) .map_err(|e| format!("Unable to parse boot enr: {:?}", e))?, ), - genesis_state_bytes: Some(net.genesis_state_bytes.to_vec()) - .filter(|bytes| !bytes.is_empty()), + genesis_state_source: net.genesis_state_source, + genesis_state_bytes: Some(net.genesis_state_bytes) + .filter(|bytes| !bytes.is_empty()) + .map(Into::into), config: serde_yaml::from_reader(net.config) .map_err(|e| format!("Unable to parse yaml config: {:?}", e))?, }) @@ -81,8 +123,37 @@ impl Eth2NetworkConfig { } /// Returns `true` if this configuration contains a `BeaconState`. - pub fn beacon_state_is_known(&self) -> bool { - self.genesis_state_bytes.is_some() + pub fn genesis_state_is_known(&self) -> bool { + self.genesis_state_source != GenesisStateSource::Unknown + } + + /// The `genesis_validators_root` of the genesis state. May download the + /// genesis state if the value is not already available. + pub fn genesis_validators_root<E: EthSpec>( + &self, + genesis_state_url: Option<&str>, + timeout: Duration, + log: &Logger, + ) -> Result<Option<Hash256>, String> { + if let GenesisStateSource::Url { + genesis_validators_root, + .. 
+ } = self.genesis_state_source + { + Hash256::from_str(genesis_validators_root) + .map(Option::Some) + .map_err(|e| { + format!( + "Unable to parse genesis state genesis_validators_root: {:?}", + e + ) + }) + } else { + self.genesis_state::<E>(genesis_state_url, timeout, log)? + .map(|state| state.genesis_validators_root()) + .map(Result::Ok) + .transpose() + } } /// Construct a consolidated `ChainSpec` from the YAML config. @@ -96,15 +167,65 @@ impl Eth2NetworkConfig { } /// Attempts to deserialize `self.beacon_state`, returning an error if it's missing or invalid. - pub fn beacon_state<E: EthSpec>(&self) -> Result<BeaconState<E>, String> { + /// + /// If the genesis state is configured to be downloaded from a URL, then the + /// `genesis_state_url` will override the built-in list of download URLs. + pub fn genesis_state<E: EthSpec>( + &self, + genesis_state_url: Option<&str>, + timeout: Duration, + log: &Logger, + ) -> Result<Option<BeaconState<E>>, String> { let spec = self.chain_spec::<E>()?; - let genesis_state_bytes = self - .genesis_state_bytes - .as_ref() - .ok_or("Genesis state is unknown")?; + match &self.genesis_state_source { + GenesisStateSource::Unknown => Ok(None), + GenesisStateSource::IncludedBytes => { + let state = self + .genesis_state_bytes + .as_ref() + .map(|bytes| { + BeaconState::from_ssz_bytes(bytes.as_ref(), &spec).map_err(|e| { + format!("Built-in genesis state SSZ bytes are invalid: {:?}", e) + }) + }) + .ok_or("Genesis state bytes missing from Eth2NetworkConfig")??; + Ok(Some(state)) + } + GenesisStateSource::Url { + urls: built_in_urls, + checksum, + genesis_validators_root, + } => { + let checksum = Hash256::from_str(checksum).map_err(|e| { + format!("Unable to parse genesis state bytes checksum: {:?}", e) + })?; + let bytes = if let Some(specified_url) = genesis_state_url { + download_genesis_state(&[specified_url], timeout, checksum, log) + } else { + download_genesis_state(built_in_urls, timeout, checksum, log) + }?; + let state = BeaconState::from_ssz_bytes(bytes.as_ref(), &spec).map_err(|e| { + format!("Downloaded genesis state SSZ bytes are invalid: {:?}", e) + })?; + + let genesis_validators_root = + Hash256::from_str(genesis_validators_root).map_err(|e| { + format!( + "Unable to parse genesis state genesis_validators_root: {:?}", + e + ) + })?; + if state.genesis_validators_root() != genesis_validators_root { + return Err(format!( + "Downloaded genesis validators root {:?} does not match expected {:?}", + state.genesis_validators_root(), + genesis_validators_root + )); + } - BeaconState::from_ssz_bytes(genesis_state_bytes, &spec) - .map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e)) + Ok(Some(state)) + } + } } /// Write the files to the directory. @@ -162,7 +283,7 @@ impl Eth2NetworkConfig { File::create(&file) .map_err(|e| format!("Unable to create {:?}: {:?}", file, e)) .and_then(|mut file| { - file.write_all(genesis_state_bytes) + file.write_all(genesis_state_bytes.as_ref()) .map_err(|e| format!("Unable to write {:?}: {:?}", file, e)) })?; } @@ -198,7 +319,7 @@ impl Eth2NetworkConfig { // The genesis state is a special case because it uses SSZ, not YAML. 
let genesis_file_path = base_dir.join(GENESIS_STATE_FILE); - let genesis_state_bytes = if genesis_file_path.exists() { + let (genesis_state_bytes, genesis_state_source) = if genesis_file_path.exists() { let mut bytes = vec![]; File::open(&genesis_file_path) .map_err(|e| format!("Unable to open {:?}: {:?}", genesis_file_path, e)) @@ -207,20 +328,105 @@ impl Eth2NetworkConfig { .map_err(|e| format!("Unable to read {:?}: {:?}", file, e)) })?; - Some(bytes).filter(|bytes| !bytes.is_empty()) + let state = Some(bytes).filter(|bytes| !bytes.is_empty()); + let genesis_state_source = if state.is_some() { + GenesisStateSource::IncludedBytes + } else { + GenesisStateSource::Unknown + }; + (state, genesis_state_source) } else { - None + (None, GenesisStateSource::Unknown) }; Ok(Self { deposit_contract_deploy_block, boot_enr, - genesis_state_bytes, + genesis_state_source, + genesis_state_bytes: genesis_state_bytes.map(Into::into), config, }) } } +/// Try to download a genesis state from each of the `urls` in the order they +/// are defined. Return `Ok` if any url returns a response that matches the +/// given `checksum`. +fn download_genesis_state( + urls: &[&str], + timeout: Duration, + checksum: Hash256, + log: &Logger, +) -> Result<Vec<u8>, String> { + if urls.is_empty() { + return Err( + "The genesis state is not present in the binary and there are no known download URLs. \ + Please use --checkpoint-sync-url or --genesis-state-url." + .to_string(), + ); + } + + let mut errors = vec![]; + for url in urls { + // URLs are always expected to be the base URL of a server that supports + // the beacon-API. + let url = parse_state_download_url(url)?; + let redacted_url = SensitiveUrl::new(url.clone()) + .map(|url| url.to_string()) + .unwrap_or_else(|_| "<REDACTED>".to_string()); + + info!( + log, + "Downloading genesis state"; + "server" => &redacted_url, + "timeout" => ?timeout, + "info" => "this may take some time on testnets with large validator counts" + ); + + let client = Client::new(); + let response = client + .get(url) + .header("Accept", "application/octet-stream") + .timeout(timeout) + .send() + .and_then(|r| r.error_for_status().and_then(|r| r.bytes())); + + match response { + Ok(bytes) => { + // Check the server response against our local checksum. + if Sha256::digest(bytes.as_ref())[..] == checksum[..] { + return Ok(bytes.into()); + } else { + warn!( + log, + "Genesis state download failed"; + "server" => &redacted_url, + "timeout" => ?timeout, + ); + errors.push(format!( + "Response from {} did not match local checksum", + redacted_url + )) + } + } + Err(e) => errors.push(PrettyReqwestError::from(e).to_string()), + } + } + Err(format!( + "Unable to download a genesis state from {} source(s): {}", + errors.len(), + errors.join(",") + )) +} + +/// Parses the `url` and joins the necessary state download path. +fn parse_state_download_url(url: &str) -> Result<Url, String> { + Url::parse(url) + .map_err(|e| format!("Invalid genesis state URL: {:?}", e))? 
+ .join("eth/v2/debug/beacon/states/genesis") + .map_err(|e| format!("Failed to append genesis state path to URL: {:?}", e)) +} + #[cfg(test)] mod tests { use super::*; @@ -260,7 +466,9 @@ mod tests { #[test] fn mainnet_genesis_state() { let config = Eth2NetworkConfig::from_hardcoded_net(&MAINNET).unwrap(); - config.beacon_state::<E>().expect("beacon state can decode"); + config + .genesis_state::<E>(None, Duration::from_secs(1), &logging::test_logger()) + .expect("beacon state can decode"); } #[test] @@ -274,10 +482,10 @@ mod tests { fn hard_coded_nets_work() { for net in HARDCODED_NETS { let config = Eth2NetworkConfig::from_hardcoded_net(net) - .unwrap_or_else(|_| panic!("{:?}", net.name)); + .unwrap_or_else(|e| panic!("{:?}: {:?}", net.name, e)); // Ensure we can parse the YAML config to a chain spec. - if net.name == types::GNOSIS { + if config.config.preset_base == types::GNOSIS { config.chain_spec::<GnosisEthSpec>().unwrap(); } else { config.chain_spec::<MainnetEthSpec>().unwrap(); @@ -285,10 +493,25 @@ mod tests { assert_eq!( config.genesis_state_bytes.is_some(), - net.genesis_is_known, + net.genesis_state_source == GenesisStateSource::IncludedBytes, "{:?}", net.name ); + + if let GenesisStateSource::Url { + urls, + checksum, + genesis_validators_root, + } = net.genesis_state_source + { + Hash256::from_str(checksum).expect("the checksum must be a valid 32-byte value"); + Hash256::from_str(genesis_validators_root) + .expect("the GVR must be a valid 32-byte value"); + for url in urls { + parse_state_download_url(url).expect("url must be valid"); + } + } + assert_eq!(config.config.config_name, Some(net.config_dir.to_string())); } } @@ -324,10 +547,20 @@ mod tests { let base_dir = temp_dir.path().join("my_testnet"); let deposit_contract_deploy_block = 42; + let genesis_state_source = if genesis_state.is_some() { + GenesisStateSource::IncludedBytes + } else { + GenesisStateSource::Unknown + }; + let testnet: Eth2NetworkConfig = Eth2NetworkConfig { deposit_contract_deploy_block, boot_enr, - genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes), + genesis_state_source, + genesis_state_bytes: genesis_state + .as_ref() + .map(Encode::as_ssz_bytes) + .map(Into::into), config, }; diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index e874432fbca..65f57531d33 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v4.3.0-", - fallback = "Lighthouse/v4.3.0" + prefix = "Lighthouse/v4.4.0-", + fallback = "Lighthouse/v4.4.0" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index b6179d9e782..0e158f58ff5 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -17,6 +17,6 @@ sloggers = { version = "2.1.1", features = ["json"] } slog-async = "2.7.0" take_mut = "0.2.2" parking_lot = "0.12.1" -serde = "1.0.153" +serde = "1.0.153" serde_json = "1.0.94" -chrono = "0.4.23" +chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } diff --git a/common/pretty_reqwest_error/src/lib.rs b/common/pretty_reqwest_error/src/lib.rs index 4c605f38aeb..0aaee5965ee 100644 --- a/common/pretty_reqwest_error/src/lib.rs +++ b/common/pretty_reqwest_error/src/lib.rs @@ -55,6 +55,12 @@ impl fmt::Debug for PrettyReqwestError { } } +impl fmt::Display for PrettyReqwestError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } +} + impl From<reqwest::Error> for PrettyReqwestError { fn from(inner: reqwest::Error) -> Self { Self(inner) diff --git a/common/warp_utils/src/cors.rs b/common/warp_utils/src/cors.rs index 314ea9c8f74..55043dfd7dd 100644 --- a/common/warp_utils/src/cors.rs +++ b/common/warp_utils/src/cors.rs @@ -10,10 +10,14 @@ pub fn set_builder_origins( default_origin: (IpAddr, u16), ) -> Result<Builder, String> { if let Some(allow_origin) = allow_origin { - let origins = allow_origin - .split(',') - .map(|s| verify_cors_origin_str(s).map(|_| s)) - .collect::<Result<Vec<_>, _>>()?; + let mut origins = vec![]; + for origin in allow_origin.split(',') { + verify_cors_origin_str(origin)?; + if origin == "*" { + return Ok(builder.allow_any_origin()); + } + origins.push(origin) + } Ok(builder.allow_origins(origins)) } else { let origin = match default_origin.0 { diff --git a/common/warp_utils/src/metrics.rs b/common/warp_utils/src/metrics.rs index 1b9d89db91a..d93b74ca956 100644 --- a/common/warp_utils/src/metrics.rs +++ b/common/warp_utils/src/metrics.rs @@ -87,7 +87,7 @@ pub fn scrape_process_health_metrics() { // This will silently fail if we are unable to observe the health. This is desired behaviour // since we don't support `Health` for all platforms. if let Ok(health) = ProcessHealth::observe() { - set_gauge(&PROCESS_NUM_THREADS, health.pid_num_threads as i64); + set_gauge(&PROCESS_NUM_THREADS, health.pid_num_threads); set_gauge(&PROCESS_RES_MEM, health.pid_mem_resident_set_size as i64); set_gauge(&PROCESS_VIRT_MEM, health.pid_mem_virtual_memory_size as i64); set_gauge(&PROCESS_SECONDS, health.pid_process_seconds_total as i64); diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 4f563f86398..ea3a58127b2 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -355,7 +355,7 @@ where spec: &ChainSpec, ) -> Result<Self, Error<T::Error>> { // Sanity check: the anchor must lie on an epoch boundary. 
- if anchor_block.slot() % E::slots_per_epoch() != 0 { + if anchor_state.slot() % E::slots_per_epoch() != 0 { return Err(Error::InvalidAnchor { block_slot: anchor_block.slot(), state_slot: anchor_state.slot(), @@ -391,6 +391,7 @@ where let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot()); let proto_array = ProtoArrayForkChoice::new::<E>( + current_slot, finalized_block_slot, finalized_block_state_root, *fc_store.justified_checkpoint(), @@ -749,7 +750,7 @@ where .unrealized_justified_checkpoint .zip(parent_block.unrealized_finalized_checkpoint) .filter(|(parent_justified, parent_finalized)| { - parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 >= block_epoch + parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 == block_epoch }); let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if let Some(( diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 157f072ad37..98d43e4850c 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -80,6 +80,7 @@ impl ForkChoiceTestDefinition { let junk_shuffling_id = AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); let mut fork_choice = ProtoArrayForkChoice::new::<MainnetEthSpec>( + self.finalized_block_slot, self.finalized_block_slot, Hash256::zero(), self.justified_checkpoint, diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index fe831b3c357..5911e50fcdc 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -345,6 +345,7 @@ pub struct ProtoArrayForkChoice { impl ProtoArrayForkChoice { #[allow(clippy::too_many_arguments)] pub fn new<E: EthSpec>( + current_slot: Slot, finalized_block_slot: Slot, finalized_block_state_root: Hash256, justified_checkpoint: Checkpoint, @@ -380,7 +381,7 @@ impl ProtoArrayForkChoice { }; proto_array - .on_block::<E>(block, finalized_block_slot) + .on_block::<E>(block, current_slot) .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?; Ok(Self { @@ -983,6 +984,7 @@ mod test_compute_deltas { }; let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>( + genesis_slot, genesis_slot, state_root, genesis_checkpoint, @@ -1108,6 +1110,7 @@ mod test_compute_deltas { }; let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>( + genesis_slot, genesis_slot, junk_state_root, genesis_checkpoint, diff --git a/consensus/safe_arith/src/iter.rs b/consensus/safe_arith/src/iter.rs index 1fc3d3a1a7a..d5ee51b588d 100644 --- a/consensus/safe_arith/src/iter.rs +++ b/consensus/safe_arith/src/iter.rs @@ -28,10 +28,10 @@ mod test { #[test] fn unsigned_sum_small() { - let v = vec![400u64, 401, 402, 403, 404, 405, 406]; + let arr = [400u64, 401, 402, 403, 404, 405, 406]; assert_eq!( - v.iter().copied().safe_sum().unwrap(), - v.iter().copied().sum() + arr.iter().copied().safe_sum().unwrap(), + arr.iter().copied().sum() ); } @@ -61,10 +61,10 @@ mod test { #[test] fn signed_sum_almost_overflow() { - let v = vec![i64::MIN, 1, -1i64, i64::MAX, i64::MAX, 1]; + let arr = [i64::MIN, 1, -1i64, i64::MAX, i64::MAX, 1]; assert_eq!( - v.iter().copied().safe_sum().unwrap(), - v.iter().copied().sum() + arr.iter().copied().safe_sum().unwrap(), + arr.iter().copied().sum() ); } } diff --git a/consensus/state_processing/src/per_slot_processing.rs 
b/consensus/state_processing/src/per_slot_processing.rs index ead06edbf56..e16fb4a7b11 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -21,7 +21,7 @@ impl From<ArithError> for Error { /// /// If the root of the supplied `state` is known, then it can be passed as `state_root`. If /// `state_root` is `None`, the root of `state` will be computed using a cached tree hash. -/// Providing the `state_root` makes this function several orders of magniude faster. +/// Providing the `state_root` makes this function several orders of magnitude faster. pub fn per_slot_processing<T: EthSpec>( state: &mut BeaconState<T>, state_root: Option<Hash256>, diff --git a/crypto/bls/src/generic_public_key_bytes.rs b/crypto/bls/src/generic_public_key_bytes.rs index 59b0ffc43f1..240568b4f67 100644 --- a/crypto/bls/src/generic_public_key_bytes.rs +++ b/crypto/bls/src/generic_public_key_bytes.rs @@ -27,10 +27,7 @@ impl<Pub> Copy for GenericPublicKeyBytes<Pub> {} impl<Pub> Clone for GenericPublicKeyBytes<Pub> { fn clone(&self) -> Self { - Self { - bytes: self.bytes, - _phantom: PhantomData, - } + *self } } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index f9d0a6a31c7..8b838ac6f82 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "4.3.0" +version = "4.4.0" authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2021" diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index 34144cd86db..bddd4baad8b 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -49,7 +49,7 @@ pub fn run<T: EthSpec>( .wait_for_genesis_state::<T>(ETH1_GENESIS_UPDATE_INTERVAL, spec) .await .map(move |genesis_state| { - eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes()); + eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes().into()); eth2_network_config.force_write_to_file(testnet_dir) }) .map_err(|e| format!("Failed to find genesis: {}", e))?; diff --git a/lcli/src/interop_genesis.rs b/lcli/src/interop_genesis.rs index 57a5ba00988..1a0b81fcb7f 100644 --- a/lcli/src/interop_genesis.rs +++ b/lcli/src/interop_genesis.rs @@ -42,7 +42,7 @@ pub fn run<T: EthSpec>(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), &spec, )?; - eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes()); + eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes().into()); eth2_network_config.force_write_to_file(testnet_dir)?; Ok(()) diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 01a44cabef7..973993f9790 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -1,7 +1,7 @@ use account_utils::eth2_keystore::keypair_from_secret; use clap::ArgMatches; use clap_utils::{parse_optional, parse_required, parse_ssz_optional}; -use eth2_network_config::Eth2NetworkConfig; +use eth2_network_config::{Eth2NetworkConfig, GenesisStateSource}; use eth2_wallet::bip39::Seed; use eth2_wallet::bip39::{Language, Mnemonic}; use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; @@ -190,7 +190,8 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul let testnet = Eth2NetworkConfig { deposit_contract_deploy_block, boot_enr: Some(vec![]), - genesis_state_bytes, + genesis_state_bytes: genesis_state_bytes.map(Into::into), + genesis_state_source: GenesisStateSource::IncludedBytes, config: 
Config::from_chain_spec::<T>(&spec), }; diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 8003236f2df..d836c4d96de 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "4.3.0" +version = "4.4.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" autotests = false diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index d8b522307c4..6384fc53cd6 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -324,6 +324,30 @@ fn main() { .takes_value(true) .global(true) ) + .arg( + Arg::with_name("genesis-state-url") + .long("genesis-state-url") + .value_name("URL") + .help( + "A URL of a beacon-API compatible server from which to download the genesis state. \ + Checkpoint sync server URLs can generally be used with this flag. \ + If not supplied, a default URL or the --checkpoint-sync-url may be used. \ + If the genesis state is already included in this binary then this value will be ignored.", + ) + .takes_value(true) + .global(true), + ) + .arg( + Arg::with_name("genesis-state-url-timeout") + .long("genesis-state-url-timeout") + .value_name("SECONDS") + .help( + "The timeout in seconds for the request to --genesis-state-url.", + ) + .takes_value(true) + .default_value("180") + .global(true), + ) .subcommand(beacon_node::cli_app()) .subcommand(boot_node::cli_app()) .subcommand(validator_client::cli_app()) diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index ecc936cbfb4..05b4358509b 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -11,6 +11,7 @@ use lighthouse_network::PeerId; use std::fs::File; use std::io::{Read, Write}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +use std::path::Path; use std::path::PathBuf; use std::process::Command; use std::str::FromStr; @@ -366,21 +367,6 @@ fn genesis_backfill_with_historic_flag() { .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); } -#[test] -fn always_prefer_builder_payload_flag() { - CommandLineTest::new() - .flag("always-prefer-builder-payload", None) - .run_with_zero_port() - .with_config(|config| assert!(config.always_prefer_builder_payload)); -} - -#[test] -fn no_flag_sets_always_prefer_builder_payload_to_false() { - CommandLineTest::new() - .run_with_zero_port() - .with_config(|config| assert!(!config.always_prefer_builder_payload)); -} - // Tests for Eth1 flags. 
#[test] fn dummy_eth1_flag() { @@ -735,6 +721,38 @@ fn builder_fallback_flags() { ); }, ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("always-prefer-builder-payload"), + None, + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .always_prefer_builder_payload, + true + ); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + None, + None, + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .always_prefer_builder_payload, + false + ); + }, + ); } #[test] @@ -1443,15 +1461,20 @@ fn disable_inbound_rate_limiter_flag() { #[test] fn http_allow_origin_flag() { CommandLineTest::new() - .flag("http-allow-origin", Some("127.0.0.99")) + .flag("http", None) + .flag("http-allow-origin", Some("http://127.0.0.99")) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.http_api.allow_origin, Some("127.0.0.99".to_string())); + assert_eq!( + config.http_api.allow_origin, + Some("http://127.0.0.99".to_string()) + ); }); } #[test] fn http_allow_origin_all_flag() { CommandLineTest::new() + .flag("http", None) .flag("http-allow-origin", Some("*")) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string()))); @@ -1459,6 +1482,7 @@ fn http_allow_origin_all_flag() { #[test] fn http_allow_sync_stalled_flag() { CommandLineTest::new() + .flag("http", None) .flag("http-allow-sync-stalled", None) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.allow_sync_stalled, true)); @@ -1466,32 +1490,29 @@ fn http_allow_sync_stalled_flag() { #[test] fn http_enable_beacon_processor() { CommandLineTest::new() + .flag("http", None) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); CommandLineTest::new() + .flag("http", None) .flag("http-enable-beacon-processor", Some("true")) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); CommandLineTest::new() + .flag("http", None) .flag("http-enable-beacon-processor", Some("false")) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, false)); } #[test] fn http_tls_flags() { - let dir = TempDir::new().expect("Unable to create temporary directory"); CommandLineTest::new() + .flag("http", None) .flag("http-enable-tls", None) - .flag( - "http-tls-cert", - dir.path().join("certificate.crt").as_os_str().to_str(), - ) - .flag( - "http-tls-key", - dir.path().join("private.key").as_os_str().to_str(), - ) + .flag("http-tls-cert", Some("tests/tls/cert.pem")) + .flag("http-tls-key", Some("tests/tls/key.rsa")) .run_with_zero_port() .with_config(|config| { let tls_config = config @@ -1499,14 +1520,15 @@ fn http_tls_flags() { .tls_config .as_ref() .expect("tls_config was empty."); - assert_eq!(tls_config.cert, dir.path().join("certificate.crt")); - assert_eq!(tls_config.key, dir.path().join("private.key")); + assert_eq!(tls_config.cert, Path::new("tests/tls/cert.pem")); + assert_eq!(tls_config.key, Path::new("tests/tls/key.rsa")); }); } #[test] fn http_spec_fork_default() { CommandLineTest::new() + .flag("http", None) .run_with_zero_port() .with_config(|config| assert_eq!(config.http_api.spec_fork_name, None)); } @@ -1514,6 +1536,7 @@ fn http_spec_fork_default() { #[test] fn http_spec_fork_override() { CommandLineTest::new() + .flag("http", None) .flag("http-spec-fork", Some("altair")) .run_with_zero_port() .with_config(|config| 
assert_eq!(config.http_api.spec_fork_name, Some(ForkName::Altair))); @@ -2349,3 +2372,62 @@ fn beacon_processor_zero_workers() { .flag("beacon-processor-max-workers", Some("0")) .run_with_zero_port(); } + +#[test] +fn http_sse_capacity_multiplier_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.http_api.sse_capacity_multiplier, 1)); +} + +#[test] +fn http_sse_capacity_multiplier_override() { + CommandLineTest::new() + .flag("http-sse-capacity-multiplier", Some("10")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.http_api.sse_capacity_multiplier, 10)); +} + +#[test] +fn http_duplicate_block_status_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.http_api.duplicate_block_status_code.as_u16(), 202) + }); +} + +#[test] +fn http_duplicate_block_status_override() { + CommandLineTest::new() + .flag("http-duplicate-block-status", Some("301")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.http_api.duplicate_block_status_code.as_u16(), 301) + }); +} + +#[test] +fn genesis_state_url_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.genesis_state_url, None); + assert_eq!(config.genesis_state_url_timeout, Duration::from_secs(180)); + }); +} + +#[test] +fn genesis_state_url_value() { + CommandLineTest::new() + .flag("genesis-state-url", Some("http://genesis.com")) + .flag("genesis-state-url-timeout", Some("42")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.genesis_state_url.as_deref(), + Some("http://genesis.com") + ); + assert_eq!(config.genesis_state_url_timeout, Duration::from_secs(42)); + }); +} diff --git a/lighthouse/tests/tls/cert.pem b/lighthouse/tests/tls/cert.pem new file mode 100644 index 00000000000..03af12ff819 --- /dev/null +++ b/lighthouse/tests/tls/cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEADCCAmigAwIBAgICAcgwDQYJKoZIhvcNAQELBQAwLDEqMCgGA1UEAwwhcG9u +eXRvd24gUlNBIGxldmVsIDIgaW50ZXJtZWRpYXRlMB4XDTE2MDgxMzE2MDcwNFoX +DTIyMDIwMzE2MDcwNFowGTEXMBUGA1UEAwwOdGVzdHNlcnZlci5jb20wggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpVhh1/FNP2qvWenbZSghari/UThwe +dynfnHG7gc3JmygkEdErWBO/CHzHgsx7biVE5b8sZYNEDKFojyoPHGWK2bQM/FTy +niJCgNCLdn6hUqqxLAml3cxGW77hAWu94THDGB1qFe+eFiAUnDmob8gNZtAzT6Ky +b/JGJdrEU0wj+Rd7wUb4kpLInNH/Jc+oz2ii2AjNbGOZXnRz7h7Kv3sO9vABByYe +LcCj3qnhejHMqVhbAT1MD6zQ2+YKBjE52MsQKU/xhUpu9KkUyLh0cxkh3zrFiKh4 +Vuvtc+n7aeOv2jJmOl1dr0XLlSHBlmoKqH6dCTSbddQLmlK7dms8vE01AgMBAAGj +gb4wgbswDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBsAwHQYDVR0OBBYEFMeUzGYV +bXwJNQVbY1+A8YXYZY8pMEIGA1UdIwQ7MDmAFJvEsUi7+D8vp8xcWvnEdVBGkpoW +oR6kHDAaMRgwFgYDVQQDDA9wb255dG93biBSU0EgQ0GCAXswOwYDVR0RBDQwMoIO +dGVzdHNlcnZlci5jb22CFXNlY29uZC50ZXN0c2VydmVyLmNvbYIJbG9jYWxob3N0 +MA0GCSqGSIb3DQEBCwUAA4IBgQBsk5ivAaRAcNgjc7LEiWXFkMg703AqDDNx7kB1 +RDgLalLvrjOfOp2jsDfST7N1tKLBSQ9bMw9X4Jve+j7XXRUthcwuoYTeeo+Cy0/T +1Q78ctoX74E2nB958zwmtRykGrgE/6JAJDwGcgpY9kBPycGxTlCN926uGxHsDwVs +98cL6ZXptMLTR6T2XP36dAJZuOICSqmCSbFR8knc/gjUO36rXTxhwci8iDbmEVaf +BHpgBXGU5+SQ+QM++v6bHGf4LNQC5NZ4e4xvGax8ioYu/BRsB/T3Lx+RlItz4zdU +XuxCNcm3nhQV2ZHquRdbSdoyIxV5kJXel4wCmOhWIq7A2OBKdu5fQzIAzzLi65EN +RPAKsKB4h7hGgvciZQ7dsMrlGw0DLdJ6UrFyiR5Io7dXYT/+JP91lP5xsl6Lhg9O +FgALt7GSYRm2cZdgi9pO9rRr83Br1VjQT1vHz6yoZMXSqc4A2zcN2a2ZVq//rHvc +FZygs8miAhWPzqnpmgTj1cPiU1M= +-----END CERTIFICATE----- diff --git a/lighthouse/tests/tls/key.rsa b/lighthouse/tests/tls/key.rsa new file mode 100644 index 00000000000..b13bf5d07f9 --- 
/dev/null +++ b/lighthouse/tests/tls/key.rsa @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAqVYYdfxTT9qr1np22UoIWq4v1E4cHncp35xxu4HNyZsoJBHR +K1gTvwh8x4LMe24lROW/LGWDRAyhaI8qDxxlitm0DPxU8p4iQoDQi3Z+oVKqsSwJ +pd3MRlu+4QFrveExwxgdahXvnhYgFJw5qG/IDWbQM0+ism/yRiXaxFNMI/kXe8FG ++JKSyJzR/yXPqM9ootgIzWxjmV50c+4eyr97DvbwAQcmHi3Ao96p4XoxzKlYWwE9 +TA+s0NvmCgYxOdjLEClP8YVKbvSpFMi4dHMZId86xYioeFbr7XPp+2njr9oyZjpd +Xa9Fy5UhwZZqCqh+nQk0m3XUC5pSu3ZrPLxNNQIDAQABAoIBAFKtZJgGsK6md4vq +kyiYSufrcBLaaEQ/rkQtYCJKyC0NAlZKFLRy9oEpJbNLm4cQSkYPXn3Qunx5Jj2k +2MYz+SgIDy7f7KHgr52Ew020dzNQ52JFvBgt6NTZaqL1TKOS1fcJSSNIvouTBerK +NCSXHzfb4P+MfEVe/w1c4ilE+kH9SzdEo2jK/sRbzHIY8TX0JbmQ4SCLLayr22YG +usIxtIYcWt3MMP/G2luRnYzzBCje5MXdpAhlHLi4TB6x4h5PmBKYc57uOVNngKLd +YyrQKcszW4Nx5v0a4HG3A5EtUXNCco1+5asXOg2lYphQYVh2R+1wgu5WiDjDVu+6 +EYgjFSkCgYEA0NBk6FDoxE/4L/4iJ4zIhu9BptN8Je/uS5c6wRejNC/VqQyw7SHb +hRFNrXPvq5Y+2bI/DxtdzZLKAMXOMjDjj0XEgfOIn2aveOo3uE7zf1i+njxwQhPu +uSYA9AlBZiKGr2PCYSDPnViHOspVJjxRuAgyWM1Qf+CTC0D95aj0oz8CgYEAz5n4 +Cb3/WfUHxMJLljJ7PlVmlQpF5Hk3AOR9+vtqTtdxRjuxW6DH2uAHBDdC3OgppUN4 +CFj55kzc2HUuiHtmPtx8mK6G+otT7Lww+nLSFL4PvZ6CYxqcio5MPnoYd+pCxrXY +JFo2W7e4FkBOxb5PF5So5plg+d0z/QiA7aFP1osCgYEAtgi1rwC5qkm8prn4tFm6 +hkcVCIXc+IWNS0Bu693bXKdGr7RsmIynff1zpf4ntYGpEMaeymClCY0ppDrMYlzU +RBYiFNdlBvDRj6s/H+FTzHRk2DT/99rAhY9nzVY0OQFoQIXK8jlURGrkmI/CYy66 +XqBmo5t4zcHM7kaeEBOWEKkCgYAYnO6VaRtPNQfYwhhoFFAcUc+5t+AVeHGW/4AY +M5qlAlIBu64JaQSI5KqwS0T4H+ZgG6Gti68FKPO+DhaYQ9kZdtam23pRVhd7J8y+ +xMI3h1kiaBqZWVxZ6QkNFzizbui/2mtn0/JB6YQ/zxwHwcpqx0tHG8Qtm5ZAV7PB +eLCYhQKBgQDALJxU/6hMTdytEU5CLOBSMby45YD/RrfQrl2gl/vA0etPrto4RkVq +UrkDO/9W4mZORClN3knxEFSTlYi8YOboxdlynpFfhcs82wFChs+Ydp1eEsVHAqtu +T+uzn0sroycBiBfVB949LExnzGDFUkhG0i2c2InarQYLTsIyHCIDEA== +-----END RSA PRIVATE KEY----- diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 9bcfe2a1d50..062b7e7786a 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -260,6 +260,7 @@ fn http_flag() { fn http_address_flag() { let addr = "127.0.0.99".parse::<IpAddr>().unwrap(); CommandLineTest::new() + .flag("http", None) .flag("http-address", Some("127.0.0.99")) .flag("unencrypted-http-transport", None) .run() @@ -269,6 +270,7 @@ fn http_address_flag() { fn http_address_ipv6_flag() { let addr = "::1".parse::<IpAddr>().unwrap(); CommandLineTest::new() + .flag("http", None) .flag("http-address", Some("::1")) .flag("unencrypted-http-transport", None) .run() @@ -279,6 +281,7 @@ fn http_address_ipv6_flag() { fn missing_unencrypted_http_transport_flag() { let addr = "127.0.0.99".parse::<IpAddr>().unwrap(); CommandLineTest::new() + .flag("http", None) .flag("http-address", Some("127.0.0.99")) .run() .with_config(|config| assert_eq!(config.http_api.listen_addr, addr)); @@ -286,6 +289,7 @@ fn missing_unencrypted_http_transport_flag() { #[test] fn http_port_flag() { CommandLineTest::new() + .flag("http", None) .flag("http-port", Some("9090")) .run() .with_config(|config| assert_eq!(config.http_api.listen_port, 9090)); @@ -293,6 +297,7 @@ fn http_port_flag() { #[test] fn http_allow_origin_flag() { CommandLineTest::new() + .flag("http", None) .flag("http-allow-origin", Some("http://localhost:9009")) .run() .with_config(|config| { @@ -305,6 +310,7 @@ fn http_allow_origin_flag() { #[test] fn http_allow_origin_all_flag() { CommandLineTest::new() + .flag("http", None) .flag("http-allow-origin", Some("*")) .run() .with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string()))); @@ -312,12 +318,14 @@ fn 
http_allow_origin_all_flag() { #[test] fn http_allow_keystore_export_default() { CommandLineTest::new() + .flag("http", None) .run() .with_config(|config| assert!(!config.http_api.allow_keystore_export)); } #[test] fn http_allow_keystore_export_present() { CommandLineTest::new() + .flag("http", None) .flag("http-allow-keystore-export", None) .run() .with_config(|config| assert!(config.http_api.allow_keystore_export)); @@ -325,12 +333,14 @@ fn http_allow_keystore_export_present() { #[test] fn http_store_keystore_passwords_in_secrets_dir_default() { CommandLineTest::new() + .flag("http", None) .run() .with_config(|config| assert!(!config.http_api.store_passwords_in_secrets_dir)); } #[test] fn http_store_keystore_passwords_in_secrets_dir_present() { CommandLineTest::new() + .flag("http", None) .flag("http-store-passwords-in-secrets-dir", None) .run() .with_config(|config| assert!(config.http_api.store_passwords_in_secrets_dir)); @@ -348,6 +358,7 @@ fn metrics_flag() { fn metrics_address_flag() { let addr = "127.0.0.99".parse::<IpAddr>().unwrap(); CommandLineTest::new() + .flag("metrics", None) .flag("metrics-address", Some("127.0.0.99")) .run() .with_config(|config| assert_eq!(config.http_metrics.listen_addr, addr)); @@ -356,6 +367,7 @@ fn metrics_address_flag() { fn metrics_address_ipv6_flag() { let addr = "::1".parse::<IpAddr>().unwrap(); CommandLineTest::new() + .flag("metrics", None) .flag("metrics-address", Some("::1")) .run() .with_config(|config| assert_eq!(config.http_metrics.listen_addr, addr)); @@ -363,6 +375,7 @@ fn metrics_address_ipv6_flag() { #[test] fn metrics_port_flag() { CommandLineTest::new() + .flag("metrics", None) .flag("metrics-port", Some("9090")) .run() .with_config(|config| assert_eq!(config.http_metrics.listen_port, 9090)); @@ -370,6 +383,7 @@ fn metrics_port_flag() { #[test] fn metrics_allow_origin_flag() { CommandLineTest::new() + .flag("metrics", None) .flag("metrics-allow-origin", Some("http://localhost:9009")) .run() .with_config(|config| { @@ -382,6 +396,7 @@ fn metrics_allow_origin_flag() { #[test] fn metrics_allow_origin_all_flag() { CommandLineTest::new() + .flag("metrics", None) .flag("metrics-allow-origin", Some("*")) .run() .with_config(|config| assert_eq!(config.http_metrics.allow_origin, Some("*".to_string()))); diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar deleted file mode 100644 index c790e248dfe..00000000000 --- a/testing/antithesis/Dockerfile.libvoidstar +++ /dev/null @@ -1,25 +0,0 @@ -FROM rust:1.68.2-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev -COPY . lighthouse - -# Build lighthouse directly with a cargo build command, bypassing the Makefile. 
-RUN cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Cpasses=sancov-module -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse -# build lcli binary directly with cargo install command, bypassing the makefile -RUN cargo install --path /lighthouse/lcli --force --locked - -FROM ubuntu:latest -RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ - libssl-dev \ - ca-certificates \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# create and move the libvoidstar file -RUN mkdir libvoidstar -COPY --from=builder /lighthouse/testing/antithesis/libvoidstar/libvoidstar.so /usr/lib/libvoidstar.so - -# set the env variable to avoid having to always set it -ENV LD_LIBRARY_PATH=/usr/lib -# move the lighthouse binary and lcli binary -COPY --from=builder /lighthouse/target/x86_64-unknown-linux-gnu/release/lighthouse /usr/local/bin/lighthouse -COPY --from=builder /lighthouse/target/release/lcli /usr/local/bin/lcli \ No newline at end of file diff --git a/testing/antithesis/libvoidstar/libvoidstar.so b/testing/antithesis/libvoidstar/libvoidstar.so deleted file mode 100644 index 0f8a0f23c3f..00000000000 Binary files a/testing/antithesis/libvoidstar/libvoidstar.so and /dev/null differ diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 9627d2cde03..c4f288a8aa6 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -7,7 +7,7 @@ use beacon_chain::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, CachedHead, NotifyExecutionLayer, + BeaconChainTypes, CachedHead, ChainConfig, NotifyExecutionLayer, }; use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1}; use serde::Deserialize; @@ -303,6 +303,10 @@ impl<E: EthSpec> Tester<E> { let harness = BeaconChainHarness::builder(E::default()) .spec(spec.clone()) .keypairs(vec![]) + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) .genesis_state_ephemeral_store(case.anchor_state.clone()) .mock_execution_layer() .recalculate_fork_times_with_genesis(0) diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 394f8558fae..3cd8205eb65 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -115,6 +115,9 @@ pub fn testing_client_config() -> ClientConfig { genesis_time: now, }; + // Simulator tests expect historic states to be available for post-run checks. + client_config.chain.reconstruct_historic_states = true; + // Specify a constant count of beacon processor workers. Having this number // too low can cause annoying HTTP timeouts, especially on Github runners // with 2 logical CPUs. 
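Both test fixtures above (the fork-choice EF tests and node_test_rig's testing_client_config) turn on historic-state reconstruction the same way: override one ChainConfig field and keep every other default via struct-update syntax. A minimal sketch of that pattern, using the beacon_chain::ChainConfig import that already appears elsewhere in this diff (the helper name is made up for illustration):

use beacon_chain::ChainConfig;

/// A ChainConfig that keeps all defaults except historic-state reconstruction,
/// mirroring the overrides added to the test fixtures above.
fn test_chain_config() -> ChainConfig {
    ChainConfig {
        reconstruct_historic_states: true,
        ..ChainConfig::default()
    }
}

The same value can equally be set after construction, as testing_client_config does with client_config.chain.reconstruct_historic_states = true.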
diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index c0fbf667236..faad76a19c3 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -26,6 +26,6 @@ serde_derive = "1.0.116" serde_yaml = "0.8.13" eth2_network_config = { path = "../../common/eth2_network_config" } serde_json = "1.0.58" -zip = "0.5.13" +zip = "0.6" lazy_static = "1.4.0" -parking_lot = "0.12.0" \ No newline at end of file +parking_lot = "0.12.0" diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index dd17ae23b15..463de0c8b3a 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -51,7 +51,7 @@ mod tests { /// If we are unable to reach the Web3Signer HTTP API within this timeout, then we will /// assume it failed to start. - const UPCHECK_TIMEOUT: Duration = Duration::from_secs(20); + const UPCHECK_TIMEOUT: Duration = Duration::from_secs(30); /// Set to `false` to send the Web3Signer logs to the console during tests. Logs are useful when /// debugging. diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 2a09455b6ff..094b85bf810 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -10,7 +10,8 @@ use crate::{ validator_store::{Error as ValidatorStoreError, ValidatorStore}, }; use environment::RuntimeContext; -use eth2::BeaconNodeHttpClient; +use eth2::{BeaconNodeHttpClient, StatusCode}; +use slog::Logger; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::fmt::Debug; @@ -593,12 +594,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { beacon_node .post_beacon_blocks(&signed_block) .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })? + .or_else(|e| handle_block_post_error(e, slot, log))? } BlockType::Blinded => { let _post_timer = metrics::start_timer_vec( @@ -608,12 +604,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { beacon_node .post_beacon_blinded_blocks(&signed_block) .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })? + .or_else(|e| handle_block_post_error(e, slot, log))? } } Ok::<_, BlockError>(()) @@ -634,3 +625,29 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { Ok(()) } } + +fn handle_block_post_error(err: eth2::Error, slot: Slot, log: &Logger) -> Result<(), BlockError> { + // Handle non-200 success codes.
+ if let Some(status) = err.status() { + if status == StatusCode::ACCEPTED { + info!( + log, + "Block is already known to BN or might be invalid"; + "slot" => slot, + "status_code" => status.as_u16(), + ); + return Ok(()); + } else if status.is_success() { + debug!( + log, + "Block published with non-standard success code"; + "slot" => slot, + "status_code" => status.as_u16(), + ); + return Ok(()); + } + } + Err(BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {err:?}", + ))) +} diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs index 1e66d947a21..cf63d8ac625 100644 --- a/validator_client/src/duties_service/sync.rs +++ b/validator_client/src/duties_service/sync.rs @@ -2,8 +2,10 @@ use crate::beacon_node_fallback::{OfflineOnFailure, RequireSynced}; use crate::{ doppelganger_service::DoppelgangerStatus, duties_service::{DutiesService, Error}, + http_metrics::metrics, validator_store::Error as ValidatorStoreError, }; + use futures::future::join_all; use itertools::Itertools; use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -426,6 +428,10 @@ pub async fn poll_sync_committee_duties_for_period<T: SlotClock + 'static, E: Et RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::VALIDATOR_DUTIES_SYNC_HTTP_POST], + ); beacon_node .post_validator_duties_sync(period_start_epoch, local_indices) .await diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 8a52a4d35e9..52b52126bd6 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -29,6 +29,7 @@ pub const UPDATE_ATTESTERS_FETCH: &str = "update_attesters_fetch"; pub const UPDATE_ATTESTERS_STORE: &str = "update_attesters_store"; pub const ATTESTER_DUTIES_HTTP_POST: &str = "attester_duties_http_post"; pub const PROPOSER_DUTIES_HTTP_GET: &str = "proposer_duties_http_get"; +pub const VALIDATOR_DUTIES_SYNC_HTTP_POST: &str = "validator_duties_sync_http_post"; pub const VALIDATOR_ID_HTTP_GET: &str = "validator_id_http_get"; pub const SUBSCRIPTIONS_HTTP_POST: &str = "subscriptions_http_post"; pub const UPDATE_PROPOSERS: &str = "update_proposers"; diff --git a/watch/Cargo.toml b/watch/Cargo.toml index 23e2c566dc1..3dc3b7c1905 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -41,6 +41,8 @@ tokio-postgres = "0.7.5" http_api = { path = "../beacon_node/http_api" } beacon_chain = { path = "../beacon_node/beacon_chain" } network = { path = "../beacon_node/network" } -testcontainers = "0.14.0" +# TODO: update to 0.15 when released: https://github.com/testcontainers/testcontainers-rs/issues/497 +testcontainers = { git = "https://github.com/testcontainers/testcontainers-rs/", rev = "0f2c9851" } unused_port = { path = "../common/unused_port" } task_executor = { path = "../common/task_executor" } +logging = { path = "../common/logging" } diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs index af1cde26b7a..dc0b8af6e34 100644 --- a/watch/tests/tests.rs +++ b/watch/tests/tests.rs @@ -1,17 +1,27 @@ #![recursion_limit = "256"] #![cfg(unix)] -use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, +use beacon_chain::{ + test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, + ChainConfig, }; use eth2::{types::BlockId, 
BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use http_api::test_utils::{create_api_server, ApiServer}; +use log::error; +use logging::test_logger; use network::NetworkReceivers; - use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; +use std::collections::HashMap; +use std::env; +use std::net::SocketAddr; +use std::time::Duration; +use testcontainers::{clients::Cli, core::WaitFor, Image, RunnableImage}; use tokio::sync::oneshot; +use tokio::{runtime, task::JoinHandle}; +use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls}; use types::{Hash256, MainnetEthSpec, Slot}; +use unused_port::unused_tcp4_port; use url::Url; use watch::{ client::WatchHttpClient, @@ -21,15 +31,40 @@ use watch::{ updater::{handler::*, run_updater, Config as UpdaterConfig, WatchSpec}, }; -use log::error; -use std::env; -use std::net::SocketAddr; -use std::time::Duration; -use tokio::{runtime, task::JoinHandle}; -use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls}; -use unused_port::unused_tcp4_port; +#[derive(Debug)] +pub struct Postgres(HashMap<String, String>); + +impl Default for Postgres { + fn default() -> Self { + let mut env_vars = HashMap::new(); + env_vars.insert("POSTGRES_DB".to_owned(), "postgres".to_owned()); + env_vars.insert("POSTGRES_HOST_AUTH_METHOD".into(), "trust".into()); + + Self(env_vars) + } +} + +impl Image for Postgres { + type Args = (); + + fn name(&self) -> String { + "postgres".to_owned() + } + + fn tag(&self) -> String { + "11-alpine".to_owned() + } -use testcontainers::{clients::Cli, images::postgres::Postgres, RunnableImage}; + fn ready_conditions(&self) -> Vec<WaitFor> { + vec![WaitFor::message_on_stderr( + "database system is ready to accept connections", + )] + } + + fn env_vars(&self) -> Box<dyn Iterator<Item = (&String, &String)> + '_> { + Box::new(self.0.iter()) + } +} type E = MainnetEthSpec; @@ -91,6 +126,11 @@ impl TesterBuilder { pub async fn new() -> TesterBuilder { let harness = BeaconChainHarness::builder(E::default()) .default_spec() + .chain_config(ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }) + .logger(test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .build();