diff --git a/.dockerignore b/.dockerignore index 42e8a818a418..d51b5556178f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -46,11 +46,11 @@ packages/beacon-node/mainnet_pubkeys.csv # Autogenerated docs packages/**/docs packages/**/typedocs -docs/packages -docs/contributing.md -docs/assets -docs/reference/cli.md -/site +docs/pages/**/*-cli.md +docs/pages/assets +docs/pages/api/api-reference.md +docs/pages/contribution/getting-started.md +docs/site # Lodestar artifacts .lodestar diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index a19def8e72de..3ff9018372c9 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -45,15 +45,17 @@ jobs: uses: actions/setup-python@v1 - name: Install dependencies + working-directory: docs run: | python -m pip install --upgrade pip - pip install -r docs/requirements.txt + pip install -r requirements.txt - name: Build docs - run: mkdocs build --site-dir site -v --clean + working-directory: docs + run: mkdocs build --verbose --clean --site-dir site - name: Deploy uses: peaceiris/actions-gh-pages@v3 with: github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: ./site + publish_dir: ./docs/site diff --git a/.gitignore b/.gitignore index ce1ec6074979..a85d4af7794e 100644 --- a/.gitignore +++ b/.gitignore @@ -40,11 +40,14 @@ packages/api/oapi-schemas # Autogenerated docs packages/**/docs packages/**/typedocs -docs/assets -docs/packages -docs/reference -docs/contributing.md -/site +docs/pages/**/*-cli.md +docs/pages/assets +docs/pages/images +docs/pages/lightclient-prover/lightclient.md +docs/pages/lightclient-prover/prover.md +docs/pages/api/api-reference.md +docs/pages/contribution/getting-started.md +docs/site # Testnet artifacts .lodestar diff --git a/.wordlist.txt b/.wordlist.txt index b7cff203f57c..42510b175a07 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -1,14 +1,19 @@ APIs +Andreas +Antonopoulos AssemblyScript BLS BeaconNode Besu +Buterin CLA CLI CTRL +Casper Chai ChainSafe 
Customizations +DPoS Discv DockerHub Dockerized @@ -19,22 +24,33 @@ ENR ENRs ESLint ETH +Edgington Erigon EthStaker +EtherScan Ethereum +EthereumJS +FINDNODE FX Flamegraph Flamegraphs +Geth Github Gossipsub Grafana HackMD +Homebrew +IPFS IPv Infura JSON +JSObjects JWT +KDE LGPL LGPLv +LMD +LPoS LTS Lerna MEV @@ -45,10 +61,12 @@ NVM Nethermind NodeJS NodeSource +OSI PR PRs Plaintext PoS +Prysm Quickstart RPC SHA @@ -57,64 +75,102 @@ SSZ Stakehouse TOC TTD +Teku TypeScript UI UID +UPnP UTF VM Vitalik Wagyu api async +backfill beaconcha +blockchain bootnode bootnodes chainConfig chainsafe +chiado cli cmd +codebase config configs const constantish coreutils cors +cryptocurrency cryptographic dApp dApps +ddos decrypt deserialization +dev devnet devnets +devtools +eg +enodes enum +env envs +ephemery flamegraph flamegraphs +gnosis goerli +heapdump +heaptrack +holesky interop +js keypair keystore keystores +libp lightclient linter +lldb +llnode lockfile mainnet +malloc mdns merkle merkleization monorepo +multiaddr +multifork namespace namespaced namespaces nodemodule +orchestrator +osx overriden params +pid plaintext +pre +premined produceBlockV +protolambda prover +repo +repos req reqresp +responder +ropsten runtime +scalability +secp +sepolia sharding ssz stakers @@ -131,4 +187,6 @@ utils validator validators wip +xcode yaml +yamux diff --git a/docs/images/heap-dumps/devtools.png b/docs/images/heap-dumps/devtools.png new file mode 100644 index 000000000000..9bdef24f7e20 Binary files /dev/null and b/docs/images/heap-dumps/devtools.png differ diff --git a/docs/images/heap-dumps/load-profile.png b/docs/images/heap-dumps/load-profile.png new file mode 100644 index 000000000000..c6e04d0922f4 Binary files /dev/null and b/docs/images/heap-dumps/load-profile.png differ diff --git a/docs/images/heap-dumps/memory-tab.png b/docs/images/heap-dumps/memory-tab.png new file mode 100644 index 000000000000..857309571971 Binary files /dev/null and b/docs/images/heap-dumps/memory-tab.png 
differ diff --git a/docs/install/docker.md b/docs/install/docker.md deleted file mode 100644 index 40468e7ad7aa..000000000000 --- a/docs/install/docker.md +++ /dev/null @@ -1,29 +0,0 @@ -# Install with Docker - -The [`chainsafe/lodestar`](https://hub.docker.com/r/chainsafe/lodestar) Docker Hub repository is maintained actively. It contains the `lodestar` CLI preinstalled. - - -!!! info - The Docker Hub image tagged as `chainsafe/lodestar:next` is run on CI every commit on our `unstable` branch. - For `stable` releases, the image is tagged as `chainsafe/lodestar:latest`. - - -Ensure you have Docker installed by issuing the command: - -```bash -docker -v -``` - -It should return a non error message such as `Docker version xxxx, build xxxx`. - -Pull, run the image and Lodestar should now be ready to use - -```bash -docker pull chainsafe/lodestar -docker run chainsafe/lodestar --help -``` - - -!!! info - Docker is the recommended setup for Lodestar. Use our [Lodestar Quickstart scripts](https://github.com/ChainSafe/lodestar-quickstart) with Docker for detailed instructions. - diff --git a/docs/install/npm.md b/docs/install/npm.md deleted file mode 100644 index 805141d01523..000000000000 --- a/docs/install/npm.md +++ /dev/null @@ -1,6 +0,0 @@ -# Install from NPM [not recommended] - - -!!! danger - For mainnet (production) usage, we only recommend installing with docker due to [NPM supply chain attacks](https://hackaday.com/2021/10/22/supply-chain-attack-npm-library-used-by-facebook-and-others-was-compromised/). Until a [safer installation method has been found](https://github.com/ChainSafe/lodestar/issues/3596), do not use this install method except for experimental purposes only. 
- diff --git a/docs/install/source.md b/docs/install/source.md deleted file mode 100644 index 4fba0a625111..000000000000 --- a/docs/install/source.md +++ /dev/null @@ -1,54 +0,0 @@ -# Install from source - -## Prerequisites - -Make sure to have [Yarn installed](https://classic.yarnpkg.com/en/docs/install). It is also recommended to [install NVM (Node Version Manager)](https://github.com/nvm-sh/nvm) and use the LTS version (currently v20) of [NodeJS](https://nodejs.org/en/). - - -!!! info - NodeJS versions older than the current LTS are not supported by Lodestar. We recommend running the latest Node LTS. - It is important to make sure the NodeJS version is not changed after reboot by setting a default `nvm alias default && nvm use default`. - -!!! note - Node Version Manager (NVM) will only install NodeJS for use with the active user. If you intend on setting up Lodestar to run under another user, we recommend using [NodeSource's source for NodeJS](https://github.com/nodesource/distributions/blob/master/README.md#installation-instructions) so you can install NodeJS globally. - - -## Clone repository - -Clone the repository locally and build from the stable release branch. - -```bash -git clone -b stable https://github.com/chainsafe/lodestar.git -``` - -Switch to created directory. - -```bash -cd lodestar -``` - -## Install packages - -Install across all packages. Lodestar follows a [monorepo](https://github.com/lerna/lerna) structure, so all commands below must be run in the project root. - -```bash -yarn install -``` - -## Build source code - -Build across all packages. - -```bash -yarn run build -``` - -## Lodestar CLI - -Lodestar should now be ready for use. - -```bash -./lodestar --help -``` - -See [Command Line Reference](./../reference/cli.md) for further information. 
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml new file mode 100644 index 000000000000..056325e19104 --- /dev/null +++ b/docs/mkdocs.yml @@ -0,0 +1,143 @@ +site_name: Lodestar Documentation +site_description: Lodestar Documentation - Typescript Ethereum Consensus client +site_url: https://chainsafe.github.io/lodestar + +repo_name: chainsafe/lodestar +repo_url: https://github.com/chainsafe/lodestar + +docs_dir: pages + +# Configuration +theme: + name: material + logo: assets/lodestar_icon_300.png + favicon: assets/round-icon.ico + nav_style: dark + palette: + - scheme: preference + media: "(prefers-color-scheme: light)" + primary: black + accent: deep purple + toggle: + icon: material/weather-night + name: Switch to dark mode + - scheme: slate + media: "(prefers-color-scheme: dark)" + primary: black + accent: deep purple + toggle: + icon: material/weather-sunny + name: Switch to light mode + +plugins: + - search + - mermaid2: + version: 8.6.4 + arguments: + theme: | + ^(window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches) ? 
'dark' : 'light' + +markdown_extensions: + - meta + - codehilite: + guess_lang: false + - admonition + - toc: + permalink: true + - pymdownx.superfences: + # make exceptions to highlighting of code (for mermaid): + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:mermaid2.fence_mermaid + - pymdownx.emoji: + emoji_index: !!python/name:material.extensions.emoji.twemoji + emoji_generator: !!python/name:material.extensions.emoji.to_svg + +extra_css: + - stylesheets/extras.css + +# Socials +extra: + social: + - icon: fontawesome/brands/github-alt + link: https://github.com/ChainSafe/lodestar + - icon: fontawesome/brands/twitter + link: https://twitter.com/lodestar_eth + - icon: fontawesome/brands/discord + link: https://discord.gg/yjyvFRP + - icon: fontawesome/brands/medium + link: https://blog.chainsafe.io + +# Customize left navigation menu +nav: + - Home: index.md + - Introduction: introduction.md + - Getting Started: + - Quick Start: getting-started/quick-start.md + - Installation: getting-started/installation.md + # - Creating a JWT: getting-started/creating-a-jwt.md + - Starting a Node: getting-started/starting-a-node.md + - Data Retention: data-retention.md + - Beacon Node: + - Configuration: beacon-management/beacon-cli.md + - Networking: beacon-management/networking.md + - MEV and Builder Integration: beacon-management/mev-and-builder-integration.md + - Syncing: beacon-management/syncing.md + - Validator: + - Configuration: validator-management/validator-cli.md + # - Key Management: validator-management/key-management.md + # - Withdrawals: validator-management/withdrawals.md + # - Multiple and Fall-Back Validation: validator-management/multiple-and-fallback-validation.md + - Bootnode: + - Configuration: bootnode/bootnode-cli.md + - Light Client and Prover: + - Light Client: lightclient-prover/lightclient.md + - Light Client Configuration: lightclient-prover/lightclient-cli.md + - Prover: lightclient-prover/prover.md + # - Prover 
Configuration: lightclient-prover/prover-cli.md + - Logging and Metrics: + - Prometheus and Grafana: logging-and-metrics/prometheus-grafana.md + - Client Monitoring: logging-and-metrics/client-monitoring.md + # - Log Management: logging-and-metrics/log-management.md + # - Metrics Management: logging-and-metrics/metrics-management.md + # - Dashboards: logging-and-metrics/dashboards.md + # - Api: + # - Using the API: api/using-the-api.md + # - API Reference: api/api-reference.md // Auto-generate from API endpoint + # - Troubleshooting: + # - Installation Issues: troubleshooting/installation-issues.md + # - Syncing Issues: troubleshooting/syncing-issues.md + # - Validation Issues: troubleshooting/validation-issues.md + # - Execution Layer Issues: troubleshooting/execution-layer-issues.md + - Supporting Libraries: supporting-libraries/index.md + # - libp2p: supporting-libraries/libp2p.md + # - "@chainsafe/ssz": supporting-libraries/ssz.md + # - "@chainsafe/blst": supporting-libraries/blst.md + # - "@chainsafe/libp2p-gossipsub": supporting-libraries/gossipsub.md + - Contributing: + - Getting Started: contribution/getting-started.md + # - Bug Reports: contribution/bug-reports.md + - Dependency Graph: contribution/depgraph.md + # - Repo: contribution/repo.md + - Testing: + - Overview: contribution/testing/overview.md + # - Unit Tests: contribution/testing/unit-tests.md + # - Integration Tests: contribution/testing/integration-tests.md + # - E2E Tests: contribution/testing/e2e-tests.md + - Simulation Tests: contribution/testing/simulation-tests.md + # - Spec Tests: contribution/testing/spec-tests.md + # - Performance Tests: contribution/testing/performance-tests.md + # - PR Submission: contribution/pr-submission.md + - Tools: + # - Debugging: tools/debugging.md + # - perf: tools/perf.md + - Flame Graphs: tools/flamegraphs.md + - Heap Dumps: tools/heap-dumps.md + - Core Dumps: tools/core-dumps.md + - Advanced Topics: + # - Migrating from Other Clients: 
advanced-topics/migrating-from-other-clients.md + # - Block Exploration: advanced-topics/block-exploration.md + # - Slashing Protection: advanced-topics/slashing-protection.md + - Setting Up a Testnet: advanced-topics/setting-up-a-testnet.md + # - Doppelganger Detection: advanced-topics/doppelganger-detection.md \ No newline at end of file diff --git a/docs/pages/advanced-topics/block-exploration.md b/docs/pages/advanced-topics/block-exploration.md new file mode 100644 index 000000000000..05ee657bb607 --- /dev/null +++ b/docs/pages/advanced-topics/block-exploration.md @@ -0,0 +1 @@ +# Block Exploration diff --git a/docs/pages/advanced-topics/doppelganger-detection.md b/docs/pages/advanced-topics/doppelganger-detection.md new file mode 100644 index 000000000000..165590bda55a --- /dev/null +++ b/docs/pages/advanced-topics/doppelganger-detection.md @@ -0,0 +1 @@ +# Doppelganger Detection diff --git a/docs/pages/advanced-topics/migrating-from-other-clients.md b/docs/pages/advanced-topics/migrating-from-other-clients.md new file mode 100644 index 000000000000..302314a27b23 --- /dev/null +++ b/docs/pages/advanced-topics/migrating-from-other-clients.md @@ -0,0 +1 @@ +# Migration From Other Clients diff --git a/docs/usage/local.md b/docs/pages/advanced-topics/setting-up-a-testnet.md similarity index 99% rename from docs/usage/local.md rename to docs/pages/advanced-topics/setting-up-a-testnet.md index 51465d68c92b..a6350b3a03de 100644 --- a/docs/usage/local.md +++ b/docs/pages/advanced-topics/setting-up-a-testnet.md @@ -1,4 +1,4 @@ -# Local testnet +# Setting-Up a Testnet To quickly test and run Lodestar we recommend starting a local testnet. 
We recommend a simple configuration of two beacon nodes with multiple validators diff --git a/docs/pages/advanced-topics/slashing-protection.md b/docs/pages/advanced-topics/slashing-protection.md new file mode 100644 index 000000000000..527cbb06040a --- /dev/null +++ b/docs/pages/advanced-topics/slashing-protection.md @@ -0,0 +1 @@ +# Slashing Protection diff --git a/docs/pages/api/using-the-api.md b/docs/pages/api/using-the-api.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/usage/mev-integration.md b/docs/pages/beacon-management/mev-and-builder-integration.md similarity index 100% rename from docs/usage/mev-integration.md rename to docs/pages/beacon-management/mev-and-builder-integration.md diff --git a/docs/pages/beacon-management/networking.md b/docs/pages/beacon-management/networking.md new file mode 100644 index 000000000000..9305b683ae47 --- /dev/null +++ b/docs/pages/beacon-management/networking.md @@ -0,0 +1,91 @@ +# Networking + +Starting up Lodestar will automatically connect it to peers on the network. Peers are found through the discv5 protocol and once peers are established, communications happen via gossipsub over libp2p. While not necessary, having a basic understanding of how the various protocols and transport work will help with debugging and troubleshooting as some of the more common challenges come up with [firewalls](#firewall-management) and [NAT traversal](#nat-traversal).
+ +## Networking Flags + +Some of the important Lodestar flags related to networking are: + +- [`--discv5`](./configuration.md#--discv5) +- [`--listenAddress`](./configuration.md#--listenAddress) +- [`--port`](./configuration.md#--port) +- [`--discoveryPort`](./configuration.md#--discoveryPort) +- [`--listenAddress6`](./configuration.md#--listenAddress6) +- [`--port6`](./configuration.md#--port6) +- [`--discoveryPort6`](./configuration.md#--discoveryPort6) +- [`--bootnodes`](./configuration.md#--bootnodes) +- [`--deterministicLongLivedAttnets`](./configuration.md#--deterministicLongLivedAttnets) +- [`--subscribeAllSubnets`](./configuration.md#--subscribeAllSubnets) +- [`--disablePeerScoring`](./configuration.md#--disablePeerScoring) +- [`--enr.ip`](./configuration.md#--enr.ip) +- [`--enr.tcp`](./configuration.md#--enr.tcp) +- [`--enr.udp`](./configuration.md#--enr.udp) +- [`--enr.ip6`](./configuration.md#--enr.ip6) +- [`--enr.tcp6`](./configuration.md#--enr.tcp6) +- [`--enr.udp6`](./configuration.md#--enr.udp6) +- [`--nat`](./configuration.md#--nat) +- [`--private`](./configuration.md#`--private`) + +## Peer Discovery (Discv5) + +In Ethereum, discv5 plays a pivotal role in the peer discovery process, facilitating nodes to find and locate each other in order to form the peer-to-peer network​. The process begins with an interaction between new nodes and bootnodes at start-up. Bootnodes are nodes with hard-coded addresses, or can be overridden via the cli flag `--bootnodes`, to bootstrap the discovery process​. Through a method called FINDNODE-NODES, a new node establishes a bond with each bootnode, and it returns a list of peers for the new node to connect to. Following this trail, the new node engages through FINDNODE-NODES with the provided peers to further establish a web of connections​. + +Discv5 operates as a peer advertisement medium in this network, where nodes can act as both providers and consumers of data. 
Every participating node in the Discv5 protocol discovers peer data from other nodes and later relays it, making the discovery process dynamic and efficient. + +Discv5 is designed to be a standalone protocol running via UDP on a dedicated port solely for peer discovery. Peer data is exchanged via self-certified, flexible peer records (ENRs). These key features cater to the Ethereum network and being a good peer often means running a discv5 worker. Lodestar offers simple configuration to set up and run a bootnode independently of a beacon node. See [bootnode](./bootnode.md) for more information and configuration options. + +## ENR + +Ethereum Node Records (ENRs) are a standardized format utilized for peer discovery - see [EIP-778](https://eips.ethereum.org/EIPS/eip-778) for the specification. An ENR consists of a set of key-value pairs. These pairs include crucial information such as the node's ID, IP address, the port on which it's listening, and the protocols it supports. This information helps other nodes in the network locate and connect to the node. + +The primary purpose of ENRs is to facilitate node discovery and connectivity in the Ethereum network. Nodes use ENRs to announce their presence and capabilities to other nodes, making it easier to establish and maintain a robust, interconnected network. + +Note that bootnodes are announced via ENR. + +## Peer Communication (gossipsub and ReqResp) + +Gossipsub and ReqResp are the two mechanisms that beacon nodes use to exchange chain data. Gossipsub is used to disseminate the most recent relevant data proactively throughout the network. ReqResp is used to directly ask specific peers for specific information (e.g. during syncing). + +### Gossipsub + +GossipSub is a foundational protocol in peer-to-peer (P2P) communication, particularly decentralized networks like Ethereum and IPFS. At its core, GossipSub efficiently propagates data, filtered by topic, through a P2P network.
It organizes peers into a collection of overlay networks, each associated with a distinct topic. By routing data through relevant overlay networks based on topics of interest, large amounts of data can be efficiently disseminated without excessive bandwidth, latency, etc. + +In GossipSub, nodes can subscribe to topics, effectively joining the corresponding overlay to receive messages published to a specific topic. This topic-based structure enables nodes to congregate around shared interests, ensuring that relevant messages are delivered to all interested parties. Each message published to a topic gets disseminated and relayed to all subscribed peers, similar to a chat room. + +Messages are propagated through a blend of eager-push and lazy-pull models. Specifically, the protocol employs "mesh links" to carry full messages actively and "gossip links" to carry only message identifiers (lazy-pull propagation model). This hybrid approach allows for both active message propagation and reactive message retrieval​ which is an extension of the traditional hub-and-spoke pub/sub model. + +### ReqResp + +ReqResp is the domain of protocols that establish a flexible, on-demand mechanism to retrieve historical data and data missed by gossip. This family of methods, implemented as separate libp2p protocols, operate between a single requester and responder. A method is initiated via a libp2p protocol ID, with the initiator sending a request message and the responder sending a response message. Every method defines a specific request and response message type, and a specific protocol ID. This framework also facilitates streaming responses and robust error handling. + +## Data Transport (libp2p) + +Libp2p is a modular and extensible network stack that serves as the data transport layer below both gossipsub and ReqResp and facilitates the lower-level peer-to-peer communications. 
It provides a suite of protocols for various networking functionalities including network transports, connection encryption and protocol multiplexing. Its modular design allows for the easy addition, replacement, or upgrading of protocols, ensuring an adaptable and evolving networking stack. + +Libp2p operates at the lower levels of the OSI model, particularly at the Transport and Network layers. Libp2p supports both TCP and UDP protocols for establishing connections and data transmission. Combined with libp2p's modular design it can integrate with various networking technologies to facilitating both routing and addressing. + +## Firewall Management + +If your setup is behind a firewall there are a few ports that will need to be opened to allow for P2P discovery and communication. There are also some ports that need to be protected to prevent unwanted access or DDOS attacks on your node. + +Ports that should be opened: + +- 30303/TCP+UDP - Execution layer p2p communication port +- 9000/TCP+UDP - Beacon Node P2P communication port +- 9090/TCP - Lodestar IPv6 P2P communication port +- 13000/TCP - Prysm P2P communication port +- 12000/UDP - Prysm P2P communication port + +Ports that should be inbound protected: + +- 9596/TCP - Lodestar Beacon-Node JSON RPC api calls +- 5062/TCP - Lodestar validator key manager api calls +- 18550/TCP - Lodestar MEV Boost/Builder port +- 8008/TCP - Lodestar Metrics +- 5064/TCP - Validator Metrics +- 8545/TCP - Execution client JSON RPC port api calls +- 8551/TCP - Execution engine port for Lodestar to communicate with the execution client + +## NAT Traversal + +Lodestar does not support UPnP. If you are behind a NAT you will need to manually forward the ports listed above. 
diff --git a/docs/pages/beacon-management/syncing.md b/docs/pages/beacon-management/syncing.md new file mode 100644 index 000000000000..21cd05d8a8a2 --- /dev/null +++ b/docs/pages/beacon-management/syncing.md @@ -0,0 +1,42 @@ +# Syncing + +Syncing an Ethereum node involves obtaining a copy of the blockchain data from other peers in the network to reach a consistent state. This process is crucial for new nodes or nodes that have been offline and need to catch up with the network's current state. Syncing can be performed for both the execution layer and the beacon chain, although the focus here will be primarily on the beacon chain. + +Lodestar allows for several methods of syncing however the recommended method is `checkpoint sync` as it is the fastest and least resource intensive. It is generally a good idea to sync via a [`--checkpointSyncUrl`](./configuration.md#--checkpointSyncUrl). If starting at a specific point is necessary specify the [`--checkpointState`](./configuration.md#--checkpointState) that should be where the sync begins. + +## Weak Subjectivity + +Weak subjectivity is a concept specific to Proof of Stake (PoS) systems, addressing how new nodes can safely join the network and synchronize with the correct blockchain history. Unlike in Proof of Work (PoW) systems, where a node can trust the longest chain due to the significant computational effort required to forge it, PoS systems present different challenges. In PoS, the cost of creating or altering blockchain history is lower, as it is not based on computational work but on the stake held by validators. This difference raises the possibility that an attacker, if possessing sufficient stake, could feasibly create a misleading version of the blockchain history. + +The concept of weak subjectivity becomes particularly crucial in two scenarios: when new nodes join the network and when existing nodes reconnect after a significant period of being offline. 
During these times, the 'weak subjectivity period' defines a time frame within which a client, upon rejoining, can reliably process blocks to reach the consensus chain head. This approach is essential for mitigating the risks associated with long-range attacks, which could occur if nodes relied solely on the longest chain principle without any initial trust in a specific network state. + +To counter these risks, weak subjectivity requires new nodes to obtain a recent, trusted state of the blockchain from a reliable source upon joining the network. This state includes vital information about the current set of validators and their stakes. Starting from this trusted state helps new nodes avoid being misled by false histories, as any attempt to rewrite history beyond this point would require an unrealistically large portion of the total stake. + +## Syncing Methods + +### Checkpoint Sync + +Checkpoint sync, also known as state sync, allows a node to sync to a specific state checkpoint without having to process all historical data leading up to that point. In the context of a beacon node, this involves syncing to a recent finalized checkpoint, allowing the node to quickly join the network and participate in consensus activities. This is especially beneficial for new nodes or nodes that have been offline for a considerable duration. + +### Historical Sync + +Historical sync involves processing all blocks from the genesis block or from a specified starting point to the current block. This is the most comprehensive sync method but also the most resource and time-intensive. For beacon nodes, historical sync is crucial for nodes that aim to maintain a complete history of the beacon chain, facilitating a deeper understanding and analysis of the network's history. In the execution layer, it ensures a complete historical record of the execution layer data. 
+ +### Range Sync + +Range sync involves syncing blocks within a specified range, beneficial when a node is only temporarily offline and needs to catch up over a short range. In the beacon node context, this entails requesting and processing blocks within a defined range, ensuring the node quickly gets updated to the current network state. + +### Backfill Sync + +This is another version of checkpoint sync that allows a node that has not been historically synchronized to verify data prior to the checkpoint. It is done via downloading a checkpoint and then fetching blocks backwards from that point until the desired data can be verified. It is a relatively inexpensive sync from a CPU perspective because it only checks the block hashes and verifies the proposer signatures along the way. + +## Syncing Lodestar + +The implementation of the different syncing styles in Lodestar is actually one of two types under the hood, range sync and unknown-parent sync. Range sync is used when the start point of syncing is known. In the case of historical and checkpoint sync the starting points are well defined, genesis and the last finalized epoch boundary. Snapshot sync is not supported by Lodestar. If the starting point for sync is not known Lodestar must first determine where the starting point is. While the discussion about how that happens is out of scope for this document, the gist is that the beacon node will listen to gossipsub for blocks being broadcast on the network. It will also request [`MetaData`](https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#getmetadata) from its peers and use that to start requesting the correct blocks from the network. + +There are several flags that can be used to configure the sync process.
+ +- [`--checkpointSyncUrl`](./configuration.md#--checkpointSyncUrl) +- [`--checkpointState`](./configuration.md#--checkpointState) +- [`--wssCheckpoint`](./configuration.md#--wssCheckpoint) +- [`--forceCheckpointSync`](./configuration.md#--forceCheckpointSync) diff --git a/docs/pages/contribution/bug-reports.md b/docs/pages/contribution/bug-reports.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/design/depgraph.md b/docs/pages/contribution/depgraph.md similarity index 100% rename from docs/design/depgraph.md rename to docs/pages/contribution/depgraph.md diff --git a/docs/pages/contribution/pr-submission.md b/docs/pages/contribution/pr-submission.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/pages/contribution/repo.md b/docs/pages/contribution/repo.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/pages/contribution/testing/index.md b/docs/pages/contribution/testing/index.md new file mode 100644 index 000000000000..9de62895323c --- /dev/null +++ b/docs/pages/contribution/testing/index.md @@ -0,0 +1,27 @@ +# Testing + +Testing is critical to the Lodestar project and there are many types of tests that are run to build a product that is both effective AND efficient. This page will help to break down the different types of tests you will find in the Lodestar repo. + +### Unit Tests + +This is the most fundamental type of test in most code bases. In all instances mocks, stubs and other forms of isolation are used to test code on a functional, unit level. See the [Unit Tests](./unit-tests.md) page for more information. + +### Spec Tests + +The Ethereum Consensus Specifications are what ensure that the various consensus clients do not diverge on critical computations and will work harmoniously on the network. See the [Spec Tests](./spec-tests.md) page for more information. 
+ +### Performance Tests + +Node.js is an unforgiving virtual machine when it comes to high performance, multi-threaded applications. In order to ensure that Lodestar can not only keep up with the chain, but to push the boundary of what is possible, there are lots of performance tests that benchmark programming paradigms and prevent regression. See the [Performance Testing](./performance-tests.md) page for more information. + +### End-To-End Tests + +E2E tests are where Lodestar is run in its full form, often from the CLI as a user would to check that the system as a whole works as expected. These tests are meant to exercise the entire system in isolation and there is no network interaction, nor interaction with any other code outside of Lodestar. See the [End-To-End Testing](./end-to-end-tests.md) page for more information. + +### Integration Tests + +Integration tests are meant to test how Lodestar interacts with other clients, but are not considered full simulations. This is where Lodestar may make API calls or otherwise work across the process boundary, but there is required mocking, stubbing, or class isolation. An example of this is using the `ExecutionEngine` class to make API calls to a Geth instance to check that the HTTP requests are properly formatted. + +### Simulation Tests + +These are the most comprehensive types of tests. They aim to test Lodestar in a fully functioning ephemeral devnet environment. See the [Simulation Testing](./simulation-tests.md) page for more information. diff --git a/docs/pages/contribution/testing/integration-tests.md b/docs/pages/contribution/testing/integration-tests.md new file mode 100644 index 000000000000..b45110033460 --- /dev/null +++ b/docs/pages/contribution/testing/integration-tests.md @@ -0,0 +1,27 @@ +# Integration Tests + +The following tests are found in `packages/beacon-node`. + +#### `test:sim:withdrawals` + +This test simulates Capella blocks with withdrawals. It tests Lodestar against Geth and EthereumJS.
+ +There are two ENV variables that are required to run this test: + +- `EL_BINARY_DIR`: the docker image setup to handle the test case +- `EL_SCRIPT_DIR`: the script that will be used to start the EL client. All of the scripts can be found in `packages/beacon-node/test/scripts/el-interop` and the `EL_SCRIPT_DIR` is the sub-directory name in that root that should be used to run the test. + +The command to run this test is: + +`EL_BINARY_DIR=g11tech/geth:withdrawals EL_SCRIPT_DIR=gethdocker yarn mocha test/sim/withdrawal-interop.test.ts` + +The images used by this test during CI are: + +- `GETH_WITHDRAWALS_IMAGE: g11tech/geth:withdrawalsfeb8` +- `ETHEREUMJS_WITHDRAWALS_IMAGE: g11tech/ethereumjs:blobs-b6b63` + +#### `test:sim:merge-interop` + +#### `test:sim:mergemock` + +#### `yarn test:sim:blobs` diff --git a/docs/pages/contribution/testing/performance-tests.md b/docs/pages/contribution/testing/performance-tests.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/pages/contribution/testing/simulation-tests.md b/docs/pages/contribution/testing/simulation-tests.md new file mode 100644 index 000000000000..ed36d1351307 --- /dev/null +++ b/docs/pages/contribution/testing/simulation-tests.md @@ -0,0 +1,141 @@ +# Simulation Testing + +"Sim" testing for Lodestar is the most comprehensive, and complex, testing that is run. The goal is to fully simulate a testnet and to actuate the code in a way that closely mimics what will happen when turning on Lodestar in the wild. This is a very complex task and requires a lot of moving parts to work together. The following sections will describe the various components and how they work together. + +At a very high level, simulation testing will set up a testnet from genesis and let it proceed through "normal" execution exactly as the nodes would under production circumstances. To get feedback there are regular checks along the way to assess how the testnet nodes are working.
These "assertions" can be added and removed at will to allow developers to check for specific conditions in a tightly controlled, reproducible, environment to get high quality and actionable feedback on how Lodestar performs. The end goal of these tests is to run a full Lodestar client in an environment that is as close as possible to what an end user would experience. + +These tests usually set up full testnets with multiple consensus clients and their paired execution node. In many instances we are looking to just exercise the Lodestar code but there are some places where there is also testing to see how Lodestar works in relation to the other consensus clients, like Lighthouse. As you can imagine, there is quite a bit of machinery that is responsible for setting up and managing the simulations and assertions. This section will help to go over those bits and pieces. Many, but not all, of these classes can be found in `packages/cli/test/utils/simulation`. + +## Running Sim Tests + +There are a number of sim tests that are available and each has a slightly different purpose. All are run by CI and must pass for a PR to be valid for merging. Most tests require a couple of environment variables to be set. + +### Environment Variables + +To see what typical values for these are check out the `test-sim.yaml` workflow file in the `.github/workflows` directory. + +- `GETH_DOCKER_IMAGE`: The geth docker image that will be used +- `NETHERMIND_DOCKER_IMAGE`: The nethermind docker image that will be used +- `LIGHTHOUSE_DOCKER_IMAGE`: The lighthouse docker image that will be used + +### `test:sim:multifork` + +The multi-fork sim test checks most of the functionality Lodestar provides. It verifies that Lodestar is capable of peering, moving through all of the forks and using various sync methods in a testnet environment. Lodestar is tested with both Geth and Nethermind as the execution client. It also checks a Lighthouse/Geth node for cross client compatibility.
+ +```sh +GETH_DOCKER_IMAGE=ethereum/client-go:v1.11.6 \ + LIGHTHOUSE_DOCKER_IMAGE=sigp/lighthouse:latest-amd64-modern-dev \ + NETHERMIND_DOCKER_IMAGE=nethermind/nethermind:1.18.0 \ + yarn workspace @chainsafe/lodestar test:sim:multifork +``` + +### `test:sim:endpoints` + +This tests that various endpoints of the beacon node and validator client are working as expected. + +```sh +GETH_DOCKER_IMAGE=ethereum/client-go:v1.11.6 \ + yarn workspace @chainsafe/lodestar test:sim:endpoints +``` + +### `test:sim:deneb` + +This test is still included in our CI but is no longer as important as it once was. Lodestar is often the first client to implement new features and this test was created before geth was upgraded with the features required to support the Deneb fork. To test that Lodestar was ready this test uses mocked geth instances. It is left as a placeholder for when the next fork comes along that requires a similar approach. + +### `test:sim:mixedclient` + +Checks that Lodestar is compatible with other consensus validators and vice-versa. All tests use Geth as the EL. + +```sh +GETH_DOCKER_IMAGE=ethereum/client-go:v1.11.6 \ + LIGHTHOUSE_DOCKER_IMAGE=sigp/lighthouse:latest-amd64-modern-dev \ + yarn workspace @chainsafe/lodestar test:sim:mixedclient +``` + +## Sim Test Infrastructure + +When setting up and running the simulations, interactions with the nodes are through the published node APIs. All functionality is actuated via HTTP requests and by "plugging in" this way it is possible to run the nodes in a stand-alone fashion, as they would be run in production, but to still achieve a tightly monitored and controlled environment. If code needs to be executed on a "class by class" basis or with mocking involved then the test is not a simulation test and would fall into one of the other testing categories. See the [Testing](./index.md) page for more information on the other types of tests available for Lodestar.
+ +### Simulation Environment + +The simulation environment has many pieces and those are orchestrated by the `SimulationEnvironment` class. The testnet nodes will be run as a mixture of Docker containers and bare metal code execution via Node.js. In order to monitor the various clients there is a `SimulationTracker` whose primary function is to `register` assertions that will track and gauge how the nodes are doing during the simulation. See the section on [Simulation Assertions](#simulation-assertions) below for more information on them. There is an `EpochClock` that has helper functions related to timing of slots and epochs and there is also a `Runner` that will help to start/stop the various Docker containers and spawn the Node.js child processes as necessary. + +The `SimulationEnvironment` is the orchestrator for all the various functions to create the testnet and start it from genesis. It is also how the various forks are configured to exercise code through various fork transitions. + +### Simulation Assertions + +These are the secret sauce for making the simulation tests meaningful. There are several predefined assertions that can be added to a simulation tracker and one can also create custom assertions and add them to the environment. Assertions can be added per slot, per epoch, per fork or per node. They can even be added to check conditions across nodes. + +Assertions are added to the `SimulationTracker` with the `register` method and the tracker follows the environment to make sure that assertions are run at the appropriate times, and on the correct targets. + +Assertions are implemented via API calls to the various targets and metadata from the API calls is stored and used to assert that the desired conditions were met. Any information that can be retrieved via API call can be added to the assertion `stores` for validation, and validations can be asserted at a specific time or on an interval.
+ +There are a number of assertions that are added to simulations by default. They are: + +- `inclusionDelayAssertion` +- `attestationsCountAssertion` +- `attestationParticipationAssertion` +- `connectedPeerCountAssertion` +- `finalizedAssertion` +- `headAssertion` +- `missedBlocksAssertion` +- `syncCommitteeParticipationAssertion` + +Because of the flexibility, and complexity, there is a section specifically for how to create custom assertions below. See [custom assertions](#custom-assertions) for more info. + +### Custom Assertions + +Check back soon for more information on how to create custom assertions. + +### Simulation Reports + +Sim tests that are run using the simulation framework output a table of information to the console. The table summarizes the state of all of the nodes and the network at each slot. + +Here is an example of the table and how to interpret it: + +```sh +┼─────────────────────────────────────────────────────────────────────────────────────────────────┼ +│ fork │ eph │ slot │ head │ finzed │ peers │ attCount │ incDelay │ errors │ +┼─────────────────────────────────────────────────────────────────────────────────────────────────┼ +│ capella │ 9/0 │ 72 │ 0x95c4.. │ 56 │ 3 │ 16 │ 1.00 │ 0 │ +│ capella │ 9/1 │ 73 │ 0x9dfc.. │ 56 │ 3 │ 16 │ 1.00 │ 0 │ +│ capella │ 9/2 │ 74 │ 0xdf3f.. │ 56 │ 3 │ 16 │ 1.00 │ 0 │ +│ capella │ 9/3 │ 75 │ 0xbeae.. │ 56 │ 3 │ 16 │ 1.00 │ 0 │ +│ capella │ 9/4 │ 76 │ 0x15fa.. │ 56 │ 3 │ 16 │ 1.00 │ 0 │ +│ capella │ 9/5 │ 77 │ 0xf8ff.. │ 56 │ 2,3,3,2 │ 16 │ 1.00 │ 0 │ +│ capella │ 9/6 │ 78 │ 0x8199.. 
│ 56 │ 2,3,3,2 │ 16 │ 1.20 │ 0 │ +│ capella │ 9/7 │ 79 │ different │ 56 │ 2,3,3,2 │ 16 │ 1.50 │ 2 │ +┼─────────────────────────────────────────────────────────────────────────────────────────────────┼ +│ Att Participation: H: 0.75, S: 1.00, T: 0.75 - SC Participation: 1.00 │ +┼─────────────────────────────────────────────────────────────────────────────────────────────────┼ +``` + +#### Slot Information + +- `fork`: shows what fork is currently being tested +- `eph`: During simulation tests the Lodestar repo is setup to use 8 slot per epoch so what is shown is the epoch number and the slot number within that epoch as `epoch/slot` +- `slot`: The slot number that is currently being processed +- `head`: If all clients have the the same head the first couple of bytes of the hash are shown. If all clients do not have the same head `different` is reported. +- `finzed`: Shows the number of the last finalized slot +- `peers`: The number of peers that each node is connected to. If all have the same number then only a single value is shown. If they do not have the same number of peers count for each node is reported in a comma-separated list +- `attCount`: The number of attestations that the node has seen. +- `incDelay`: The average number of slots inclusion delay was experienced for the attestations. Often attestations for the current head arrive more than one slot behind and this value tracks that +- `errors`: The number of errors that were encountered during the slot + +#### Epoch Information + +- `H`: The percentage of nodes, at epoch transition, that voted for the head block +- `S`: The percentage of nodes, at epoch transition, that voted for the source block +- `T`: The percentage of nodes, at epoch transition, that voted for the target block +- `SC Participation`: The sync committee participation rate + +### Simulation Logging + +The simulation environment will capture all of the logs from all nodes that are running. 
The logs can be found in the `packages/cli/test-logs` directory. The logs are named with the following convention: + +`<node-name>-<layer>_<client-type>.log` + +Some examples are: + +- `node-1-beacon_lodestar.log`: This is the first node in the simulation. It is the consensus layer. It is running the lodestar validator client. +- `range-sync-execution_geth.log`: This is the node that was added to test pulling history in range sync mode. It was the execution layer and was running the geth execution client. diff --git a/docs/pages/contribution/testing/spec-tests.md b/docs/pages/contribution/testing/spec-tests.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/pages/contribution/testing/unit-tests.md b/docs/pages/contribution/testing/unit-tests.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/pages/data-retention.md b/docs/pages/data-retention.md new file mode 100644 index 000000000000..c8512858441f --- /dev/null +++ b/docs/pages/data-retention.md @@ -0,0 +1,54 @@ +# Data Retention + +There are two components for an Ethereum node database, the execution client and the beacon node. Both need to hold data for a full node to work correctly. In particular the execution node holds state such as wallet information and smart contract code. It also holds the execution blocks with the transaction record. The beacon node is responsible for holding beacon node blocks and state. The beacon state is responsible primarily for the validator information. + +There are several processes that need to store data for Lodestar. These data sets can grow quite large over time so it is important to understand how to manage them so the host machine can support operations effectively.
+ +```bash +$executionDir # this changes depending on the execution client + └── execution-db + +$dataDir # specified by --dataDir on the beacon command +├── .log_rotate_audit.json +├── beacon.log # there can be many of these +├── enr +├── peer-id.json +├── chain-db # default if --dbDir not specified +│ └── (db files) +└── peerstore # default if --peerStoreDir not specified + └── (peerstore files) + +$dataDir # specified by --dataDir on the validator command +├── .log_rotate_audit.json +├── validator.log # there can be many of these +├── validator-db # default if --validatorsDbDir not specified +│ └── (db files) +├── proposerConfigs # default if --proposerDir not specified +│ └── (config files) +├── cache # default if --cacheDir not specified +│ └── (cache files) +├── secrets # default if --secretsDir not specified +│ ├── 0x8e41b969493454318c27ec6fac90645769331c07ebc8db5037... +│ └── 0xa329f988c16993768299643d918a2694892c012765d896a16f... +├── keystores # default if --keystoresDir not specified +│ ├── 0x8e41b969493454318c27ec6fac90645769331c07ebc8db5037... +│ │ └── voting-keystore.json +│ └── 0xa329f988c16993768299643d918a2694892c012765d896a16f... +│ └── voting-keystore.json +└── remoteKeys # default if --remoteKeysDir not specified + └── 0xa329f988c16993768299643d918a2694892c012765d896a16f.json +``` + +## Data Management + +Configuring your node to store and prune data is key to success. On average you can expect for the database to grow by the follow amounts: + +- `execution-db` grows at 2-30GB per week +- `chain-db` grows at 1GB per month +- `validator-db` grows at less than 2MB per year, per key (2000 keys = 4GB per year) + +`keystores`, `keystore-cache` and `peerstore` are not usually very large and are not expected to grow much during normal operation. + +Logs can also become quite large so please check out the section on [log management](../logging-and-metrics/log-management.md) for more information. 
+ +There is really only one flag that is needed to manage the data for Lodestar, [`--dataDir`](./configuration.md#--dataDir). Other than that handling log management is really the heart of the data management story. Beacon node data is what it is. Depending on the execution client that is chosen, there may be flags to help with data storage growth but that is outside the scope of this document. diff --git a/docs/pages/getting-started/installation.md b/docs/pages/getting-started/installation.md new file mode 100644 index 000000000000..61ecb5b128ef --- /dev/null +++ b/docs/pages/getting-started/installation.md @@ -0,0 +1,93 @@ +# Installation + +## Docker Installation + +The [`chainsafe/lodestar`](https://hub.docker.com/r/chainsafe/lodestar) Docker Hub repository is maintained actively. It contains the `lodestar` CLI preinstalled. + + +!!! info + The Docker Hub image tagged as `chainsafe/lodestar:next` is run on CI every commit on our `unstable` branch. + For `stable` releases, the image is tagged as `chainsafe/lodestar:latest`. + + +Ensure you have Docker installed by issuing the command: + +```bash +docker -v +``` + +It should return a non error message such as `Docker version xxxx, build xxxx`. + +Pull, run the image and Lodestar should now be ready to use + +```bash +docker pull chainsafe/lodestar +docker run chainsafe/lodestar --help +``` + + +!!! info + Docker is the recommended setup for Lodestar. Use our [Lodestar Quickstart scripts](https://github.com/ChainSafe/lodestar-quickstart) with Docker for detailed instructions. + + +## Build from Source + +### Prerequisites + +Make sure to have [Yarn installed](https://classic.yarnpkg.com/en/docs/install). It is also recommended to [install NVM (Node Version Manager)](https://github.com/nvm-sh/nvm) and use the LTS version (currently v20) of [NodeJS](https://nodejs.org/en/). + + +!!! info + NodeJS versions older than the current LTS are not supported by Lodestar. We recommend running the latest Node LTS. 
+ It is important to make sure the NodeJS version is not changed after reboot by setting a default `nvm alias default && nvm use default`. + +!!! note + Node Version Manager (NVM) will only install NodeJS for use with the active user. If you intend on setting up Lodestar to run under another user, we recommend using [NodeSource's source for NodeJS](https://github.com/nodesource/distributions/blob/master/README.md#installation-instructions) so you can install NodeJS globally. + + +### Clone repository + +Clone the repository locally and build from the stable release branch. + +```bash +git clone -b stable https://github.com/chainsafe/lodestar.git +``` + +Switch to created directory. + +```bash +cd lodestar +``` + +### Install packages + +Install across all packages. Lodestar follows a [monorepo](https://github.com/lerna/lerna) structure, so all commands below must be run in the project root. + +```bash +yarn install +``` + +### Build source code + +Build across all packages. + +```bash +yarn run build +``` + +### Lodestar CLI + +Lodestar should now be ready for use. + +```bash +./lodestar --help +``` + +See [Command Line Reference](./../reference/cli.md) for further information. + +## Install from NPM [not recommended] + + +!!! danger + For mainnet (production) usage, we only recommend installing with docker due to [NPM supply chain attacks](https://hackaday.com/2021/10/22/supply-chain-attack-npm-library-used-by-facebook-and-others-was-compromised/). Until a [safer installation method has been found](https://github.com/ChainSafe/lodestar/issues/3596), do not use this install method except for experimental purposes only. 
+ \ No newline at end of file diff --git a/docs/quickstart.md b/docs/pages/getting-started/quick-start.md similarity index 100% rename from docs/quickstart.md rename to docs/pages/getting-started/quick-start.md diff --git a/docs/usage/beacon-management.md b/docs/pages/getting-started/starting-a-node.md similarity index 100% rename from docs/usage/beacon-management.md rename to docs/pages/getting-started/starting-a-node.md diff --git a/docs/pages/getting-started/starting-a-node.new.md b/docs/pages/getting-started/starting-a-node.new.md new file mode 100644 index 000000000000..b66e797b29ed --- /dev/null +++ b/docs/pages/getting-started/starting-a-node.new.md @@ -0,0 +1,21 @@ +# Starting a Node + +## Prerequisites + +### Creating a Client Communication JWT + +### Creating a Validator Keystore + +## Base Considerations + +### Execution Client + +### Beacon Node + +### Validator Client + +## Production Considerations + +### Ingress/Egress + +### Fail-Over diff --git a/docs/index.md b/docs/pages/index.md similarity index 100% rename from docs/index.md rename to docs/pages/index.md diff --git a/docs/pages/introduction.md b/docs/pages/introduction.md new file mode 100644 index 000000000000..f8fe03386c0a --- /dev/null +++ b/docs/pages/introduction.md @@ -0,0 +1,34 @@ +# Introduction + +Ethereum is one of the most profoundly important inventions in recent history. It is a decentralized, open-source blockchain featuring smart contract functionality. It is the second-largest cryptocurrency by market capitalization, after Bitcoin, and is the most actively used blockchain. Ethereum was proposed in 2013 by programmer Vitalik Buterin. Development was crowdfunded in 2014, and the network went live on 30 July 2015, with 72 million coins premined. ChainSafe was founded not too long afterwards and has been actively working in the Ethereum space ever since. We are proud to develop Lodestar and to present this documentation as a resource for the Ethereum community. 
+ +## Proof of Stake + +In Ethereum's Proof of Stake (PoS) model, validators replace miners from the Proof of Work (PoW) system. Validators are Ethereum stakeholders who lock up a portion of their Ether as a stake. The protocol randomly selects these validators to propose new blocks. The chance of being chosen is tied to the size of their stake: the more Ether staked, the higher the probability of being selected to propose the block. Proposers receive transaction fees and block rewards as incentives. Validators are also responsible for voting on the validity of blocks proposed by other validators. However, they face penalties, known as slashing, for actions like double-signing, votes on a block that is not in the majority or going offline, ensuring network integrity and reliability. The PoS mechanism significantly reduces energy consumption compared to PoW, because it does not require extensive computational power. Moreover, PoS tends to facilitate faster transaction validations and block creations, enhancing the overall performance and scalability of the network. + +## Consensus Clients + +In an effort to promote client diversity there are several beacon-nodes being developed. Each is programmed in a different language and by a different team. The following is a list of the current beacon-node clients: + +[Lodestar](https://chainsafe.io/lodestar.html) +[Prysm](https://prysmaticlabs.com/) +[Lighthouse](https://lighthouse.sigmaprime.io/) +[Teku](https://consensys.net/knowledge-base/ethereum-2/teku/) +[Nimbus](https://nimbus.team/) + +## Why Client Diversity? + +The Ethereum network's robustness is significantly enhanced by its client diversity, whereby multiple, independently-developed clients conforming to a common specification facilitate seamless interaction and function equivalently across nodes. 
This client variety not only fosters a rich ecosystem but also provides a buffer against network-wide issues stemming from bugs or malicious attacks targeted at particular clients. For instance, during the Shanghai denial-of-service attack in 2016, the diversified client structure enabled the network to withstand the assault, underscoring the resilience afforded by multiple client configurations. + +On the consensus layer, client distribution is crucial for maintaining network integrity and finality, ensuring transactions are irreversible once validated. A balanced spread of nodes across various clients helps mitigate risks associated with potential bugs or attacks that could, in extreme cases, derail the consensus process or lead to incorrect chain splits, thereby jeopardizing the network's stability and trust. While the data suggests a dominance of Prysm client on the consensus layer, efforts are ongoing to promote a more even distribution among others like Lighthouse, Teku, and Nimbus. Encouraging the adoption of minority clients, bolstering their documentation, and leveraging real-time client diversity dashboards are among the strategies being employed to enhance client diversity, which in turn fortifies the Ethereum consensus layer against adversities and fosters a healthier decentralized network ecosystem. + +The non-finality event in May 2023 on the Ethereum network posed a significant challenge. The issue arose from attestations for a fork, which necessitated state replays to validate the attestations, causing a notable strain on system resources. As a result, nodes fell out of sync, which deterred the accurate tracking of the actual head of the chain. This situation was exacerbated by a decline in attestations during specific epochs, further hampering the consensus mechanism. 
The Lodestar team noticed late attestations several weeks prior to the event and implemented a feature that attempted to address such challenges by not processing untimely attestations, and thus not requiring expensive state replays​. While it was done for slightly different reasons, the result was the same. Lodestar was able to follow the chain correctly and helped to stabilize the network. This example underscored the importance of client diversity and network resilience against potential forks and replay attacks. These are considered realistic threats, especially in the context of system complexity like in Ethereum's consensus mechanism. + +## Ethereum Reading List + +- [Ethereum Docs](https://ethereum.org/en/developers/docs/) +- [Upgrading Ethereum](https://eth2book.info/capella/) by Ben Edgington +- [Ethereum Book](https://github.com/ethereumbook/ethereumbook) by Andreas M. Antonopoulos and Gavin Wood +- [Ethereum Consensus Specification](https://github.com/ethereum/consensus-specs) +- [Casper the Friendly Finality Gadget](https://browse.arxiv.org/pdf/1710.09437.pdf) by Vitalik Buterin and Virgil Griffith +- [LMD Ghost](https://github.com/protolambda/lmd-ghost) by protolambda diff --git a/docs/pages/lightclient-prover/.gitkeep b/docs/pages/lightclient-prover/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/usage/client-monitoring.md b/docs/pages/logging-and-metrics/client-monitoring.md similarity index 100% rename from docs/usage/client-monitoring.md rename to docs/pages/logging-and-metrics/client-monitoring.md diff --git a/docs/pages/logging-and-metrics/dashboards.md b/docs/pages/logging-and-metrics/dashboards.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/pages/logging-and-metrics/log-management.md b/docs/pages/logging-and-metrics/log-management.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/pages/logging-and-metrics/metrics-management.md 
b/docs/pages/logging-and-metrics/metrics-management.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/usage/prometheus-grafana.md b/docs/pages/logging-and-metrics/prometheus-grafana.md similarity index 100% rename from docs/usage/prometheus-grafana.md rename to docs/pages/logging-and-metrics/prometheus-grafana.md diff --git a/docs/pages/supporting-libraries/index.md b/docs/pages/supporting-libraries/index.md new file mode 100644 index 000000000000..eb1e7821db18 --- /dev/null +++ b/docs/pages/supporting-libraries/index.md @@ -0,0 +1,27 @@ +# Supporting Libraries + +## Networking + +### LibP2P + +- [`@chainsafe/js-libp2p-noise`](https://github.com/NodeFactoryIo/js-libp2p-noise) - [Noise](https://noiseprotocol.org/noise.html) handshake for `js-libp2p` +- [`@chainsafe/js-libp2p-gossipsub`](https://github.com/ChainSafe/js-libp2p-gossipsub) - [Gossipsub](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub) protocol for `js-libp2p` +- [@chainsafe/libp2p-yamux](https://github.com/ChainSafe/libp2p-yamux) + +### Discv5 + +- [`discv5`](https://github.com/ChainSafe/discv5) - [Discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md) protocol + +## Serialization and Hashing + +- [`ssz`](https://github.com/ChainSafe/ssz) - Simple Serialize (SSZ) +- [`persistent-merkle-tree`](https://github.com/ChainSafe/persistent-merkle-tree) - binary merkle tree implemented as a [persistent data structure](https://en.wikipedia.org/wiki/Persistent_data_structure) +- [`as-sha256`](https://github.com/ChainSafe/as-sha256) - Small AssemblyScript implementation of SHA256 + +## BLS + +- [`bls`](https://github.com/ChainSafe/bls) - Isomorphic Ethereum Consensus BLS sign / verify / aggregate +- [`blst-ts`](https://github.com/ChainSafe/blst) - Node specific Ethereum Consensus BLS sign / verify / aggregate +- [`bls-keystore`](https://github.com/ChainSafe/bls-keystore) - store / retrieve a BLS secret key from an 
[EIP-2335](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2335.md) JSON keystore +- [`bls-keygen`](https://github.com/ChainSafe/bls-keygen) - utility functions to generate BLS secret keys, following [EIP-2333](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2333.md) and [EIP-2334](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2334.md) +- [`bls-hd-key`](https://github.com/ChainSafe/bls-hd-key) - low level [EIP-2333](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2333.md) and [EIP-2334](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2334.md) functionality diff --git a/docs/libraries/index.md b/docs/pages/supporting-libraries/libraries.md similarity index 100% rename from docs/libraries/index.md rename to docs/pages/supporting-libraries/libraries.md diff --git a/docs/pages/tools/core-dumps.md b/docs/pages/tools/core-dumps.md new file mode 100644 index 000000000000..98d564eb9308 --- /dev/null +++ b/docs/pages/tools/core-dumps.md @@ -0,0 +1,66 @@ +# Core Dump Analysis + +Core dump analysis is some ninja level stuff. Once you get the hang of it you will feel like you have super powers. It will up your game to a whole new level because you will be able to debug issues that seemed impossible before. Post-crash analysis is a very powerful tool to have in your tool belt. A core dump has all of the objects in memory as well as all of the stack frame information at the exact moment the dump was taken, usually when a hard crash occurs. + +It is important to note that debug symbols will greatly aid you in your debugging for issues related to native code like `C/C++`. When compiled languages are optimized the compiler will often strip out identifiers and all that will be remaining are mangled symbols and addresses. Compiling with debug symbols will leave all of the identifiers, file names and line numbers in-tact. 
+ +While it is not always practical to be running code in a Debug version of node, if you run across a persistent issue it will be helpful to recreate it on a debug build and to use that for analysis. + +It is important to note that the EXACT binary that was running when the dump was created MUST be loaded when doing analysis. There is a lot of information in the dump that is specific to the binary that was running (like function offsets, etc). If you load a different binary you will get a lot of errors and the analysis will not be useful (if it loads at all). + +It is also a nice-to-know that you can create the dump on linux, using a linux compiled version of node, and then read it on a mac. All that is needed is to download the node binary and dump file to the mac. It is possible to load them into a mac compiled version of llnode and all will work as expected. Its just the meta in the linux binary that is needed for analysis, it doesn't actually run the code. + +## Installing `llnode` + +`llnode` is a Node.js plugin for the [LLDB](https://lldb.llvm.org/) debugger. It is the officially sanctioned tool from Node and powerful way to do postmortem analysis of Node.js processes. The process for install is pretty straight-forward unless you have an M1 mac. XCode ships with an instance of `lldb` and installing `llnode` is as simple as running `npm install -g llnode`. + +On an M1 mac the install will work fine but the plugin will crash at load time. See [this issue](https://github.com/nodejs/llnode/issues/430#issuecomment-1844628224) for updates. The workaround is to install `lldb` via homebrew. 
+ +```sh +# should only be necessary on M1 macs at time of writing +$ brew install llvm +$ echo 'export PATH="/opt/homebrew/opt/llvm/bin:$PATH"' >> ~/.zshrc +$ # note that it is prepended before the existing PATH to make sure it resolves first +$ zsh ~/.zshrc +$ which llvm-config +/opt/homebrew/opt/llvm/bin/llvm-config # if this is not what comes up restart the shell +$ npm install -g llnode +$ llnode +(lldb) plugin load '/Users/ninja_user/.nvm/versions/node/v20.5.1/lib/node_modules/llnode/llnode.dylib' +(lldb) settings set prompt '(llnode) ' +(llnode) +``` + +## Collecting a core dump + +Before a core dump can be created, core dumps must be enabled on the system. + +```sh +ulimit -c unlimited +``` + +This is a critical step. If that command is not run the core will not be dumped to disk. + +Core dumps are normally created by the kernel when certain process signals are encountered. `SIGSEGV` is the most common signal that will cause a dump and it's sent by the kernel to the process when a segfault occurs. `SIGSEGV` is not the only signal that works and you can see the full list [here](https://man7.org/linux/man-pages/man7/signal.7.html) under the "Standard Signals" section (all the ones that say "Core" in the "Action" column). + +If you want to create a dump on demand you can use the `gcore` command on linux. This will create a dump of the process without killing it. If you don't mind termination you can also use `kill -SIGSEGV <pid>` to send a dump signal to the process. + +## Analyzing a core dump + +Once you collect the core dump you can load it into `llnode` for debugging. + +```sh +# remember that the node binary must be the exact same one that was running when the core was created +$ llnode -f /path/to/node_debug -c /Users/ninja_user/coredumps/node.coredump +(lldb) target create "node_debug" --core "node.coredump" +Core file '/Users/ninja_user/coredumps/node.coredump' (x86_64) was loaded.
+(lldb) plugin load '/Users/ninja_user/.nvm/versions/node/v20.5.1/lib/node_modules/llnode/llnode.dylib' +(lldb) settings set prompt '(llnode) ' +(llnode) +``` + +Once the dump is loaded the first few steps will be to figure out what types of objects were in memory and what was the processor working on when the crash occurred. Let's start with the stack trace. + +There are two distinct commands for pulling the stack because node is both a native runtime and a virtual machine. The `bt`, back trace, command will pull the native stack frames and the `v8 bt` command will use the `llnode` plugin to pull the JavaScript stack frames. Newer versions of `llnode` will automatically pull the JavaScript stack frames when the `bt` command is run but it is still good to know the difference. It is also possible to add the `all` verb to the `bt` command and it will pull the back trace for all threads. + +To start looking through memory there are two commands that are helpful. The `v8 findjsobjects` command will list all of the JavaScript objects in memory. The `v8 findjsinstances` command will list all of the instances of a particular JavaScript object. diff --git a/docs/pages/tools/debugging.md b/docs/pages/tools/debugging.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/tools/flamegraphs.md b/docs/pages/tools/flamegraphs.md similarity index 100% rename from docs/tools/flamegraphs.md rename to docs/pages/tools/flamegraphs.md diff --git a/docs/pages/tools/heap-dumps.md b/docs/pages/tools/heap-dumps.md new file mode 100644 index 000000000000..379f7e4de2f2 --- /dev/null +++ b/docs/pages/tools/heap-dumps.md @@ -0,0 +1,279 @@ +# Heap Dump Analysis + +There are a number of reasons why one would want to do a heap dump but in particular, they are helpful for finding memory-intensive operations and leaks. There are two major types of heap dumps that are available to node developers. The first is a JavaScript heap dump, and the second is a native heap dump.
The JS heap dump is much more common and is the default heap dump that is generated by `node`. It is useful when analyzing JS generated objects that are managed by the runtime. However there is one major limitation to the JS heap dump, and that is that it does not include native objects. This is where the native heap dump comes in handy. The native heap dump is a snapshot of the entire process memory, and includes objects that are allocated by `C/C++` code, including native modules in use by the application. The limitation to the native heap dump is that it will not include any JS objects that are allocated by the `V8` runtime. Those are generally created within `mmap`'ed pages and the native heap dump tools are specific to `C` objects that are created with `malloc` and destroyed via `free`. `C++` is also covered as `new` and `delete` are wrappers around `malloc` and `free`. This is why it is important to understand how to analyze both types of memory usage. + +## JavaScript Heap Dump + +Node has built in `V8` heap dump access and it's a very powerful tool for analyzing memory usage. Understanding how the dump is created will both help to understand how it is displayed and how to use the analysis more effectively. + +The `V8` heap dump is a stop the world process because walking the entire heap graph is necessary to create one. This is similar to a full, major garbage collection event. The VM starts at the heap entrance node and walks the entire graph and makes note of every edge that connects each node along the way. Nodes are JSObjects and edges are references between those objects. + +By the time the whole heap is walked the full size and values of all nodes are known and all of the connections between those nodes are well understood. The object that is returned is a set of three arrays, the nodes, the edges and the string values that are encountered (because strings are themselves arrays of characters in `C` so they are treated a bit differently by `V8`).
+ +### Creating a `V8` heap dump + +There are two functions for creating a heap dump but both call the same functionality under the hood. One streams the result, `require("v8").getHeapSnapshot([options])`, and is primarily intended for use by the Chrome devtools button to "take a snapshot". The second writes the heap dump to a file, `require("v8").writeHeapSnapshot(filename[,options])`. + +The optional `options` argument, in both cases, is the same and contains two props.`exposeInternals` and `exposeNumericValues` to enrich the dump. In many cases its the application layer that one wants to debug so `exposeInternals` is not usually necessary. In `V8` numbers are stored as 32bit integers and the size of pointers is also 32bits. So as an optimization, the pointer to the numeric value can be eliminated and the value itself can be stored in the `Address` of the `Value` instead. `exposeNumericValues` transcribes those "pointers" to the actual numeric value and appends them to the dump. + +Because heap analysis happens frequently during Lodestar development there is a helper api endpoint to capture a heap dump. **It is IMPORTANT** that this endpoint is not public facing as it will open the threat of DDOS attack. + +The endpoint accepts a `POST` request and you may include an optional `dirpath` query parameter to specify the directory where the heap dump will be written. If the `dirpath` is not specified then the heap dump will be written to the current working directory. + +To create a Lodestar heap dump you can use the following command: + +```sh +curl -X POST http://localhost:9596/eth/v1/lodestar/write_heapdump?dirpath=/some/directory/path +``` + +### Viewing a `V8` heap dump + +It is best to analyze on a local development machine so if Lodestar is running on a cloud instance download the dump to the local environment. Open Chrome, or any Chromium based browser (the example photos were taken using Brave). 
In the url bar type `chrome://inspect` to bring up the DevTools menu (in brave the url will be rewritten to `brave://inspect`). + +![DevTools](../images/heap-dumps/devtools.png) + +Click on the `Open dedicated DevTools for Node` link to open the node specific window and click on the `Memory` tab as shown below. + +![Memory Tab](../images/heap-dumps/memory-tab.png) + +Load the profile by either right clicking on the left pane or by clicking the `Load` button at the bottom. + +![Load Profile](../images/heap-dumps/load-profile.png) + +### Analyzing a `V8` heap dump + +Analysis is as much an art as it is a science and the best way to learn is to do it a few times. Generally the goal is looking for memory leaks but reducing memory overhead is also something that happens. This guide will focus on leaks. With memory leaks one is looking for why objects have references that prevent them from being garbage collected. + +To spot sources of leaks, focus on objects that have large quantities or very large `retained size`. Retained size is the amount of memory that would be freed if the object was garbage collected. As an example if there is an object that has lots and lots of instances, like 100,000, and they are all pushed into an array then the array will have a very large retained size. This is because the array is holding references to all of the objects that it contains. + + + + +If it is not immediately apparent what objects are being leaked then another tool in your arsenal will be to take a second snapshot and compare it to the first. This will show what objects have been created/changed since the first snapshot. + +If there is an object that has a large retained size but is roughly the same, but not exactly the same, chances are that is NOT the leak.
Some objects can get quite large during runtime but if its roughly the same size over time, but not exactly the same, it means that the application is modifying the object (why its not exactly identical in size) but if it hasn't grown significantly over time it can be assumed it is probably the working size of the instances. + +Try to focus on objects that are growing in size or in number over time. Growing in size means the object is holding references to other objects and growing in number means a function closure somewhere is retaining the small instances. + + + + +That is the science part, but these clues are just breadcrumbs to follow. In order to actually resolve the leak, one needs to go into the code to figure out where those objects are being created, or more often, why the references to them are being retained. This is where the art comes in. + +Having a good understanding of the codebase will help to narrow down where to look. It is also common that the leak is not coming directly from Lodestar code, but rather one of the dependencies so be careful not to rule those out. + +## Native Heap Dump + +_**note: collecting a native heap dump is only supported on linux, analysis can be done from linux or Mac**_ + +There are several tools that can be used to do native heap dump analysis. The most common are [`massif`](https://valgrind.org/docs/manual/ms-manual.html) from the [`Valgrind`](https://valgrind.org/) suite, google's [`gperftools`](https://github.com/gperftools/gperftools) and `heaptrack` from [KDE](https://community.kde.org/Main_Page). Of the three, `heaptrack` is the most user friendly tool, and it is specifically designed for the task. It is much faster than `Valgrind`, easier to integrate than `gperftools` and also includes a gui for result analysis. Often times there are also memory allocations that are not related to memory leaks, and tools like `Valgrind` and `gperftools` become less useful. 
This is why `heaptrack` is the recommended tool for heap dump analysis on Lodestar. + +There are a few things that will make the results with `heaptrack` far better. The most important is using debug builds of all libraries included in a binary, including the application itself. This will make the results usable. Not to say that they will be useless without debug symbols but it will be kinda tough to optimize functions without knowing the function names nor the file and line numbers. + +This is the heart of what `heaptrack` will do for us. It hooks into the memory allocation and adds in stack traces for each `malloc` call site. That way every time memory is reserved there is a way to track back where it happened in the code. `heaptrack` also hooks into the `free` function and checks that versus the allocations to check for memory leaks and for temporary variables that can be optimized. This also allows for optimization of how many of each object is created by identifying high frequency allocations. + +Generally the .heapdump file will be created on a cloud server and then copied to a local machine for analysis, mostly because the gui is not available through ssh. The gui is not required for analysis but it is much easier to use than the command line tools. The first step will be to install `heaptrack` on the target server and to capture a profile. + +### Build collection tools + +Assume the following directory structure: + +```sh +├── beacon-node +│   ├── db +│   ├── logs +│   ├── start-lodestar.sh +│   └── rc-config.yml +├── lodestar +└── node # step below will clone this repo +``` + +We will start from the directory that contains `lodestar` and the `beacon-node` files. + +```sh +# Install heaptrack +$ sudo apt-get update +$ sudo apt-get -y install heaptrack + +# Using a debug build of node is recommended and it can be build +# from source. Clone the node repo to get started. 
+$ git clone https://github.com/nodejs/node.git +$ cd node + +# Use whichever version of node you prefer +$ git checkout v20.10.0 +$ ./configure --debug + +# This command only builds the debug version of node and assumes +# that a release version of node is already installed on the system +$ make -C out BUILDTYPE=Debug -j$(nproc --all) + +# Move the debug version of node the the same folder that the release +# version is installed in and name it `node_debug`. This will put the +# debug binary on the path and allow you to run it with the +# `node_debug` command +$ cp out/Debug/node "$(which node)_debug" +$ which node_debug +/your/home/directory/.nvm/versions/node/v20.10.0/bin/node_debug + +# Return to the lodestar repo +$ cd ../lodestar + +# Clean the build artifacts and node_modules +$ yarn clean && yarn clean:nm + +# Install the dependencies +$ yarn install + +# Ensure that all native modules are rebuilt with debug symbols. Some +# modules are prebuilt, like classic-level, and the debug symbols may +# not be included. If the the debugging exercise is focussed around +# one of these dependencies, then you will need to manually clone those +# repos and manually build them with debug symbols. +$ npm rebuild --debug +``` + +### Collect a heap dump + +```sh +# Move to th `beacon-node` directory +$ cd ../beacon-node + +# Start lodestar with profiling enabled +$ heaptrack \ +$ --output ./lodestar.heapdump \ +$ node_debug \ +$ --max-old-space-size=8192 \ +$ ../lodestar/packages/cli/bin/lodestar.js \ +$ beacon \ +$ --rcConfig ./rc-config.yml \ +$ > /dev/null 2>&1 & +# Wait some period of time for the heap dump data to be collected + +# The data will not be persisted until the process is stopped. 
You can gracefully +# stop the process with the following command and if you want to hard kill it +# add `-9` to the end of the `kill` command although that should not be necessary +$ ps aux | grep lodestar | grep -v grep | awk '{print $2}' | head -n 1 | xargs kill +``` + +### Collecting a heap dump on a running process + +Collecting a heap dump can also be done on a running process. There are both advantages and disadvantages to this approach. The main advantage is that you can collect a heap dump without having to restart. The down side is that the dump will only include allocations/de-allocations while the tracker is running. This means that all the non-paired calls to malloc/free will register as leaks. It will also not give a true representation of how the heap is being used. On the upside, however, the dump will be much smaller in size. + +It is important to note a warning that is in the `heaptrack` source code: + +_WARNING: Runtime-attaching heaptrack is UNSTABLE and can lead to CRASHES in your application, especially after you detach heaptrack again. You are hereby warned, use it at your own risk!_ + +```sh +# Move to the `beacon-node` directory +$ cd ../beacon-node + +# Start lodestar +$ node_debug \ +$ --max-old-space-size=8192 \ +$ ../lodestar/packages/cli/bin/lodestar.js \ +$ beacon \ +$ --rcConfig ./rc-config.yml \ +$ > /dev/null 2>&1 & +# Wait some period of time to start collecting the dump + +# GDB is required to inject heaptrack into a running process +# so you may need to install it +$ sudo apt-get update +$ sudo apt-get install -y gdb + +# Elevated `perf` permissions are also required depending on your +# system configuration.
Change until the next reboot +$ echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope + +# Get the pid of the lodestar process +$ export LODESTAR_PID=$(ps aux | grep lodestar | grep -v grep | awk '{print $2}' | head -n 1) + +# Inject heaptrack into the running process +$ heaptrack --pid $LODESTAR_PID + +heaptrack output will be written to "/home/user/beacon-node/heaptrack.node_debug.111868.zst" +/usr/lib/heaptrack/libheaptrack_preload.so +injecting heaptrack into application via GDB, this might take some time... +injection finished +# Wait some period of time to collect the heap dump. See below +# for the termination command that can be run from a separate +# terminal when ready to stop collecting data +Terminated +removing heaptrack injection via GDB, this might take some time... +Heaptrack finished! Now run the following to investigate the data: + + heaptrack --analyze "/home/user/beacon-node/heaptrack.node_debug.111868.zst" +``` + +There is a trap in `heaptrack` but the process uses a nested shell to do the actual injection so it is not possible to just Ctrl+C out of the injected process without corrupting the output file. To properly kill the collection one needs to target the nested shell pid. Here is a helper command to target that process: + +```sh +ps -ef | grep '[h]eaptrack --pid' | awk '$3 == '$(ps -ef | grep '[h]eaptrack --pid' | awk '$3 != 1 {print $2}' | head -n 1)' {print $2}' | xargs -r kill +``` + +After working with the injected process for a while, I cannot honestly recommend it. It can work in a pinch, and is best suited for when the profiled process can be exited gracefully without repercussions (not on mainnet for instance). The benefit, though, is that the heapdump will be much smaller and targeted to runtime (will not have the transient, startup allocations) which can make it easier to see what is happening. 
+ +### Installing `heaptrack-gui` on Linux + +```sh +# You can you apt, apt-get or aptitude to install the gui +$ sudo apt-get update +$ sudo apt-get install -y heaptrack-gui +``` + +### Installing `heaptrack-gui` on OSX + +At the time of writing this there is no official pre-built binary for OSX. This was a bit of a challenge but it was WELL worth the effort as the tool works very well. There were a number of bugs along the way while "using the docs" so your mileage may vary, but this is what worked for me. + +Most of the dependencies can be installed via Homebrew and the tool itself needs to be built from source. There was one dependency that needed to be built from source. This process assumes a working folder that the repos can be cloned into. + +```sh +# Start in the root folder where the repos will be cloned +$ brew install qt@5 + +# prepare tap of kde-mac/kde +$ brew tap kde-mac/kde https://invent.kde.org/packaging/homebrew-kde.git +$ "$(brew --repo kde-mac/kde)/tools/do-caveats.sh" + +# install the kde-mac and other required dependencies +$ brew install kde-mac/kde/kf5-kcoreaddons \ +$ kde-mac/kde/kf5-kitemmodels \ +$ kde-mac/kde/kf5-kconfigwidgets \ +$ kde-mac/kde/kdiagram \ +$ extra-cmake-modules \ +$ ki18n \ +$ threadweaver \ +$ boost \ +$ zstd \ +$ gettext + +# There is a bug in the current version of kde-mac/kde and one dependency needs +# to be built manually. This is the workaround to get it built. +$ git clone https://invent.kde.org/frameworks/kio.git +$ mkdir kio/build +$ cd kio/build +$ export CMAKE_PREFIX_PATH=$(brew --prefix qt@5) +$ cmake -G Ninja -DCMAKE_BUILD_TYPE=Release .. +$ ninja +$ sudo ninja install +$ cd ../.. 
+ +# Now make sure that the dependencies are available to the system during runtime +$ ln -sfv "$(brew --prefix)/share/kf5" "$HOME/Library/Application Support" +$ ln -sfv "$(brew --prefix)/share/knotifications5" "$HOME/Library/Application Support" +$ ln -sfv "$(brew --prefix)/share/kservices5" "$HOME/Library/Application Support" +$ ln -sfv "$(brew --prefix)/share/kservicetypes5" "$HOME/Library/Application Support" + +# We are now ready to build the heaptrack_gui binaries for analysis on OSX +$ git clone https://invent.kde.org/sdk/heaptrack.git +$ cd heaptrack +$ mkdir build +$ cd build +$ CMAKE_PREFIX_PATH=$(brew --prefix qt@5) PATH=$PATH:/opt/homebrew/opt/gettext/bin cmake .. +$ cmake -DCMAKE_BUILD_TYPE=Release .. +$ make heaptrack_gui +$ sudo make install +# You can now find heaptrack_gui with your gui Applications. It is default +# placed as /Applications/KDE/heaptrack_gui.app +``` diff --git a/docs/pages/tools/perf.md b/docs/pages/tools/perf.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/pages/trouble-shooting.md b/docs/pages/trouble-shooting.md new file mode 100644 index 000000000000..144aeb90ce20 --- /dev/null +++ b/docs/pages/trouble-shooting.md @@ -0,0 +1 @@ +# Trouble Shooting diff --git a/docs/pages/validator-management/key-management.md b/docs/pages/validator-management/key-management.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/pages/validator-management/multiple-and-fallback-validation.md b/docs/pages/validator-management/multiple-and-fallback-validation.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/usage/validator-management.md b/docs/pages/validator-management/validator-management.md similarity index 100% rename from docs/usage/validator-management.md rename to docs/pages/validator-management/validator-management.md diff --git a/docs/pages/validator-management/withdrawals.md b/docs/pages/validator-management/withdrawals.md new file mode 100644 index 
000000000000..e69de29bb2d1 diff --git a/mkdocs.yml b/mkdocs.yml deleted file mode 100644 index 759a8dfd7151..000000000000 --- a/mkdocs.yml +++ /dev/null @@ -1,85 +0,0 @@ -site_name: Lodestar Documentation -site_description: Lodestar Documentation - Typescript Ethereum Consensus client -site_url: https://chainsafe.github.io/lodestar - -repo_name: chainsafe/lodestar -repo_url: https://github.com/chainsafe/lodestar - -# Configuration -theme: - name: material - logo: assets/lodestar_icon_300.png - favicon: assets/round-icon.ico - palette: - - scheme: preference - media: "(prefers-color-scheme: light)" - primary: black - accent: deep purple - toggle: - icon: material/weather-night - name: Switch to dark mode - - scheme: slate - media: "(prefers-color-scheme: dark)" - primary: black - accent: deep purple - toggle: - icon: material/weather-sunny - name: Switch to light mode - nav_style: dark - -plugins: - - search - - mermaid2: - version: 8.6.4 - arguments: - theme: | - ^(window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches) ? 
'dark' : 'light' - -markdown_extensions: - - meta - - codehilite: - guess_lang: false - - admonition - - toc: - permalink: true - - pymdownx.superfences: - # make exceptions to highlighting of code (for mermaid): - custom_fences: - - name: mermaid - class: mermaid - format: !!python/name:mermaid2.fence_mermaid -extra_css: - - stylesheets/extras.css - -# Socials -extra: - social: - - icon: fontawesome/brands/github-alt - link: https://github.com/ChainSafe/lodestar - - icon: fontawesome/brands/twitter - link: https://twitter.com/ChainSafeth - - icon: fontawesome/brands/discord - link: https://discord.gg/yjyvFRP - - icon: fontawesome/brands/medium - link: https://blog.chainsafe.io - -# Customize left navigation menu -nav: - - Getting Started: index.md - - Installation: - - Install from source: install/source.md - - Install from NPM: install/npm.md - - Install with Docker: install/docker.md - - Using Lodestar: - - Beacon management: usage/beacon-management.md - - Local testnet: usage/local.md - - Validator management: usage/validator-management.md - - Prometheus & Grafana Setup: usage/prometheus-grafana.md - - MEV Builder Integration: usage/mev-integration.md - - Client monitoring: usage/client-monitoring.md - - Reference: - - Command line: reference/cli.md - - Libraries: libraries/index.md - - Design: - - Lodestar package structure: design/depgraph.md - - Contributing: contributing.md diff --git a/package.json b/package.json index 54cbb6da8406..158ac2affe68 100644 --- a/package.json +++ b/package.json @@ -11,7 +11,7 @@ "clean": "rm -rf ./packages/*/lib ./packages/*/*.tsbuildinfo", "clean:nm": "rm -rf ./packages/*/node_modules ./node_modules", "build": "lerna run build", - "build:docs": "lerna run build:refdocs && ./scripts/prepare-docs.sh", + "build:docs": "lerna run check-readme && lerna run build:docs && ./scripts/prepare-docs.sh", "build:watch": "lerna exec --parallel -- 'yarn run build:watch'", "build:ifchanged": "lerna exec -- ../../scripts/build_if_changed.sh", 
"lint": "eslint --color --ext .ts packages/*/src packages/*/test", @@ -22,6 +22,7 @@ "check-build": "lerna run check-build", "check-readme": "lerna run check-readme", "check-types": "lerna run check-types", + "check-spelling": "pyspelling -c .pyspelling.yml -v", "coverage": "lerna run coverage", "test": "lerna run test --concurrency 1", "test:unit": "lerna run test:unit --concurrency 1", diff --git a/packages/cli/docsgen/changeCase.ts b/packages/cli/docsgen/changeCase.ts new file mode 100644 index 000000000000..096d26e61833 --- /dev/null +++ b/packages/cli/docsgen/changeCase.ts @@ -0,0 +1,29 @@ +const wordPattern = new RegExp(["[A-Z][a-z]+", "[A-Z]+(?=[A-Z][a-z])", "[A-Z]+", "[a-z]+", "[0-9]+"].join("|"), "g"); +function splitString(str: string): string[] { + const normalized = str + // sanitize characters that cannot be included + .replace(/[!@#$%^&*]/g, "-") + // normalize separators to '-' + .replace(/[._/\s\\]/g, "-") + .split("-"); + return normalized.map((seg) => seg.match(wordPattern) || []).flat(); +} +function capitalizeFirstLetter(segment: string): string { + return segment[0].toUpperCase() + segment.slice(1); +} +function lowercaseFirstLetter(segment: string): string { + return segment[0].toLowerCase() + segment.slice(1); +} +function toKebab(str: string): string { + return splitString(str).join("-").toLowerCase(); +} +function toPascal(str: string): string { + return splitString(str).map(capitalizeFirstLetter).join(""); +} +function toCamel(str: string): string { + return lowercaseFirstLetter(toPascal(str)); +} +function toEnv(str: string): string { + return splitString(str).join("_").toUpperCase(); +} +export {capitalizeFirstLetter, toKebab, toCamel, toPascal, toEnv}; diff --git a/packages/cli/docsgen/index.ts b/packages/cli/docsgen/index.ts index 5e0a3364f73d..524f70a51c5b 100644 --- a/packages/cli/docsgen/index.ts +++ b/packages/cli/docsgen/index.ts @@ -1,103 +1,25 @@ import fs from "node:fs"; import path from "node:path"; -import {Options} from 
"yargs"; -import omit from "lodash/omit.js"; import {cmds} from "../src/cmds/index.js"; -import {CliCommand} from "../src/util/index.js"; import {globalOptions} from "../src/options/index.js"; -import {beaconOptions} from "../src/cmds/beacon/options.js"; -import {renderMarkdownSections, toMarkdownTable, MarkdownSection} from "./markdown.js"; +import {renderCommandPage} from "./markdown.js"; // Script to generate a reference of all CLI commands and options // Outputs a markdown format ready to be consumed by mkdocs // // Usage: -// ts-node docsgen docs/cli.md +// ts-node packages/cli/docsgen // -// After generation the resulting .md should be mv to the path expected +// After generation the resulting .md files, they are written to the path expected // by the mkdocs index and other existing paths in the documentation -const docsMarkdownPath = process.argv[2]; -if (!docsMarkdownPath) throw Error("Run script with output path: 'ts-node docsgen docs/cli.md'"); +const dirname = path.dirname(new URL(import.meta.url).pathname); +const LODESTAR_COMMAND = "./lodestar"; +const DOCS_PAGES_FOLDER = path.join(dirname, "..", "..", "..", "docs", "pages"); -const docsString = renderMarkdownSections([ - { - title: "Command Line Reference", - body: "This reference describes the syntax of the Lodestar CLI commands and their options.", - subsections: [ - { - title: "Global Options", - body: getOptionsTable(globalOptions), - }, - ...cmds.map((cmd) => cmdToMarkdownSection(cmd)), - ], - }, -]); - -fs.mkdirSync(path.parse(docsMarkdownPath).dir, {recursive: true}); -fs.writeFileSync(docsMarkdownPath, docsString); - -/** - * Parse an CliCommand type recursively and output a MarkdownSection - */ -// eslint-disable-next-line @typescript-eslint/no-explicit-any -function cmdToMarkdownSection(cmd: CliCommand, parentCommand?: string): MarkdownSection { - const commandJson = [parentCommand, cmd.command.replace("", "")].filter(Boolean).join(" "); - const body = [cmd.describe]; - - if (cmd.examples) { 
- body.push("**Examples**"); - for (const example of cmd.examples) { - if (example.command.startsWith("lodestar")) example.command = `lodestar ${example.command}`; - body.push(example.description); - body.push(`\`\`\` \n${example.command}\n\`\`\``); - } - } - - if (cmd.options) { - body.push("**Options**"); - - if (cmd.subcommands) { - body.push("The options below apply to all sub-commands."); - } - - // De-duplicate beaconOptions. If all beaconOptions exists in this command, skip them - if ( - cmds.some((c) => c.command === "beacon") && - commandJson !== "beacon" && - Object.keys(beaconOptions).every((key) => cmd.options?.[key]) - ) { - cmd.options = omit(cmd.options, Object.keys(beaconOptions)); - body.push(`Cmd \`${commandJson}\` has all the options from the [\`beacon\` cmd](#beacon).`); - } - - body.push(getOptionsTable(cmd.options)); - } - return { - title: `\`${commandJson}\``, - body, - subsections: (cmd.subcommands || []).map((subcmd) => cmdToMarkdownSection(subcmd, commandJson)), - }; -} - -/** - * Render a Yargs options dictionary to a markdown table - */ -function getOptionsTable(options: Record, {showHidden}: {showHidden?: boolean} = {}): string { - const visibleOptions = Object.entries(options).filter(([, opt]) => showHidden || !opt.hidden); - - if (visibleOptions.length === 0) { - return ""; - } - - /* eslint-disable @typescript-eslint/naming-convention */ - return toMarkdownTable( - visibleOptions.map(([key, opt]) => ({ - Option: `\`--${key}\``, - Type: opt.type ?? "", - Description: opt.description ?? "", - Default: String(opt.defaultDescription || opt.default || ""), - })), - ["Option", "Type", "Description", "Default"] - ); +for (const cmd of cmds) { + const docstring = renderCommandPage(cmd, globalOptions, LODESTAR_COMMAND); + const folder = path.join(DOCS_PAGES_FOLDER, cmd.docsFolder ?? 
""); + if (!fs.existsSync(folder)) fs.mkdirSync(folder, {recursive: true}); + fs.writeFileSync(path.join(folder, `${cmd.command}-cli.md`), docstring); } diff --git a/packages/cli/docsgen/markdown.ts b/packages/cli/docsgen/markdown.ts index 80952f367c73..c05c7ad8c90f 100644 --- a/packages/cli/docsgen/markdown.ts +++ b/packages/cli/docsgen/markdown.ts @@ -1,41 +1,273 @@ -export type MarkdownSection = { - title: string; - body: string | string[]; - subsections?: MarkdownSection[]; -}; +import {CliOptionDefinition, CliCommand, CliExample, CliCommandOptions} from "../src/util/index.js"; +import {toKebab} from "./changeCase.js"; + +const DEFAULT_SEPARATOR = "\n\n"; +const LINE_BREAK = "\n\n
"; + +function renderExampleBody(example: CliExample, lodestarCommand?: string): string { + const cliExample = [ + `\`\`\` +${lodestarCommand ? `${lodestarCommand} ` : ""}${example.command} +\`\`\``, + ]; + + if (example.description) { + cliExample.unshift(example.description); + } + + return cliExample.join(DEFAULT_SEPARATOR); +} /** - * Render MarkdownSection recursively tracking its level depth + * Renders a single example like shown below. Title and description are optional. + * ------------------- + * #### Basic `validator` command example + * + * Run one validator client with all the keystores available in the directory .goerli/keystores + * + * ``` + * validator --network goerli + * ``` + * ------------------- */ -export function renderMarkdownSections(sections: MarkdownSection[], level = 1): string { - return sections - .map((section) => { - const parts = section.title ? [`${"\n" + "#".repeat(level)} ${section.title}`] : [""]; - if (section.body) { - parts.push(Array.isArray(section.body) ? section.body.join("\n\n") : section.body); - } - if (section.subsections) { - parts.push(renderMarkdownSections(section.subsections, level + 1)); - } - return parts.join(section.title ? "\n" : ""); - }) - .join("\n"); +function renderCommandExample(example: CliExample, lodestarCommand?: string): string { + const title = example.title ? 
`#### ${example.title}${DEFAULT_SEPARATOR}` : ""; + return title.concat(renderExampleBody(example, lodestarCommand)); } /** - * Render an array of objects as a markdown table + * Renders a example section like shown below + * ------------------- + * ## Examples + * + * #### Basic `validator` command example + * + * Run one validator client with all the keystores available in the directory .goerli/keystores + * + * ``` + * validator --network goerli + * ``` + * + * #### Advanced `validator` command example + * + * Run one validator client with all the keystores available in the directory .goerli/keystores + * using an rcConfig file for configuration + * + * ``` + * validator --rcConfig validator-dir/validator.rcconfig.yaml + * ``` + * ------------------- */ -export function toMarkdownTable(rows: T[], headers: (keyof T)[]): string { - return [ - toMarkdownTableRow(headers as string[]), - toMarkdownTableRow(headers.map(() => "---")), - ...rows.map((row) => toMarkdownTableRow(headers.map((key) => row[key]))), - ].join("\n"); +function renderExamplesSection(examples: CliExample[], sectionTitle?: string, lodestarCommand?: string): string { + const exampleSection = [sectionTitle]; + for (const example of examples) { + exampleSection.push(renderCommandExample(example, lodestarCommand)); + } + return exampleSection.filter(Boolean).join(DEFAULT_SEPARATOR); } /** - * Render an array of items as a markdown table row + * Renders a single cli option like shown below + * ------------------- + * #### `--logLevel` + * + * Logging verbosity level for emitting logs to terminal + * + * type: string + * default: info + * choices: "error", "warn", "info", "verbose", "debug" + * example: Set log level to debug + * + * ``` + * validator --logLevel debug + * ``` + * ------------------- */ -export function toMarkdownTableRow(row: string[]): string { - return `| ${row.join(" | ")} |`; +function renderOption(optionName: string, option: CliOptionDefinition): string | undefined { + if 
(option.hidden) return; + + const commandOption = [`#### \`--${optionName}\``]; + if (option.description) commandOption.push(`description: ${option.description}`); + + if (option.demandOption === true) { + commandOption.push("required: true"); + } + + if (option.type === "array") { + commandOption.push("type: `string[]`"); + } else if (option.type) { + commandOption.push(`type: \`${option.type}\``); + } + + if (option.choices) { + commandOption.push(`choices: ${option.choices.map((c) => `"${c}"`).join(", ")}`); + } + + let defaultValue = String(option.defaultDescription || option.default || ""); + if (defaultValue) { + if (option.type === "string" || option.string) { + defaultValue = `"${defaultValue}"`; + } + if (option.type === "array") { + // eslint-disable-next-line quotes + if (!defaultValue.includes(`"`)) { + defaultValue = `"${defaultValue}"`; + } + defaultValue = `[ ${defaultValue} ]`; + } + commandOption.push(`default: \`${defaultValue}\``); + } + + if (option.example) { + commandOption.push(`example: ${renderExampleBody(option.example)}`); + } + + return commandOption.join(DEFAULT_SEPARATOR).concat(LINE_BREAK); +} + +function renderOptions(options: CliCommandOptions>, title: string, description?: string): string { + const optionsSection = [title, description]; + for (const [name, option] of Object.entries(options)) { + const optionString = renderOption(name, option as CliOptionDefinition); + // Skip hidden options + if (optionString) { + optionsSection.push(optionString); + } + } + return optionsSection.filter(Boolean).join(DEFAULT_SEPARATOR); +} + +interface SubCommandDefinition { + command: string; + description?: string; + options?: CliCommandOptions>; + examples?: CliExample[]; +} + +function renderSubCommandsList(command: string, subCommands: SubCommandDefinition[]): string { + const list = [ + `## Available Sub-Commands + +The following sub-commands are available with the \`${command}\` command:`, + ]; + + for (const sub of subCommands) { + 
list.push(`- [${sub.command}](#${toKebab(sub.command)})`); + } + + return list.join(DEFAULT_SEPARATOR); +} + +/** + * ## `validator slashing-protection import` + * + * Import an interchange file from another client + * + * #### `validator slashing-protection import` Options + * + * `--file` + * + * The slashing protection interchange file to import (.json). + * + * type: string + * required: true + * + * #### Sub-Command Examples + * + * Import an interchange file to the slashing protection DB + * + * ``` + * ./lodestar validator slashing-protection import --network goerli --file interchange.json + * ``` + */ +function renderSubCommand(sub: SubCommandDefinition, lodestarCommand?: string): string { + const subCommand = [`## \`${sub.command}\``]; + + if (sub.description) { + subCommand.push(sub.description); + } + + if (sub.examples) { + subCommand.push(renderExamplesSection(sub.examples, `### \`${sub.command}\` Examples`, lodestarCommand)); + } + + if (sub.options) { + subCommand.push( + renderOptions( + sub.options, + `### \`${sub.command}\` Options`, + "_Supports all parent command options plus the following:_\n\n
" + ) + ); + } + + return subCommand.join(DEFAULT_SEPARATOR); +} + +function getSubCommands(rootCommand: string, sub: CliCommand): SubCommandDefinition[] { + const subCommands = [] as SubCommandDefinition[]; + + if (sub.command.includes("")) { + // If subcommand is a nested subcommand recursively render each of its subcommands by + // merging its props with its nested children but do not render the subcommand itself + for (const subSub of sub.subcommands ?? []) { + subCommands.push( + ...getSubCommands(rootCommand, { + ...subSub, + command: sub.command.replace("", subSub.command), + options: { + ...(sub.options ?? {}), + ...(subSub.options ?? {}), + }, + examples: sub.examples?.concat(subSub.examples ?? []), + }) + ); + } + } else { + // If subcommand is not nested build actual markdown + subCommands.push({ + command: `${rootCommand} ${sub.command}`, + description: sub.describe, + options: sub.options, + examples: sub.examples, + }); + + // render any sub-subcommands + if (sub.subcommands) { + for (const subSub of sub.subcommands) { + subCommands.push(...getSubCommands(`${rootCommand} ${sub.command}`, subSub)); + } + } + } + + return subCommands; +} + +export function renderCommandPage( + cmd: CliCommand, + globalOptions: CliCommandOptions>, + lodestarCommand?: string +): string { + const page = [`# \`${cmd.command}\` CLI Command`, cmd.describe]; + + const subCommands = (cmd.subcommands ?? 
[]).map((sub) => getSubCommands(cmd.command, sub)).flat(); + if (subCommands.length > 0) { + page.push(renderSubCommandsList(cmd.command, subCommands)); + } + + if (cmd.examples) { + page.push(renderExamplesSection(cmd.examples, "## Examples", lodestarCommand)); + } + + if (cmd.options) { + page.push(renderOptions({...globalOptions, ...cmd.options}, `## \`${cmd.command}\` Options`)); + } + + if (subCommands.length > 0) { + for (const sub of subCommands) { + page.push(renderSubCommand(sub, lodestarCommand)); + } + } + + return page.join(LINE_BREAK.concat(DEFAULT_SEPARATOR)); } diff --git a/packages/cli/package.json b/packages/cli/package.json index 4089b1c2d5ed..416578065431 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -24,7 +24,7 @@ "build": "tsc -p tsconfig.build.json && yarn write-git-data", "build:release": "yarn clean && yarn run build", "build:watch": "tsc -p tsconfig.build.json --watch", - "build:refdocs": "node --loader ts-node/esm ./docsgen/index.ts docs/cli.md", + "build:docs": "node --loader ts-node/esm ./docsgen/index.ts", "write-git-data": "node lib/util/gitData/writeGitData.js", "check-build": "node -e \"(async function() { await import('./lib/index.js') })()\" lodestar --help", "check-types": "tsc", diff --git a/packages/cli/src/cmds/beacon/index.ts b/packages/cli/src/cmds/beacon/index.ts index 0b2e431aec04..38d1d4cad221 100644 --- a/packages/cli/src/cmds/beacon/index.ts +++ b/packages/cli/src/cmds/beacon/index.ts @@ -6,6 +6,7 @@ import {beaconHandler} from "./handler.js"; export const beacon: CliCommand = { command: "beacon", describe: "Run a beacon chain node", + docsFolder: "beacon-management", examples: [ { command: "beacon --network goerli", diff --git a/packages/cli/src/cmds/beacon/options.ts b/packages/cli/src/cmds/beacon/options.ts index 3947e2ba17d0..c9918b5d2e41 100644 --- a/packages/cli/src/cmds/beacon/options.ts +++ b/packages/cli/src/cmds/beacon/options.ts @@ -1,7 +1,6 @@ -import {Options} from "yargs"; import 
{beaconNodeOptions, paramsOptions, BeaconNodeArgs} from "../../options/index.js"; import {LogArgs, logOptions} from "../../options/logOptions.js"; -import {CliCommandOptions} from "../../util/index.js"; +import {CliCommandOptions, CliOptionDefinition} from "../../util/index.js"; import {defaultBeaconPaths, BeaconPaths} from "./paths.js"; type BeaconExtraArgs = { @@ -144,7 +143,7 @@ type ENRArgs = { nat?: boolean; }; -const enrOptions: Record = { +const enrOptions: Record = { "enr.ip": { description: "Override ENR IP entry", type: "string", @@ -184,7 +183,7 @@ const enrOptions: Record = { export type BeaconArgs = BeaconExtraArgs & LogArgs & BeaconPaths & BeaconNodeArgs & ENRArgs; -export const beaconOptions: {[k: string]: Options} = { +export const beaconOptions: {[k: string]: CliOptionDefinition} = { ...beaconExtraOptions, ...logOptions, ...beaconNodeOptions, diff --git a/packages/cli/src/cmds/bootnode/index.ts b/packages/cli/src/cmds/bootnode/index.ts index c9a7db71eadc..4030c4a73b0f 100644 --- a/packages/cli/src/cmds/bootnode/index.ts +++ b/packages/cli/src/cmds/bootnode/index.ts @@ -7,6 +7,7 @@ export const bootnode: CliCommand = { command: "bootnode", describe: "Run a discv5 bootnode. 
This will NOT perform any beacon node functions, rather, it will run a discv5 service that allows nodes on the network to discover one another.", + docsFolder: "bootnode", options: bootnodeOptions as CliCommandOptions, handler: bootnodeHandler, }; diff --git a/packages/cli/src/cmds/bootnode/options.ts b/packages/cli/src/cmds/bootnode/options.ts index 622d7b2d506a..ab92ec00e155 100644 --- a/packages/cli/src/cmds/bootnode/options.ts +++ b/packages/cli/src/cmds/bootnode/options.ts @@ -1,6 +1,5 @@ -import {Options} from "yargs"; import {LogArgs, logOptions} from "../../options/logOptions.js"; -import {CliCommandOptions} from "../../util/index.js"; +import {CliOptionDefinition, CliCommandOptions} from "../../util/index.js"; import {MetricsArgs, options as metricsOptions} from "../../options/beaconNodeOptions/metrics.js"; import {defaultListenAddress, defaultP2pPort, defaultP2pPort6} from "../../options/beaconNodeOptions/network.js"; @@ -102,7 +101,7 @@ export const bootnodeExtraOptions: CliCommandOptions = { export type BootnodeArgs = BootnodeExtraArgs & LogArgs & MetricsArgs; -export const bootnodeOptions: {[k: string]: Options} = { +export const bootnodeOptions: {[k: string]: CliOptionDefinition} = { ...bootnodeExtraOptions, ...logOptions, ...metricsOptions, diff --git a/packages/cli/src/cmds/dev/index.ts b/packages/cli/src/cmds/dev/index.ts index 728e80b6ce28..d213c8b3218d 100644 --- a/packages/cli/src/cmds/dev/index.ts +++ b/packages/cli/src/cmds/dev/index.ts @@ -6,6 +6,7 @@ import {devHandler} from "./handler.js"; export const dev: CliCommand = { command: "dev", describe: "Quickly bootstrap a beacon node and multiple validators. 
Use for development and testing", + docsFolder: "contribution", examples: [ { command: "dev --genesisValidators 8 --reset", diff --git a/packages/cli/src/cmds/dev/options.ts b/packages/cli/src/cmds/dev/options.ts index ae3737646e4f..4665fe529776 100644 --- a/packages/cli/src/cmds/dev/options.ts +++ b/packages/cli/src/cmds/dev/options.ts @@ -1,5 +1,4 @@ -import {Options} from "yargs"; -import {CliCommandOptions} from "../../util/index.js"; +import {CliCommandOptions, CliOptionDefinition} from "../../util/index.js"; import {beaconOptions, BeaconArgs} from "../beacon/options.js"; import {NetworkName} from "../../networks/index.js"; import {beaconNodeOptions, globalOptions} from "../../options/index.js"; @@ -63,7 +62,7 @@ const devOwnOptions: CliCommandOptions = { * - and have api enabled by default (as it's used by validator) * Note: use beaconNodeOptions and globalOptions to make sure option key is correct */ -const externalOptionsOverrides: Partial> = { +const externalOptionsOverrides: Partial> = { // Custom paths different than regular beacon, validator paths // network="dev" will store all data in separate dir than other networks network: { diff --git a/packages/cli/src/cmds/lightclient/index.ts b/packages/cli/src/cmds/lightclient/index.ts index 6d2a8f1ecb4f..1fceb3823154 100644 --- a/packages/cli/src/cmds/lightclient/index.ts +++ b/packages/cli/src/cmds/lightclient/index.ts @@ -6,6 +6,7 @@ import {lightclientHandler} from "./handler.js"; export const lightclient: CliCommand = { command: "lightclient", describe: "Run lightclient", + docsFolder: "lightclient-prover", examples: [ { command: "lightclient --network goerli", diff --git a/packages/cli/src/cmds/validator/index.ts b/packages/cli/src/cmds/validator/index.ts index 46d7f2327452..49c7211c740d 100644 --- a/packages/cli/src/cmds/validator/index.ts +++ b/packages/cli/src/cmds/validator/index.ts @@ -12,9 +12,11 @@ import {validatorHandler} from "./handler.js"; export const validator: CliCommand = { command: 
"validator", describe: "Run one or multiple validator clients", + docsFolder: "validator-management", examples: [ { command: "validator --network goerli", + title: "Base `validator` command", description: "Run one validator client with all the keystores available in the directory" + ` ${getAccountPaths({dataDir: ".goerli"}, "goerli").keystoresDir}`, diff --git a/packages/cli/src/cmds/validator/options.ts b/packages/cli/src/cmds/validator/options.ts index 25400ecd16d5..4f0ec476f01c 100644 --- a/packages/cli/src/cmds/validator/options.ts +++ b/packages/cli/src/cmds/validator/options.ts @@ -241,7 +241,7 @@ export const validatorOptions: CliCommandOptions = { "builder.selection": { type: "string", description: "Builder block selection strategy `maxprofit`, `builderalways`, `builderonly` or `executiononly`", - defaultDescription: `\`${defaultOptions.builderSelection}\``, + defaultDescription: `${defaultOptions.builderSelection}`, group: "builder", }, @@ -267,7 +267,7 @@ export const validatorOptions: CliCommandOptions = { importKeystoresPassword: { alias: ["passphraseFile"], // Backwards compatibility with old `validator import` cmd description: "Path to a file with password to decrypt all keystores from `importKeystores` option", - defaultDescription: "`./password.txt`", + defaultDescription: "./password.txt", type: "string", }, diff --git a/packages/cli/src/options/paramsOptions.ts b/packages/cli/src/options/paramsOptions.ts index 49ddc1b563f5..643fb991bc61 100644 --- a/packages/cli/src/options/paramsOptions.ts +++ b/packages/cli/src/options/paramsOptions.ts @@ -1,7 +1,6 @@ -import {Options} from "yargs"; import {ChainConfig, chainConfigTypes} from "@lodestar/config"; import {IBeaconParamsUnparsed} from "../config/types.js"; -import {ObjectKeys, CliCommandOptions} from "../util/index.js"; +import {ObjectKeys, CliCommandOptions, CliOptionDefinition} from "../util/index.js"; // No options are statically declared // If an arbitrary key notation is used, it removes type 
safety on most of this CLI arg parsing code. @@ -25,7 +24,7 @@ export function parseBeaconParamsArgs(args: Record): IB } const paramsOptionsByName = ObjectKeys(chainConfigTypes).reduce( - (options: Record, key): Record => ({ + (options: Record, key): Record => ({ ...options, [getArgKey(key)]: { hidden: true, diff --git a/packages/cli/src/util/command.ts b/packages/cli/src/util/command.ts index 32d7b24e02bf..0dd2fd82bc9f 100644 --- a/packages/cli/src/util/command.ts +++ b/packages/cli/src/util/command.ts @@ -1,17 +1,32 @@ import {Options, Argv} from "yargs"; +export interface CliExample { + command: string; + title?: string; + description?: string; +} + +export interface CliOptionDefinition extends Options { + example?: Omit; +} + export type CliCommandOptions = Required<{ [K in keyof OwnArgs]: undefined extends OwnArgs[K] - ? Options + ? CliOptionDefinition : // If arg cannot be undefined it must specify a default value - Options & Required>; + CliOptionDefinition & Required>; }>; // eslint-disable-next-line @typescript-eslint/no-explicit-any export interface CliCommand, ParentArgs = Record, R = any> { command: string; describe: string; - examples?: {command: string; description: string}[]; + /** + * The folder in docs/pages that the cli.md should be placed in. 
If not provided no + * cli flags page will be generated for the command + */ + docsFolder?: string; + examples?: CliExample[]; options?: CliCommandOptions; // 1st arg: any = free own sub command options // 2nd arg: subcommand parent options is = to this command options + parent options @@ -37,7 +52,7 @@ export function registerCommandToYargs(yargs: Argv, cliCommand: CliCommand = Required<{[key in keyof OwnArgs]: Options}>; +export interface CliExample { + command: string; + title?: string; + description?: string; +} + +export interface CliOptionDefinition extends Options { + example?: CliExample; +} + +export type CliCommandOptions = Required<{[key in keyof OwnArgs]: CliOptionDefinition}>; // eslint-disable-next-line @typescript-eslint/no-explicit-any export interface CliCommand, ParentArgs = Record, R = any> { diff --git a/packages/light-client/README.md b/packages/light-client/README.md index 85ebd86d3c19..7afd871b3f2e 100644 --- a/packages/light-client/README.md +++ b/packages/light-client/README.md @@ -1,26 +1,129 @@ -# Lodestar Light-client +# Lodestar Light Client + +Ethereum light clients provide a pathway for users to interact with the Ethereum blockchain in a trust-minimized manner, comparable to the level of trust required when engaging with a third-party provider like Infura or EtherScan. Not that those platforms are bad, but trust in any centralized provider goes against the ethos of blockchain. Light clients are a way that low-power devices, like cell phones, can do self validation of transactions and dApp state. + +Unlike full nodes, light clients do not download and store the entire blockchain. Instead, they download only the headers of each block and employ Merkle proofs to verify transactions. This enables a quick synchronization with the network and access the latest information without using significant system resources​. 
This streamlined approach to accessing Ethereum is crucial, especially in scenarios where full-scale network participation is infeasible or undesired. + +The evolution of light clients is emblematic of the broader trajectory of Ethereum towards becoming more accessible and resource-efficient, making blockchain technology more inclusive and adaptable to a wide array of use cases and environments. The Altair hard fork introduced sync committees to allow light-clients to synchronize to the network. + +## Prerequisites [![Discord](https://img.shields.io/discord/593655374469660673.svg?label=Discord&logo=discord)](https://discord.gg/aMxzVcr) [![Eth Consensus Spec v1.1.10](https://img.shields.io/badge/ETH%20consensus--spec-1.1.10-blue)](https://github.com/ethereum/consensus-specs/releases/tag/v1.1.10) ![ES Version](https://img.shields.io/badge/ES-2021-yellow) ![Node Version](https://img.shields.io/badge/node-16.x-green) +[Yarn](https://yarnpkg.com/) > This package is part of [ChainSafe's Lodestar](https://lodestar.chainsafe.io) project -## Prerequisites +## Requirements for Running a Light-Client + +Access to an beacon node that supports the light client specification is necessary. The client must support the following routes from the [consensus API spec](https://github.com/ethereum/consensus-specs/tree/dev): -- [NodeJS](https://nodejs.org/) (LTS) -- [Yarn](https://yarnpkg.com/) +- `/eth/v1/beacon/light_client/updates` +- `/eth/v1/beacon/light_client/optimistic_update` +- `/eth/v1/beacon/light_client/finality_update` +- `/eth/v1/beacon/light_client/bootstrap/{block_root}` +- `/eth/v0/beacon/light_client/committee_root` -## What you need +System requirements are quite low so its possible to run a light client in the browser as part of a website. There are a few examples of this on github that you can use as reference, our [prover](https://chainsafe.github.io/lodestar/lightclient-prover/prover.md) being one of them. 
-You will need to go over the [specification](https://github.com/ethereum/consensus-specs). +You can find more information about the light-client protocol in the [specification](https://github.com/ethereum/consensus-specs). ## Getting started - Follow the [installation guide](https://chainsafe.github.io/lodestar/) to install Lodestar. - Quickly try out the whole stack by [starting a local testnet](https://chainsafe.github.io/lodestar/usage/local). +## Light-Client CLI Example + +It is possible to start up the light-client as a standalone process. + +```bash +lodestar lightclient \ + --network mainnet \ + --beacon-api-url https://beacon-node.your-domain.com \ + --checkpoint-root "0xccaff4b99986a7b05e06738f1828a32e40799b277fd9f9ff069be55341fe0229" +``` + +## Light-Client Programmatic Example + +For this example we will assume there is a running beacon node at `https://beacon-node.your-domain.com` + +```ts +import {Api} from "@lodestar/api/beacon"; +import {ApiError} from "@lodestar/api"; +import {Bytes32} from "@lodestar/types"; +import {createChainForkConfig} from "@lodestar/config"; +import {networksChainConfig} from "@lodestar/config/networks"; +import { + GenesisData, + Lightclient, + LightclientEvent, + RunStatusCode, + getLcLoggerConsole +} from `@lodestar/lightclient`; + +async function getGenesisData(api: Pick): Promise { + const res = await api.beacon.getGenesis(); + ApiError.assert(res); + + return { + genesisTime: Number(res.response.data.genesisTime), + genesisValidatorsRoot: res.response.data.genesisValidatorsRoot, + }; +} + +async function getSyncCheckpoint(api: Pick): Promise { + const res = await api.beacon.getStateFinalityCheckpoints("head"); + ApiError.assert(res); + return res.response.data.finalized.root; +} + +const config = createChainForkConfig(networksChainConfig.mainnet); + +const logger = getLcLoggerConsole({logDebug: Boolean(process.env.DEBUG)}); + +const api = getClient({urls: ["https://beacon-node.your-domain.com"]}, {config}); + +const 
transport = new LightClientRestTransport(api); + +const lightclient = await Lightclient.initializeFromCheckpointRoot({ + config, + logger, + transport, + genesisData: await getGenesisData(api), + checkpointRoot: await getSyncCheckpoint(api), + opts: { + allowForcedUpdates: true, + updateHeadersOnForcedUpdate: true, + } +}); + +// Wait for the lightclient to start +await new Promise((resolve) => { + const lightclientStarted = (status: RunStatusCode): void => { + if (status === RunStatusCode.started) { + this.lightclient?.emitter.off(LightclientEvent.statusChange, lightclientStarted); + resolve(); + } + }; + lightclient?.emitter.on(LightclientEvent.statusChange, lightclientStarted); + logger.info("Initiating lightclient"); + lightclient?.start(); +}); + +logger.info("Lightclient synced"); + +lightclient.emitter.on(LightclientEvent.lightClientFinalityHeader, async (finalityUpdate) => { + console.log(finalityUpdate); +}); + +lightclient.emitter.on(LightclientEvent.lightClientOptimisticHeader, async (optimisticUpdate) => { + console.log(optimisticUpdate); +}); +``` + ## Contributors Read our [contributors document](/CONTRIBUTING.md), [submit an issue](https://github.com/ChainSafe/lodestar/issues/new/choose) or talk to us on our [discord](https://discord.gg/yjyvFRP)! 
diff --git a/packages/prover/src/utils/command.ts b/packages/prover/src/utils/command.ts index f22aca319af0..81a3993f3c43 100644 --- a/packages/prover/src/utils/command.ts +++ b/packages/prover/src/utils/command.ts @@ -1,6 +1,16 @@ import {Options, Argv} from "yargs"; -export type CliCommandOptions = Required<{[key in keyof OwnArgs]: Options}>; +export interface CliExample { + command: string; + title?: string; + description?: string; +} + +export interface CliOptionDefinition extends Options { + example?: CliExample; +} + +export type CliCommandOptions = Required<{[key in keyof OwnArgs]: CliOptionDefinition}>; // eslint-disable-next-line @typescript-eslint/no-explicit-any export interface CliCommand, ParentArgs = Record, R = any> { diff --git a/scripts/prepare-docs.sh b/scripts/prepare-docs.sh index 5475f22c398e..c46e2596440d 100755 --- a/scripts/prepare-docs.sh +++ b/scripts/prepare-docs.sh @@ -1,17 +1,19 @@ #!/bin/bash DOCS_DIR=docs +ASSETS_DIR=assets # exit when any command fails set -e -# Move autogenerated reference -mkdir -p $DOCS_DIR/reference -mv packages/cli/docs/cli.md $DOCS_DIR/reference/cli.md +# Copy contributing docs +cp CONTRIBUTING.md $DOCS_DIR/pages/contribution/getting-started.md -# Copy contributing doc -cp CONTRIBUTING.md $DOCS_DIR/contributing.md +# Copy package README.md to docs +cp -r packages/light-client/README.md $DOCS_DIR/pages/lightclient-prover/lightclient.md +cp -r packages/prover/README.md $DOCS_DIR/pages/lightclient-prover/prover.md # Copy visual assets -rm -rf $DOCS_DIR/assets -cp -r assets $DOCS_DIR/assets +rm -rf $DOCS_DIR/pages/assets $DOCS_DIR/pages/images +cp -r $ASSETS_DIR $DOCS_DIR/pages/assets +cp -r $DOCS_DIR/images $DOCS_DIR/pages/images