Skip to content

Commit

Permalink
feat: add prometheus custom configuration for participants (#354)
Browse files Browse the repository at this point in the history
  • Loading branch information
cbermudez97 authored Nov 8, 2023
1 parent 764b7dc commit e9bbc7d
Show file tree
Hide file tree
Showing 6 changed files with 129 additions and 25 deletions.
21 changes: 20 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -199,6 +199,15 @@ participants:
# network parameter num_validator_keys_per_node
validator_count: null

# Additional Prometheus configuration for this participant's Prometheus targets.
# The execution, beacon and validator client targets on Prometheus will include
# this configuration.
prometheus_config:
# Scrape interval to be used. Defaults to 15 seconds
scrape_interval: 15s
# Additional labels to be added. Defaults to empty
labels: {}

# Default configuration parameters for the Eth network
network_params:
# The network ID of the network.
Expand Down Expand Up @@ -315,6 +324,13 @@ mev_params:
mev_relay_website_extra_args: []
# Extra parameters to send to the builder
mev_builder_extra_args: []
# Prometheus additional configuration for the mev builder participant.
# Execution, beacon and validator client targets on prometheus will include this configuration.
mev_builder_prometheus_config:
# Scrape interval to be used. Defaults to 15 seconds
scrape_interval: 15s
# Additional labels to be added. Defaults to empty
labels: {}
# Image to use for mev-flood
mev_flood_image: flashbots/mev-flood
# Extra parameters to send to mev-flood
Expand Down Expand Up @@ -428,9 +444,11 @@ snooper_enabled: true
</details>
## Custom labels for Docker and Kubernetes
There are 4 custom labels that can be used to identify the nodes in the network. These labels can also be used to run chaos tests on specific nodes. An example of these labels is as follows:
Execution Layer (EL) nodes:
```sh
"com.kurtosistech.custom.ethereum-package-client": "geth",
"com.kurtosistech.custom.ethereum-package-client-image": "ethereum-client-go-latest",
Expand All @@ -439,6 +457,7 @@ Execution Layer (EL) nodes:
```
Consensus Layer (CL) nodes - Beacon:
```sh
"com.kurtosistech.custom.ethereum-package-client": "lighthouse",
"com.kurtosistech.custom.ethereum-package-client-image": "sigp-lighthouse-latest",
Expand All @@ -447,6 +466,7 @@ Consensus Layer (CL) nodes - Beacon:
```
Consensus Layer (CL) nodes - Validator:
```sh
"com.kurtosistech.custom.ethereum-package-client": "lighthouse",
"com.kurtosistech.custom.ethereum-package-client-image": "sigp-lighthouse-latest",
Expand Down Expand Up @@ -521,7 +541,6 @@ Here's a table of where the keys are used
| 5 | eip4788_deployment | ✅ | | As contract deployer |
| 6 | mev_custom_flood | ✅ | | As the sender of balance |


## Developing On This Package

First, install prerequisites:
Expand Down
19 changes: 13 additions & 6 deletions network_params.yaml
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
participants:
- el_client_type: geth
el_client_image: ethereum/client-go:latest
el_client_log_level: ''
el_client_log_level: ""
el_extra_params: []
cl_client_type: lighthouse
cl_client_image: sigp/lighthouse:latest
cl_client_log_level: ''
cl_client_log_level: ""
beacon_extra_params: []
validator_extra_params: []
builder_network_params: null
Expand All @@ -25,14 +25,18 @@ participants:
v_min_mem: 0
v_max_mem: 0
count: 2
prometheus_config:
scrape_interval: 15s
labels: {}
network_params:
network_id: '3151908'
deposit_contract_address: '0x4242424242424242424242424242424242424242'
network_id: "3151908"
deposit_contract_address: "0x4242424242424242424242424242424242424242"
seconds_per_slot: 12
num_validator_keys_per_node: 64
preregistered_validator_keys_mnemonic: 'giant issue aisle success illegal bike spike
preregistered_validator_keys_mnemonic:
"giant issue aisle success illegal bike spike
question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy
very lucky have athlete'
very lucky have athlete"
genesis_delay: 120
max_churn: 8
ejection_balance: 16000000000
Expand All @@ -58,6 +62,9 @@ mev_params:
mev_relay_housekeeper_extra_args: []
mev_relay_website_extra_args: []
mev_builder_extra_args: []
mev_builder_prometheus_config:
scrape_interval: 15s
labels: {}
mev_flood_image: flashbots/mev-flood
mev_flood_extra_args: []
mev_flood_seconds_per_bundle: 15
Expand Down
14 changes: 12 additions & 2 deletions src/node_metrics_info.star
Original file line number Diff line number Diff line change
@@ -1,3 +1,13 @@
# This is a dictionary, as it will get serialized to JSON.
def new_node_metrics_info(name, path, url):
return {"name": name, "path": path, "url": url}
def new_node_metrics_info(
    name,
    path,
    url,
    config=None,
):
    """Build the JSON-serializable metrics-info record for a node.

    name: job/target name for this metrics endpoint.
    path: HTTP path where metrics are exposed.
    url: host:port of the metrics endpoint.
    config: optional per-participant Prometheus settings (scrape interval,
        extra labels); None when no custom configuration applies.
    """
    metrics_info = {"name": name, "path": path, "url": url}
    metrics_info["config"] = config
    return metrics_info
15 changes: 15 additions & 0 deletions src/package_io/input_parser.star
Original file line number Diff line number Diff line change
Expand Up @@ -145,6 +145,10 @@ def input_parser(plan, input_args):
ethereum_metrics_exporter_enabled=participant[
"ethereum_metrics_exporter_enabled"
],
prometheus_config=struct(
scrape_interval=participant["prometheus_config"]["scrape_interval"],
labels=participant["prometheus_config"]["labels"],
),
)
for participant in result["participants"]
],
Expand Down Expand Up @@ -425,6 +429,10 @@ def default_participant():
"snooper_enabled": False,
"ethereum_metrics_exporter_enabled": False,
"count": 1,
"prometheus_config": {
"scrape_interval": "15s",
"labels": None,
},
}


Expand All @@ -441,6 +449,10 @@ def get_default_mev_params():
"mev_flood_image": "flashbots/mev-flood",
"mev_flood_extra_args": [],
"mev_flood_seconds_per_bundle": 15,
"mev_builder_prometheus_config": {
"scrape_interval": "15s",
"labels": None,
},
}


Expand Down Expand Up @@ -546,6 +558,9 @@ def enrich_mev_extra_params(parsed_arguments_dict, mev_prefix, mev_port, mev_typ
+ genesis_constants.PRE_FUNDED_ACCOUNTS[0].private_key
},
"validator_count": 0,
"prometheus_config": parsed_arguments_dict["mev_params"][
"mev_builder_prometheus_config"
],
}
)

Expand Down
10 changes: 10 additions & 0 deletions src/participant_network.star
Original file line number Diff line number Diff line change
Expand Up @@ -207,6 +207,11 @@ def launch_participant_network(
participant.el_extra_env_vars,
)

# Add participant el additional prometheus metrics
for metrics_info in el_client_context.el_metrics_info:
if metrics_info != None:
metrics_info["config"] = participant.prometheus_config

all_el_client_contexts.append(el_client_context)

plan.print("Successfully added {0} EL participants".format(num_participants))
Expand Down Expand Up @@ -343,6 +348,11 @@ def launch_participant_network(
participant.validator_extra_params,
)

# Add participant cl additional prometheus labels
for metrics_info in cl_client_context.cl_nodes_metrics_info:
if metrics_info != None:
metrics_info["config"] = participant.prometheus_config

all_cl_client_contexts.append(cl_client_context)

ethereum_metrics_exporter_context = None
Expand Down
75 changes: 59 additions & 16 deletions src/prometheus/prometheus_launcher.star
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,16 @@ shared_utils = import_module("../shared_utils/shared_utils.star")

SERVICE_NAME = "prometheus"

PROMETHEUS_DEFAULT_SCRAPE_INTERVAL = "15s"

EXECUTION_CLIENT_TYPE = "execution"
BEACON_CLIENT_TYPE = "beacon"
VALIDATOR_CLIENT_TYPE = "validator"

METRICS_INFO_NAME_KEY = "name"
METRICS_INFO_URL_KEY = "url"
METRICS_INFO_PATH_KEY = "path"
METRICS_INFO_ADDITIONAL_CONFIG_KEY = "config"

# TODO(old) I'm not sure if we should use latest version or ping an specific version instead
IMAGE_NAME = "prom/prometheus:latest"
Expand Down Expand Up @@ -95,16 +98,30 @@ def new_config_template_data(
for context in el_client_contexts:
if len(context.el_metrics_info) >= 1 and context.el_metrics_info[0] != None:
execution_metrics_info = context.el_metrics_info[0]
scrape_interval = PROMETHEUS_DEFAULT_SCRAPE_INTERVAL
labels = {
"service": context.service_name,
"client_type": EXECUTION_CLIENT_TYPE,
"client_name": context.client_name,
}
additional_config = execution_metrics_info[
METRICS_INFO_ADDITIONAL_CONFIG_KEY
]
if additional_config != None:
if additional_config.labels != None:
labels.update(additional_config.labels)
if (
additional_config.scrape_interval != None
and additional_config.scrape_interval != ""
):
scrape_interval = additional_config.scrape_interval
metrics_jobs.append(
new_metrics_job(
job_name=execution_metrics_info[METRICS_INFO_NAME_KEY],
endpoint=execution_metrics_info[METRICS_INFO_URL_KEY],
metrics_path=execution_metrics_info[METRICS_INFO_PATH_KEY],
labels={
"service": context.service_name,
"client_type": EXECUTION_CLIENT_TYPE,
"client_name": context.client_name,
},
labels=labels,
scrape_interval=scrape_interval,
)
)
# Adding consensus clients metrics jobs
Expand All @@ -115,16 +132,28 @@ def new_config_template_data(
):
# Adding beacon node metrics
beacon_metrics_info = context.cl_nodes_metrics_info[0]
scrape_interval = PROMETHEUS_DEFAULT_SCRAPE_INTERVAL
labels = {
"service": context.beacon_service_name,
"client_type": BEACON_CLIENT_TYPE,
"client_name": context.client_name,
}
additional_config = beacon_metrics_info[METRICS_INFO_ADDITIONAL_CONFIG_KEY]
if additional_config != None:
if additional_config.labels != None:
labels.update(additional_config.labels)
if (
additional_config.scrape_interval != None
and additional_config.scrape_interval != ""
):
scrape_interval = additional_config.scrape_interval
metrics_jobs.append(
new_metrics_job(
job_name=beacon_metrics_info[METRICS_INFO_NAME_KEY],
endpoint=beacon_metrics_info[METRICS_INFO_URL_KEY],
metrics_path=beacon_metrics_info[METRICS_INFO_PATH_KEY],
labels={
"service": context.beacon_service_name,
"client_type": BEACON_CLIENT_TYPE,
"client_name": context.client_name,
},
labels=labels,
scrape_interval=scrape_interval,
)
)
if (
Expand All @@ -133,16 +162,30 @@ def new_config_template_data(
):
# Adding validator node metrics
validator_metrics_info = context.cl_nodes_metrics_info[1]
scrape_interval = PROMETHEUS_DEFAULT_SCRAPE_INTERVAL
labels = {
"service": context.validator_service_name,
"client_type": VALIDATOR_CLIENT_TYPE,
"client_name": context.client_name,
}
additional_config = validator_metrics_info[
METRICS_INFO_ADDITIONAL_CONFIG_KEY
]
if additional_config != None:
if additional_config.labels != None:
labels.update(additional_config.labels)
if (
additional_config.scrape_interval != None
and additional_config.scrape_interval != ""
):
scrape_interval = additional_config.scrape_interval
metrics_jobs.append(
new_metrics_job(
job_name=validator_metrics_info[METRICS_INFO_NAME_KEY],
endpoint=validator_metrics_info[METRICS_INFO_URL_KEY],
metrics_path=validator_metrics_info[METRICS_INFO_PATH_KEY],
labels={
"service": context.validator_service_name,
"client_type": VALIDATOR_CLIENT_TYPE,
"client_name": context.client_name,
},
labels=labels,
scrape_interval=scrape_interval,
)
)

Expand Down Expand Up @@ -179,7 +222,7 @@ def new_metrics_job(
endpoint,
metrics_path,
labels,
scrape_interval="15s",
scrape_interval=PROMETHEUS_DEFAULT_SCRAPE_INTERVAL,
):
return {
"Name": job_name,
Expand Down

0 comments on commit e9bbc7d

Please sign in to comment.