fix Electra aggregated attestation gossip verification committee index (
tersec authored Dec 29, 2024
1 parent c0108c2 commit 1778979
Showing 8 changed files with 34 additions and 25 deletions.
25 changes: 12 additions & 13 deletions beacon_chain/consensus_object_pools/data_column_quarantine.nim
@@ -27,7 +27,7 @@ type
supernode*: bool
custody_columns*: seq[ColumnIndex]
onDataColumnSidecarCallback*: OnDataColumnSidecarCallback

DataColumnFetchRecord* = object
block_root*: Eth2Digest
indices*: seq[ColumnIndex]
@@ -48,15 +48,15 @@ func put*(quarantine: var DataColumnQuarantine,
# insert -> block resolve -> data column insert, which leaves
# garbage data columns.
#
# This also therefore automatically garbage-collects otherwise valid
# data columns that are correctly signed, point to either correct block
# root which isn't ever seen, and then for any reason simply never used.
var oldest_column_key: DataColumnIdentifier
for k in quarantine.data_columns.keys:
oldest_column_key = k
break
quarantine.data_columns.del(oldest_column_key)
let block_root =
hash_tree_root(dataColumnSidecar.signed_block_header.message)
discard quarantine.data_columns.hasKeyOrPut(
DataColumnIdentifier(block_root: block_root,
@@ -91,18 +91,17 @@ func peekColumnIndices*(quarantine: DataColumnQuarantine,
indices

func gatherDataColumns*(quarantine: DataColumnQuarantine,
- digest: Eth2Digest):
- seq[ref DataColumnSidecar] =
- # Returns the current data columns quried by a
- # block header
+ digest: Eth2Digest):
+ seq[ref DataColumnSidecar] =
+ # Returns the current data columns queried by a block header
var columns: seq[ref DataColumnSidecar]
for i in quarantine.custody_columns:
let dc_identifier =
DataColumnIdentifier(
block_root: digest,
index: i)
if quarantine.data_columns.hasKey(dc_identifier):
let value =
quarantine.data_columns.getOrDefault(dc_identifier,
default(ref DataColumnSidecar))
columns.add(value)
@@ -134,7 +133,7 @@ func hasMissingDataColumns*(quarantine: DataColumnQuarantine,
# root request columns over RPC.
var col_counter = 0
for idx in quarantine.custody_columns:
let dc_identifier =
DataColumnIdentifier(
block_root: blck.root,
index: idx)
@@ -155,7 +154,7 @@ func hasEnoughDataColumns*(quarantine: DataColumnQuarantine,
# if it receives atleast 50%+ gossip and RPC

# Once 50%+ columns are available we can use this function to
# check it, and thereby check column reconstructability, right from
# gossip validation, consequently populating the quarantine with
# rest of the data columns.
if quarantine.supernode:
Expand All @@ -165,7 +164,7 @@ func hasEnoughDataColumns*(quarantine: DataColumnQuarantine,
return true
else:
for i in quarantine.custody_columns:
let dc_identifier =
DataColumnIdentifier(
block_root: blck.root,
index: i)
@@ -187,4 +186,4 @@ func dataColumnFetchRecord*(quarantine: DataColumnQuarantine,
if not quarantine.data_columns.hasKey(
dc_id):
indices.add(idx)
DataColumnFetchRecord(block_root: blck.root, indices: indices)
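The `hasEnoughDataColumns` comments above describe the 50 % rule: because data columns are erasure-coded, a node that has received at least half of the extended column set can reconstruct the rest, so gossip validation can already treat the block's data as retrievable. A minimal sketch of that threshold check, assuming the PeerDAS parameter `NUMBER_OF_COLUMNS = 128`; the names and constant here are illustrative, not the quarantine's actual API:

```nim
# Illustrative only: mirrors the idea behind `hasEnoughDataColumns`,
# not the quarantine's actual implementation or field names.
const NumberOfColumns = 128  # assumed value of the PeerDAS NUMBER_OF_COLUMNS parameter

func canReconstruct(availableColumns: int): bool =
  # The erasure-coded extension lets a node rebuild every column once at
  # least half of the extended set has been received.
  availableColumns * 2 >= NumberOfColumns

when isMainModule:
  doAssert not canReconstruct(63)
  doAssert canReconstruct(64)
```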
2 changes: 1 addition & 1 deletion beacon_chain/fork_choice/proto_array.nim
@@ -592,7 +592,7 @@ func propagateInvalidity*(
if parentPhysicalIdx < 0 or parentPhysicalIdx >= self.nodes.len:
continue

- # Invalidity transmits to all descendents
+ # Invalidity transmits to all descendants
if self.nodes.buf[parentPhysicalIdx].invalid:
self.nodes.buf[nodePhysicalIdx].invalid = true
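`propagateInvalidity` relies on the proto array storing a parent before all of its descendants, so a single forward pass is enough to push the `invalid` flag down the tree, as the corrected comment says. A self-contained sketch of that idea, using simplified illustrative types rather than fork choice's real node layout:

```nim
# Sketch only: a simplified stand-in for proto_array's invalidity propagation.
type SketchNode = object
  parent: int    # index of the parent node, -1 for the root
  invalid: bool

proc propagateInvalidity(nodes: var seq[SketchNode]) =
  # Parents come before their descendants, so one forward pass suffices.
  for i in 0 ..< nodes.len:
    let p = nodes[i].parent
    if p >= 0 and p < nodes.len and nodes[p].invalid:
      nodes[i].invalid = true

when isMainModule:
  var chain = @[
    SketchNode(parent: -1, invalid: true),  # invalid root
    SketchNode(parent: 0),
    SketchNode(parent: 1)]
  propagateInvalidity(chain)
  doAssert chain[1].invalid and chain[2].invalid
```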

2 changes: 1 addition & 1 deletion beacon_chain/gossip_processing/block_processor.nim
@@ -351,7 +351,7 @@ proc getExecutionValidity(
of PayloadExecutionStatus.invalid,
PayloadExecutionStatus.invalid_block_hash:
# Blocks come either from gossip or request manager requests. In the
- # former case, they've passed libp2p gosisp validation which implies
+ # former case, they've passed libp2p gossip validation which implies
# correct signature for correct proposer,which makes spam expensive,
# while for the latter, spam is limited by the request manager.
info "execution payload invalid from EL client newPayload",
9 changes: 7 additions & 2 deletions beacon_chain/gossip_processing/gossip_validation.nim
@@ -1113,7 +1113,7 @@ proc validateAttestation*(
# [REJECT] The committee index is within the expected range -- i.e.
# data.index < get_committee_count_per_slot(state, data.target.epoch).
let committee_index = block:
- let idx = shufflingRef.get_committee_index(attestation.data.index)
+ let idx = shufflingRef.get_committee_index(attestation.committee_index)
if idx.isErr():
return pool.checkedReject(
"Attestation: committee index not within expected range")
@@ -1231,7 +1231,12 @@ proc validateAggregate*(
# [REJECT] The committee index is within the expected range -- i.e.
# data.index < get_committee_count_per_slot(state, data.target.epoch).
let committee_index = block:
- let idx = shufflingRef.get_committee_index(aggregate.data.index)
+ when signedAggregateAndProof is electra.SignedAggregateAndProof:
+   let idx = get_committee_index_one(aggregate.committee_bits)
+ elif signedAggregateAndProof is phase0.SignedAggregateAndProof:
+   let idx = shufflingRef.get_committee_index(aggregate.data.index)
+ else:
+   static: doAssert false
if idx.isErr():
return pool.checkedReject(
"Attestation: committee index not within expected range")
2 changes: 1 addition & 1 deletion beacon_chain/networking/eth2_network.nim
@@ -960,7 +960,7 @@ proc readResponse(conn: Connection, peer: Peer,
var results: MsgType
while true:
# Because we interleave networking with response processing, it may
- # happen that reading all chunks takes longer than a strict dealine
+ # happen that reading all chunks takes longer than a strict deadline
# timeout would allow, so we allow each chunk a new timeout instead.
# The problem is exacerbated by the large number of round-trips to the
# poll loop that each future along the way causes.
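The comment above explains the design choice: because chunk reads interleave with response processing, each chunk gets a fresh timeout rather than the whole response sharing one strict deadline. A rough sketch of that pattern using the standard library's `asyncdispatch`; the real code uses chronos and different names, so everything here is illustrative:

```nim
import std/asyncdispatch

proc readChunk(i: int): Future[int] {.async.} =
  await sleepAsync(10)  # stand-in for a single network read
  return i

proc readAllChunks(n: int): Future[seq[int]] {.async.} =
  for i in 0 ..< n:
    let fut = readChunk(i)
    # Each chunk gets its own deadline, so a slow but progressing stream is
    # not cut off merely because the total time exceeds one strict deadline.
    if not await fut.withTimeout(100):
      raise newException(IOError, "chunk " & $i & " timed out")
    result.add fut.read()

when isMainModule:
  echo waitFor readAllChunks(3)  # prints @[0, 1, 2]
```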
2 changes: 1 addition & 1 deletion docs/the_nimbus_book/src/eth1.md
@@ -15,7 +15,7 @@ See the [execution client comparison](https://ethereum.org/en/developers/docs/no

### 1. Install execution client

- Select an execution client and install it, configuring it such that that the authenticated JSON-RPC interface is enabled and a JWT secret file is created.
+ Select an execution client and install it, configuring it such that the authenticated JSON-RPC interface is enabled and a JWT secret file is created.

=== "Nimbus"

10 changes: 5 additions & 5 deletions ncli/ncli_testnet.nim
@@ -258,7 +258,7 @@ func `as`(blk: BlockObject, T: type bellatrix.ExecutionPayloadHeader): T =
state_root: blk.stateRoot as Eth2Digest,
receipts_root: blk.receiptsRoot as Eth2Digest,
logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)),
- prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE), # Is BE correct here?
+ prev_randao: Eth2Digest(data: blk.difficulty.toBytesBE), # Is BE correct here?
block_number: uint64 blk.number,
gas_limit: uint64 blk.gasLimit,
gas_used: uint64 blk.gasUsed,
@@ -274,7 +274,7 @@ func `as`(blk: BlockObject, T: type capella.ExecutionPayloadHeader): T =
state_root: blk.stateRoot as Eth2Digest,
receipts_root: blk.receiptsRoot as Eth2Digest,
logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)),
- prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE),
+ prev_randao: Eth2Digest(data: blk.difficulty.toBytesBE),
block_number: uint64 blk.number,
gas_limit: uint64 blk.gasLimit,
gas_used: uint64 blk.gasUsed,
@@ -291,7 +291,7 @@ func `as`(blk: BlockObject, T: type deneb.ExecutionPayloadHeader): T =
state_root: blk.stateRoot as Eth2Digest,
receipts_root: blk.receiptsRoot as Eth2Digest,
logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)),
- prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE),
+ prev_randao: Eth2Digest(data: blk.difficulty.toBytesBE),
block_number: uint64 blk.number,
gas_limit: uint64 blk.gasLimit,
gas_used: uint64 blk.gasUsed,
@@ -310,7 +310,7 @@ func `as`(blk: BlockObject, T: type electra.ExecutionPayloadHeader): T =
state_root: blk.stateRoot as Eth2Digest,
receipts_root: blk.receiptsRoot as Eth2Digest,
logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)),
- prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE),
+ prev_randao: Eth2Digest(data: blk.difficulty.toBytesBE),
block_number: uint64 blk.number,
gas_limit: uint64 blk.gasLimit,
gas_used: uint64 blk.gasUsed,
@@ -329,7 +329,7 @@ func `as`(blk: BlockObject, T: type fulu.ExecutionPayloadHeader): T =
state_root: blk.stateRoot as Eth2Digest,
receipts_root: blk.receiptsRoot as Eth2Digest,
logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)),
- prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE),
+ prev_randao: Eth2Digest(data: blk.difficulty.toBytesBE),
block_number: uint64 blk.number,
gas_limit: uint64 blk.gasLimit,
gas_used: uint64 blk.gasUsed,
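The only change across these hunks is the switch to `toBytesBE`, which serializes the block's 256-bit difficulty into big-endian bytes before they are reused as `prev_randao`. For intuition only, a tiny big-endian encoder over `uint64` (the real call operates on the 256-bit difficulty type):

```nim
# Sketch only: big-endian encoding of a 64-bit value, analogous to what
# `toBytesBE` does for the 256-bit difficulty.
func toBytesBE64(x: uint64): array[8, byte] =
  for i in 0 ..< 8:
    result[i] = byte((x shr (8 * (7 - i))) and 0xff'u64)

when isMainModule:
  let bytes = toBytesBE64(0x0102'u64)
  doAssert bytes[6] == 0x01'u8 and bytes[7] == 0x02'u8
```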
7 changes: 6 additions & 1 deletion ncli/resttest.nim
@@ -5,6 +5,8 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

+ {.push raises: [].}

import
std/[strutils, os, options, uri, json, tables],
results,
@@ -1031,7 +1033,7 @@ proc workerLoop(address: TransportAddress, uri: Uri, worker: int,
worker = worker
return
except CatchableError as exc:
warn "Unexpected exception while running test test run", host = hostname,
warn "Unexpected exception while running test run", host = hostname,
error_name = exc.name, error_msg = exc.msg, index = index,
worker = worker
return
@@ -1156,6 +1158,9 @@ proc run(conf: RestTesterConf): int =
waitFor(checkConnection(conf, uri))
except ConnectionError:
return 1
+ except CatchableError as exc:
+   fatal "Unexpected test failure", error_name = exc.name, error_msg = exc.msg
+   return 1

try:
return waitFor(startTests(conf, uri, jnodes))
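Two related changes in this file: the new `{.push raises: [].}` at the top makes any untracked exception a compile-time error, and `run` gains an explicit `except CatchableError` fallback so an unexpected failure is logged via `fatal` and turned into a non-zero exit code instead of escaping. A compact sketch of that pattern, with placeholder procs rather than the tool's real entry points:

```nim
# Sketch only: the raises-tracking pattern applied in resttest.nim, shown
# with placeholder procs instead of the tool's real entry points.
{.push raises: [].}

proc connectOrFail() {.raises: [IOError].} =
  raise newException(IOError, "connection refused")

proc run(): int =
  try:
    connectOrFail()
    result = 0
  except IOError:
    result = 1          # known failure mode, handled explicitly
  except CatchableError:
    result = 1          # catch-all so nothing escapes a `raises: []` proc

{.pop.}

when isMainModule:
  doAssert run() == 1
```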
