Payload ID caching #10481
@@ -0,0 +1,73 @@
package cache

import (
    "sync"

    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
)

const vIdLength = 8
const pIdLength = 8
const vpIdsLength = vIdLength + pIdLength

// ProposerPayloadIDsCache is a cache of proposer payload IDs.
// The key is the slot. The value is the concatenation of the proposer and payload IDs, 8 bytes each.
type ProposerPayloadIDsCache struct {
    slotToProposerAndPayloadIDs map[types.Slot][vpIdsLength]byte
    sync.RWMutex
}

// NewProposerPayloadIDsCache creates a new proposer payload IDs cache.
func NewProposerPayloadIDsCache() *ProposerPayloadIDsCache {
    return &ProposerPayloadIDsCache{
        slotToProposerAndPayloadIDs: make(map[types.Slot][vpIdsLength]byte),
    }
}

// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot types.Slot) (types.ValidatorIndex, [8]byte, bool) {
    f.RLock()
    defer f.RUnlock()
    ids, ok := f.slotToProposerAndPayloadIDs[slot]
    if !ok {
        return 0, [8]byte{}, false
    }
    vId := ids[:vIdLength]

    b := ids[vIdLength:]
    var pId [pIdLength]byte
    copy(pId[:], b)

    return types.ValidatorIndex(bytesutil.BytesToUint64BigEndian(vId)), pId, true
}

// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot types.Slot, vId types.ValidatorIndex, pId [8]byte) {
    f.Lock()
    defer f.Unlock()
    var vIdBytes [vIdLength]byte
    copy(vIdBytes[:], bytesutil.Uint64ToBytesBigEndian(uint64(vId)))
Review thread on this line:

Why big endian for these? We should probably stick to little endian across the repo except for engine API calls.

payloadID in the specs is [8]bytes by definition (https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md). I would think that we are better off without any conversion to an integer type.

big-endian works because it's not consensus, we use …
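For readers following the endianness discussion, here is a minimal round-trip sketch using the standard library's encoding/binary. It assumes Prysm's bytesutil.Uint64ToBytesBigEndian and bytesutil.BytesToUint64BigEndian wrap equivalent logic, which this diff itself does not show.

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    // Encode a validator index (uint64) as 8 big-endian bytes,
    // analogous to what the diff does via bytesutil.Uint64ToBytesBigEndian.
    var vIdBytes [8]byte
    binary.BigEndian.PutUint64(vIdBytes[:], 12345)

    // Decode it back, analogous to bytesutil.BytesToUint64BigEndian.
    fmt.Println(binary.BigEndian.Uint64(vIdBytes[:])) // prints 12345
}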
    var bytes [vpIdsLength]byte
    copy(bytes[:], append(vIdBytes[:], pId[:]...))

    _, ok := f.slotToProposerAndPayloadIDs[slot]
    // Ok to overwrite if the slot is already set but the payload ID is not set.
    // This combats the re-org case where the payload assignment could change within the epoch.
    if !ok || (ok && pId != [pIdLength]byte{}) {
        f.slotToProposerAndPayloadIDs[slot] = bytes
    }
}

// PrunePayloadIDs removes the payload ID entries that are older than the input slot.
func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot types.Slot) {
    f.Lock()
    defer f.Unlock()

    for s := range f.slotToProposerAndPayloadIDs {
        if slot > s {
            delete(f.slotToProposerAndPayloadIDs, s)
        }
    }
}
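As a usage illustration only (not part of the diff), here is a sketch of how the cache could be exercised from within the same package. The slot, validator index, and payload ID values are made up.

// usageSketch is illustrative only and assumes it lives in the same cache package.
func usageSketch() {
    c := NewProposerPayloadIDsCache()

    // Record that validator 7 is the proposer at slot 100 with payload ID 0x0102030405060708.
    c.SetProposerAndPayloadIDs(100, 7, [8]byte{1, 2, 3, 4, 5, 6, 7, 8})

    // Read the entry back; ok is false on a cache miss.
    vIdx, payloadID, ok := c.GetProposerPayloadIDs(100)
    _, _, _ = vIdx, payloadID, ok

    // Remove every entry older than slot 100.
    c.PrunePayloadIDs(100)
}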
In the event of a cache miss, shouldn't we be calling the engine API instead of just failing?

That's right, in the event of a cache miss, we return false and call the engine API. It doesn't fail.
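To make that reply concrete, here is a hedged sketch of the fallback pattern. The engineCaller interface and payloadIDForSlot helper are hypothetical stand-ins invented for illustration, not Prysm APIs; the only point is that a cache miss returns ok == false and the caller then asks the execution engine for a fresh payload ID instead of erroring.

package cache

import (
    types "github.com/prysmaticlabs/eth2-types"
)

// engineCaller is a hypothetical stand-in for the execution engine client.
type engineCaller interface {
    // PreparePayload is assumed to drive payload building through the engine API
    // (a forkchoice update with payload attributes) and return the new payload ID.
    PreparePayload(slot types.Slot) ([8]byte, error)
}

// payloadIDForSlot prefers the cached payload ID and falls back to the engine;
// a cache miss is not treated as an error.
func payloadIDForSlot(c *ProposerPayloadIDsCache, engine engineCaller, slot types.Slot) ([8]byte, error) {
    if _, pId, ok := c.GetProposerPayloadIDs(slot); ok && pId != [8]byte{} {
        return pId, nil
    }
    return engine.PreparePayload(slot)
}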