ProposerVM Extend windows 0 - Cleanup #2404

Merged: 12 commits, Dec 5, 2023
28 changes: 16 additions & 12 deletions chains/manager.go
@@ -769,12 +769,14 @@ func (m *manager) createAvalancheChain(
 	// using.
 	var vmWrappingProposerVM block.ChainVM = proposervm.New(
 		vmWrappedInsideProposerVM,
-		m.ApricotPhase4Time,
-		m.ApricotPhase4MinPChainHeight,
-		minBlockDelay,
-		numHistoricalBlocks,
-		m.stakingSigner,
-		m.stakingCert,
+		proposervm.Config{
+			ActivationTime:      m.ApricotPhase4Time,
+			MinimumPChainHeight: m.ApricotPhase4MinPChainHeight,
+			MinBlkDelay:         minBlockDelay,
+			NumHistoricalBlocks: numHistoricalBlocks,
+			StakingLeafSigner:   m.stakingSigner,
+			StakingCertLeaf:     m.stakingCert,
+		},
 	)
 
 	if m.MeterVMEnabled {
@@ -1112,12 +1114,14 @@ func (m *manager) createSnowmanChain(

 	vm = proposervm.New(
 		vm,
-		m.ApricotPhase4Time,
-		m.ApricotPhase4MinPChainHeight,
-		minBlockDelay,
-		numHistoricalBlocks,
-		m.stakingSigner,
-		m.stakingCert,
+		proposervm.Config{
+			ActivationTime:      m.ApricotPhase4Time,
+			MinimumPChainHeight: m.ApricotPhase4MinPChainHeight,
+			MinBlkDelay:         minBlockDelay,
+			NumHistoricalBlocks: numHistoricalBlocks,
+			StakingLeafSigner:   m.stakingSigner,
+			StakingCertLeaf:     m.stakingCert,
+		},
 	)
 
 	if m.MeterVMEnabled {
14 changes: 8 additions & 6 deletions vms/proposervm/batched_vm_test.go
@@ -1020,12 +1020,14 @@ func initTestRemoteProposerVM(

 	proVM := New(
 		coreVM,
-		proBlkStartTime,
-		0,
-		DefaultMinBlockDelay,
-		DefaultNumHistoricalBlocks,
-		pTestSigner,
-		pTestCert,
+		Config{
+			ActivationTime:      proBlkStartTime,
+			MinimumPChainHeight: 0,
+			MinBlkDelay:         DefaultMinBlockDelay,
+			NumHistoricalBlocks: DefaultNumHistoricalBlocks,
+			StakingLeafSigner:   pTestSigner,
+			StakingCertLeaf:     pTestCert,
+		},
 	)
 
 	valState := &validators.TestState{
133 changes: 87 additions & 46 deletions vms/proposervm/block.go
@@ -124,12 +124,11 @@ func (p *postForkCommonComponents) Verify(
 	// If the node is currently syncing - we don't assume that the P-chain has
 	// been synced up to this point yet.
 	if p.vm.consensusState == snow.NormalOp {
-		childID := child.ID()
 		currentPChainHeight, err := p.vm.ctx.ValidatorState.GetCurrentHeight(ctx)
 		if err != nil {
 			p.vm.ctx.Log.Error("block verification failed",
 				zap.String("reason", "failed to get current P-Chain height"),
-				zap.Stringer("blkID", childID),
+				zap.Stringer("blkID", child.ID()),
 				zap.Error(err),
 			)
 			return err
@@ -142,28 +141,20 @@
 			)
 		}
 
-		childHeight := child.Height()
-		proposerID := child.Proposer()
-		minDelay, err := p.vm.Windower.Delay(ctx, childHeight, parentPChainHeight, proposerID, proposer.MaxVerifyWindows)
+		delay, err := p.verifyBlockDelay(ctx, parentTimestamp, parentPChainHeight, child)
 		if err != nil {
 			return err
 		}
 
-		delay := childTimestamp.Sub(parentTimestamp)
-		if delay < minDelay {
-			return errProposerWindowNotStarted
-		}
-
 		// Verify the signature of the node
 		shouldHaveProposer := delay < proposer.MaxVerifyDelay
 		if err := child.SignedBlock.Verify(shouldHaveProposer, p.vm.ctx.ChainID); err != nil {
 			return err
 		}
 
 		p.vm.ctx.Log.Debug("verified post-fork block",
-			zap.Stringer("blkID", childID),
+			zap.Stringer("blkID", child.ID()),
 			zap.Time("parentTimestamp", parentTimestamp),
-			zap.Duration("minDelay", minDelay),
 			zap.Time("blockTimestamp", childTimestamp),
 		)
 	}
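
Editor's note on this hunk: the timing rule itself is unchanged by the refactor. A child block is valid only once its timestamp is at least the proposer-specific minimum delay past its parent's, and a proposer signature is required only while the block is early enough that a specific proposer is expected. The sketch below is a minimal stand-alone model of that rule, not the library API: maxVerifyDelay is an assumed example constant standing in for proposer.MaxVerifyDelay, and minDelay is passed in directly where the real code asks the Windower.

package main

import (
	"fmt"
	"time"
)

// Assumed example value; the real constant lives in the proposer package.
const maxVerifyDelay = 30 * time.Second

// classify mirrors the verification rule above: too-early blocks are
// rejected, early-enough blocks must be signed by the scheduled proposer,
// and sufficiently late blocks may be unsigned.
func classify(parentTime, childTime time.Time, minDelay time.Duration) (string, error) {
	delay := childTime.Sub(parentTime)
	switch {
	case delay < minDelay:
		return "", fmt.Errorf("proposer window not started: delay %s < min %s", delay, minDelay)
	case delay < maxVerifyDelay:
		return "signed block required", nil
	default:
		return "unsigned block allowed", nil
	}
}

func main() {
	parent := time.Unix(0, 0)
	for _, d := range []time.Duration{2 * time.Second, 10 * time.Second, 45 * time.Second} {
		kind, err := classify(parent, parent.Add(d), 5*time.Second)
		fmt.Printf("delay=%s -> %q err=%v\n", d, kind, err)
	}
}
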
@@ -202,37 +193,15 @@ func (p *postForkCommonComponents) buildChild(
 		return nil, err
 	}
 
-	delay := newTimestamp.Sub(parentTimestamp)
-	if delay < proposer.MaxBuildDelay {
-		parentHeight := p.innerBlk.Height()
-		proposerID := p.vm.ctx.NodeID
-		minDelay, err := p.vm.Windower.Delay(ctx, parentHeight+1, parentPChainHeight, proposerID, proposer.MaxBuildWindows)
-		if err != nil {
-			p.vm.ctx.Log.Error("unexpected build block failure",
-				zap.String("reason", "failed to calculate required timestamp delay"),
-				zap.Stringer("parentID", parentID),
-				zap.Error(err),
-			)
-			return nil, err
-		}
-
-		if delay < minDelay {
-			// It's not our turn to propose a block yet. This is likely caused
-			// by having previously notified the consensus engine to attempt to
-			// build a block on top of a block that is no longer the preferred
-			// block.
-			p.vm.ctx.Log.Debug("build block dropped",
-				zap.Time("parentTimestamp", parentTimestamp),
-				zap.Duration("minDelay", minDelay),
-				zap.Time("blockTimestamp", newTimestamp),
-			)
-
-			// In case the inner VM only issued one pendingTxs message, we
-			// should attempt to re-handle that once it is our turn to build the
-			// block.
-			p.vm.notifyInnerBlockReady()
-			return nil, errProposerWindowNotStarted
-		}
-	}
+	shouldBuildUnsignedBlock, err := p.shouldBuildUnsignedBlock(
+		ctx,
+		parentID,
+		parentTimestamp,
+		parentPChainHeight,
+		newTimestamp,
+	)
+	if err != nil {
+		return nil, err
+	}
 
 	var innerBlock snowman.Block
@@ -249,7 +218,7 @@

 	// Build the child
 	var statelessChild block.SignedBlock
-	if delay >= proposer.MaxVerifyDelay {
+	if shouldBuildUnsignedBlock {
 		statelessChild, err = block.BuildUnsigned(
 			parentID,
 			newTimestamp,
@@ -261,10 +230,10 @@
 			parentID,
 			newTimestamp,
 			pChainHeight,
-			p.vm.stakingCertLeaf,
+			p.vm.StakingCertLeaf,
 			innerBlock.Bytes(),
 			p.vm.ctx.ChainID,
-			p.vm.stakingLeafSigner,
+			p.vm.StakingLeafSigner,
 		)
 	}
 	if err != nil {
@@ -334,3 +303,75 @@ func verifyIsNotOracleBlock(ctx context.Context, b snowman.Block) error {
 		return err
 	}
 }

+func (p *postForkCommonComponents) verifyBlockDelay(
+	ctx context.Context,
+	parentTimestamp time.Time,
+	parentPChainHeight uint64,
+	blk *postForkBlock,
+) (time.Duration, error) {
+	var (
+		blkTimestamp = blk.Timestamp()
+		childHeight  = blk.Height()
+		proposerID   = blk.Proposer()
+	)
+	minDelay, err := p.vm.Windower.Delay(ctx, childHeight, parentPChainHeight, proposerID, proposer.MaxVerifyWindows)
+	if err != nil {
+		return 0, err
+	}
+
+	delay := blkTimestamp.Sub(parentTimestamp)
+	if delay < minDelay {
+		return 0, errProposerWindowNotStarted
+	}
+
+	return delay, nil
+}
+
+func (p *postForkCommonComponents) shouldBuildUnsignedBlock(
+	ctx context.Context,
+	parentID ids.ID,
+	parentTimestamp time.Time,
+	parentPChainHeight uint64,
+	newTimestamp time.Time,
+) (bool, error) {
+	delay := newTimestamp.Sub(parentTimestamp)
+	if delay >= proposer.MaxBuildDelay {
+		// time for any node to build an unsigned block
+		return true, nil
+	}
+
+	parentHeight := p.innerBlk.Height()
+	proposerID := p.vm.ctx.NodeID
+	minDelay, err := p.vm.Windower.Delay(ctx, parentHeight+1, parentPChainHeight, proposerID, proposer.MaxBuildWindows)
+	if err != nil {
+		p.vm.ctx.Log.Error("unexpected build block failure",
+			zap.String("reason", "failed to calculate required timestamp delay"),
+			zap.Stringer("parentID", parentID),
+			zap.Error(err),
+		)
+		return false, err
+	}
+
+	if delay >= minDelay {
+		// it's time for this node to propose a block. It'll be signed or
+		// unsigned depending on the delay
+		return delay >= proposer.MaxVerifyDelay, nil
+	}
+
+	// It's not our turn to propose a block yet. This is likely caused
+	// by having previously notified the consensus engine to attempt to
+	// build a block on top of a block that is no longer the preferred
+	// block.
+	p.vm.ctx.Log.Debug("build block dropped",
+		zap.Time("parentTimestamp", parentTimestamp),
+		zap.Duration("minDelay", minDelay),
+		zap.Time("blockTimestamp", newTimestamp),
+	)
+
+	// In case the inner VM only issued one pendingTxs message, we
+	// should attempt to re-handle that once it is our turn to build the
+	// block.
+	p.vm.notifyInnerBlockReady()
+	return false, errProposerWindowNotStarted
+}
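
The new shouldBuildUnsignedBlock helper reads as a three-way decision: once the delay reaches proposer.MaxBuildDelay any node may build unsigned; once this node's own window has opened it builds, signing only while the delay is still under proposer.MaxVerifyDelay; otherwise the attempt is dropped until the window opens. A compressed, stand-alone sketch of that decision follows, with assumed example constants and minDelay supplied directly rather than via Windower.Delay:

package main

import (
	"errors"
	"fmt"
	"time"
)

// Assumed example values standing in for the proposer package constants.
const (
	maxVerifyDelay = 30 * time.Second
	maxBuildDelay  = 60 * time.Second
)

var errWindowNotStarted = errors.New("proposer window not started")

// shouldBuildUnsigned models the decision above: (unsigned?, error).
func shouldBuildUnsigned(delay, minDelay time.Duration) (bool, error) {
	if delay >= maxBuildDelay {
		return true, nil // open to every node; no signature needed
	}
	if delay >= minDelay {
		return delay >= maxVerifyDelay, nil // our window: sign only if still early
	}
	return false, errWindowNotStarted // too early; retry when scheduled
}

func main() {
	for _, d := range []time.Duration{time.Second, 10 * time.Second, 40 * time.Second, 90 * time.Second} {
		unsigned, err := shouldBuildUnsigned(d, 5*time.Second)
		fmt.Printf("delay=%s unsigned=%t err=%v\n", d, unsigned, err)
	}
}
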
8 changes: 5 additions & 3 deletions vms/proposervm/block_test.go
@@ -60,15 +60,17 @@ func TestPostForkCommonComponents_buildChild(t *testing.T) {
 	pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
 	require.NoError(err)
 	vm := &VM{
+		Config: Config{
+			StakingCertLeaf:   &staking.Certificate{},
+			StakingLeafSigner: pk,
+		},
 		ChainVM:        innerVM,
 		blockBuilderVM: innerBlockBuilderVM,
 		ctx: &snow.Context{
 			ValidatorState: vdrState,
 			Log:            logging.NoLog{},
 		},
-		Windower:          windower,
-		stakingCertLeaf:   &staking.Certificate{},
-		stakingLeafSigner: pk,
+		Windower: windower,
 	}
 
 	blk := &postForkCommonComponents{
32 changes: 32 additions & 0 deletions vms/proposervm/config.go
@@ -0,0 +1,32 @@
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package proposervm
+
+import (
+	"crypto"
+	"time"
+
+	"github.com/ava-labs/avalanchego/staking"
+)
+
+type Config struct {
+	// Time at which the proposerVM activates its congestion control mechanism
+	ActivationTime time.Time
+
+	// Minimal P-chain height referenced upon block building
+	MinimumPChainHeight uint64
+
+	// Configurable minimal delay between consecutively issued blocks
+	MinBlkDelay time.Duration
+
+	// Maximal number of blocks indexed.
+	// Zero signals that all blocks are indexed.
+	NumHistoricalBlocks uint64
+
+	// Block signer
+	StakingLeafSigner crypto.Signer
+
+	// Block certificate
+	StakingCertLeaf *staking.Certificate
+}
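
Put together with the chains/manager.go hunks above, wrapping an inner VM after this PR looks like the sketch below. The helper name and the field values are illustrative placeholders, not recommended settings; only the New(vm, Config{...}) shape is taken from the diff.

package example

import (
	"crypto"
	"time"

	"github.com/ava-labs/avalanchego/snow/engine/snowman/block"
	"github.com/ava-labs/avalanchego/staking"
	"github.com/ava-labs/avalanchego/vms/proposervm"
)

// wrapWithProposerVM shows the post-PR construction: one Config literal
// replaces the six positional arguments that New used to take.
func wrapWithProposerVM(
	innerVM block.ChainVM,
	signer crypto.Signer,
	cert *staking.Certificate,
) block.ChainVM {
	return proposervm.New(
		innerVM,
		proposervm.Config{
			ActivationTime:      time.Time{}, // zero time: active from genesis (illustrative)
			MinimumPChainHeight: 0,
			MinBlkDelay:         time.Second, // illustrative value
			NumHistoricalBlocks: 0, // zero: keep every block indexed
			StakingLeafSigner:   signer,
			StakingCertLeaf:     cert,
		},
	)
}
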
10 changes: 5 additions & 5 deletions vms/proposervm/height_indexed_vm.go
@@ -136,7 +136,7 @@ func (vm *VM) storeHeightEntry(height uint64, blkID ids.ID) error {
zap.Uint64("height", height),
)

if vm.numHistoricalBlocks == 0 {
if vm.NumHistoricalBlocks == 0 {
return nil
}

@@ -145,13 +145,13 @@
 	// is why <= is used rather than <. This prevents the user from only storing
 	// the last accepted block, which can never be safe due to the non-atomic
 	// commits between the proposervm database and the innerVM's database.
-	if blocksSinceFork <= vm.numHistoricalBlocks {
+	if blocksSinceFork <= vm.NumHistoricalBlocks {
 		return nil
 	}
 
 	// Note: heightToDelete is >= forkHeight, so it is guaranteed not to
 	// underflow.
-	heightToDelete := height - vm.numHistoricalBlocks - 1
+	heightToDelete := height - vm.NumHistoricalBlocks - 1
 	blockToDelete, err := vm.State.GetBlockIDAtHeight(heightToDelete)
 	if err == database.ErrNotFound {
 		// Block may have already been deleted. This can happen due to a
@@ -180,7 +180,7 @@ func (vm *VM) storeHeightEntry(height uint64, blkID ids.ID) error {

 // TODO: Support async deletion of old blocks.
 func (vm *VM) pruneOldBlocks() error {
-	if vm.numHistoricalBlocks == 0 {
+	if vm.NumHistoricalBlocks == 0 {
 		return nil
 	}
 
@@ -194,7 +194,7 @@
 	//
 	// Note: vm.lastAcceptedHeight is guaranteed to be >= height, so the
 	// subtraction can never underflow.
-	for vm.lastAcceptedHeight-height > vm.numHistoricalBlocks {
+	for vm.lastAcceptedHeight-height > vm.NumHistoricalBlocks {
 		blockToDelete, err := vm.State.GetBlockIDAtHeight(height)
 		if err != nil {
 			return err
 		}
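
A worked example of the retention window these hunks enforce: with NumHistoricalBlocks = 2 and lastAcceptedHeight = 10, the loop deletes stored heights until lastAcceptedHeight - height <= 2, so heights 8, 9, and 10 stay indexed. The toy model below invents the storage map purely for illustration; the real code walks heights in order against vm.State.

package main

import "fmt"

// prune models pruneOldBlocks' loop condition: delete every stored height h
// with lastAccepted-h > numHistorical; zero means keep everything.
func prune(stored map[uint64]bool, lastAccepted, numHistorical uint64) {
	if numHistorical == 0 {
		return
	}
	for h := range stored {
		if lastAccepted-h > numHistorical {
			delete(stored, h)
		}
	}
}

func main() {
	stored := map[uint64]bool{5: true, 6: true, 7: true, 8: true, 9: true, 10: true}
	prune(stored, 10, 2)
	fmt.Println(len(stored)) // 3: heights 8, 9, and 10 remain
}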